FreeBSD kernel kern code
vfs_bio.c
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2004 Poul-Henning Kamp
5 * Copyright (c) 1994,1997 John S. Dyson
6 * Copyright (c) 2013 The FreeBSD Foundation
7 * All rights reserved.
8 *
9 * Portions of this software were developed by Konstantin Belousov
10 * under sponsorship from the FreeBSD Foundation.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33
34/*
35 * this file contains a new buffer I/O scheme implementing a coherent
36 * VM object and buffer cache scheme. Pains have been taken to make
37 * sure that the performance degradation associated with schemes such
38 * as this is not realized.
39 *
40 * Author: John S. Dyson
41 * Significant help during the development and debugging phases
42 * was provided by David Greenman, also of the FreeBSD core team.
43 *
44 * see man buf(9) for more info.
45 */
46
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD$");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/asan.h>
53#include <sys/bio.h>
54#include <sys/bitset.h>
55#include <sys/boottrace.h>
56#include <sys/buf.h>
57#include <sys/conf.h>
58#include <sys/counter.h>
59#include <sys/devicestat.h>
60#include <sys/eventhandler.h>
61#include <sys/fail.h>
62#include <sys/ktr.h>
63#include <sys/limits.h>
64#include <sys/lock.h>
65#include <sys/malloc.h>
66#include <sys/mount.h>
67#include <sys/mutex.h>
68#include <sys/kernel.h>
69#include <sys/kthread.h>
70#include <sys/proc.h>
71#include <sys/racct.h>
72#include <sys/refcount.h>
73#include <sys/resourcevar.h>
74#include <sys/rwlock.h>
75#include <sys/smp.h>
76#include <sys/sysctl.h>
77#include <sys/syscallsubr.h>
78#include <sys/vmem.h>
79#include <sys/vmmeter.h>
80#include <sys/vnode.h>
81#include <sys/watchdog.h>
82#include <geom/geom.h>
83#include <vm/vm.h>
84#include <vm/vm_param.h>
85#include <vm/vm_kern.h>
86#include <vm/vm_object.h>
87#include <vm/vm_page.h>
88#include <vm/vm_pageout.h>
89#include <vm/vm_pager.h>
90#include <vm/vm_extern.h>
91#include <vm/vm_map.h>
92#include <vm/swap_pager.h>
93
94static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
95
96struct bio_ops bioops; /* I/O operation notification */
97
98struct buf_ops buf_ops_bio = {
99 .bop_name = "buf_ops_bio",
100 .bop_write = bufwrite,
101 .bop_strategy = bufstrategy,
102 .bop_sync = bufsync,
103 .bop_bdflush = bufbdflush,
104};
105
106struct bufqueue {
107 struct mtx_padalign bq_lock;
108 TAILQ_HEAD(, buf) bq_queue;
109 uint8_t bq_index;
110 uint16_t bq_subqueue;
111 int bq_len;
112} __aligned(CACHE_LINE_SIZE);
113
114#define BQ_LOCKPTR(bq) (&(bq)->bq_lock)
115#define BQ_LOCK(bq) mtx_lock(BQ_LOCKPTR((bq)))
116#define BQ_UNLOCK(bq) mtx_unlock(BQ_LOCKPTR((bq)))
117#define BQ_ASSERT_LOCKED(bq) mtx_assert(BQ_LOCKPTR((bq)), MA_OWNED)
118
119struct bufdomain {
120 struct bufqueue bd_subq[MAXCPU + 1]; /* Per-cpu sub queues + global */
121 struct bufqueue bd_dirtyq;
122 struct bufqueue *bd_cleanq;
123 struct mtx_padalign bd_run_lock;
124 /* Constants */
135 /* atomics */
136 int bd_wanted;
137 bool bd_shutdown;
138 int __aligned(CACHE_LINE_SIZE) bd_numdirtybuffers;
139 int __aligned(CACHE_LINE_SIZE) bd_running;
140 long __aligned(CACHE_LINE_SIZE) bd_bufspace;
141 int __aligned(CACHE_LINE_SIZE) bd_freebuffers;
142} __aligned(CACHE_LINE_SIZE);
143
144#define BD_LOCKPTR(bd) (&(bd)->bd_cleanq->bq_lock)
145#define BD_LOCK(bd) mtx_lock(BD_LOCKPTR((bd)))
146#define BD_UNLOCK(bd) mtx_unlock(BD_LOCKPTR((bd)))
147#define BD_ASSERT_LOCKED(bd) mtx_assert(BD_LOCKPTR((bd)), MA_OWNED)
148#define BD_RUN_LOCKPTR(bd) (&(bd)->bd_run_lock)
149#define BD_RUN_LOCK(bd) mtx_lock(BD_RUN_LOCKPTR((bd)))
150#define BD_RUN_UNLOCK(bd) mtx_unlock(BD_RUN_LOCKPTR((bd)))
151#define BD_DOMAIN(bd) (bd - bdomain)
152
153static char *buf; /* buffer header pool */
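/*
 * Buffer headers live in one contiguous region carved out by
 * kern_vfs_bio_buffer_alloc(); each struct buf is immediately followed
 * by its array of vm_page_t pointers sized for maxbcachebuf, so the
 * i'th header is located by the stride computation below.
 */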
154static struct buf *
155nbufp(unsigned i)
156{
157 return ((struct buf *)(buf + (sizeof(struct buf) +
158 sizeof(vm_page_t) * atop(maxbcachebuf)) * i));
159}
160
162
163/* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
164struct proc *bufdaemonproc;
165
166static void vm_hold_free_pages(struct buf *bp, int newbsize);
167static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
168 vm_offset_t to);
169static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
170static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
171 vm_page_t m);
172static void vfs_clean_pages_dirty_buf(struct buf *bp);
173static void vfs_setdirty_range(struct buf *bp);
174static void vfs_vmio_invalidate(struct buf *bp);
175static void vfs_vmio_truncate(struct buf *bp, int npages);
176static void vfs_vmio_extend(struct buf *bp, int npages, int size);
177static int vfs_bio_clcheck(struct vnode *vp, int size,
178 daddr_t lblkno, daddr_t blkno);
179static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int,
180 void (*)(struct buf *));
181static int buf_flush(struct vnode *vp, struct bufdomain *, int);
182static int flushbufqueues(struct vnode *, struct bufdomain *, int, int);
183static void buf_daemon(void);
184static __inline void bd_wakeup(void);
185static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
186static void bufkva_reclaim(vmem_t *, int);
187static void bufkva_free(struct buf *);
188static int buf_import(void *, void **, int, int, int);
189static void buf_release(void *, void **, int);
190static void maxbcachebuf_adjust(void);
191static inline struct bufdomain *bufdomain(struct buf *);
192static void bq_remove(struct bufqueue *bq, struct buf *bp);
193static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
194static int buf_recycle(struct bufdomain *, bool kva);
195static void bq_init(struct bufqueue *bq, int qindex, int cpu,
196 const char *lockname);
197static void bd_init(struct bufdomain *bd);
198static int bd_flushall(struct bufdomain *bd);
199static int sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS);
200static int sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS);
201
202static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
203int vmiodirenable = TRUE;
204SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
205 "Use the VM system for directory writes");
206long runningbufspace;
207SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
208 "Amount of presently outstanding async buffer io");
209SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
210 NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers");
211static counter_u64_t bufkvaspace;
212SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace,
213 "Kernel virtual memory used for buffers");
214static long maxbufspace;
215SYSCTL_PROC(_vfs, OID_AUTO, maxbufspace,
216 CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &maxbufspace,
217 __offsetof(struct bufdomain, bd_maxbufspace), sysctl_bufdomain_long, "L",
218 "Maximum allowed value of bufspace (including metadata)");
219static long bufmallocspace;
220SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
221 "Amount of malloced memory for buffers");
223SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
224 0, "Maximum amount of malloced memory for buffers");
225static long lobufspace;
226SYSCTL_PROC(_vfs, OID_AUTO, lobufspace,
227 CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &lobufspace,
228 __offsetof(struct bufdomain, bd_lobufspace), sysctl_bufdomain_long, "L",
229 "Minimum amount of buffers we want to have");
231SYSCTL_PROC(_vfs, OID_AUTO, hibufspace,
232 CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &hibufspace,
233 __offsetof(struct bufdomain, bd_hibufspace), sysctl_bufdomain_long, "L",
234 "Maximum allowed value of bufspace (excluding metadata)");
237 CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &bufspacethresh,
238 __offsetof(struct bufdomain, bd_bufspacethresh), sysctl_bufdomain_long, "L",
239 "Bufspace consumed before waking the daemon to free some");
240static counter_u64_t buffreekvacnt;
241SYSCTL_COUNTER_U64(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt,
242 "Number of times we have freed the KVA space from some buffer");
243static counter_u64_t bufdefragcnt;
244SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt,
245 "Number of times we have had to repeat buffer allocation to defragment");
246static long lorunningspace;
247SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
248 CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
249 "Minimum preferred space used for in-progress I/O");
250static long hirunningspace;
251SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
252 CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
253 "Maximum amount of space to use for in-progress I/O");
256 0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
258SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
259 0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
261SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW | CTLFLAG_STATS,
262 &altbufferflushes, 0, "Number of fsync flushes to limit dirty buffers");
264SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW | CTLFLAG_STATS,
265 &recursiveflushes, 0, "Number of flushes skipped due to being recursive");
266static int sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS);
267SYSCTL_PROC(_vfs, OID_AUTO, numdirtybuffers,
268 CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RD, NULL, 0, sysctl_numdirtybuffers, "I",
269 "Number of buffers that are dirty (has unwritten changes) at the moment");
270static int lodirtybuffers;
272 CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lodirtybuffers,
273 __offsetof(struct bufdomain, bd_lodirtybuffers), sysctl_bufdomain_int, "I",
274 "How many buffers we want to have free before bufdaemon can sleep");
275static int hidirtybuffers;
277 CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hidirtybuffers,
278 __offsetof(struct bufdomain, bd_hidirtybuffers), sysctl_bufdomain_int, "I",
279 "When the number of dirty buffers is considered severe");
282 CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &dirtybufthresh,
283 __offsetof(struct bufdomain, bd_dirtybufthresh), sysctl_bufdomain_int, "I",
284 "Number of bdwrite to bawrite conversions to clear dirty buffers");
285static int numfreebuffers;
286SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
287 "Number of free buffers");
288static int lofreebuffers;
290 CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lofreebuffers,
291 __offsetof(struct bufdomain, bd_lofreebuffers), sysctl_bufdomain_int, "I",
292 "Target number of free buffers");
293static int hifreebuffers;
295 CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hifreebuffers,
296 __offsetof(struct bufdomain, bd_hifreebuffers), sysctl_bufdomain_int, "I",
297 "Threshold for clean buffer recycling");
298static counter_u64_t getnewbufcalls;
299SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD,
300 &getnewbufcalls, "Number of calls to getnewbuf");
301static counter_u64_t getnewbufrestarts;
302SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD,
304 "Number of times getnewbuf has had to restart a buffer acquisition");
305static counter_u64_t mappingrestarts;
306SYSCTL_COUNTER_U64(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RD,
308 "Number of times getblk has had to restart a buffer mapping for "
309 "unmapped buffer");
310static counter_u64_t numbufallocfails;
311SYSCTL_COUNTER_U64(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW,
312 &numbufallocfails, "Number of times buffer allocations failed");
313static int flushbufqtarget = 100;
314SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
315 "Amount of work to do in flushbufqueues when helping bufdaemon");
316static counter_u64_t notbufdflushes;
318 "Number of dirty buffer flushes done by the bufdaemon helpers");
319static long barrierwrites;
320SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW | CTLFLAG_STATS,
321 &barrierwrites, 0, "Number of barrier writes");
322SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
323 &unmapped_buf_allowed, 0,
324 "Permit the use of the unmapped i/o");
325int maxbcachebuf = MAXBCACHEBUF;
326SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
327 "Maximum size of a buffer cache block");
328
329/*
330 * This lock synchronizes access to bd_request.
331 */
332static struct mtx_padalign __exclusive_cache_line bdlock;
333
334/*
335 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
336 * waitrunningbufspace().
337 */
338static struct mtx_padalign __exclusive_cache_line rbreqlock;
339
340/*
341 * Lock that protects bdirtywait.
342 */
343static struct mtx_padalign __exclusive_cache_line bdirtylock;
344
345/*
346 * bufdaemon shutdown request and sleep channel.
347 */
348static bool bd_shutdown;
349
350/*
351 * Wakeup point for bufdaemon, as well as indicator of whether it is already
352 * active. Set to 1 when the bufdaemon is already "on" the queue, 0 when it
353 * is idling.
354 */
355static int bd_request;
356
357/*
358 * Request for the buf daemon to write more buffers than is indicated by
359 * lodirtybuf. This may be necessary to push out excess dependencies or
360 * defragment the address space where a simple count of the number of dirty
361 * buffers is insufficient to characterize the demand for flushing them.
362 */
363static int bd_speedupreq;
364
365/*
366 * Synchronization (sleep/wakeup) variable for active buffer space requests.
367 * Set when wait starts, cleared prior to wakeup().
368 * Used in runningbufwakeup() and waitrunningbufspace().
369 */
370static int runningbufreq;
371
372/*
373 * Synchronization for bwillwrite() waiters.
374 */
375static int bdirtywait;
376
377/*
378 * Definitions for the buffer free lists.
379 */
380#define QUEUE_NONE 0 /* on no queue */
381#define QUEUE_EMPTY 1 /* empty buffer headers */
382#define QUEUE_DIRTY 2 /* B_DELWRI buffers */
383#define QUEUE_CLEAN 3 /* non-B_DELWRI buffers */
384#define QUEUE_SENTINEL 4 /* not a queue index, but mark for sentinel */
385
386/* Maximum number of buffer domains. */
387#define BUF_DOMAINS 8
388
389struct bufdomainset bdlodirty; /* Domains > lodirty */
390struct bufdomainset bdhidirty; /* Domains > hidirty */
391
392/* Configured number of clean queues. */
394
396struct bufdomain __exclusive_cache_line bdomain[BUF_DOMAINS];
397struct bufqueue __exclusive_cache_line bqempty;
398
399/*
400 * per-cpu empty buffer cache.
401 */
402uma_zone_t buf_zone;
403
404static int
405sysctl_runningspace(SYSCTL_HANDLER_ARGS)
406{
407 long value;
408 int error;
409
410 value = *(long *)arg1;
411 error = sysctl_handle_long(oidp, &value, 0, req);
412 if (error != 0 || req->newptr == NULL)
413 return (error);
414 mtx_lock(&rbreqlock);
415 if (arg1 == &hirunningspace) {
416 if (value < lorunningspace)
417 error = EINVAL;
418 else
419 hirunningspace = value;
420 } else {
421 KASSERT(arg1 == &lorunningspace,
422 ("%s: unknown arg1", __func__));
423 if (value > hirunningspace)
424 error = EINVAL;
425 else
426 lorunningspace = value;
427 }
428 mtx_unlock(&rbreqlock);
429 return (error);
430}
431
432static int
433sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS)
434{
435 int error;
436 int value;
437 int i;
438
439 value = *(int *)arg1;
440 error = sysctl_handle_int(oidp, &value, 0, req);
441 if (error != 0 || req->newptr == NULL)
442 return (error);
443 *(int *)arg1 = value;
444 for (i = 0; i < buf_domains; i++)
445 *(int *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
446 value;
447
448 return (error);
449}
450
451static int
452sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS)
453{
454 long value;
455 int error;
456 int i;
457
458 value = *(long *)arg1;
459 error = sysctl_handle_long(oidp, &value, 0, req);
460 if (error != 0 || req->newptr == NULL)
461 return (error);
462 *(long *)arg1 = value;
463 for (i = 0; i < buf_domains; i++)
464 *(long *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
465 value;
466
467 return (error);
468}
469
470#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
471 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
472static int
473sysctl_bufspace(SYSCTL_HANDLER_ARGS)
474{
475 long lvalue;
476 int ivalue;
477 int i;
478
479 lvalue = 0;
480 for (i = 0; i < buf_domains; i++)
481 lvalue += bdomain[i].bd_bufspace;
482 if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
483 return (sysctl_handle_long(oidp, &lvalue, 0, req));
484 if (lvalue > INT_MAX)
485 /* On overflow, still write out a long to trigger ENOMEM. */
486 return (sysctl_handle_long(oidp, &lvalue, 0, req));
487 ivalue = lvalue;
488 return (sysctl_handle_int(oidp, &ivalue, 0, req));
489}
490#else
491static int
492sysctl_bufspace(SYSCTL_HANDLER_ARGS)
493{
494 long lvalue;
495 int i;
496
497 lvalue = 0;
498 for (i = 0; i < buf_domains; i++)
499 lvalue += bdomain[i].bd_bufspace;
500 return (sysctl_handle_long(oidp, &lvalue, 0, req));
501}
502#endif
503
504static int
505sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS)
506{
507 int value;
508 int i;
509
510 value = 0;
511 for (i = 0; i < buf_domains; i++)
512 value += bdomain[i].bd_numdirtybuffers;
513 return (sysctl_handle_int(oidp, &value, 0, req));
514}
515
516/*
517 * bdirtywakeup:
518 *
519 * Wakeup any bwillwrite() waiters.
520 */
521static void
522bdirtywakeup(void)
523{
524 mtx_lock(&bdirtylock);
525 if (bdirtywait) {
526 bdirtywait = 0;
527 wakeup(&bdirtywait);
528 }
529 mtx_unlock(&bdirtylock);
530}
531
532/*
533 * bd_clear:
534 *
535 * Clear a domain from the appropriate bitsets when dirtybuffers
536 * is decremented.
537 */
538static void
539bd_clear(struct bufdomain *bd)
540{
541
542 mtx_lock(&bdirtylock);
543 if (bd->bd_numdirtybuffers <= bd->bd_lodirtybuffers)
544 BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
545 if (bd->bd_numdirtybuffers <= bd->bd_hidirtybuffers)
546 BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
547 mtx_unlock(&bdirtylock);
548}
549
550/*
551 * bd_set:
552 *
553 * Set a domain in the appropriate bitsets when dirtybuffers
554 * is incremented.
555 */
556static void
557bd_set(struct bufdomain *bd)
558{
559
560 mtx_lock(&bdirtylock);
561 if (bd->bd_numdirtybuffers > bd->bd_lodirtybuffers)
562 BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
563 if (bd->bd_numdirtybuffers > bd->bd_hidirtybuffers)
564 BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
565 mtx_unlock(&bdirtylock);
566}
567
568/*
569 * bdirtysub:
570 *
571 * Decrement the numdirtybuffers count by one and wakeup any
572 * threads blocked in bwillwrite().
573 */
574static void
575bdirtysub(struct buf *bp)
576{
577 struct bufdomain *bd;
578 int num;
579
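/*
 * Like bdirtyadd(), act only on threshold crossings: wake bwillwrite()
 * sleepers as the count drops through the dirty midpoint, and let
 * bd_clear() update the domain bitsets as it drops through the lo/hi
 * watermarks.
 */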
580 bd = bufdomain(bp);
581 num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, -1);
582 if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
583 bdirtywakeup();
584 if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
585 bd_clear(bd);
586}
587
588/*
589 * bdirtyadd:
590 *
591 * Increment the numdirtybuffers count by one and wakeup the buf
592 * daemon if needed.
593 */
594static void
595bdirtyadd(struct buf *bp)
596{
597 struct bufdomain *bd;
598 int num;
599
600 /*
601 * Only do the wakeup once as we cross the boundary. The
602 * buf daemon will keep running until the condition clears.
603 */
604 bd = bufdomain(bp);
605 num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, 1);
606 if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
607 bd_wakeup();
608 if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
609 bd_set(bd);
610}
611
612/*
613 * bufspace_daemon_wakeup:
614 *
615 * Wakeup the daemons responsible for freeing clean bufs.
616 */
617static void
618bufspace_daemon_wakeup(struct bufdomain *bd)
619{
620
621 /*
622 * avoid the lock if the daemon is running.
623 */
624 if (atomic_fetchadd_int(&bd->bd_running, 1) == 0) {
625 BD_RUN_LOCK(bd);
626 atomic_store_int(&bd->bd_running, 1);
627 wakeup(&bd->bd_running);
628 BD_RUN_UNLOCK(bd);
629 }
630}
631
632/*
633 * bufspace_adjust:
634 *
635 * Adjust the reported bufspace for a KVA managed buffer, possibly
636 * waking any waiters.
637 */
638static void
639bufspace_adjust(struct buf *bp, int bufsize)
640{
641 struct bufdomain *bd;
642 long space;
643 int diff;
644
645 KASSERT((bp->b_flags & B_MALLOC) == 0,
646 ("bufspace_adjust: malloc buf %p", bp));
647 bd = bufdomain(bp);
648 diff = bufsize - bp->b_bufsize;
649 if (diff < 0) {
650 atomic_subtract_long(&bd->bd_bufspace, -diff);
651 } else if (diff > 0) {
652 space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
653 /* Wake up the daemon on the transition. */
654 if (space < bd->bd_bufspacethresh &&
655 space + diff >= bd->bd_bufspacethresh)
656 bufspace_daemon_wakeup(bd);
657 }
658 bp->b_bufsize = bufsize;
659}
660
661/*
662 * bufspace_reserve:
663 *
664 * Reserve bufspace before calling allocbuf(). metadata has a
665 * different space limit than data.
666 */
667static int
668bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
669{
670 long limit, new;
671 long space;
672
673 if (metadata)
674 limit = bd->bd_maxbufspace;
675 else
676 limit = bd->bd_hibufspace;
677 space = atomic_fetchadd_long(&bd->bd_bufspace, size);
678 new = space + size;
679 if (new > limit) {
680 atomic_subtract_long(&bd->bd_bufspace, size);
681 return (ENOSPC);
682 }
683
684 /* Wake up the daemon on the transition. */
685 if (space < bd->bd_bufspacethresh && new >= bd->bd_bufspacethresh)
686 bufspace_daemon_wakeup(bd);
687
688 return (0);
689}
690
691/*
692 * bufspace_release:
693 *
694 * Release reserved bufspace after bufspace_adjust() has consumed it.
695 */
696static void
697bufspace_release(struct bufdomain *bd, int size)
698{
699
700 atomic_subtract_long(&bd->bd_bufspace, size);
701}
702
703/*
704 * bufspace_wait:
705 *
706 * Wait for bufspace, acting as the buf daemon if a locked vnode is
707 * supplied. bd_wanted must be set prior to polling for space. The
708 * operation must be re-tried on return.
709 */
710static void
711bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags,
712 int slpflag, int slptimeo)
713{
714 struct thread *td;
715 int error, fl, norunbuf;
716
717 if ((gbflags & GB_NOWAIT_BD) != 0)
718 return;
719
720 td = curthread;
721 BD_LOCK(bd);
722 while (bd->bd_wanted) {
723 if (vp != NULL && vp->v_type != VCHR &&
724 (td->td_pflags & TDP_BUFNEED) == 0) {
725 BD_UNLOCK(bd);
726 /*
727 * getblk() is called with a vnode locked, and
728 * some majority of the dirty buffers may as
729 * well belong to the vnode. Flushing the
730 * buffers there would make progress that
731 * cannot be achieved by the buf_daemon, which
732 * cannot lock the vnode.
733 */
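/*
 * Build the flag mask used to restore td_pflags below: clear
 * TDP_BUFNEED and TDP_NORUNNINGBUF, but keep TDP_NORUNNINGBUF if the
 * caller already had it set.
 */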
734 norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
735 (td->td_pflags & TDP_NORUNNINGBUF);
736
737 /*
738 * Play bufdaemon. The getnewbuf() function
739 * may be called while the thread owns lock
740 * for another dirty buffer for the same
741 * vnode, which makes it impossible to use
742 * VOP_FSYNC() there, due to the buffer lock
743 * recursion.
744 */
745 td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
746 fl = buf_flush(vp, bd, flushbufqtarget);
747 td->td_pflags &= norunbuf;
748 BD_LOCK(bd);
749 if (fl != 0)
750 continue;
751 if (bd->bd_wanted == 0)
752 break;
753 }
754 error = msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
755 (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
756 if (error != 0)
757 break;
758 }
759 BD_UNLOCK(bd);
760}
761
762static void
763bufspace_daemon_shutdown(void *arg, int howto __unused)
764{
765 struct bufdomain *bd = arg;
766 int error;
767
768 BD_RUN_LOCK(bd);
769 bd->bd_shutdown = true;
770 wakeup(&bd->bd_running);
771 error = msleep(&bd->bd_shutdown, BD_RUN_LOCKPTR(bd), 0,
772 "bufspace_shutdown", 60 * hz);
773 BD_RUN_UNLOCK(bd);
774 if (error != 0)
775 printf("bufspacedaemon wait error: %d\n", error);
776}
777
778/*
779 * bufspace_daemon:
780 *
781 * buffer space management daemon. Tries to maintain some marginal
782 * amount of free buffer space so that requesting processes neither
783 * block nor work to reclaim buffers.
784 */
785static void
786bufspace_daemon(void *arg)
787{
788 struct bufdomain *bd = arg;
789
790 EVENTHANDLER_REGISTER(shutdown_pre_sync, bufspace_daemon_shutdown, bd,
791 SHUTDOWN_PRI_LAST + 100);
792
793 BD_RUN_LOCK(bd);
794 while (!bd->bd_shutdown) {
795 BD_RUN_UNLOCK(bd);
796
797 /*
798 * Free buffers from the clean queue until we meet our
799 * targets.
800 *
801 * Theory of operation: The buffer cache is most efficient
802 * when some free buffer headers and space are always
803 * available to getnewbuf(). This daemon attempts to prevent
804 * the excessive blocking and synchronization associated
805 * with shortfall. It goes through three phases according
806 * to demand:
807 *
808 * 1) The daemon wakes up voluntarily once per-second
809 * during idle periods when the counters are below
810 * the wakeup thresholds (bufspacethresh, lofreebuffers).
811 *
812 * 2) The daemon wakes up as we cross the thresholds
813 * ahead of any potential blocking. This may bounce
814 * slightly according to the rate of consumption and
815 * release.
816 *
817 * 3) The daemon and consumers are starved for working
818 * clean buffers. This is the 'bufspace' sleep below
819 * which will inefficiently trade bufs with bqrelse
820 * until we return to condition 2.
821 */
822 while (bd->bd_bufspace > bd->bd_lobufspace ||
823 bd->bd_freebuffers < bd->bd_hifreebuffers) {
824 if (buf_recycle(bd, false) != 0) {
825 if (bd_flushall(bd))
826 continue;
827 /*
828 * Speedup dirty if we've run out of clean
829 * buffers. This is possible in particular
830 * because softdep may hold many bufs locked
831 * pending writes to other bufs which are
832 * marked for delayed write, exhausting
833 * clean space until they are written.
834 */
835 bd_speedup();
836 BD_LOCK(bd);
837 if (bd->bd_wanted) {
838 msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
839 PRIBIO|PDROP, "bufspace", hz/10);
840 } else
841 BD_UNLOCK(bd);
842 }
843 maybe_yield();
844 }
845
846 /*
847 * Re-check our limits and sleep. bd_running must be
848 * cleared prior to checking the limits to avoid missed
849 * wakeups. The waker will adjust one of bufspace or
850 * freebuffers prior to checking bd_running.
851 */
852 BD_RUN_LOCK(bd);
853 if (bd->bd_shutdown)
854 break;
855 atomic_store_int(&bd->bd_running, 0);
856 if (bd->bd_bufspace < bd->bd_bufspacethresh &&
857 bd->bd_freebuffers > bd->bd_lofreebuffers) {
858 msleep(&bd->bd_running, BD_RUN_LOCKPTR(bd),
859 PRIBIO, "-", hz);
860 } else {
861 /* Avoid spurious wakeups while running. */
862 atomic_store_int(&bd->bd_running, 1);
863 }
864 }
865 wakeup(&bd->bd_shutdown);
866 BD_RUN_UNLOCK(bd);
867 kthread_exit();
868}
869
870/*
871 * bufmallocadjust:
872 *
873 * Adjust the reported bufspace for a malloc managed buffer, possibly
874 * waking any waiters.
875 */
876static void
877bufmallocadjust(struct buf *bp, int bufsize)
878{
879 int diff;
880
881 KASSERT((bp->b_flags & B_MALLOC) != 0,
882 ("bufmallocadjust: non-malloc buf %p", bp));
883 diff = bufsize - bp->b_bufsize;
884 if (diff < 0)
885 atomic_subtract_long(&bufmallocspace, -diff);
886 else
887 atomic_add_long(&bufmallocspace, diff);
888 bp->b_bufsize = bufsize;
889}
890
891/*
892 * runningwakeup:
893 *
894 * Wake up processes that are waiting on asynchronous writes to fall
895 * below lorunningspace.
896 */
897static void
898runningwakeup(void)
899{
900
901 mtx_lock(&rbreqlock);
902 if (runningbufreq) {
903 runningbufreq = 0;
904 wakeup(&runningbufreq);
905 }
906 mtx_unlock(&rbreqlock);
907}
908
909/*
910 * runningbufwakeup:
911 *
912 * Decrement the outstanding write count accordingly.
913 */
914void
915runningbufwakeup(struct buf *bp)
916{
917 long space, bspace;
918
919 bspace = bp->b_runningbufspace;
920 if (bspace == 0)
921 return;
922 space = atomic_fetchadd_long(&runningbufspace, -bspace);
923 KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
924 space, bspace));
925 bp->b_runningbufspace = 0;
926 /*
927 * Only acquire the lock and wakeup on the transition from exceeding
928 * the threshold to falling below it.
929 */
930 if (space < lorunningspace)
931 return;
932 if (space - bspace > lorunningspace)
933 return;
934 runningwakeup();
935}
936
937/*
938 * waitrunningbufspace()
939 *
940 * runningbufspace is a measure of the amount of I/O currently
941 * running. This routine is used in async-write situations to
942 * prevent creating huge backups of pending writes to a device.
943 * Only asynchronous writes are governed by this function.
944 *
945 * This does NOT turn an async write into a sync write. It waits
946 * for earlier writes to complete and generally returns before the
947 * caller's write has reached the device.
948 */
949void
950waitrunningbufspace(void)
951{
952
953 mtx_lock(&rbreqlock);
954 while (runningbufspace > hirunningspace) {
955 runningbufreq = 1;
956 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
957 }
958 mtx_unlock(&rbreqlock);
959}
960
961/*
962 * vfs_buf_test_cache:
963 *
964 * Called when a buffer is extended. This function clears the B_CACHE
965 * bit if the newly extended portion of the buffer does not contain
966 * valid data.
967 */
968static __inline void
969vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
970 vm_offset_t size, vm_page_t m)
971{
972
973 /*
974 * This function and its results are protected by higher level
975 * synchronization requiring vnode and buf locks to page in and
976 * validate pages.
977 */
978 if (bp->b_flags & B_CACHE) {
979 int base = (foff + off) & PAGE_MASK;
980 if (vm_page_is_valid(m, base, size) == 0)
981 bp->b_flags &= ~B_CACHE;
982 }
983}
984
985/* Wake up the buffer daemon if necessary */
986static void
987bd_wakeup(void)
988{
989
990 mtx_lock(&bdlock);
991 if (bd_request == 0) {
992 bd_request = 1;
993 wakeup(&bd_request);
994 }
995 mtx_unlock(&bdlock);
996}
997
998/*
999 * Adjust the maxbcachebuf tunable.
1000 */
1001static void
1002maxbcachebuf_adjust(void)
1003{
1004 int i;
1005
1006 /*
1007 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
1008 */
1009 i = 2;
1010 while (i * 2 <= maxbcachebuf)
1011 i *= 2;
1012 maxbcachebuf = i;
1013 if (maxbcachebuf < MAXBSIZE)
1014 maxbcachebuf = MAXBSIZE;
1015 if (maxbcachebuf > maxphys)
1016 maxbcachebuf = maxphys;
1017 if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
1018 printf("maxbcachebuf=%d\n", maxbcachebuf);
1019}
1020
1021/*
1022 * bd_speedup - speedup the buffer cache flushing code
1023 */
1024void
1025bd_speedup(void)
1026{
1027 int needwake;
1028
1029 mtx_lock(&bdlock);
1030 needwake = 0;
1031 if (bd_speedupreq == 0 || bd_request == 0)
1032 needwake = 1;
1033 bd_speedupreq = 1;
1034 bd_request = 1;
1035 if (needwake)
1036 wakeup(&bd_request);
1037 mtx_unlock(&bdlock);
1038}
1039
1040#ifdef __i386__
1041#define TRANSIENT_DENOM 5
1042#else
1043#define TRANSIENT_DENOM 10
1044#endif
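/*
 * TRANSIENT_DENOM is the denominator of the buffer-map fraction handed
 * to the transient bio submap by kern_vfs_bio_buffer_alloc() below:
 * 1/5 on i386, 1/10 elsewhere.
 */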
1045
1046/*
1047 * Calculating buffer cache scaling values and reserve space for buffer
1048 * headers. This is called during low level kernel initialization and
1049 * may be called more than once. We CANNOT write to the memory area
1050 * being reserved at this time.
1051 */
1052caddr_t
1053kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
1054{
1055 int tuned_nbuf;
1056 long maxbuf, maxbuf_sz, buf_sz, biotmap_sz;
1057
1058 /*
1059 * With KASAN or KMSAN enabled, the kernel map is shadowed. Account for
1060 * this when sizing maps based on the amount of physical memory
1061 * available.
1062 */
1063#if defined(KASAN)
1064 physmem_est = (physmem_est * KASAN_SHADOW_SCALE) /
1065 (KASAN_SHADOW_SCALE + 1);
1066#elif defined(KMSAN)
1067 physmem_est /= 3;
1068
1069 /*
1070 * KMSAN cannot reliably determine whether buffer data is initialized
1071 * unless it is updated through a KVA mapping.
1072 */
1073 unmapped_buf_allowed = 0;
1074#endif
1075
1076 /*
1077 * physmem_est is in pages. Convert it to kilobytes (assumes
1078 * PAGE_SIZE is >= 1K)
1079 */
1080 physmem_est = physmem_est * (PAGE_SIZE / 1024);
1081
1083 /*
1084 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
1085 * For the first 64MB of ram nominally allocate sufficient buffers to
1086 * cover 1/4 of our ram. Beyond the first 64MB allocate additional
1087 * buffers to cover 1/10 of our ram over 64MB. When auto-sizing
1088 * the buffer cache we limit the eventual kva reservation to
1089 * maxbcache bytes.
1090 *
1091 * factor represents the 1/4 x ram conversion.
1092 */
1093 if (nbuf == 0) {
1094 int factor = 4 * BKVASIZE / 1024;
1095
1096 nbuf = 50;
1097 if (physmem_est > 4096)
1098 nbuf += min((physmem_est - 4096) / factor,
1099 65536 / factor);
1100 if (physmem_est > 65536)
1101 nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
1102 32 * 1024 * 1024 / (factor * 5));
1103
1104 if (maxbcache && nbuf > maxbcache / BKVASIZE)
1105 nbuf = maxbcache / BKVASIZE;
1106 tuned_nbuf = 1;
1107 } else
1108 tuned_nbuf = 0;
1109
1110 /* XXX Avoid unsigned long overflows later on with maxbufspace. */
1111 maxbuf = (LONG_MAX / 3) / BKVASIZE;
1112 if (nbuf > maxbuf) {
1113 if (!tuned_nbuf)
1114 printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
1115 maxbuf);
1116 nbuf = maxbuf;
1117 }
1118
1119 /*
1120 * Ideal allocation size for the transient bio submap is 10%
1121 * of the maximal space buffer map. This roughly corresponds
1122 * to the amount of the buffer mapped for typical UFS load.
1123 *
1124 * Clip the buffer map to reserve space for the transient
1125 * BIOs, if its extent is bigger than 90% (80% on i386) of the
1126 * maximum buffer map extent on the platform.
1127 *
1128 * The fall-back to the maxbuf in case of maxbcache unset,
1129 * allows to not trim the buffer KVA for the architectures
1130 * with ample KVA space.
1131 */
1132 if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
1133 maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
1134 buf_sz = (long)nbuf * BKVASIZE;
1135 if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
1136 (TRANSIENT_DENOM - 1)) {
1137 /*
1138 * There is more KVA than memory. Do not
1139 * adjust buffer map size, and assign the rest
1140 * of maxbuf to transient map.
1141 */
1142 biotmap_sz = maxbuf_sz - buf_sz;
1143 } else {
1144 /*
1145 * Buffer map spans all KVA we could afford on
1146 * this platform. Give 10% (20% on i386) of
1147 * the buffer map to the transient bio map.
1148 */
1149 biotmap_sz = buf_sz / TRANSIENT_DENOM;
1150 buf_sz -= biotmap_sz;
1151 }
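/*
 * bio_transient_maxcnt is biotmap_sz / maxphys clamped to INT_MAX; the
 * comparison is arranged so the quotient is only computed when it fits
 * in an int.
 */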
1152 if (biotmap_sz / INT_MAX > maxphys)
1153 bio_transient_maxcnt = INT_MAX;
1154 else
1155 bio_transient_maxcnt = biotmap_sz / maxphys;
1156 /*
1157 * Artificially limit to 1024 simultaneous in-flight I/Os
1158 * using the transient mapping.
1159 */
1160 if (bio_transient_maxcnt > 1024)
1161 bio_transient_maxcnt = 1024;
1162 if (tuned_nbuf)
1163 nbuf = buf_sz / BKVASIZE;
1164 }
1165
1166 if (nswbuf == 0) {
1167 nswbuf = min(nbuf / 4, 256);
1168 if (nswbuf < NSWBUF_MIN)
1169 nswbuf = NSWBUF_MIN;
1170 }
1171
1172 /*
1173 * Reserve space for the buffer cache buffers
1174 */
1175 buf = (char *)v;
1176 v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) *
1177 atop(maxbcachebuf)) * nbuf;
1178
1179 return (v);
1180}
1181
1182/*
1183 * Single global constant for BUF_WMESG, to avoid getting multiple
1184 * references.
1185 */
1186static const char buf_wmesg[] = "bufwait";
1187
1188/* Initialize the buffer subsystem. Called before use of any buffers. */
1189void
1190bufinit(void)
1191{
1192 struct buf *bp;
1193 int i;
1194
1195 KASSERT(maxbcachebuf >= MAXBSIZE,
1196 ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
1197 MAXBSIZE));
1198 bq_init(&bqempty, QUEUE_EMPTY, -1, "bufq empty lock");
1199 mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1200 mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1201 mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1202
1203 unmapped_buf = (caddr_t)kva_alloc(maxphys);
1204
1205 /* finally, initialize each buffer header and stick on empty q */
1206 for (i = 0; i < nbuf; i++) {
1207 bp = nbufp(i);
1208 bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf));
1209 bp->b_flags = B_INVAL;
1210 bp->b_rcred = NOCRED;
1211 bp->b_wcred = NOCRED;
1212 bp->b_qindex = QUEUE_NONE;
1213 bp->b_domain = -1;
1214 bp->b_subqueue = mp_maxid + 1;
1215 bp->b_xflags = 0;
1216 bp->b_data = bp->b_kvabase = unmapped_buf;
1217 LIST_INIT(&bp->b_dep);
1218 BUF_LOCKINIT(bp, buf_wmesg);
1219 bq_insert(&bqempty, bp, false);
1220 }
1221
1222 /*
1223 * maxbufspace is the absolute maximum amount of buffer space we are
1224 * allowed to reserve in KVM and in real terms. The absolute maximum
1225 * is nominally used by metadata. hibufspace is the nominal maximum
1226 * used by most other requests. The differential is required to
1227 * ensure that metadata deadlocks don't occur.
1228 *
1229 * maxbufspace is based on BKVASIZE. Allocating buffers larger than
1230 * this may result in KVM fragmentation which is not handled optimally
1231 * by the system. XXX This is less true with vmem. We could use
1232 * PAGE_SIZE.
1233 */
1234 maxbufspace = (long)nbuf * BKVASIZE;
1235 hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
1236 lobufspace = (hibufspace / 20) * 19; /* 95% */
1238
1239 /*
1240 * Note: The 16 MiB upper limit for hirunningspace was chosen
1241 * arbitrarily and may need further tuning. It corresponds to
1242 * 128 outstanding write IO requests (if IO size is 128 KiB),
1243 * which fits with many RAID controllers' tagged queuing limits.
1244 * The lower 1 MiB limit is the historical upper limit for
1245 * hirunningspace.
1246 */
1247 hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
1248 16 * 1024 * 1024), 1024 * 1024);
1249 lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
1250
1251 /*
1252 * Limit the amount of malloc memory since it is wired permanently into
1253 * the kernel space. Even though this is accounted for in the buffer
1254 * allocation, we don't want the malloced region to grow uncontrolled.
1255 * The malloc scheme improves memory utilization significantly on
1256 * average (small) directories.
1257 */
1259
1260 /*
1261 * Reduce the chance of a deadlock occurring by limiting the number
1262 * of delayed-write dirty buffers we allow to stack up.
1263 */
1264 hidirtybuffers = nbuf / 4 + 20;
1265 dirtybufthresh = hidirtybuffers * 9 / 10;
1266 /*
1267 * To support extreme low-memory systems, make sure hidirtybuffers
1268 * cannot eat up all available buffer space. This occurs when our
1269 * minimum cannot be met. We try to size hidirtybuffers to 3/4 our
1270 * buffer space assuming BKVASIZE'd buffers.
1271 */
1272 while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1273 hidirtybuffers >>= 1;
1274 }
1276
1277 /*
1278 * lofreebuffers should be sufficient to avoid stalling waiting on
1279 * buf headers under heavy utilization. The bufs in per-cpu caches
1280 * are counted as free but will be unavailable to threads executing
1281 * on other cpus.
1282 *
1283 * hifreebuffers is the free target for the bufspace daemon. This
1284 * should be set appropriately to limit work per-iteration.
1285 */
1286 lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1287 hifreebuffers = (3 * lofreebuffers) / 2;
1289
1290 /* Setup the kva and free list allocators. */
1291 vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1292 buf_zone = uma_zcache_create("buf free cache",
1293 sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf),
1294 NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1295
1296 /*
1297 * Size the clean queue according to the amount of buffer space.
1298 * One queue per-256mb up to the max. More queues gives better
1299 * concurrency but less accurate LRU.
1300 */
1301 buf_domains = MIN(howmany(maxbufspace, 256*1024*1024), BUF_DOMAINS);
1302 for (i = 0 ; i < buf_domains; i++) {
1303 struct bufdomain *bd;
1304
1305 bd = &bdomain[i];
1306 bd_init(bd);
1307 bd->bd_freebuffers = nbuf / buf_domains;
1310 bd->bd_bufspace = 0;
1315 bd->bd_numdirtybuffers = 0;
1319 /* Don't allow more than 2% of bufs in the per-cpu caches. */
1320 bd->bd_lim = nbuf / buf_domains / 50 / mp_ncpus;
1321 }
1327 buffreekvacnt = counter_u64_alloc(M_WAITOK);
1328 bufdefragcnt = counter_u64_alloc(M_WAITOK);
1329 bufkvaspace = counter_u64_alloc(M_WAITOK);
1330}
1331
1332#ifdef INVARIANTS
1333static inline void
1334vfs_buf_check_mapped(struct buf *bp)
1335{
1336
1337 KASSERT(bp->b_kvabase != unmapped_buf,
1338 ("mapped buf: b_kvabase was not updated %p", bp));
1339 KASSERT(bp->b_data != unmapped_buf,
1340 ("mapped buf: b_data was not updated %p", bp));
1341 KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1342 maxphys, ("b_data + b_offset unmapped %p", bp));
1343}
1344
1345static inline void
1346vfs_buf_check_unmapped(struct buf *bp)
1347{
1348
1349 KASSERT(bp->b_data == unmapped_buf,
1350 ("unmapped buf: corrupted b_data %p", bp));
1351}
1352
1353#define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1354#define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1355#else
1356#define BUF_CHECK_MAPPED(bp) do {} while (0)
1357#define BUF_CHECK_UNMAPPED(bp) do {} while (0)
1358#endif
1359
1360static int
1361isbufbusy(struct buf *bp)
1362{
1363 if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1364 ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1365 return (1);
1366 return (0);
1367}
1368
1369/*
1370 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1371 */
1372void
1373bufshutdown(int show_busybufs)
1374{
1375 static int first_buf_printf = 1;
1376 struct buf *bp;
1377 int i, iter, nbusy, pbusy;
1378#ifndef PREEMPTION
1379 int subiter;
1380#endif
1381
1382 /*
1383 * Sync filesystems for shutdown
1384 */
1385 wdog_kern_pat(WD_LASTVAL);
1386 kern_sync(curthread);
1387
1388 /*
1389 * With soft updates, some buffers that are
1390 * written will be remarked as dirty until other
1391 * buffers are written.
1392 */
1393 for (iter = pbusy = 0; iter < 20; iter++) {
1394 nbusy = 0;
1395 for (i = nbuf - 1; i >= 0; i--) {
1396 bp = nbufp(i);
1397 if (isbufbusy(bp))
1398 nbusy++;
1399 }
1400 if (nbusy == 0) {
1401 if (first_buf_printf)
1402 printf("All buffers synced.");
1403 break;
1404 }
1405 if (first_buf_printf) {
1406 printf("Syncing disks, buffers remaining... ");
1407 first_buf_printf = 0;
1408 }
1409 printf("%d ", nbusy);
1410 if (nbusy < pbusy)
1411 iter = 0;
1412 pbusy = nbusy;
1413
1414 wdog_kern_pat(WD_LASTVAL);
1415 kern_sync(curthread);
1416
1417#ifdef PREEMPTION
1418 /*
1419 * Spin for a while to allow interrupt threads to run.
1420 */
1421 DELAY(50000 * iter);
1422#else
1423 /*
1424 * Context switch several times to allow interrupt
1425 * threads to run.
1426 */
1427 for (subiter = 0; subiter < 50 * iter; subiter++) {
1428 thread_lock(curthread);
1429 mi_switch(SW_VOL);
1430 DELAY(1000);
1431 }
1432#endif
1433 }
1434 printf("\n");
1435 /*
1436 * Count only busy local buffers to prevent forcing
1437 * a fsck if we're just a client of a wedged NFS server
1438 */
1439 nbusy = 0;
1440 for (i = nbuf - 1; i >= 0; i--) {
1441 bp = nbufp(i);
1442 if (isbufbusy(bp)) {
1443#if 0
1444/* XXX: This is bogus. We should probably have a BO_REMOTE flag instead */
1445 if (bp->b_dev == NULL) {
1446 TAILQ_REMOVE(&mountlist,
1447 bp->b_vp->v_mount, mnt_list);
1448 continue;
1449 }
1450#endif
1451 nbusy++;
1452 if (show_busybufs > 0) {
1453 printf(
1454 "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1455 nbusy, bp, bp->b_vp, bp->b_flags,
1456 (intmax_t)bp->b_blkno,
1457 (intmax_t)bp->b_lblkno);
1458 BUF_LOCKPRINTINFO(bp);
1459 if (show_busybufs > 1)
1460 vn_printf(bp->b_vp,
1461 "vnode content: ");
1462 }
1463 }
1464 }
1465 if (nbusy) {
1466 /*
1467 * Failed to sync all blocks. Indicate this and don't
1468 * unmount filesystems (thus forcing an fsck on reboot).
1469 */
1470 BOOTTRACE("shutdown failed to sync buffers");
1471 printf("Giving up on %d buffers\n", nbusy);
1472 DELAY(5000000); /* 5 seconds */
1473 swapoff_all();
1474 } else {
1475 BOOTTRACE("shutdown sync complete");
1476 if (!first_buf_printf)
1477 printf("Final sync complete\n");
1478
1479 /*
1480 * Unmount filesystems and perform swapoff, to quiesce
1481 * the system as much as possible. In particular, no
1482 * I/O should be initiated from top levels since it
1483 * might be abruptly terminated by reset, or otherwise
1484 * erroneously handled because other parts of the
1485 * system are disabled.
1486 *
1487 * Swapoff before unmount, because file-backed swap is
1488 * non-operational after unmount of the underlying
1489 * filesystem.
1490 */
1491 if (!KERNEL_PANICKED()) {
1492 swapoff_all();
1493 vfs_unmountall();
1494 }
1495 BOOTTRACE("shutdown unmounted all filesystems");
1496 }
1497 DELAY(100000); /* wait for console output to finish */
1498}
1499
1500static void
1501bpmap_qenter(struct buf *bp)
1502{
1503
1504 BUF_CHECK_MAPPED(bp);
1505
1506 /*
1507 * bp->b_data is relative to bp->b_offset, but
1508 * bp->b_offset may be offset into the first page.
1509 */
1510 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1511 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1512 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1513 (vm_offset_t)(bp->b_offset & PAGE_MASK));
1514}
1515
1516static inline struct bufdomain *
1517bufdomain(struct buf *bp)
1518{
1519
1520 return (&bdomain[bp->b_domain]);
1521}
1522
1523static struct bufqueue *
1524bufqueue(struct buf *bp)
1525{
1526
1527 switch (bp->b_qindex) {
1528 case QUEUE_NONE:
1529 /* FALLTHROUGH */
1530 case QUEUE_SENTINEL:
1531 return (NULL);
1532 case QUEUE_EMPTY:
1533 return (&bqempty);
1534 case QUEUE_DIRTY:
1535 return (&bufdomain(bp)->bd_dirtyq);
1536 case QUEUE_CLEAN:
1537 return (&bufdomain(bp)->bd_subq[bp->b_subqueue]);
1538 default:
1539 break;
1540 }
1541 panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex);
1542}
1543
1544/*
1545 * Return the locked bufqueue that bp is a member of.
1546 */
1547static struct bufqueue *
1548bufqueue_acquire(struct buf *bp)
1549{
1550 struct bufqueue *bq, *nbq;
1551
1552 /*
1553 * bp can be pushed from a per-cpu queue to the
1554 * cleanq while we're waiting on the lock. Retry
1555 * if the queues don't match.
1556 */
1557 bq = bufqueue(bp);
1558 BQ_LOCK(bq);
1559 for (;;) {
1560 nbq = bufqueue(bp);
1561 if (bq == nbq)
1562 break;
1563 BQ_UNLOCK(bq);
1564 BQ_LOCK(nbq);
1565 bq = nbq;
1566 }
1567 return (bq);
1568}
1569
1570/*
1571 * binsfree:
1572 *
1573 * Insert the buffer into the appropriate free list. Requires a
1574 * locked buffer on entry and buffer is unlocked before return.
1575 */
1576static void
1577binsfree(struct buf *bp, int qindex)
1578{
1579 struct bufdomain *bd;
1580 struct bufqueue *bq;
1581
1582 KASSERT(qindex == QUEUE_CLEAN || qindex == QUEUE_DIRTY,
1583 ("binsfree: Invalid qindex %d", qindex));
1584 BUF_ASSERT_XLOCKED(bp);
1585
1586 /*
1587 * Handle delayed bremfree() processing.
1588 */
1589 if (bp->b_flags & B_REMFREE) {
1590 if (bp->b_qindex == qindex) {
1591 bp->b_flags |= B_REUSE;
1592 bp->b_flags &= ~B_REMFREE;
1593 BUF_UNLOCK(bp);
1594 return;
1595 }
1596 bq = bufqueue_acquire(bp);
1597 bq_remove(bq, bp);
1598 BQ_UNLOCK(bq);
1599 }
1600 bd = bufdomain(bp);
1601 if (qindex == QUEUE_CLEAN) {
1602 if (bd->bd_lim != 0)
1603 bq = &bd->bd_subq[PCPU_GET(cpuid)];
1604 else
1605 bq = bd->bd_cleanq;
1606 } else
1607 bq = &bd->bd_dirtyq;
1608 bq_insert(bq, bp, true);
1609}
1610
1611/*
1612 * buf_free:
1613 *
1614 * Free a buffer to the buf zone once it no longer has valid contents.
1615 */
1616static void
1617buf_free(struct buf *bp)
1618{
1619
1620 if (bp->b_flags & B_REMFREE)
1621 bremfreef(bp);
1622 if (bp->b_vflags & BV_BKGRDINPROG)
1623 panic("losing buffer 1");
1624 if (bp->b_rcred != NOCRED) {
1625 crfree(bp->b_rcred);
1626 bp->b_rcred = NOCRED;
1627 }
1628 if (bp->b_wcred != NOCRED) {
1629 crfree(bp->b_wcred);
1630 bp->b_wcred = NOCRED;
1631 }
1632 if (!LIST_EMPTY(&bp->b_dep))
1633 buf_deallocate(bp);
1634 bufkva_free(bp);
1635 atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
1636 MPASS((bp->b_flags & B_MAXPHYS) == 0);
1637 BUF_UNLOCK(bp);
1638 uma_zfree(buf_zone, bp);
1639}
1640
1641/*
1642 * buf_import:
1643 *
1644 * Import bufs into the uma cache from the buf list. The system still
1645 * expects a static array of bufs and much of the synchronization
1646 * around bufs assumes type stable storage. As a result, UMA is used
1647 * only as a per-cpu cache of bufs still maintained on a global list.
1648 */
1649static int
1650buf_import(void *arg, void **store, int cnt, int domain, int flags)
1651{
1652 struct buf *bp;
1653 int i;
1654
1655 BQ_LOCK(&bqempty);
1656 for (i = 0; i < cnt; i++) {
1657 bp = TAILQ_FIRST(&bqempty.bq_queue);
1658 if (bp == NULL)
1659 break;
1660 bq_remove(&bqempty, bp);
1661 store[i] = bp;
1662 }
1663 BQ_UNLOCK(&bqempty);
1664
1665 return (i);
1666}
1667
1668/*
1669 * buf_release:
1670 *
1671 * Release bufs from the uma cache back to the buffer queues.
1672 */
1673static void
1674buf_release(void *arg, void **store, int cnt)
1675{
1676 struct bufqueue *bq;
1677 struct buf *bp;
1678 int i;
1679
1680 bq = &bqempty;
1681 BQ_LOCK(bq);
1682 for (i = 0; i < cnt; i++) {
1683 bp = store[i];
1684 /* Inline bq_insert() to batch locking. */
1685 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1686 bp->b_flags &= ~(B_AGE | B_REUSE);
1687 bq->bq_len++;
1688 bp->b_qindex = bq->bq_index;
1689 }
1690 BQ_UNLOCK(bq);
1691}
1692
1693/*
1694 * buf_alloc:
1695 *
1696 * Allocate an empty buffer header.
1697 */
1698static struct buf *
1699buf_alloc(struct bufdomain *bd)
1700{
1701 struct buf *bp;
1702 int freebufs, error;
1703
1704 /*
1705 * We can only run out of bufs in the buf zone if the average buf
1706 * is less than BKVASIZE. In this case the actual wait/block will
1707 * come from buf_recycle() failing to flush one of these small bufs.
1708 */
1709 bp = NULL;
1710 freebufs = atomic_fetchadd_int(&bd->bd_freebuffers, -1);
1711 if (freebufs > 0)
1712 bp = uma_zalloc(buf_zone, M_NOWAIT);
1713 if (bp == NULL) {
1714 atomic_add_int(&bd->bd_freebuffers, 1);
1715 bufspace_daemon_wakeup(bd);
1716 counter_u64_add(numbufallocfails, 1);
1717 return (NULL);
1718 }
1719 /*
1720 * Wake-up the bufspace daemon on transition below threshold.
1721 */
1722 if (freebufs == bd->bd_lofreebuffers)
1723 bufspace_daemon_wakeup(bd);
1724
1725 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1726 KASSERT(error == 0, ("%s: BUF_LOCK on free buf %p: %d.", __func__, bp,
1727 error));
1728 (void)error;
1729
1730 KASSERT(bp->b_vp == NULL,
1731 ("bp: %p still has vnode %p.", bp, bp->b_vp));
1732 KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1733 ("invalid buffer %p flags %#x", bp, bp->b_flags));
1734 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1735 ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1736 KASSERT(bp->b_npages == 0,
1737 ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1738 KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1739 KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1740 MPASS((bp->b_flags & B_MAXPHYS) == 0);
1741
1742 bp->b_domain = BD_DOMAIN(bd);
1743 bp->b_flags = 0;
1744 bp->b_ioflags = 0;
1745 bp->b_xflags = 0;
1746 bp->b_vflags = 0;
1747 bp->b_vp = NULL;
1748 bp->b_blkno = bp->b_lblkno = 0;
1749 bp->b_offset = NOOFFSET;
1750 bp->b_iodone = 0;
1751 bp->b_error = 0;
1752 bp->b_resid = 0;
1753 bp->b_bcount = 0;
1754 bp->b_npages = 0;
1755 bp->b_dirtyoff = bp->b_dirtyend = 0;
1756 bp->b_bufobj = NULL;
1757 bp->b_data = bp->b_kvabase = unmapped_buf;
1758 bp->b_fsprivate1 = NULL;
1759 bp->b_fsprivate2 = NULL;
1760 bp->b_fsprivate3 = NULL;
1761 LIST_INIT(&bp->b_dep);
1762
1763 return (bp);
1764}
1765
1766/*
1767 * buf_recycle:
1768 *
1769 * Free a buffer from the given bufqueue. kva controls whether the
1770 * freed buf must own some kva resources. This is used for
1771 * defragmenting.
1772 */
1773static int
1774buf_recycle(struct bufdomain *bd, bool kva)
1775{
1776 struct bufqueue *bq;
1777 struct buf *bp, *nbp;
1778
1779 if (kva)
1780 counter_u64_add(bufdefragcnt, 1);
1781 nbp = NULL;
1782 bq = bd->bd_cleanq;
1783 BQ_LOCK(bq);
1784 KASSERT(BQ_LOCKPTR(bq) == BD_LOCKPTR(bd),
1785 ("buf_recycle: Locks don't match"));
1786 nbp = TAILQ_FIRST(&bq->bq_queue);
1787
1788 /*
1789 * Run scan, possibly freeing data and/or kva mappings on the fly
1790 * depending.
1791 */
1792 while ((bp = nbp) != NULL) {
1793 /*
1794 * Calculate next bp (we can only use it if we do not
1795 * release the bqlock).
1796 */
1797 nbp = TAILQ_NEXT(bp, b_freelist);
1798
1799 /*
1800 * If we are defragging then we need a buffer with
1801 * some kva to reclaim.
1802 */
1803 if (kva && bp->b_kvasize == 0)
1804 continue;
1805
1806 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1807 continue;
1808
1809 /*
1810 * Implement a second chance algorithm for frequently
1811 * accessed buffers.
1812 */
1813 if ((bp->b_flags & B_REUSE) != 0) {
1814 TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1815 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1816 bp->b_flags &= ~B_REUSE;
1817 BUF_UNLOCK(bp);
1818 continue;
1819 }
1820
1821 /*
1822 * Skip buffers with background writes in progress.
1823 */
1824 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1825 BUF_UNLOCK(bp);
1826 continue;
1827 }
1828
1829 KASSERT(bp->b_qindex == QUEUE_CLEAN,
1830 ("buf_recycle: inconsistent queue %d bp %p",
1831 bp->b_qindex, bp));
1832 KASSERT(bp->b_domain == BD_DOMAIN(bd),
1833 ("getnewbuf: queue domain %d doesn't match request %d",
1834 bp->b_domain, (int)BD_DOMAIN(bd)));
1835 /*
1836 * NOTE: nbp is now entirely invalid. We can only restart
1837 * the scan from this point on.
1838 */
1839 bq_remove(bq, bp);
1840 BQ_UNLOCK(bq);
1841
1842 /*
1843 * Requeue the background write buffer with error and
1844 * restart the scan.
1845 */
1846 if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1847 bqrelse(bp);
1848 BQ_LOCK(bq);
1849 nbp = TAILQ_FIRST(&bq->bq_queue);
1850 continue;
1851 }
1852 bp->b_flags |= B_INVAL;
1853 brelse(bp);
1854 return (0);
1855 }
1856 bd->bd_wanted = 1;
1857 BQ_UNLOCK(bq);
1858
1859 return (ENOBUFS);
1860}
1861
1862/*
1863 * bremfree:
1864 *
1865 * Mark the buffer for removal from the appropriate free list.
1866 *
1867 */
1868void
1869bremfree(struct buf *bp)
1870{
1871
1872 CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1873 KASSERT((bp->b_flags & B_REMFREE) == 0,
1874 ("bremfree: buffer %p already marked for delayed removal.", bp));
1875 KASSERT(bp->b_qindex != QUEUE_NONE,
1876 ("bremfree: buffer %p not on a queue.", bp));
1877 BUF_ASSERT_XLOCKED(bp);
1878
1879 bp->b_flags |= B_REMFREE;
1880}
1881
1882/*
1883 * bremfreef:
1884 *
1885 * Force an immediate removal from a free list. Used only in nfs when
1886 * it abuses the b_freelist pointer.
1887 */
1888void
1889bremfreef(struct buf *bp)
1890{
1891 struct bufqueue *bq;
1892
1893 bq = bufqueue_acquire(bp);
1894 bq_remove(bq, bp);
1895 BQ_UNLOCK(bq);
1896}
1897
1898static void
1899bq_init(struct bufqueue *bq, int qindex, int subqueue, const char *lockname)
1900{
1901
1902 mtx_init(&bq->bq_lock, lockname, NULL, MTX_DEF);
1903 TAILQ_INIT(&bq->bq_queue);
1904 bq->bq_len = 0;
1905 bq->bq_index = qindex;
1906 bq->bq_subqueue = subqueue;
1907}
1908
1909static void
1910bd_init(struct bufdomain *bd)
1911{
1912 int i;
1913
1914 bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1];
1915 bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock");
1916 bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock");
1917 for (i = 0; i <= mp_maxid; i++)
1918 bq_init(&bd->bd_subq[i], QUEUE_CLEAN, i,
1919 "bufq clean subqueue lock");
1920 mtx_init(&bd->bd_run_lock, "bufspace daemon run lock", NULL, MTX_DEF);
1921}
1922
1923/*
1924 * bq_remove:
1925 *
1926 * Removes a buffer from the free list, must be called with the
1927 * correct qlock held.
1928 */
1929static void
1930bq_remove(struct bufqueue *bq, struct buf *bp)
1931{
1932
1933 CTR3(KTR_BUF, "bq_remove(%p) vp %p flags %X",
1934 bp, bp->b_vp, bp->b_flags);
1935 KASSERT(bp->b_qindex != QUEUE_NONE,
1936 ("bq_remove: buffer %p not on a queue.", bp));
1937 KASSERT(bufqueue(bp) == bq,
1938 ("bq_remove: Remove buffer %p from wrong queue.", bp));
1939
1940 BQ_ASSERT_LOCKED(bq);
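/*
 * Bufs on the EMPTY queue are shuffled by buf_import()/buf_release()
 * without holding the buf lock, so only assert lock ownership for the
 * other queues.
 */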
1941 if (bp->b_qindex != QUEUE_EMPTY) {
1942 BUF_ASSERT_XLOCKED(bp);
1943 }
1944 KASSERT(bq->bq_len >= 1,
1945 ("queue %d underflow", bp->b_qindex));
1946 TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1947 bq->bq_len--;
1948 bp->b_qindex = QUEUE_NONE;
1949 bp->b_flags &= ~(B_REMFREE | B_REUSE);
1950}
1951
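/*
 * bd_flush:
 *
 * Move bufs queued on a per-cpu clean subqueue to the domain's global
 * clean queue and wake up any threads sleeping in bufspace_wait().
 */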
1952static void
1953bd_flush(struct bufdomain *bd, struct bufqueue *bq)
1954{
1955 struct buf *bp;
1956
1957 BQ_ASSERT_LOCKED(bq);
1958 if (bq != bd->bd_cleanq) {
1959 BD_LOCK(bd);
1960 while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) {
1961 TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1962 TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp,
1963 b_freelist);
1964 bp->b_subqueue = bd->bd_cleanq->bq_subqueue;
1965 }
1966 bd->bd_cleanq->bq_len += bq->bq_len;
1967 bq->bq_len = 0;
1968 }
1969 if (bd->bd_wanted) {
1970 bd->bd_wanted = 0;
1971 wakeup(&bd->bd_wanted);
1972 }
1973 if (bq != bd->bd_cleanq)
1974 BD_UNLOCK(bd);
1975}
1976
1977static int
1978 bd_flushall(struct bufdomain *bd)
1979 {
1980 struct bufqueue *bq;
1981 int flushed;
1982 int i;
1983
1984 if (bd->bd_lim == 0)
1985 return (0);
1986 flushed = 0;
1987 for (i = 0; i <= mp_maxid; i++) {
1988 bq = &bd->bd_subq[i];
1989 if (bq->bq_len == 0)
1990 continue;
1991 BQ_LOCK(bq);
1992 bd_flush(bd, bq);
1993 BQ_UNLOCK(bq);
1994 flushed++;
1995 }
1996
1997 return (flushed);
1998}
1999
2000static void
2001bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
2002{
2003 struct bufdomain *bd;
2004
2005 if (bp->b_qindex != QUEUE_NONE)
2006 panic("bq_insert: free buffer %p onto another queue?", bp);
2007
2008 bd = bufdomain(bp);
2009 if (bp->b_flags & B_AGE) {
2010 /* Place this buf directly on the real queue. */
2011 if (bq->bq_index == QUEUE_CLEAN)
2012 bq = bd->bd_cleanq;
2013 BQ_LOCK(bq);
2014 TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist);
2015 } else {
2016 BQ_LOCK(bq);
2017 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
2018 }
2019 bp->b_flags &= ~(B_AGE | B_REUSE);
2020 bq->bq_len++;
2021 bp->b_qindex = bq->bq_index;
2022 bp->b_subqueue = bq->bq_subqueue;
2023
2024 /*
2025 * Unlock before we notify so that we don't wakeup a waiter that
2026 * fails a trylock on the buf and sleeps again.
2027 */
2028 if (unlock)
2029 BUF_UNLOCK(bp);
2030
2031 if (bp->b_qindex == QUEUE_CLEAN) {
2032 /*
2033 * Flush the per-cpu queue and notify any waiters.
2034 */
2035 if (bd->bd_wanted || (bq != bd->bd_cleanq &&
2036 bq->bq_len >= bd->bd_lim))
2037 bd_flush(bd, bq);
2038 }
2039 BQ_UNLOCK(bq);
2040}
2041
2042/*
2043 * bufkva_free:
2044 *
2045 * Free the kva allocation for a buffer.
2046 *
2047 */
2048static void
2049bufkva_free(struct buf *bp)
2050{
2051
2052#ifdef INVARIANTS
2053 if (bp->b_kvasize == 0) {
2054 KASSERT(bp->b_kvabase == unmapped_buf &&
2055 bp->b_data == unmapped_buf,
2056 ("Leaked KVA space on %p", bp));
2057 } else if (buf_mapped(bp))
2058 BUF_CHECK_MAPPED(bp);
2059 else
2060 BUF_CHECK_UNMAPPED(bp);
2061#endif
2062 if (bp->b_kvasize == 0)
2063 return;
2064
2065 vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
2066 counter_u64_add(bufkvaspace, -bp->b_kvasize);
2067 counter_u64_add(buffreekvacnt, 1);
2068 bp->b_data = bp->b_kvabase = unmapped_buf;
2069 bp->b_kvasize = 0;
2070}
2071
2072/*
2073 * bufkva_alloc:
2074 *
2075 * Allocate the buffer KVA and set b_kvasize and b_kvabase.
2076 */
2077static int
2078bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
2079{
2080 vm_offset_t addr;
2081 int error;
2082
2083 KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
2084 ("Invalid gbflags 0x%x in %s", gbflags, __func__));
2085 MPASS((bp->b_flags & B_MAXPHYS) == 0);
2086 KASSERT(maxsize <= maxbcachebuf,
2087 ("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf));
2088
2089 bufkva_free(bp);
2090
2091 addr = 0;
2092 error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
2093 if (error != 0) {
2094 /*
2095 * Buffer map is too fragmented. Request the caller
2096 * to defragment the map.
2097 */
2098 return (error);
2099 }
2100 bp->b_kvabase = (caddr_t)addr;
2101 bp->b_kvasize = maxsize;
2102 counter_u64_add(bufkvaspace, bp->b_kvasize);
2103 if ((gbflags & GB_UNMAPPED) != 0) {
2104 bp->b_data = unmapped_buf;
2105 BUF_CHECK_UNMAPPED(bp);
2106 } else {
2107 bp->b_data = bp->b_kvabase;
2108 BUF_CHECK_MAPPED(bp);
2109 }
2110 return (0);
2111}
2112
2113/*
2114 * bufkva_reclaim:
2115 *
2116 * Reclaim buffer kva by freeing buffers holding kva. This is a vmem
2117 * callback that fires to avoid returning failure.
2118 */
2119static void
2120 bufkva_reclaim(vmem_t *vmem, int flags)
2121 {
2122 bool done;
2123 int q;
2124 int i;
2125
2126 done = false;
2127 for (i = 0; i < 5; i++) {
2128 for (q = 0; q < buf_domains; q++)
2129 if (buf_recycle(&bdomain[q], true) != 0)
2130 done = true;
2131 if (done)
2132 break;
2133 }
2134 return;
2135}
2136
2137/*
2138 * Attempt to initiate asynchronous I/O on read-ahead blocks. We must
2139 * clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE is set,
2140 * the buffer is valid and we do not have to do anything.
2141 */
2142static void
2143breada(struct vnode * vp, daddr_t * rablkno, int * rabsize, int cnt,
2144 struct ucred * cred, int flags, void (*ckhashfunc)(struct buf *))
2145{
2146 struct buf *rabp;
2147 struct thread *td;
2148 int i;
2149
2150 td = curthread;
2151
2152 for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
2153 if (inmem(vp, *rablkno))
2154 continue;
2155 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
2156 if ((rabp->b_flags & B_CACHE) != 0) {
2157 brelse(rabp);
2158 continue;
2159 }
2160#ifdef RACCT
2161 if (racct_enable) {
2162 PROC_LOCK(curproc);
2163 racct_add_buf(curproc, rabp, 0);
2164 PROC_UNLOCK(curproc);
2165 }
2166#endif /* RACCT */
2167 td->td_ru.ru_inblock++;
2168 rabp->b_flags |= B_ASYNC;
2169 rabp->b_flags &= ~B_INVAL;
2170 if ((flags & GB_CKHASH) != 0) {
2171 rabp->b_flags |= B_CKHASH;
2172 rabp->b_ckhashcalc = ckhashfunc;
2173 }
2174 rabp->b_ioflags &= ~BIO_ERROR;
2175 rabp->b_iocmd = BIO_READ;
2176 if (rabp->b_rcred == NOCRED && cred != NOCRED)
2177 rabp->b_rcred = crhold(cred);
2178 vfs_busy_pages(rabp, 0);
2179 BUF_KERNPROC(rabp);
2180 rabp->b_iooffset = dbtob(rabp->b_blkno);
2181 bstrategy(rabp);
2182 }
2183}
2184
2185/*
2186 * Entry point for bread() and breadn() via #defines in sys/buf.h.
2187 *
2188 * Get a buffer with the specified data. Look in the cache first. We
2189 * must clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE
2190 * is set, the buffer is valid and we do not have to do anything, see
2191 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
2192 *
2193 * Always return a NULL buffer pointer (in bpp) when returning an error.
2194 *
2195 * The blkno parameter is the logical block being requested. Normally
2196 * the mapping of logical block number to disk block address is done
2197 * by calling VOP_BMAP(). However, if the mapping is already known, the
2198 * disk block address can be passed using the dblkno parameter. If the
2199 * disk block address is not known, then the same value should be passed
2200 * for blkno and dblkno.
2201 */
2202int
2203breadn_flags(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
2204 daddr_t *rablkno, int *rabsize, int cnt, struct ucred *cred, int flags,
2205 void (*ckhashfunc)(struct buf *), struct buf **bpp)
2206{
2207 struct buf *bp;
2208 struct thread *td;
2209 int error, readwait, rv;
2210
2211 CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
2212 td = curthread;
2213 /*
2214 * Can only return NULL if GB_LOCK_NOWAIT or GB_SPARSE flags
2215 * are specified.
2216 */
2217 error = getblkx(vp, blkno, dblkno, size, 0, 0, flags, &bp);
2218 if (error != 0) {
2219 *bpp = NULL;
2220 return (error);
2221 }
2222 KASSERT(blkno == bp->b_lblkno,
2223 ("getblkx returned buffer for blkno %jd instead of blkno %jd",
2224 (intmax_t)bp->b_lblkno, (intmax_t)blkno));
2225 flags &= ~GB_NOSPARSE;
2226 *bpp = bp;
2227
2228 /*
2229 * If not found in cache, do some I/O
2230 */
2231 readwait = 0;
2232 if ((bp->b_flags & B_CACHE) == 0) {
2233#ifdef RACCT
2234 if (racct_enable) {
2235 PROC_LOCK(td->td_proc);
2236 racct_add_buf(td->td_proc, bp, 0);
2237 PROC_UNLOCK(td->td_proc);
2238 }
2239#endif /* RACCT */
2240 td->td_ru.ru_inblock++;
2241 bp->b_iocmd = BIO_READ;
2242 bp->b_flags &= ~B_INVAL;
2243 if ((flags & GB_CKHASH) != 0) {
2244 bp->b_flags |= B_CKHASH;
2245 bp->b_ckhashcalc = ckhashfunc;
2246 }
2247 if ((flags & GB_CVTENXIO) != 0)
2248 bp->b_xflags |= BX_CVTENXIO;
2249 bp->b_ioflags &= ~BIO_ERROR;
2250 if (bp->b_rcred == NOCRED && cred != NOCRED)
2251 bp->b_rcred = crhold(cred);
2252 vfs_busy_pages(bp, 0);
2253 bp->b_iooffset = dbtob(bp->b_blkno);
2254 bstrategy(bp);
2255 ++readwait;
2256 }
2257
2258 /*
2259 * Attempt to initiate asynchronous I/O on read-ahead blocks.
2260 */
2261 breada(vp, rablkno, rabsize, cnt, cred, flags, ckhashfunc);
2262
2263 rv = 0;
2264 if (readwait) {
2265 rv = bufwait(bp);
2266 if (rv != 0) {
2267 brelse(bp);
2268 *bpp = NULL;
2269 }
2270 }
2271 return (rv);
2272}
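/*
 * Editorial example (illustrative sketch, not code from this file): most
 * filesystems reach the function above through the bread() wrapper in
 * sys/buf.h, which performs the cache lookup, the read and the wait in
 * one call.  "lbn" and "bsize" are placeholders for the logical block
 * number and block size:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);		(bp is NULL on error)
 *	... consume bp->b_data ...
 *	brelse(bp);			(or bqrelse() if reuse is expected)
 */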
2273
2274/*
2275 * Write, release buffer on completion. (Done by iodone
2276 * if async). Do not bother writing anything if the buffer
2277 * is invalid.
2278 *
2279 * Note that we set B_CACHE here, indicating that buffer is
2280 * fully valid and thus cacheable. This is true even of NFS
2281 * now so we set it generally. This could be set either here
2282 * or in biodone() since the I/O is synchronous. We put it
2283 * here.
2284 */
2285int
2286bufwrite(struct buf *bp)
2287{
2288 int oldflags;
2289 struct vnode *vp;
2290 long space;
2291 int vp_md;
2292
2293 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2294 if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
2295 bp->b_flags |= B_INVAL | B_RELBUF;
2296 bp->b_flags &= ~B_CACHE;
2297 brelse(bp);
2298 return (ENXIO);
2299 }
2300 if (bp->b_flags & B_INVAL) {
2301 brelse(bp);
2302 return (0);
2303 }
2304
2305 if (bp->b_flags & B_BARRIER)
2306 atomic_add_long(&barrierwrites, 1);
2307
2308 oldflags = bp->b_flags;
2309
2310 KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
2311 ("FFS background buffer should not get here %p", bp));
2312
2313 vp = bp->b_vp;
2314 if (vp)
2315 vp_md = vp->v_vflag & VV_MD;
2316 else
2317 vp_md = 0;
2318
2319 /*
2320 * Mark the buffer clean. Increment the bufobj write count
2321 * before bundirty() call, to prevent other thread from seeing
2322 * empty dirty list and zero counter for writes in progress,
2323 * falsely indicating that the bufobj is clean.
2324 */
2325 bufobj_wref(bp->b_bufobj);
2326 bundirty(bp);
2327
2328 bp->b_flags &= ~B_DONE;
2329 bp->b_ioflags &= ~BIO_ERROR;
2330 bp->b_flags |= B_CACHE;
2331 bp->b_iocmd = BIO_WRITE;
2332
2333 vfs_busy_pages(bp, 1);
2334
2335 /*
2336 * Normal bwrites pipeline writes
2337 */
2338 bp->b_runningbufspace = bp->b_bufsize;
2339 space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
2340
2341#ifdef RACCT
2342 if (racct_enable) {
2343 PROC_LOCK(curproc);
2344 racct_add_buf(curproc, bp, 1);
2345 PROC_UNLOCK(curproc);
2346 }
2347#endif /* RACCT */
2348 curthread->td_ru.ru_oublock++;
2349 if (oldflags & B_ASYNC)
2350 BUF_KERNPROC(bp);
2351 bp->b_iooffset = dbtob(bp->b_blkno);
2352 buf_track(bp, __func__);
2353 bstrategy(bp);
2354
2355 if ((oldflags & B_ASYNC) == 0) {
2356 int rtval = bufwait(bp);
2357 brelse(bp);
2358 return (rtval);
2359 } else if (space > hirunningspace) {
2360 /*
2361 * don't allow the async write to saturate the I/O
2362 * system. We will not deadlock here because
2363 * we are blocking waiting for I/O that is already in-progress
2364 * to complete. We do not block here if it is the update
2365 * or syncer daemon trying to clean up as that can lead
2366 * to deadlock.
2367 */
2368 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
2369 waitrunningbufspace();
2370 }
2371
2372 return (0);
2373}
2374
2375void
2376bufbdflush(struct bufobj *bo, struct buf *bp)
2377{
2378 struct buf *nbp;
2379 struct bufdomain *bd;
2380
2381 bd = &bdomain[bo->bo_domain];
2382 if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh + 10) {
2383 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
2384 altbufferflushes++;
2385 } else if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh) {
2386 BO_LOCK(bo);
2387 /*
2388 * Try to find a buffer to flush.
2389 */
2390 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
2391 if ((nbp->b_vflags & BV_BKGRDINPROG) ||
2392 BUF_LOCK(nbp,
2393 LK_EXCLUSIVE | LK_NOWAIT, NULL))
2394 continue;
2395 if (bp == nbp)
2396 panic("bdwrite: found ourselves");
2397 BO_UNLOCK(bo);
2398 /* Don't countdeps with the bo lock held. */
2399 if (buf_countdeps(nbp, 0)) {
2400 BO_LOCK(bo);
2401 BUF_UNLOCK(nbp);
2402 continue;
2403 }
2404 if (nbp->b_flags & B_CLUSTEROK) {
2405 vfs_bio_awrite(nbp);
2406 } else {
2407 bremfree(nbp);
2408 bawrite(nbp);
2409 }
2410 dirtybufferflushes++;
2411 break;
2412 }
2413 if (nbp == NULL)
2414 BO_UNLOCK(bo);
2415 }
2416}
2417
2418/*
2419 * Delayed write. (Buffer is marked dirty). Do not bother writing
2420 * anything if the buffer is marked invalid.
2421 *
2422 * Note that since the buffer must be completely valid, we can safely
2423 * set B_CACHE. In fact, we have to set B_CACHE here rather than in
2424 * biodone() in order to prevent getblk from writing the buffer
2425 * out synchronously.
2426 */
2427void
2428bdwrite(struct buf *bp)
2429{
2430 struct thread *td = curthread;
2431 struct vnode *vp;
2432 struct bufobj *bo;
2433
2434 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2435 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2436 KASSERT((bp->b_flags & B_BARRIER) == 0,
2437 ("Barrier request in delayed write %p", bp));
2438
2439 if (bp->b_flags & B_INVAL) {
2440 brelse(bp);
2441 return;
2442 }
2443
2444 /*
2445 * If we have too many dirty buffers, don't create any more.
2446 * If we are wildly over our limit, then force a complete
2447 * cleanup. Otherwise, just keep the situation from getting
2448 * out of control. Note that we have to avoid a recursive
2449 * disaster and not try to clean up after our own cleanup!
2450 */
2451 vp = bp->b_vp;
2452 bo = bp->b_bufobj;
2453 if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2454 td->td_pflags |= TDP_INBDFLUSH;
2455 BO_BDFLUSH(bo, bp);
2456 td->td_pflags &= ~TDP_INBDFLUSH;
2457 } else
2458 recursiveflushes++;
2459
2460 bdirty(bp);
2461 /*
2462 * Set B_CACHE, indicating that the buffer is fully valid. This is
2463 * true even of NFS now.
2464 */
2465 bp->b_flags |= B_CACHE;
2466
2467 /*
2468 * This bmap keeps the system from needing to do the bmap later,
2469 * perhaps when the system is attempting to do a sync. Since it
2470 * is likely that the indirect block -- or whatever other datastructure
2471 * that the filesystem needs is still in memory now, it is a good
2472 * thing to do this. Note also, that if the pageout daemon is
2473 * requesting a sync -- there might not be enough memory to do
2474 * the bmap then... So, this is important to do.
2475 */
2476 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2477 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2478 }
2479
2480 buf_track(bp, __func__);
2481
2482 /*
2483 * Set the *dirty* buffer range based upon the VM system dirty
2484 * pages.
2485 *
2486 * Mark the buffer pages as clean. We need to do this here to
2487 * satisfy the vnode_pager and the pageout daemon, so that it
2488 * thinks that the pages have been "cleaned". Note that since
2489 * the pages are in a delayed write buffer -- the VFS layer
2490 * "will" see that the pages get written out on the next sync,
2491 * or perhaps the cluster will be completed.
2492 */
2493 vfs_clean_pages_dirty_buf(bp);
2494 bqrelse(bp);
2495
2496 /*
2497 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2498 * due to the softdep code.
2499 */
2500}
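/*
 * Editorial example (illustrative sketch, not code from this file): the
 * three write flavours provided here differ only in when the I/O is
 * issued and who waits for it:
 *
 *	error = bwrite(bp);	(issue now, sleep until done, then release)
 *	bawrite(bp);		(issue now, release on I/O completion)
 *	bdwrite(bp);		(mark dirty and requeue; written later by
 *				 the syncer or the buf daemon)
 */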
2501
2502/*
2503 * bdirty:
2504 *
2505 * Turn buffer into delayed write request. We must clear BIO_READ and
2506 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to
2507 * itself to properly update it in the dirty/clean lists. We mark it
2508 * B_DONE to ensure that any asynchronization of the buffer properly
2509 * clears B_DONE ( else a panic will occur later ).
2510 *
2511 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2512 * might have been set pre-getblk(). Unlike bwrite/bdwrite, bdirty()
2513 * should only be called if the buffer is known-good.
2514 *
2515 * Since the buffer is not on a queue, we do not update the numfreebuffers
2516 * count.
2517 *
2518 * The buffer must be on QUEUE_NONE.
2519 */
2520void
2521bdirty(struct buf *bp)
2522{
2523
2524 CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2525 bp, bp->b_vp, bp->b_flags);
2526 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2527 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2528 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2529 bp->b_flags &= ~(B_RELBUF);
2530 bp->b_iocmd = BIO_WRITE;
2531
2532 if ((bp->b_flags & B_DELWRI) == 0) {
2533 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2534 reassignbuf(bp);
2535 bdirtyadd(bp);
2536 }
2537}
2538
2539/*
2540 * bundirty:
2541 *
2542 * Clear B_DELWRI for buffer.
2543 *
2544 * Since the buffer is not on a queue, we do not update the numfreebuffers
2545 * count.
2546 *
2547 * The buffer must be on QUEUE_NONE.
2548 */
2549
2550void
2551bundirty(struct buf *bp)
2552{
2553
2554 CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2555 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2556 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2557 ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2558
2559 if (bp->b_flags & B_DELWRI) {
2560 bp->b_flags &= ~B_DELWRI;
2561 reassignbuf(bp);
2562 bdirtysub(bp);
2563 }
2564 /*
2565 * Since it is now being written, we can clear its deferred write flag.
2566 */
2567 bp->b_flags &= ~B_DEFERRED;
2568}
2569
2570/*
2571 * bawrite:
2572 *
2573 * Asynchronous write. Start output on a buffer, but do not wait for
2574 * it to complete. The buffer is released when the output completes.
2575 *
2576 * bwrite() ( or the VOP routine anyway ) is responsible for handling
2577 * B_INVAL buffers. Not us.
2578 */
2579void
2580bawrite(struct buf *bp)
2581{
2582
2583 bp->b_flags |= B_ASYNC;
2584 (void) bwrite(bp);
2585}
2586
2587/*
2588 * babarrierwrite:
2589 *
2590 * Asynchronous barrier write. Start output on a buffer, but do not
2591 * wait for it to complete. Place a write barrier after this write so
2592 * that this buffer and all buffers written before it are committed to
2593 * the disk before any buffers written after this write are committed
2594 * to the disk. The buffer is released when the output completes.
2595 */
2596void
2597 babarrierwrite(struct buf *bp)
2598 {
2599
2600 bp->b_flags |= B_ASYNC | B_BARRIER;
2601 (void) bwrite(bp);
2602}
2603
2604/*
2605 * bbarrierwrite:
2606 *
2607 * Synchronous barrier write. Start output on a buffer and wait for
2608 * it to complete. Place a write barrier after this write so that
2609 * this buffer and all buffers written before it are committed to
2610 * the disk before any buffers written after this write are committed
2611 * to the disk. The buffer is released when the output completes.
2612 */
2613int
2614 bbarrierwrite(struct buf *bp)
2615 {
2616
2617 bp->b_flags |= B_BARRIER;
2618 return (bwrite(bp));
2619}
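/*
 * Editorial example (illustrative sketch, not code from this file): a
 * barrier write lets a caller order two dependent updates without
 * waiting for the first one to finish:
 *
 *	babarrierwrite(bp1);	(bp1 and all earlier writes are committed
 *				 before anything issued after the barrier)
 *	bawrite(bp2);		(may be issued immediately; ordered after bp1)
 */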
2620
2621/*
2622 * bwillwrite:
2623 *
2624 * Called prior to the locking of any vnodes when we are expecting to
2625 * write. We do not want to starve the buffer cache with too many
2626 * dirty buffers so we block here. By blocking prior to the locking
2627 * of any vnodes we attempt to avoid the situation where a locked vnode
2628 * prevents the various system daemons from flushing related buffers.
2629 */
2630void
2631 bwillwrite(void)
2632 {
2633
2634 if (buf_dirty_count_severe()) {
2635 mtx_lock(&bdirtylock);
2636 while (buf_dirty_count_severe()) {
2637 bdirtywait = 1;
2638 msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2639 "flswai", 0);
2640 }
2641 mtx_unlock(&bdirtylock);
2642 }
2643}
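/*
 * Editorial example (illustrative sketch, not code from this file): a
 * write-type code path calls bwillwrite() before acquiring any vnode
 * lock, so that throttling on the dirty buffer count never happens with
 * a vnode held:
 *
 *	bwillwrite();				(may sleep in "flswai")
 *	error = vn_start_write(vp, &mp, V_WAIT);
 *	... lock vp and perform the write ...
 */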
2644
2645/*
2646 * Return true if we have too many dirty buffers.
2647 */
2648int
2649 buf_dirty_count_severe(void)
2650 {
2651
2652 return (!BIT_EMPTY(BUF_DOMAINS, &bdhidirty));
2653}
2654
2655/*
2656 * brelse:
2657 *
2658 * Release a busy buffer and, if requested, free its resources. The
2659 * buffer will be stashed in the appropriate bufqueue[] allowing it
2660 * to be accessed later as a cache entity or reused for other purposes.
2661 */
2662void
2663brelse(struct buf *bp)
2664{
2665 struct mount *v_mnt;
2666 int qindex;
2667
2668 /*
2669 * Many functions erroneously call brelse with a NULL bp under rare
2670 * error conditions. Simply return when called with a NULL bp.
2671 */
2672 if (bp == NULL)
2673 return;
2674 CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2675 bp, bp->b_vp, bp->b_flags);
2676 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2677 ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2678 KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2679 ("brelse: non-VMIO buffer marked NOREUSE"));
2680
2681 if (BUF_LOCKRECURSED(bp)) {
2682 /*
2683 * Do not process, in particular, do not handle the
2684 * B_INVAL/B_RELBUF and do not release to free list.
2685 */
2686 BUF_UNLOCK(bp);
2687 return;
2688 }
2689
2690 if (bp->b_flags & B_MANAGED) {
2691 bqrelse(bp);
2692 return;
2693 }
2694
2695 if (LIST_EMPTY(&bp->b_dep)) {
2696 bp->b_flags &= ~B_IOSTARTED;
2697 } else {
2698 KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2699 ("brelse: SU io not finished bp %p", bp));
2700 }
2701
2702 if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2703 BO_LOCK(bp->b_bufobj);
2704 bp->b_vflags &= ~BV_BKGRDERR;
2705 BO_UNLOCK(bp->b_bufobj);
2706 bdirty(bp);
2707 }
2708
2709 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2710 (bp->b_flags & B_INVALONERR)) {
2711 /*
2712 * Forced invalidation of dirty buffer contents, to be used
2713 * after a failed write in the rare case that the loss of the
2714 * contents is acceptable. The buffer is invalidated and
2715 * freed.
2716 */
2717 bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
2718 bp->b_flags &= ~(B_ASYNC | B_CACHE);
2719 }
2720
2721 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2722 (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2723 !(bp->b_flags & B_INVAL)) {
2724 /*
2725 * Failed write, redirty. All errors except ENXIO (which
2726 * means the device is gone) are treated as being
2727 * transient.
2728 *
2729 * XXX Treating EIO as transient is not correct; the
2730 * contract with the local storage device drivers is that
2731 * they will only return EIO once the I/O is no longer
2732 * retriable. Network I/O also respects this through the
2733 * guarantees of TCP and/or the internal retries of NFS.
2734 * ENOMEM might be transient, but we also have no way of
2735 * knowing when it's ok to retry/reschedule. In general,
2736 * this entire case should be made obsolete through better
2737 * error handling/recovery and resource scheduling.
2738 *
2739 * Do this also for buffers that failed with ENXIO, but have
2740 * non-empty dependencies - the soft updates code might need
2741 * to access the buffer to untangle them.
2742 *
2743 * Must clear BIO_ERROR to prevent pages from being scrapped.
2744 */
2745 bp->b_ioflags &= ~BIO_ERROR;
2746 bdirty(bp);
2747 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2748 (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2749 /*
2750 * Either a failed read I/O, or we were asked to free or not
2751 * cache the buffer, or we failed to write to a device that's
2752 * no longer present.
2753 */
2754 bp->b_flags |= B_INVAL;
2755 if (!LIST_EMPTY(&bp->b_dep))
2756 buf_deallocate(bp);
2757 if (bp->b_flags & B_DELWRI)
2758 bdirtysub(bp);
2759 bp->b_flags &= ~(B_DELWRI | B_CACHE);
2760 if ((bp->b_flags & B_VMIO) == 0) {
2761 allocbuf(bp, 0);
2762 if (bp->b_vp)
2763 brelvp(bp);
2764 }
2765 }
2766
2767 /*
2768 * We must clear B_RELBUF if B_DELWRI is set. If vfs_vmio_truncate()
2769 * is called with B_DELWRI set, the underlying pages may wind up
2770 * getting freed causing a previous write (bdwrite()) to get 'lost'
2771 * because pages associated with a B_DELWRI bp are marked clean.
2772 *
2773 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2774 * if B_DELWRI is set.
2775 */
2776 if (bp->b_flags & B_DELWRI)
2777 bp->b_flags &= ~B_RELBUF;
2778
2779 /*
2780 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer
2781 * constituted, not even NFS buffers now. Two flags affect this. If
2782 * B_INVAL, the struct buf is invalidated but the VM object is kept
2783 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2784 *
2785 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2786 * invalidated. BIO_ERROR cannot be set for a failed write unless the
2787 * buffer is also B_INVAL because it hits the re-dirtying code above.
2788 *
2789 * Normally we can do this whether a buffer is B_DELWRI or not. If
2790 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2791 * the commit state and we cannot afford to lose the buffer. If the
2792 * buffer has a background write in progress, we need to keep it
2793 * around to prevent it from being reconstituted and starting a second
2794 * background write.
2795 */
2796
2797 v_mnt = bp->b_vp != NULL ? bp->b_vp->v_mount : NULL;
2798
2799 if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2800 (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2801 (v_mnt == NULL || (v_mnt->mnt_vfc->vfc_flags & VFCF_NETWORK) == 0 ||
2802 vn_isdisk(bp->b_vp) || (bp->b_flags & B_DELWRI) == 0)) {
2803 vfs_vmio_invalidate(bp);
2804 allocbuf(bp, 0);
2805 }
2806
2807 if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2808 (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2809 allocbuf(bp, 0);
2810 bp->b_flags &= ~B_NOREUSE;
2811 if (bp->b_vp != NULL)
2812 brelvp(bp);
2813 }
2814
2815 /*
2816 * If the buffer has junk contents signal it and eventually
2817 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2818 * doesn't find it.
2819 */
2820 if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2821 (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2822 bp->b_flags |= B_INVAL;
2823 if (bp->b_flags & B_INVAL) {
2824 if (bp->b_flags & B_DELWRI)
2825 bundirty(bp);
2826 if (bp->b_vp)
2827 brelvp(bp);
2828 }
2829
2830 buf_track(bp, __func__);
2831
2832 /* buffers with no memory */
2833 if (bp->b_bufsize == 0) {
2834 buf_free(bp);
2835 return;
2836 }
2837 /* buffers with junk contents */
2838 if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2839 (bp->b_ioflags & BIO_ERROR)) {
2840 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2841 if (bp->b_vflags & BV_BKGRDINPROG)
2842 panic("losing buffer 2");
2843 qindex = QUEUE_CLEAN;
2844 bp->b_flags |= B_AGE;
2845 /* remaining buffers */
2846 } else if (bp->b_flags & B_DELWRI)
2847 qindex = QUEUE_DIRTY;
2848 else
2849 qindex = QUEUE_CLEAN;
2850
2851 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2852 panic("brelse: not dirty");
2853
2854 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT);
2855 bp->b_xflags &= ~(BX_CVTENXIO);
2856 /* binsfree unlocks bp. */
2857 binsfree(bp, qindex);
2858}
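/*
 * Editorial example (illustrative sketch, not code from this file):
 * callers steer brelse() with buffer flags; when the contents should
 * not be kept cached, for example after a throwaway or failed read,
 * the usual pattern is:
 *
 *	bp->b_flags |= B_NOCACHE;	(or B_RELBUF to drop the VMIO pages)
 *	brelse(bp);			(invalidates and frees the buffer)
 */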
2859
2860/*
2861 * Release a buffer back to the appropriate queue but do not try to free
2862 * it. The buffer is expected to be used again soon.
2863 *
2864 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2865 * biodone() to requeue an async I/O on completion. It is also used when
2866 * known good buffers need to be requeued but we think we may need the data
2867 * again soon.
2868 *
2869 * XXX we should be able to leave the B_RELBUF hint set on completion.
2870 */
2871void
2872bqrelse(struct buf *bp)
2873{
2874 int qindex;
2875
2876 CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2877 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2878 ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2879
2880 qindex = QUEUE_NONE;
2881 if (BUF_LOCKRECURSED(bp)) {
2882 /* do not release to free list */
2883 BUF_UNLOCK(bp);
2884 return;
2885 }
2886 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2887 bp->b_xflags &= ~(BX_CVTENXIO);
2888
2889 if (LIST_EMPTY(&bp->b_dep)) {
2890 bp->b_flags &= ~B_IOSTARTED;
2891 } else {
2892 KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2893 ("bqrelse: SU io not finished bp %p", bp));
2894 }
2895
2896 if (bp->b_flags & B_MANAGED) {
2897 if (bp->b_flags & B_REMFREE)
2898 bremfreef(bp);
2899 goto out;
2900 }
2901
2902 /* buffers with stale but valid contents */
2903 if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2904 BV_BKGRDERR)) == BV_BKGRDERR) {
2905 BO_LOCK(bp->b_bufobj);
2906 bp->b_vflags &= ~BV_BKGRDERR;
2907 BO_UNLOCK(bp->b_bufobj);
2908 qindex = QUEUE_DIRTY;
2909 } else {
2910 if ((bp->b_flags & B_DELWRI) == 0 &&
2911 (bp->b_xflags & BX_VNDIRTY))
2912 panic("bqrelse: not dirty");
2913 if ((bp->b_flags & B_NOREUSE) != 0) {
2914 brelse(bp);
2915 return;
2916 }
2917 qindex = QUEUE_CLEAN;
2918 }
2919 buf_track(bp, __func__);
2920 /* binsfree unlocks bp. */
2921 binsfree(bp, qindex);
2922 return;
2923
2924out:
2925 buf_track(bp, __func__);
2926 /* unlock */
2927 BUF_UNLOCK(bp);
2928}
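/*
 * Editorial example (illustrative sketch, not code from this file): the
 * rule of thumb when a caller is finished with a locked buffer is:
 *
 *	brelse(bp);	(contents no longer interesting, allow reuse)
 *	bqrelse(bp);	(contents likely needed again soon, keep cached)
 */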
2929
2930/*
2931 * Complete I/O to a VMIO backed page. Validate the pages as appropriate,
2932 * restore bogus pages.
2933 */
2934static void
2935 vfs_vmio_iodone(struct buf *bp)
2936 {
2937 vm_ooffset_t foff;
2938 vm_page_t m;
2939 vm_object_t obj;
2940 struct vnode *vp __unused;
2941 int i, iosize, resid;
2942 bool bogus;
2943
2944 obj = bp->b_bufobj->bo_object;
2945 KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
2946 ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2947 blockcount_read(&obj->paging_in_progress), bp->b_npages));
2948
2949 vp = bp->b_vp;
2950 VNPASS(vp->v_holdcnt > 0, vp);
2951 VNPASS(vp->v_object != NULL, vp);
2952
2953 foff = bp->b_offset;
2954 KASSERT(bp->b_offset != NOOFFSET,
2955 ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2956
2957 bogus = false;
2958 iosize = bp->b_bcount - bp->b_resid;
2959 for (i = 0; i < bp->b_npages; i++) {
2960 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2961 if (resid > iosize)
2962 resid = iosize;
2963
2964 /*
2965 * cleanup bogus pages, restoring the originals
2966 */
2967 m = bp->b_pages[i];
2968 if (m == bogus_page) {
2969 bogus = true;
2970 m = vm_page_relookup(obj, OFF_TO_IDX(foff));
2971 if (m == NULL)
2972 panic("biodone: page disappeared!");
2973 bp->b_pages[i] = m;
2974 } else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2975 /*
2976 * In the write case, the valid and clean bits are
2977 * already changed correctly ( see bdwrite() ), so we
2978 * only need to do this here in the read case.
2979 */
2980 KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2981 resid)) == 0, ("vfs_vmio_iodone: page %p "
2982 "has unexpected dirty bits", m));
2983 vfs_page_set_valid(bp, foff, m);
2984 }
2985 KASSERT(OFF_TO_IDX(foff) == m->pindex,
2986 ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2987 (intmax_t)foff, (uintmax_t)m->pindex));
2988
2989 vm_page_sunbusy(m);
2990 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2991 iosize -= resid;
2992 }
2993 vm_object_pip_wakeupn(obj, bp->b_npages);
2994 if (bogus && buf_mapped(bp)) {
2995 BUF_CHECK_MAPPED(bp);
2996 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2997 bp->b_pages, bp->b_npages);
2998 }
2999}
3000
3001/*
3002 * Perform page invalidation when a buffer is released. The fully invalid
3003 * pages will be reclaimed later in vfs_vmio_truncate().
3004 */
3005static void
3006 vfs_vmio_invalidate(struct buf *bp)
3007 {
3008 vm_object_t obj;
3009 vm_page_t m;
3010 int flags, i, resid, poffset, presid;
3011
3012 if (buf_mapped(bp)) {
3013 BUF_CHECK_MAPPED(bp);
3014 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
3015 } else
3016 BUF_CHECK_UNMAPPED(bp);
3017 /*
3018 * Get the base offset and length of the buffer. Note that
3019 * in the VMIO case if the buffer block size is not
3020 * page-aligned then b_data pointer may not be page-aligned.
3021 * But our b_pages[] array *IS* page aligned.
3022 *
3023 * block sizes less than DEV_BSIZE (usually 512) are not
3024 * supported due to the page granularity bits (m->valid,
3025 * m->dirty, etc...).
3026 *
3027 * See man buf(9) for more information
3028 */
3029 flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3030 obj = bp->b_bufobj->bo_object;
3031 resid = bp->b_bufsize;
3032 poffset = bp->b_offset & PAGE_MASK;
3033 VM_OBJECT_WLOCK(obj);
3034 for (i = 0; i < bp->b_npages; i++) {
3035 m = bp->b_pages[i];
3036 if (m == bogus_page)
3037 panic("vfs_vmio_invalidate: Unexpected bogus page.");
3038 bp->b_pages[i] = NULL;
3039
3040 presid = resid > (PAGE_SIZE - poffset) ?
3041 (PAGE_SIZE - poffset) : resid;
3042 KASSERT(presid >= 0, ("brelse: extra page"));
3043 vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
3044 if (pmap_page_wired_mappings(m) == 0)
3045 vm_page_set_invalid(m, poffset, presid);
3046 vm_page_sunbusy(m);
3047 vm_page_release_locked(m, flags);
3048 resid -= presid;
3049 poffset = 0;
3050 }
3051 VM_OBJECT_WUNLOCK(obj);
3052 bp->b_npages = 0;
3053}
3054
3055/*
3056 * Page-granular truncation of an existing VMIO buffer.
3057 */
3058static void
3059vfs_vmio_truncate(struct buf *bp, int desiredpages)
3060{
3061 vm_object_t obj;
3062 vm_page_t m;
3063 int flags, i;
3064
3065 if (bp->b_npages == desiredpages)
3066 return;
3067
3068 if (buf_mapped(bp)) {
3069 BUF_CHECK_MAPPED(bp);
3070 pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
3071 (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
3072 } else
3073 BUF_CHECK_UNMAPPED(bp);
3074
3075 /*
3076 * The object lock is needed only if we will attempt to free pages.
3077 */
3078 flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3079 if ((bp->b_flags & B_DIRECT) != 0) {
3080 flags |= VPR_TRYFREE;
3081 obj = bp->b_bufobj->bo_object;
3082 VM_OBJECT_WLOCK(obj);
3083 } else {
3084 obj = NULL;
3085 }
3086 for (i = desiredpages; i < bp->b_npages; i++) {
3087 m = bp->b_pages[i];
3088 KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
3089 bp->b_pages[i] = NULL;
3090 if (obj != NULL)
3091 vm_page_release_locked(m, flags);
3092 else
3093 vm_page_release(m, flags);
3094 }
3095 if (obj != NULL)
3096 VM_OBJECT_WUNLOCK(obj);
3097 bp->b_npages = desiredpages;
3098}
3099
3100/*
3101 * Byte granular extension of VMIO buffers.
3102 */
3103static void
3104vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
3105{
3106 /*
3107 * We are growing the buffer, possibly in a
3108 * byte-granular fashion.
3109 */
3110 vm_object_t obj;
3111 vm_offset_t toff;
3112 vm_offset_t tinc;
3113 vm_page_t m;
3114
3115 /*
3116 * Step 1, bring in the VM pages from the object, allocating
3117 * them if necessary. We must clear B_CACHE if these pages
3118 * are not valid for the range covered by the buffer.
3119 */
3120 obj = bp->b_bufobj->bo_object;
3121 if (bp->b_npages < desiredpages) {
3122 KASSERT(desiredpages <= atop(maxbcachebuf),
3123 ("vfs_vmio_extend past maxbcachebuf %p %d %u",
3124 bp, desiredpages, maxbcachebuf));
3125
3126 /*
3127 * We must allocate system pages since blocking
3128 * here could interfere with paging I/O, no
3129 * matter which process we are.
3130 *
3131 * Only exclusive busy can be tested here.
3132 * Blocking on shared busy might lead to
3133 * deadlocks once allocbuf() is called after
3134 * pages are vfs_busy_pages().
3135 */
3136 (void)vm_page_grab_pages_unlocked(obj,
3137 OFF_TO_IDX(bp->b_offset) + bp->b_npages,
3138 VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
3139 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
3140 &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
3141 bp->b_npages = desiredpages;
3142 }
3143
3144 /*
3145 * Step 2. We've loaded the pages into the buffer,
3146 * we have to figure out if we can still have B_CACHE
3147 * set. Note that B_CACHE is set according to the
3148 * byte-granular range ( bcount and size ), not the
3149 * aligned range ( newbsize ).
3150 *
3151 * The VM test is against m->valid, which is DEV_BSIZE
3152 * aligned. Needless to say, the validity of the data
3153 * needs to also be DEV_BSIZE aligned. Note that this
3154 * fails with NFS if the server or some other client
3155 * extends the file's EOF. If our buffer is resized,
3156 * B_CACHE may remain set! XXX
3157 */
3158 toff = bp->b_bcount;
3159 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3160 while ((bp->b_flags & B_CACHE) && toff < size) {
3161 vm_pindex_t pi;
3162
3163 if (tinc > (size - toff))
3164 tinc = size - toff;
3165 pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
3166 m = bp->b_pages[pi];
3167 vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
3168 toff += tinc;
3169 tinc = PAGE_SIZE;
3170 }
3171
3172 /*
3173 * Step 3, fixup the KVA pmap.
3174 */
3175 if (buf_mapped(bp))
3176 bpmap_qenter(bp);
3177 else
3178 BUF_CHECK_UNMAPPED(bp);
3179}
3180
3181/*
3182 * Check to see if a block at a particular lbn is available for a clustered
3183 * write.
3184 */
3185static int
3186vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
3187{
3188 struct buf *bpa;
3189 int match;
3190
3191 match = 0;
3192
3193 /* If the buf isn't in core skip it */
3194 if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
3195 return (0);
3196
3197 /* If the buf is busy we don't want to wait for it */
3198 if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
3199 return (0);
3200
3201 /* Only cluster with valid clusterable delayed write buffers */
3202 if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
3203 (B_DELWRI | B_CLUSTEROK))
3204 goto done;
3205
3206 if (bpa->b_bufsize != size)
3207 goto done;
3208
3209 /*
3210 * Check to see if it is in the expected place on disk and that the
3211 * block has been mapped.
3212 */
3213 if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
3214 match = 1;
3215done:
3216 BUF_UNLOCK(bpa);
3217 return (match);
3218}
3219
3220/*
3221 * vfs_bio_awrite:
3222 *
3223 * Implement clustered async writes for clearing out B_DELWRI buffers.
3224 * This is much better than the old way of writing only one buffer at
3225 * a time. Note that we may not be presented with the buffers in the
3226 * correct order, so we search for the cluster in both directions.
3227 */
3228int
3229 vfs_bio_awrite(struct buf *bp)
3230 {
3231 struct bufobj *bo;
3232 int i;
3233 int j;
3234 daddr_t lblkno = bp->b_lblkno;
3235 struct vnode *vp = bp->b_vp;
3236 int ncl;
3237 int nwritten;
3238 int size;
3239 int maxcl;
3240 int gbflags;
3241
3242 bo = &vp->v_bufobj;
3243 gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
3244 /*
3245 * right now we support clustered writing only to regular files. If
3246 * we find a clusterable block we could be in the middle of a cluster
3247 * rather than at the beginning.
3248 */
3249 if ((vp->v_type == VREG) &&
3250 (vp->v_mount != 0) && /* Only on nodes that have the size info */
3251 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
3252 size = vp->v_mount->mnt_stat.f_iosize;
3253 maxcl = maxphys / size;
3254
3255 BO_RLOCK(bo);
3256 for (i = 1; i < maxcl; i++)
3257 if (vfs_bio_clcheck(vp, size, lblkno + i,
3258 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
3259 break;
3260
3261 for (j = 1; i + j <= maxcl && j <= lblkno; j++)
3262 if (vfs_bio_clcheck(vp, size, lblkno - j,
3263 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
3264 break;
3265 BO_RUNLOCK(bo);
3266 --j;
3267 ncl = i + j;
3268 /*
3269 * this is a possible cluster write
3270 */
3271 if (ncl != 1) {
3272 BUF_UNLOCK(bp);
3273 nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
3274 gbflags);
3275 return (nwritten);
3276 }
3277 }
3278 bremfree(bp);
3279 bp->b_flags |= B_ASYNC;
3280 /*
3281 * default (old) behavior, writing out only one block
3282 *
3283 * XXX returns b_bufsize instead of b_bcount for nwritten?
3284 */
3285 nwritten = bp->b_bufsize;
3286 (void) bwrite(bp);
3287
3288 return (nwritten);
3289}
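/*
 * Editorial example (illustrative sketch, not code from this file): a
 * flusher that holds the buffer lock on a delayed write typically lets
 * vfs_bio_awrite() pull in the neighbouring blocks, as bufbdflush() and
 * flushbufqueues() do elsewhere in this file:
 *
 *	if (bp->b_flags & B_CLUSTEROK)
 *		nwritten = vfs_bio_awrite(bp);	(may write a whole cluster)
 *	else {
 *		bremfree(bp);
 *		bawrite(bp);			(single-buffer async write)
 *	}
 */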
3290
3291/*
3292 * getnewbuf_kva:
3293 *
3294 * Allocate KVA for an empty buf header according to gbflags.
3295 */
3296static int
3297getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
3298{
3299
3300 if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
3301 /*
3302 * In order to keep fragmentation sane we only allocate kva
3303 * in BKVASIZE chunks. XXX with vmem we can do page size.
3304 */
3305 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
3306
3307 if (maxsize != bp->b_kvasize &&
3308 bufkva_alloc(bp, maxsize, gbflags))
3309 return (ENOSPC);
3310 }
3311 return (0);
3312}
3313
3314/*
3315 * getnewbuf:
3316 *
3317 * Find and initialize a new buffer header, freeing up existing buffers
3318 * in the bufqueues as necessary. The new buffer is returned locked.
3319 *
3320 * We block if:
3321 * We have insufficient buffer headers
3322 * We have insufficient buffer space
3323 * buffer_arena is too fragmented ( space reservation fails )
3324 * If we have to flush dirty buffers ( but we try to avoid this )
3325 *
3326 * The caller is responsible for releasing the reserved bufspace after
3327 * allocbuf() is called.
3328 */
3329static struct buf *
3330getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
3331{
3332 struct bufdomain *bd;
3333 struct buf *bp;
3334 bool metadata, reserved;
3335
3336 bp = NULL;
3337 KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3338 ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3339 if (!unmapped_buf_allowed)
3340 gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3341
3342 if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
3343 vp->v_type == VCHR)
3344 metadata = true;
3345 else
3346 metadata = false;
3347 if (vp == NULL)
3348 bd = &bdomain[0];
3349 else
3350 bd = &bdomain[vp->v_bufobj.bo_domain];
3351
3352 counter_u64_add(getnewbufcalls, 1);
3353 reserved = false;
3354 do {
3355 if (reserved == false &&
3356 bufspace_reserve(bd, maxsize, metadata) != 0) {
3357 counter_u64_add(getnewbufrestarts, 1);
3358 continue;
3359 }
3360 reserved = true;
3361 if ((bp = buf_alloc(bd)) == NULL) {
3362 counter_u64_add(getnewbufrestarts, 1);
3363 continue;
3364 }
3365 if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
3366 return (bp);
3367 break;
3368 } while (buf_recycle(bd, false) == 0);
3369
3370 if (reserved)
3371 bufspace_release(bd, maxsize);
3372 if (bp != NULL) {
3373 bp->b_flags |= B_INVAL;
3374 brelse(bp);
3375 }
3376 bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
3377
3378 return (NULL);
3379}
3380
3381/*
3382 * buf_daemon:
3383 *
3384 * buffer flushing daemon. Buffers are normally flushed by the
3385 * update daemon but if it cannot keep up this process starts to
3386 * take the load in an attempt to prevent getnewbuf() from blocking.
3387 */
3388static struct kproc_desc buf_kp = {
3389 "bufdaemon",
3390 buf_daemon,
3391 &bufdaemonproc
3392};
3393SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
3394
3395static int
3396buf_flush(struct vnode *vp, struct bufdomain *bd, int target)
3397{
3398 int flushed;
3399
3400 flushed = flushbufqueues(vp, bd, target, 0);
3401 if (flushed == 0) {
3402 /*
3403 * Could not find any buffers without rollback
3404 * dependencies, so just write the first one
3405 * in the hopes of eventually making progress.
3406 */
3407 if (vp != NULL && target > 2)
3408 target /= 2;
3409 flushbufqueues(vp, bd, target, 1);
3410 }
3411 return (flushed);
3412}
3413
3414static void
3415buf_daemon_shutdown(void *arg __unused, int howto __unused)
3416{
3417 int error;
3418
3419 mtx_lock(&bdlock);
3420 bd_shutdown = true;
3421 wakeup(&bd_request);
3422 error = msleep(&bd_shutdown, &bdlock, 0, "buf_daemon_shutdown",
3423 60 * hz);
3424 mtx_unlock(&bdlock);
3425 if (error != 0)
3426 printf("bufdaemon wait error: %d\n", error);
3427}
3428
3429static void
3430 buf_daemon(void)
3431 {
3432 struct bufdomain *bd;
3433 int speedupreq;
3434 int lodirty;
3435 int i;
3436
3437 /*
3438 * This process needs to be suspended prior to shutdown sync.
3439 */
3440 EVENTHANDLER_REGISTER(shutdown_pre_sync, buf_daemon_shutdown, NULL,
3441 SHUTDOWN_PRI_LAST + 100);
3442
3443 /*
3444 * Start the buf clean daemons as children threads.
3445 */
3446 for (i = 0 ; i < buf_domains; i++) {
3447 int error;
3448
3449 error = kthread_add((void (*)(void *))bufspace_daemon,
3450 &bdomain[i], curproc, NULL, 0, 0, "bufspacedaemon-%d", i);
3451 if (error)
3452 panic("error %d spawning bufspace daemon", error);
3453 }
3454
3455 /*
3456 * This process is allowed to take the buffer cache to the limit
3457 */
3458 curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3459 mtx_lock(&bdlock);
3460 while (!bd_shutdown) {
3461 bd_request = 0;
3462 mtx_unlock(&bdlock);
3463
3464 /*
3465 * Save speedupreq for this pass and reset to capture new
3466 * requests.
3467 */
3468 speedupreq = bd_speedupreq;
3469 bd_speedupreq = 0;
3470
3471 /*
3472 * Flush each domain sequentially according to its level and
3473 * the speedup request.
3474 */
3475 for (i = 0; i < buf_domains; i++) {
3476 bd = &bdomain[i];
3477 if (speedupreq)
3478 lodirty = bd->bd_numdirtybuffers / 2;
3479 else
3480 lodirty = bd->bd_lodirtybuffers;
3481 while (bd->bd_numdirtybuffers > lodirty) {
3482 if (buf_flush(NULL, bd,
3483 bd->bd_numdirtybuffers - lodirty) == 0)
3484 break;
3485 kern_yield(PRI_USER);
3486 }
3487 }
3488
3489 /*
3490 * Only clear bd_request if we have reached our low water
3491 * mark. The buf_daemon normally waits 1 second and
3492 * then incrementally flushes any dirty buffers that have
3493 * built up, within reason.
3494 *
3495 * If we were unable to hit our low water mark and couldn't
3496 * find any flushable buffers, we sleep for a short period
3497 * to avoid endless loops on unlockable buffers.
3498 */
3499 mtx_lock(&bdlock);
3500 if (bd_shutdown)
3501 break;
3502 if (BIT_EMPTY(BUF_DOMAINS, &bdlodirty)) {
3503 /*
3504 * We reached our low water mark, reset the
3505 * request and sleep until we are needed again.
3506 * The sleep is just so the suspend code works.
3507 */
3508 bd_request = 0;
3509 /*
3510 * Do an extra wakeup in case dirty threshold
3511 * changed via sysctl and the explicit transition
3512 * out of shortfall was missed.
3513 */
3514 bdirtywakeup();
3515 if (runningbufspace <= lorunningspace)
3516 runningwakeup();
3517 msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3518 } else {
3519 /*
3520 * We couldn't find any flushable dirty buffers but
3521 * still have too many dirty buffers, we
3522 * have to sleep and try again. (rare)
3523 */
3524 msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3525 }
3526 }
3527 wakeup(&bd_shutdown);
3528 mtx_unlock(&bdlock);
3529 kthread_exit();
3530}
3531
3532/*
3533 * flushbufqueues:
3534 *
3535 * Try to flush a buffer in the dirty queue. We must be careful to
3536 * free up B_INVAL buffers instead of write them, which NFS is
3537 * particularly sensitive to.
3538 */
3539static int flushwithdeps = 0;
3540SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW | CTLFLAG_STATS,
3541 &flushwithdeps, 0,
3542 "Number of buffers flushed with dependencies that require rollbacks");
3543
3544static int
3545flushbufqueues(struct vnode *lvp, struct bufdomain *bd, int target,
3546 int flushdeps)
3547{
3548 struct bufqueue *bq;
3549 struct buf *sentinel;
3550 struct vnode *vp;
3551 struct mount *mp;
3552 struct buf *bp;
3553 int hasdeps;
3554 int flushed;
3555 int error;
3556 bool unlock;
3557
3558 flushed = 0;
3559 bq = &bd->bd_dirtyq;
3560 bp = NULL;
3561 sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3562 sentinel->b_qindex = QUEUE_SENTINEL;
3563 BQ_LOCK(bq);
3564 TAILQ_INSERT_HEAD(&bq->bq_queue, sentinel, b_freelist);
3565 BQ_UNLOCK(bq);
3566 while (flushed != target) {
3567 maybe_yield();
3568 BQ_LOCK(bq);
3569 bp = TAILQ_NEXT(sentinel, b_freelist);
3570 if (bp != NULL) {
3571 TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3572 TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel,
3573 b_freelist);
3574 } else {
3575 BQ_UNLOCK(bq);
3576 break;
3577 }
3578 /*
3579 * Skip sentinels inserted by other invocations of the
3580 * flushbufqueues(), taking care to not reorder them.
3581 *
3582 * Only flush the buffers that belong to the
3583 * vnode locked by the curthread.
3584 */
3585 if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3586 bp->b_vp != lvp)) {
3587 BQ_UNLOCK(bq);
3588 continue;
3589 }
3590 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3591 BQ_UNLOCK(bq);
3592 if (error != 0)
3593 continue;
3594
3595 /*
3596 * BKGRDINPROG can only be set with the buf and bufobj
3597 * locks both held. We tolerate a race to clear it here.
3598 */
3599 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3600 (bp->b_flags & B_DELWRI) == 0) {
3601 BUF_UNLOCK(bp);
3602 continue;
3603 }
3604 if (bp->b_flags & B_INVAL) {
3605 bremfreef(bp);
3606 brelse(bp);
3607 flushed++;
3608 continue;
3609 }
3610
3611 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3612 if (flushdeps == 0) {
3613 BUF_UNLOCK(bp);
3614 continue;
3615 }
3616 hasdeps = 1;
3617 } else
3618 hasdeps = 0;
3619 /*
3620 * We must hold the lock on a vnode before writing
3621 * one of its buffers. Otherwise we may confuse, or
3622 * in the case of a snapshot vnode, deadlock the
3623 * system.
3624 *
3625 * The lock order here is the reverse of the normal order
3626 * of vnode lock followed by buf lock. This is ok because
3627 * the NOWAIT will prevent deadlock.
3628 */
3629 vp = bp->b_vp;
3630 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3631 BUF_UNLOCK(bp);
3632 continue;
3633 }
3634 if (lvp == NULL) {
3635 unlock = true;
3636 error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3637 } else {
3638 ASSERT_VOP_LOCKED(vp, "getbuf");
3639 unlock = false;
3640 error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3641 vn_lock(vp, LK_TRYUPGRADE);
3642 }
3643 if (error == 0) {
3644 CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3645 bp, bp->b_vp, bp->b_flags);
3646 if (curproc == bufdaemonproc) {
3647 vfs_bio_awrite(bp);
3648 } else {
3649 bremfree(bp);
3650 bwrite(bp);
3651 counter_u64_add(notbufdflushes, 1);
3652 }
3653 vn_finished_write(mp);
3654 if (unlock)
3655 VOP_UNLOCK(vp);
3656 flushwithdeps += hasdeps;
3657 flushed++;
3658
3659 /*
3660 * Sleeping on runningbufspace while holding
3661 * vnode lock leads to deadlock.
3662 */
3663 if (curproc == bufdaemonproc &&
3664 runningbufspace > hirunningspace)
3665 waitrunningbufspace();
3666 continue;
3667 }
3668 vn_finished_write(mp);
3669 BUF_UNLOCK(bp);
3670 }
3671 BQ_LOCK(bq);
3672 TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3673 BQ_UNLOCK(bq);
3674 free(sentinel, M_TEMP);
3675 return (flushed);
3676}
3677
3678/*
3679 * Check to see if a block is currently memory resident.
3680 */
3681struct buf *
3682incore(struct bufobj *bo, daddr_t blkno)
3683{
3684 return (gbincore_unlocked(bo, blkno));
3685}
3686
3687/*
3688 * Returns true if no I/O is needed to access the
3689 * associated VM object. This is like incore except
3690 * it also hunts around in the VM system for the data.
3691 */
3692bool
3693inmem(struct vnode * vp, daddr_t blkno)
3694{
3695 vm_object_t obj;
3696 vm_offset_t toff, tinc, size;
3697 vm_page_t m, n;
3698 vm_ooffset_t off;
3699 int valid;
3700
3701 ASSERT_VOP_LOCKED(vp, "inmem");
3702
3703 if (incore(&vp->v_bufobj, blkno))
3704 return (true);
3705 if (vp->v_mount == NULL)
3706 return (false);
3707 obj = vp->v_object;
3708 if (obj == NULL)
3709 return (false);
3710
3711 size = PAGE_SIZE;
3712 if (size > vp->v_mount->mnt_stat.f_iosize)
3713 size = vp->v_mount->mnt_stat.f_iosize;
3714 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3715
3716 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3717 m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3718recheck:
3719 if (m == NULL)
3720 return (false);
3721
3722 tinc = size;
3723 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3724 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3725 /*
3726 * Consider page validity only if page mapping didn't change
3727 * during the check.
3728 */
3729 valid = vm_page_is_valid(m,
3730 (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
3731 n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3732 if (m != n) {
3733 m = n;
3734 goto recheck;
3735 }
3736 if (!valid)
3737 return (false);
3738 }
3739 return (true);
3740}
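/*
 * Editorial example (illustrative sketch, not code from this file):
 * read-ahead code uses these predicates to skip blocks that would not
 * need I/O anyway, exactly as breada() does above.  "rablkno" is a
 * placeholder for the candidate read-ahead block:
 *
 *	if (inmem(vp, rablkno))
 *		continue;	(block already resident, skip the read-ahead)
 */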
3741
3742/*
3743 * Set the dirty range for a buffer based on the status of the dirty
3744 * bits in the pages comprising the buffer. The range is limited
3745 * to the size of the buffer.
3746 *
3747 * Tell the VM system that the pages associated with this buffer
3748 * are clean. This is used for delayed writes where the data is
3749 * going to go to disk eventually without additional VM intervention.
3750 *
3751 * Note that while we only really need to clean through to b_bcount, we
3752 * just go ahead and clean through to b_bufsize.
3753 */
3754static void
3755 vfs_clean_pages_dirty_buf(struct buf *bp)
3756 {
3757 vm_ooffset_t foff, noff, eoff;
3758 vm_page_t m;
3759 int i;
3760
3761 if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3762 return;
3763
3764 foff = bp->b_offset;
3765 KASSERT(bp->b_offset != NOOFFSET,
3766 ("vfs_clean_pages_dirty_buf: no buffer offset"));
3767
3768 vfs_busy_pages_acquire(bp);
3769 vfs_setdirty_range(bp);
3770 for (i = 0; i < bp->b_npages; i++) {
3771 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3772 eoff = noff;
3773 if (eoff > bp->b_offset + bp->b_bufsize)
3774 eoff = bp->b_offset + bp->b_bufsize;
3775 m = bp->b_pages[i];
3776 vfs_page_set_validclean(bp, foff, m);
3777 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3778 foff = noff;
3779 }
3780 vfs_busy_pages_release(bp);
3781}
3782
3783static void
3784 vfs_setdirty_range(struct buf *bp)
3785 {
3786 vm_offset_t boffset;
3787 vm_offset_t eoffset;
3788 int i;
3789
3790 /*
3791 * test the pages to see if they have been modified directly
3792 * by users through the VM system.
3793 */
3794 for (i = 0; i < bp->b_npages; i++)
3795 vm_page_test_dirty(bp->b_pages[i]);
3796
3797 /*
3798 * Calculate the encompassing dirty range, boffset and eoffset,
3799 * (eoffset - boffset) bytes.
3800 */
3801
3802 for (i = 0; i < bp->b_npages; i++) {
3803 if (bp->b_pages[i]->dirty)
3804 break;
3805 }
3806 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3807
3808 for (i = bp->b_npages - 1; i >= 0; --i) {
3809 if (bp->b_pages[i]->dirty) {
3810 break;
3811 }
3812 }
3813 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3814
3815 /*
3816 * Fit it to the buffer.
3817 */
3818
3819 if (eoffset > bp->b_bcount)
3820 eoffset = bp->b_bcount;
3821
3822 /*
3823 * If we have a good dirty range, merge with the existing
3824 * dirty range.
3825 */
3826
3827 if (boffset < eoffset) {
3828 if (bp->b_dirtyoff > boffset)
3829 bp->b_dirtyoff = boffset;
3830 if (bp->b_dirtyend < eoffset)
3831 bp->b_dirtyend = eoffset;
3832 }
3833}
3834
3835/*
3836 * Allocate the KVA mapping for an existing buffer.
3837 * If an unmapped buffer is provided but a mapped buffer is requested, take
3838 * also care to properly setup mappings between pages and KVA.
3839 */
3840static void
3841bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3842{
3843 int bsize, maxsize, need_mapping, need_kva;
3844 off_t offset;
3845
3846 need_mapping = bp->b_data == unmapped_buf &&
3847 (gbflags & GB_UNMAPPED) == 0;
3848 need_kva = bp->b_kvabase == unmapped_buf &&
3849 bp->b_data == unmapped_buf &&
3850 (gbflags & GB_KVAALLOC) != 0;
3851 if (!need_mapping && !need_kva)
3852 return;
3853
3854 BUF_CHECK_UNMAPPED(bp);
3855
3856 if (need_mapping && bp->b_kvabase != unmapped_buf) {
3857 /*
3858 * Buffer is not mapped, but the KVA was already
3859 * reserved at the time of the instantiation. Use the
3860 * allocated space.
3861 */
3862 goto has_addr;
3863 }
3864
3865 /*
3866 * Calculate the amount of the address space we would reserve
3867 * if the buffer was mapped.
3868 */
3869 bsize = vn_isdisk(bp->b_vp) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3870 KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3871 offset = blkno * bsize;
3872 maxsize = size + (offset & PAGE_MASK);
3873 maxsize = imax(maxsize, bsize);
3874
3875 while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3876 if ((gbflags & GB_NOWAIT_BD) != 0) {
3877 /*
3878 * XXXKIB: defragmentation cannot
3879 * succeed, not sure what else to do.
3880 */
3881 panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3882 }
3883 counter_u64_add(mappingrestarts, 1);
3884 bufspace_wait(bufdomain(bp), bp->b_vp, gbflags, 0, 0);
3885 }
3886has_addr:
3887 if (need_mapping) {
3888 /* b_offset is handled by bpmap_qenter. */
3889 bp->b_data = bp->b_kvabase;
3890 BUF_CHECK_MAPPED(bp);
3891 bpmap_qenter(bp);
3892 }
3893}
3894
3895struct buf *
3896getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3897 int flags)
3898{
3899 struct buf *bp;
3900 int error;
3901
3902 error = getblkx(vp, blkno, blkno, size, slpflag, slptimeo, flags, &bp);
3903 if (error != 0)
3904 return (NULL);
3905 return (bp);
3906}
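/*
 * Editorial example (illustrative sketch, not code from this file): the
 * B_CACHE protocol documented in the getblkx() comment below reduces to
 * the following pattern for a caller that wants the block's contents:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);	(the same steps breadn_flags() takes)
 *	}
 */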
3907
3908/*
3909 * getblkx:
3910 *
3911 * Get a block given a specified block and offset into a file/device.
3912 * The buffers B_DONE bit will be cleared on return, making it almost
3913 * ready for an I/O initiation. B_INVAL may or may not be set on
3914 * return. The caller should clear B_INVAL prior to initiating a
3915 * READ.
3916 *
3917 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3918 * an existing buffer.
3919 *
3920 * For a VMIO buffer, B_CACHE is modified according to the backing VM.
3921 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3922 * and then cleared based on the backing VM. If the previous buffer is
3923 * non-0-sized but invalid, B_CACHE will be cleared.
3924 *
3925 * If getblk() must create a new buffer, the new buffer is returned with
3926 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3927 * case it is returned with B_INVAL clear and B_CACHE set based on the
3928 * backing VM.
3929 *
3930 * getblk() also forces a bwrite() for any B_DELWRI buffer whose
3931 * B_CACHE bit is clear.
3932 *
3933 * What this means, basically, is that the caller should use B_CACHE to
3934 * determine whether the buffer is fully valid or not and should clear
3935 * B_INVAL prior to issuing a read. If the caller intends to validate
3936 * the buffer by loading its data area with something, the caller needs
3937 * to clear B_INVAL. If the caller does this without issuing an I/O,
3938 * the caller should set B_CACHE ( as an optimization ), else the caller
3939 * should issue the I/O and biodone() will set B_CACHE if the I/O was
3940 * a write attempt or if it was a successful read. If the caller
3941 * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3942 * prior to issuing the READ. biodone() will *not* clear B_INVAL.
3943 *
3944 * The blkno parameter is the logical block being requested. Normally
3945 * the mapping of logical block number to disk block address is done
3946 * by calling VOP_BMAP(). However, if the mapping is already known, the
3947 * disk block address can be passed using the dblkno parameter. If the
3948 * disk block address is not known, then the same value should be passed
3949 * for blkno and dblkno.
3950 */
3951int
3952getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag,
3953 int slptimeo, int flags, struct buf **bpp)
3954{
3955 struct buf *bp;
3956 struct bufobj *bo;
3957 daddr_t d_blkno;
3958 int bsize, error, maxsize, vmio;
3959 off_t offset;
3960
3961 CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3962 KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3963 ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3964 if (vp->v_type != VCHR)
3965 ASSERT_VOP_LOCKED(vp, "getblk");
3966 if (size > maxbcachebuf)
3967 panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
3968 maxbcachebuf);
3969 if (!unmapped_buf_allowed)
3970 flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3971
3972 bo = &vp->v_bufobj;
3973 d_blkno = dblkno;
3974
3975 /* Attempt lockless lookup first. */
3976 bp = gbincore_unlocked(bo, blkno);
3977 if (bp == NULL) {
3978 /*
3979 * With GB_NOCREAT we must be sure about not finding the buffer
3980 * as it may have been reassigned during unlocked lookup.
3981 */
3982 if ((flags & GB_NOCREAT) != 0)
3983 goto loop;
3984 goto newbuf_unlocked;
3985 }
3986
3987 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL, "getblku", 0,
3988 0);
3989 if (error != 0)
3990 goto loop;
3991
3992	/* Verify buf identity has not changed since lookup. */
3993 if (bp->b_bufobj == bo && bp->b_lblkno == blkno)
3994 goto foundbuf_fastpath;
3995
3996 /* It changed, fallback to locked lookup. */
3997 BUF_UNLOCK_RAW(bp);
3998
3999loop:
4000 BO_RLOCK(bo);
4001 bp = gbincore(bo, blkno);
4002 if (bp != NULL) {
4003 int lockflags;
4004
4005 /*
4006 * Buffer is in-core. If the buffer is not busy nor managed,
4007 * it must be on a queue.
4008 */
4009 lockflags = LK_EXCLUSIVE | LK_INTERLOCK |
4010 ((flags & GB_LOCK_NOWAIT) != 0 ? LK_NOWAIT : LK_SLEEPFAIL);
4011#ifdef WITNESS
4012 lockflags |= (flags & GB_NOWITNESS) != 0 ? LK_NOWITNESS : 0;
4013#endif
4014
4015 error = BUF_TIMELOCK(bp, lockflags,
4016 BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
4017
4018 /*
4019 * If we slept and got the lock we have to restart in case
4020 * the buffer changed identities.
4021 */
4022 if (error == ENOLCK)
4023 goto loop;
4024 /* We timed out or were interrupted. */
4025 else if (error != 0)
4026 return (error);
4027
4028foundbuf_fastpath:
4029 /* If recursed, assume caller knows the rules. */
4030 if (BUF_LOCKRECURSED(bp))
4031 goto end;
4032
4033 /*
4034 * The buffer is locked. B_CACHE is cleared if the buffer is
4035 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set
4036 * and for a VMIO buffer B_CACHE is adjusted according to the
4037 * backing VM cache.
4038 */
4039 if (bp->b_flags & B_INVAL)
4040 bp->b_flags &= ~B_CACHE;
4041 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
4042 bp->b_flags |= B_CACHE;
4043 if (bp->b_flags & B_MANAGED)
4044 MPASS(bp->b_qindex == QUEUE_NONE);
4045 else
4046 bremfree(bp);
4047
4048 /*
4049 * check for size inconsistencies for non-VMIO case.
4050 */
4051 if (bp->b_bcount != size) {
4052 if ((bp->b_flags & B_VMIO) == 0 ||
4053 (size > bp->b_kvasize)) {
4054 if (bp->b_flags & B_DELWRI) {
4055 bp->b_flags |= B_NOCACHE;
4056 bwrite(bp);
4057 } else {
4058 if (LIST_EMPTY(&bp->b_dep)) {
4059 bp->b_flags |= B_RELBUF;
4060 brelse(bp);
4061 } else {
4062 bp->b_flags |= B_NOCACHE;
4063 bwrite(bp);
4064 }
4065 }
4066 goto loop;
4067 }
4068 }
4069
4070 /*
4071 * Handle the case of unmapped buffer which should
4072 * become mapped, or the buffer for which KVA
4073 * reservation is requested.
4074 */
4075 bp_unmapped_get_kva(bp, blkno, size, flags);
4076
4077 /*
4078 * If the size is inconsistent in the VMIO case, we can resize
4079 * the buffer. This might lead to B_CACHE getting set or
4080 * cleared. If the size has not changed, B_CACHE remains
4081 * unchanged from its previous state.
4082 */
4083 allocbuf(bp, size);
4084
4085 KASSERT(bp->b_offset != NOOFFSET,
4086 ("getblk: no buffer offset"));
4087
4088 /*
4089 * A buffer with B_DELWRI set and B_CACHE clear must
4090 * be committed before we can return the buffer in
4091 * order to prevent the caller from issuing a read
4092 * ( due to B_CACHE not being set ) and overwriting
4093 * it.
4094 *
4095 * Most callers, including NFS and FFS, need this to
4096 * operate properly either because they assume they
4097 * can issue a read if B_CACHE is not set, or because
4098 * ( for example ) an uncached B_DELWRI might loop due
4099 * to softupdates re-dirtying the buffer. In the latter
4100 * case, B_CACHE is set after the first write completes,
4101 * preventing further loops.
4102 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE
4103 * above while extending the buffer, we cannot allow the
4104 * buffer to remain with B_CACHE set after the write
4105 * completes or it will represent a corrupt state. To
4106 * deal with this we set B_NOCACHE to scrap the buffer
4107 * after the write.
4108 *
4109 * We might be able to do something fancy, like setting
4110 * B_CACHE in bwrite() except if B_DELWRI is already set,
4111 * so the below call doesn't set B_CACHE, but that gets real
4112 * confusing. This is much easier.
4113 */
4114
4115 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
4116 bp->b_flags |= B_NOCACHE;
4117 bwrite(bp);
4118 goto loop;
4119 }
4120 bp->b_flags &= ~B_DONE;
4121 } else {
4122 /*
4123 * Buffer is not in-core, create new buffer. The buffer
4124 * returned by getnewbuf() is locked. Note that the returned
4125 * buffer is also considered valid (not marked B_INVAL).
4126 */
4127 BO_RUNLOCK(bo);
4128newbuf_unlocked:
4129 /*
4130 * If the user does not want us to create the buffer, bail out
4131 * here.
4132 */
4133 if (flags & GB_NOCREAT)
4134 return (EEXIST);
4135
4136 bsize = vn_isdisk(vp) ? DEV_BSIZE : bo->bo_bsize;
4137 KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
4138 offset = blkno * bsize;
4139 vmio = vp->v_object != NULL;
4140 if (vmio) {
4141 maxsize = size + (offset & PAGE_MASK);
4142 } else {
4143 maxsize = size;
4144			/* Do not allow non-VMIO unmapped buffers. */
4145 flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
4146 }
4147 maxsize = imax(maxsize, bsize);
4148 if ((flags & GB_NOSPARSE) != 0 && vmio &&
4149 !vn_isdisk(vp)) {
4150 error = VOP_BMAP(vp, blkno, NULL, &d_blkno, 0, 0);
4151 KASSERT(error != EOPNOTSUPP,
4152 ("GB_NOSPARSE from fs not supporting bmap, vp %p",
4153 vp));
4154 if (error != 0)
4155 return (error);
4156 if (d_blkno == -1)
4157 return (EJUSTRETURN);
4158 }
4159
4160 bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
4161 if (bp == NULL) {
4162 if (slpflag || slptimeo)
4163 return (ETIMEDOUT);
4164 /*
4165 * XXX This is here until the sleep path is diagnosed
4166 * enough to work under very low memory conditions.
4167 *
4168 * There's an issue on low memory, 4BSD+non-preempt
4169 * systems (eg MIPS routers with 32MB RAM) where buffer
4170 * exhaustion occurs without sleeping for buffer
4171			 * reclamation. This just sticks in a loop and
4172 * constantly attempts to allocate a buffer, which
4173 * hits exhaustion and tries to wakeup bufdaemon.
4174 * This never happens because we never yield.
4175 *
4176 * The real solution is to identify and fix these cases
4177 * so we aren't effectively busy-waiting in a loop
4178			 * until the reclamation path has cycles to run.
4179 */
4180 kern_yield(PRI_USER);
4181 goto loop;
4182 }
4183
4184 /*
4185 * This code is used to make sure that a buffer is not
4186 * created while the getnewbuf routine is blocked.
4187 * This can be a problem whether the vnode is locked or not.
4188 * If the buffer is created out from under us, we have to
4189 * throw away the one we just created.
4190 *
4191 * Note: this must occur before we associate the buffer
4192 * with the vp especially considering limitations in
4193 * the splay tree implementation when dealing with duplicate
4194 * lblkno's.
4195 */
4196 BO_LOCK(bo);
4197 if (gbincore(bo, blkno)) {
4198 BO_UNLOCK(bo);
4199 bp->b_flags |= B_INVAL;
4200 bufspace_release(bufdomain(bp), maxsize);
4201 brelse(bp);
4202 goto loop;
4203 }
4204
4205 /*
4206 * Insert the buffer into the hash, so that it can
4207 * be found by incore.
4208 */
4209 bp->b_lblkno = blkno;
4210 bp->b_blkno = d_blkno;
4211 bp->b_offset = offset;
4212 bgetvp(vp, bp);
4213 BO_UNLOCK(bo);
4214
4215 /*
4216 * set B_VMIO bit. allocbuf() the buffer bigger. Since the
4217 * buffer size starts out as 0, B_CACHE will be set by
4218 * allocbuf() for the VMIO case prior to it testing the
4219 * backing store for validity.
4220 */
4221
4222 if (vmio) {
4223 bp->b_flags |= B_VMIO;
4224 KASSERT(vp->v_object == bp->b_bufobj->bo_object,
4225 ("ARGH! different b_bufobj->bo_object %p %p %p\n",
4226 bp, vp->v_object, bp->b_bufobj->bo_object));
4227 } else {
4228 bp->b_flags &= ~B_VMIO;
4229 KASSERT(bp->b_bufobj->bo_object == NULL,
4230 ("ARGH! has b_bufobj->bo_object %p %p\n",
4231 bp, bp->b_bufobj->bo_object));
4232 BUF_CHECK_MAPPED(bp);
4233 }
4234
4235 allocbuf(bp, size);
4236 bufspace_release(bufdomain(bp), maxsize);
4237 bp->b_flags &= ~B_DONE;
4238 }
4239 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
4240end:
4241 buf_track(bp, __func__);
4242 KASSERT(bp->b_bufobj == bo,
4243 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
4244 *bpp = bp;
4245 return (0);
4246}
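
/*
 * Illustrative sketch (not from the original source): the B_CACHE/B_INVAL
 * protocol described above, reduced to a hypothetical read helper.  Real
 * callers such as breadn_flags() in this file also handle credentials,
 * read-ahead and resource accounting; this shows only the flag handshake.
 */
static int
example_bread(struct vnode *vp, daddr_t lbn, int size, struct buf **bpp)
{
	struct buf *bp;
	int error;

	error = getblkx(vp, lbn, lbn, size, 0, 0, 0, &bp);
	if (error != 0)
		return (error);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* Not fully valid: clear B_INVAL and issue the READ. */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
		if (error != 0) {
			brelse(bp);
			return (error);
		}
	}
	*bpp = bp;
	return (0);
}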
4247
4248/*
4249 * Get an empty, disassociated buffer of given size. The buffer is initially
4250 * set to B_INVAL.
4251 */
4252struct buf *
4253geteblk(int size, int flags)
4254{
4255 struct buf *bp;
4256 int maxsize;
4257
4258 maxsize = (size + BKVAMASK) & ~BKVAMASK;
4259 while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
4260 if ((flags & GB_NOWAIT_BD) &&
4261 (curthread->td_pflags & TDP_BUFNEED) != 0)
4262 return (NULL);
4263 }
4264 allocbuf(bp, size);
4265 bufspace_release(bufdomain(bp), maxsize);
4266 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
4267 return (bp);
4268}
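
/*
 * Illustrative sketch (not from the original source): typical use of
 * geteblk() for a transient, vnode-less scratch buffer.  The function name
 * is hypothetical.
 */
static void
example_scratch(void)
{
	struct buf *bp;

	bp = geteblk(PAGE_SIZE, 0);	/* returned locked and B_INVAL */
	bzero(bp->b_data, PAGE_SIZE);
	/* ... use bp->b_data as temporary storage ... */
	brelse(bp);			/* B_INVAL buffers are simply recycled */
}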
4269
4270/*
4271 * Truncate the backing store for a non-vmio buffer.
4272 */
4273static void
4274vfs_nonvmio_truncate(struct buf *bp, int newbsize)
4275{
4276
4277 if (bp->b_flags & B_MALLOC) {
4278 /*
4279 * malloced buffers are not shrunk
4280 */
4281 if (newbsize == 0) {
4282 bufmallocadjust(bp, 0);
4283 free(bp->b_data, M_BIOBUF);
4284 bp->b_data = bp->b_kvabase;
4285 bp->b_flags &= ~B_MALLOC;
4286 }
4287 return;
4288 }
4289 vm_hold_free_pages(bp, newbsize);
4290 bufspace_adjust(bp, newbsize);
4291}
4292
4293/*
4294 * Extend the backing for a non-VMIO buffer.
4295 */
4296static void
4297vfs_nonvmio_extend(struct buf *bp, int newbsize)
4298{
4299 caddr_t origbuf;
4300 int origbufsize;
4301
4302 /*
4303	 * We only use malloced memory on the first allocation,
4304	 * and revert to page-allocated memory when the buffer
4305 * grows.
4306 *
4307	 * There is a potential SMP race here that could lead
4308	 * to bufmallocspace slightly exceeding the max.  It
4309 * is probably extremely rare and not worth worrying
4310 * over.
4311 */
4312	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
4313	    bufmallocspace < maxbufmallocspace) {
4314 bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
4315 bp->b_flags |= B_MALLOC;
4316 bufmallocadjust(bp, newbsize);
4317 return;
4318 }
4319
4320 /*
4321 * If the buffer is growing on its other-than-first
4322 * allocation then we revert to the page-allocation
4323 * scheme.
4324 */
4325 origbuf = NULL;
4326 origbufsize = 0;
4327 if (bp->b_flags & B_MALLOC) {
4328 origbuf = bp->b_data;
4329 origbufsize = bp->b_bufsize;
4330 bp->b_data = bp->b_kvabase;
4331 bufmallocadjust(bp, 0);
4332 bp->b_flags &= ~B_MALLOC;
4333 newbsize = round_page(newbsize);
4334 }
4335 vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
4336 (vm_offset_t) bp->b_data + newbsize);
4337 if (origbuf != NULL) {
4338 bcopy(origbuf, bp->b_data, origbufsize);
4339 free(origbuf, M_BIOBUF);
4340 }
4341 bufspace_adjust(bp, newbsize);
4342}
4343
4344/*
4345 * This code constitutes the buffer memory from either anonymous system
4346 * memory (in the case of non-VMIO operations) or from an associated
4347 * VM object (in the case of VMIO operations). This code is able to
4348 * resize a buffer up or down.
4349 *
4350 * Note that this code is tricky, and has many complications to resolve
4351 * deadlock or inconsistent data situations. Tread lightly!!!
4352 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
4353 * the caller. Calling this code willy nilly can result in the loss of data.
4354 *
4355 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with
4356 * B_CACHE for the non-VMIO case.
4357 */
4358int
4359allocbuf(struct buf *bp, int size)
4360{
4361 int newbsize;
4362
4363 if (bp->b_bcount == size)
4364 return (1);
4365
4366 if (bp->b_kvasize != 0 && bp->b_kvasize < size)
4367 panic("allocbuf: buffer too small");
4368
4369 newbsize = roundup2(size, DEV_BSIZE);
4370 if ((bp->b_flags & B_VMIO) == 0) {
4371 if ((bp->b_flags & B_MALLOC) == 0)
4372 newbsize = round_page(newbsize);
4373 /*
4374 * Just get anonymous memory from the kernel. Don't
4375 * mess with B_CACHE.
4376 */
4377 if (newbsize < bp->b_bufsize)
4378 vfs_nonvmio_truncate(bp, newbsize);
4379 else if (newbsize > bp->b_bufsize)
4380 vfs_nonvmio_extend(bp, newbsize);
4381 } else {
4382 int desiredpages;
4383
4384 desiredpages = (size == 0) ? 0 :
4385 num_pages((bp->b_offset & PAGE_MASK) + newbsize);
4386
4387 if (bp->b_flags & B_MALLOC)
4388 panic("allocbuf: VMIO buffer can't be malloced");
4389 /*
4390 * Set B_CACHE initially if buffer is 0 length or will become
4391 * 0-length.
4392 */
4393 if (size == 0 || bp->b_bufsize == 0)
4394 bp->b_flags |= B_CACHE;
4395
4396 if (newbsize < bp->b_bufsize)
4397 vfs_vmio_truncate(bp, desiredpages);
4398 /* XXX This looks as if it should be newbsize > b_bufsize */
4399 else if (size > bp->b_bcount)
4400 vfs_vmio_extend(bp, desiredpages, size);
4401 bufspace_adjust(bp, newbsize);
4402 }
4403 bp->b_bcount = size; /* requested buffer size. */
4404 return (1);
4405}
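
/*
 * Illustrative sketch (not from the original source): resizing a locked
 * buffer, as a filesystem might when growing the last block of a file.
 * The B_CACHE/B_DELWRI interactions noted above remain the caller's
 * responsibility; the function name is hypothetical.
 */
static void
example_grow_block(struct buf *bp, int newsize)
{

	KASSERT(BUF_ISLOCKED(bp), ("example_grow_block: buffer not locked"));
	allocbuf(bp, newsize);		/* adjusts pages/KVA, may set B_CACHE */
	bp->b_flags &= ~B_DONE;
}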
4406
4407extern int inflight_transient_maps;
4408
4409static struct bio_queue nondump_bios;
4410
4411void
4412biodone(struct bio *bp)
4413{
4414 struct mtx *mtxp;
4415 void (*done)(struct bio *);
4416 vm_offset_t start, end;
4417
4418 biotrack(bp, __func__);
4419
4420 /*
4421 * Avoid completing I/O when dumping after a panic since that may
4422 * result in a deadlock in the filesystem or pager code. Note that
4423 * this doesn't affect dumps that were started manually since we aim
4424 * to keep the system usable after it has been resumed.
4425 */
4426 if (__predict_false(dumping && SCHEDULER_STOPPED())) {
4427 TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue);
4428 return;
4429 }
4430 if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
4431 bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
4432 bp->bio_flags |= BIO_UNMAPPED;
4433 start = trunc_page((vm_offset_t)bp->bio_data);
4434 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
4435 bp->bio_data = unmapped_buf;
4436 pmap_qremove(start, atop(end - start));
4437 vmem_free(transient_arena, start, end - start);
4438 atomic_add_int(&inflight_transient_maps, -1);
4439 }
4440 done = bp->bio_done;
4441 /*
4442 * The check for done == biodone is to allow biodone to be
4443 * used as a bio_done routine.
4444 */
4445 if (done == NULL || done == biodone) {
4446 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4447 mtx_lock(mtxp);
4448 bp->bio_flags |= BIO_DONE;
4449 wakeup(bp);
4450 mtx_unlock(mtxp);
4451 } else
4452 done(bp);
4453}
4454
4455/*
4456 * Wait for a BIO to finish.
4457 */
4458int
4459biowait(struct bio *bp, const char *wmesg)
4460{
4461 struct mtx *mtxp;
4462
4463 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4464 mtx_lock(mtxp);
4465 while ((bp->bio_flags & BIO_DONE) == 0)
4466 msleep(bp, mtxp, PRIBIO, wmesg, 0);
4467 mtx_unlock(mtxp);
4468 if (bp->bio_error != 0)
4469 return (bp->bio_error);
4470 if (!(bp->bio_flags & BIO_ERROR))
4471 return (0);
4472 return (EIO);
4473}
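
/*
 * Illustrative sketch (not from the original source): the synchronous bio
 * pattern enabled by the two routines above.  Leaving bio_done NULL makes
 * biodone() set BIO_DONE and wake sleepers, so the submitter can block in
 * biowait().  The strategy callback and function name are hypothetical
 * stand-ins for however the bio is handed to the lower layer.
 */
static int
example_sync_bio(struct bio *bip, void (*dev_strategy)(struct bio *))
{

	bip->bio_done = NULL;	/* use the biodone()/biowait() handshake */
	dev_strategy(bip);	/* submit to the device or provider */
	return (biowait(bip, "exbio"));
}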
4474
4475void
4476biofinish(struct bio *bp, struct devstat *stat, int error)
4477{
4478
4479 if (error) {
4480 bp->bio_error = error;
4481 bp->bio_flags |= BIO_ERROR;
4482 }
4483	if (stat != NULL)
4484		devstat_end_transaction_bio(stat, bp);
4485 biodone(bp);
4486}
4487
4488#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4489void
4490biotrack_buf(struct bio *bp, const char *location)
4491{
4492
4493 buf_track(bp->bio_track_bp, location);
4494}
4495#endif
4496
4497/*
4498 * bufwait:
4499 *
4500 * Wait for buffer I/O completion, returning error status. The buffer
4501 * is left locked and B_DONE on return. B_EINTR is converted into an EINTR
4502 * error and cleared.
4503 */
4504int
4505bufwait(struct buf *bp)
4506{
4507 if (bp->b_iocmd == BIO_READ)
4508 bwait(bp, PRIBIO, "biord");
4509 else
4510 bwait(bp, PRIBIO, "biowr");
4511 if (bp->b_flags & B_EINTR) {
4512 bp->b_flags &= ~B_EINTR;
4513 return (EINTR);
4514 }
4515 if (bp->b_ioflags & BIO_ERROR) {
4516 return (bp->b_error ? bp->b_error : EIO);
4517 } else {
4518 return (0);
4519 }
4520}
4521
4522/*
4523 * bufdone:
4524 *
4525 * Finish I/O on a buffer, optionally calling a completion function.
4526 * This is usually called from an interrupt so process blocking is
4527 * not allowed.
4528 *
4529 * biodone is also responsible for setting B_CACHE in a B_VMIO bp.
4530 * In a non-VMIO bp, B_CACHE will be set on the next getblk()
4531 * assuming B_INVAL is clear.
4532 *
4533 * For the VMIO case, we set B_CACHE if the op was a read and no
4534 * read error occurred, or if the op was a write. B_CACHE is never
4535 * set if the buffer is invalid or otherwise uncacheable.
4536 *
4537 * bufdone does not mess with B_INVAL, allowing the I/O routine or the
4538 * initiator to leave B_INVAL set to brelse the buffer out of existence
4539 * in the biodone routine.
4540 */
4541void
4542bufdone(struct buf *bp)
4543{
4544 struct bufobj *dropobj;
4545 void (*biodone)(struct buf *);
4546
4547 buf_track(bp, __func__);
4548 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4549 dropobj = NULL;
4550
4551 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4552
4553 runningbufwakeup(bp);
4554 if (bp->b_iocmd == BIO_WRITE)
4555 dropobj = bp->b_bufobj;
4556 /* call optional completion function if requested */
4557 if (bp->b_iodone != NULL) {
4558 biodone = bp->b_iodone;
4559 bp->b_iodone = NULL;
4560 (*biodone) (bp);
4561 if (dropobj)
4562 bufobj_wdrop(dropobj);
4563 return;
4564 }
4565 if (bp->b_flags & B_VMIO) {
4566 /*
4567 * Set B_CACHE if the op was a normal read and no error
4568 * occurred. B_CACHE is set for writes in the b*write()
4569 * routines.
4570 */
4571 if (bp->b_iocmd == BIO_READ &&
4572 !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4573 !(bp->b_ioflags & BIO_ERROR))
4574 bp->b_flags |= B_CACHE;
4575 vfs_vmio_iodone(bp);
4576 }
4577 if (!LIST_EMPTY(&bp->b_dep))
4578 buf_complete(bp);
4579 if ((bp->b_flags & B_CKHASH) != 0) {
4580 KASSERT(bp->b_iocmd == BIO_READ,
4581 ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd));
4582 KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp));
4583 (*bp->b_ckhashcalc)(bp);
4584 }
4585 /*
4586 * For asynchronous completions, release the buffer now. The brelse
4587 * will do a wakeup there if necessary - so no need to do a wakeup
4588 * here in the async case. The sync case always needs to do a wakeup.
4589 */
4590 if (bp->b_flags & B_ASYNC) {
4591 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4592 (bp->b_ioflags & BIO_ERROR))
4593 brelse(bp);
4594 else
4595 bqrelse(bp);
4596 } else
4597 bdone(bp);
4598 if (dropobj)
4599 bufobj_wdrop(dropobj);
4600}
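
/*
 * Illustrative sketch (not from the original source): a private b_iodone
 * hook for an asynchronous read, installed as bp->b_iodone before the
 * strategy call.  bufdone() clears b_iodone before invoking the hook, so
 * the hook finishes by calling bufdone() again to run the normal
 * completion path.  Names are hypothetical.
 */
static void
example_read_iodone(struct buf *bp)
{

	if ((bp->b_ioflags & BIO_ERROR) != 0)
		printf("example: read error %d at lbn %jd\n",
		    bp->b_error, (intmax_t)bp->b_lblkno);
	bufdone(bp);
}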
4601
4602/*
4603 * This routine is called in lieu of iodone in the case of
4604 * incomplete I/O. This keeps the busy status for pages
4605 * consistent.
4606 */
4607void
4608vfs_unbusy_pages(struct buf *bp)
4609{
4610 int i;
4611 vm_object_t obj;
4612 vm_page_t m;
4613
4614 runningbufwakeup(bp);
4615 if (!(bp->b_flags & B_VMIO))
4616 return;
4617
4618 obj = bp->b_bufobj->bo_object;
4619 for (i = 0; i < bp->b_npages; i++) {
4620 m = bp->b_pages[i];
4621 if (m == bogus_page) {
4622 m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4623 if (!m)
4624 panic("vfs_unbusy_pages: page missing\n");
4625 bp->b_pages[i] = m;
4626 if (buf_mapped(bp)) {
4627 BUF_CHECK_MAPPED(bp);
4628 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4629 bp->b_pages, bp->b_npages);
4630			} else
4631				BUF_CHECK_UNMAPPED(bp);
4632 }
4633 vm_page_sunbusy(m);
4634 }
4635 vm_object_pip_wakeupn(obj, bp->b_npages);
4636}
4637
4638/*
4639 * vfs_page_set_valid:
4640 *
4641 * Set the valid bits in a page based on the supplied offset. The
4642 * range is restricted to the buffer's size.
4643 *
4644 * This routine is typically called after a read completes.
4645 */
4646static void
4647vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4648{
4649 vm_ooffset_t eoff;
4650
4651 /*
4652 * Compute the end offset, eoff, such that [off, eoff) does not span a
4653 * page boundary and eoff is not greater than the end of the buffer.
4654 * The end of the buffer, in this case, is our file EOF, not the
4655 * allocation size of the buffer.
4656 */
4657 eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4658 if (eoff > bp->b_offset + bp->b_bcount)
4659 eoff = bp->b_offset + bp->b_bcount;
4660
4661 /*
4662 * Set valid range. This is typically the entire buffer and thus the
4663 * entire page.
4664 */
4665 if (eoff > off)
4666 vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4667}
4668
4669/*
4670 * vfs_page_set_validclean:
4671 *
4672 * Set the valid bits and clear the dirty bits in a page based on the
4673 * supplied offset. The range is restricted to the buffer's size.
4674 */
4675static void
4676vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4677{
4678 vm_ooffset_t soff, eoff;
4679
4680 /*
4681 * Start and end offsets in buffer. eoff - soff may not cross a
4682 * page boundary or cross the end of the buffer. The end of the
4683 * buffer, in this case, is our file EOF, not the allocation size
4684 * of the buffer.
4685 */
4686 soff = off;
4687 eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4688 if (eoff > bp->b_offset + bp->b_bcount)
4689 eoff = bp->b_offset + bp->b_bcount;
4690
4691 /*
4692 * Set valid range. This is typically the entire buffer and thus the
4693 * entire page.
4694 */
4695 if (eoff > soff) {
4696 vm_page_set_validclean(
4697 m,
4698 (vm_offset_t) (soff & PAGE_MASK),
4699 (vm_offset_t) (eoff - soff)
4700 );
4701 }
4702}
4703
4704/*
4705 * Acquire a shared busy on all pages in the buf.
4706 */
4707void
4708vfs_busy_pages_acquire(struct buf *bp)
4709{
4710 int i;
4711
4712 for (i = 0; i < bp->b_npages; i++)
4713 vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY);
4714}
4715
4716void
4717vfs_busy_pages_release(struct buf *bp)
4718{
4719 int i;
4720
4721 for (i = 0; i < bp->b_npages; i++)
4722 vm_page_sunbusy(bp->b_pages[i]);
4723}
4724
4725/*
4726 * This routine is called before a device strategy routine.
4727 * It is used to tell the VM system that paging I/O is in
4728 * progress, and treat the pages associated with the buffer
4729 * almost as being exclusive busy. Also the object paging_in_progress
4730 * flag is handled to make sure that the object doesn't become
4731 * inconsistent.
4732 *
4733 * Since I/O has not been initiated yet, certain buffer flags
4734 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4735 * and should be ignored.
4736 */
4737void
4738vfs_busy_pages(struct buf *bp, int clear_modify)
4739{
4740 vm_object_t obj;
4741 vm_ooffset_t foff;
4742 vm_page_t m;
4743 int i;
4744 bool bogus;
4745
4746 if (!(bp->b_flags & B_VMIO))
4747 return;
4748
4749 obj = bp->b_bufobj->bo_object;
4750 foff = bp->b_offset;
4751 KASSERT(bp->b_offset != NOOFFSET,
4752 ("vfs_busy_pages: no buffer offset"));
4753 if ((bp->b_flags & B_CLUSTER) == 0) {
4754		vm_object_pip_add(obj, bp->b_npages);
4755		vfs_busy_pages_acquire(bp);
4756 }
4757	if (bp->b_bufsize != 0)
4758		vfs_setdirty_range(bp);
4759 bogus = false;
4760 for (i = 0; i < bp->b_npages; i++) {
4761 m = bp->b_pages[i];
4762 vm_page_assert_sbusied(m);
4763
4764 /*
4765 * When readying a buffer for a read ( i.e
4766 * clear_modify == 0 ), it is important to do
4767 * bogus_page replacement for valid pages in
4768 * partially instantiated buffers. Partially
4769 * instantiated buffers can, in turn, occur when
4770 * reconstituting a buffer from its VM backing store
4771 * base. We only have to do this if B_CACHE is
4772 * clear ( which causes the I/O to occur in the
4773 * first place ). The replacement prevents the read
4774 * I/O from overwriting potentially dirty VM-backed
4775 * pages. XXX bogus page replacement is, uh, bogus.
4776 * It may not work properly with small-block devices.
4777 * We need to find a better way.
4778 */
4779 if (clear_modify) {
4780 pmap_remove_write(m);
4781 vfs_page_set_validclean(bp, foff, m);
4782 } else if (vm_page_all_valid(m) &&
4783 (bp->b_flags & B_CACHE) == 0) {
4784 bp->b_pages[i] = bogus_page;
4785 bogus = true;
4786 }
4787 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4788 }
4789 if (bogus && buf_mapped(bp)) {
4790 BUF_CHECK_MAPPED(bp);
4791 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4792 bp->b_pages, bp->b_npages);
4793 }
4794}
4795
4796/*
4797 * vfs_bio_set_valid:
4798 *
4799 * Set the range within the buffer to valid. The range is
4800 * relative to the beginning of the buffer, b_offset. Note that
4801 * b_offset itself may be offset from the beginning of the first
4802 * page.
4803 */
4804void
4805vfs_bio_set_valid(struct buf *bp, int base, int size)
4806{
4807 int i, n;
4808 vm_page_t m;
4809
4810 if (!(bp->b_flags & B_VMIO))
4811 return;
4812
4813 /*
4814 * Fixup base to be relative to beginning of first page.
4815 * Set initial n to be the maximum number of bytes in the
4816 * first page that can be validated.
4817 */
4818 base += (bp->b_offset & PAGE_MASK);
4819 n = PAGE_SIZE - (base & PAGE_MASK);
4820
4821 /*
4822 * Busy may not be strictly necessary here because the pages are
4823 * unlikely to be fully valid and the vnode lock will synchronize
4824 * their access via getpages. It is grabbed for consistency with
4825 * other page validation.
4826	 */
4827	vfs_busy_pages_acquire(bp);
4828 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4829 m = bp->b_pages[i];
4830 if (n > size)
4831 n = size;
4832 vm_page_set_valid_range(m, base & PAGE_MASK, n);
4833 base += n;
4834 size -= n;
4835 n = PAGE_SIZE;
4836	}
4837	vfs_busy_pages_release(bp);
4838}
4839
4840/*
4841 * vfs_bio_clrbuf:
4842 *
4843 * If the specified buffer is a non-VMIO buffer, clear the entire
4844 * buffer. If the specified buffer is a VMIO buffer, clear and
4845 * validate only the previously invalid portions of the buffer.
4846 * This routine essentially fakes an I/O, so we need to clear
4847 * BIO_ERROR and B_INVAL.
4848 *
4849 * Note that while we only theoretically need to clear through b_bcount,
4850 * we go ahead and clear through b_bufsize.
4851 */
4852void
4853vfs_bio_clrbuf(struct buf *bp)
4854{
4855 int i, j, mask, sa, ea, slide;
4856
4857 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4858 clrbuf(bp);
4859 return;
4860 }
4861 bp->b_flags &= ~B_INVAL;
4862	bp->b_ioflags &= ~BIO_ERROR;
4863	vfs_busy_pages_acquire(bp);
4864 sa = bp->b_offset & PAGE_MASK;
4865 slide = 0;
4866 for (i = 0; i < bp->b_npages; i++, sa = 0) {
4867 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4868 ea = slide & PAGE_MASK;
4869 if (ea == 0)
4870 ea = PAGE_SIZE;
4871 if (bp->b_pages[i] == bogus_page)
4872 continue;
4873 j = sa / DEV_BSIZE;
4874 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4875 if ((bp->b_pages[i]->valid & mask) == mask)
4876 continue;
4877 if ((bp->b_pages[i]->valid & mask) == 0)
4878 pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4879 else {
4880 for (; sa < ea; sa += DEV_BSIZE, j++) {
4881 if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4882 pmap_zero_page_area(bp->b_pages[i],
4883 sa, DEV_BSIZE);
4884 }
4885 }
4886 }
4887 vm_page_set_valid_range(bp->b_pages[i], j * DEV_BSIZE,
4888 roundup2(ea - sa, DEV_BSIZE));
4889	}
4890	vfs_busy_pages_release(bp);
4891 bp->b_resid = 0;
4892}
4893
4894void
4895vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4896{
4897 vm_page_t m;
4898 int i, n;
4899
4900 if (buf_mapped(bp)) {
4901 BUF_CHECK_MAPPED(bp);
4902 bzero(bp->b_data + base, size);
4903	} else {
4904		BUF_CHECK_UNMAPPED(bp);
4905 n = PAGE_SIZE - (base & PAGE_MASK);
4906 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4907 m = bp->b_pages[i];
4908 if (n > size)
4909 n = size;
4910 pmap_zero_page_area(m, base & PAGE_MASK, n);
4911 base += n;
4912 size -= n;
4913 n = PAGE_SIZE;
4914 }
4915 }
4916}
4917
4918/*
4919 * Update buffer flags based on I/O request parameters, optionally releasing the
4920 * buffer. If it's VMIO or direct I/O, the buffer pages are released to the VM,
4921 * where they may be placed on a page queue (VMIO) or freed immediately (direct
4922 * I/O). Otherwise the buffer is released to the cache.
4923 */
4924static void
4925b_io_dismiss(struct buf *bp, int ioflag, bool release)
4926{
4927
4928 KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4929 ("buf %p non-VMIO noreuse", bp));
4930
4931 if ((ioflag & IO_DIRECT) != 0)
4932 bp->b_flags |= B_DIRECT;
4933 if ((ioflag & IO_EXT) != 0)
4934 bp->b_xflags |= BX_ALTDATA;
4935 if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4936 bp->b_flags |= B_RELBUF;
4937 if ((ioflag & IO_NOREUSE) != 0)
4938 bp->b_flags |= B_NOREUSE;
4939 if (release)
4940 brelse(bp);
4941 } else if (release)
4942 bqrelse(bp);
4943}
4944
4945void
4946vfs_bio_brelse(struct buf *bp, int ioflag)
4947{
4948
4949 b_io_dismiss(bp, ioflag, true);
4950}
4951
4952void
4953vfs_bio_set_flags(struct buf *bp, int ioflag)
4954{
4955
4956 b_io_dismiss(bp, ioflag, false);
4957}
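
/*
 * Illustrative sketch (not from the original source): how a filesystem read
 * path might use vfs_bio_brelse() to honor IO_DIRECT/IO_VMIO/IO_NOREUSE
 * after copying data out of the buffer.  The function name is hypothetical.
 */
static int
example_copyout_and_release(struct buf *bp, struct uio *uio, int xfersize,
    int ioflag)
{
	int error;

	error = uiomove(bp->b_data, xfersize, uio);
	vfs_bio_brelse(bp, ioflag);	/* chooses brelse() vs. bqrelse() */
	return (error);
}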
4958
4959/*
4960 * vm_hold_load_pages and vm_hold_free_pages get pages into
4961 * a buffer's address space. The pages are anonymous and are
4962 * not associated with a file object.
4963 */
4964static void
4965vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4966{
4967 vm_offset_t pg;
4968 vm_page_t p;
4969 int index;
4970
4971 BUF_CHECK_MAPPED(bp);
4972
4973 to = round_page(to);
4974 from = round_page(from);
4975 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4976 MPASS((bp->b_flags & B_MAXPHYS) == 0);
4977 KASSERT(to - from <= maxbcachebuf,
4978 ("vm_hold_load_pages too large %p %#jx %#jx %u",
4979 bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf));
4980
4981 for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4982 /*
4983 * note: must allocate system pages since blocking here
4984 * could interfere with paging I/O, no matter which
4985 * process we are.
4986 */
4987 p = vm_page_alloc_noobj(VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
4988 VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) | VM_ALLOC_WAITOK);
4989 pmap_qenter(pg, &p, 1);
4990 bp->b_pages[index] = p;
4991 }
4992 bp->b_npages = index;
4993}
4994
4995/* Return pages associated with this buf to the vm system */
4996static void
4997vm_hold_free_pages(struct buf *bp, int newbsize)
4998{
4999 vm_offset_t from;
5000 vm_page_t p;
5001 int index, newnpages;
5002
5003 BUF_CHECK_MAPPED(bp);
5004
5005 from = round_page((vm_offset_t)bp->b_data + newbsize);
5006 newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
5007 if (bp->b_npages > newnpages)
5008 pmap_qremove(from, bp->b_npages - newnpages);
5009 for (index = newnpages; index < bp->b_npages; index++) {
5010 p = bp->b_pages[index];
5011 bp->b_pages[index] = NULL;
5012 vm_page_unwire_noq(p);
5013 vm_page_free(p);
5014 }
5015 bp->b_npages = newnpages;
5016}
5017
5018/*
5019 * Map an IO request into kernel virtual address space.
5020 *
5021 * All requests are (re)mapped into kernel VA space.
5022 * Notice that we use b_bufsize for the size of the buffer
5023 * to be mapped. b_bcount might be modified by the driver.
5024 *
5025 * Note that even if the caller determines that the address space should
5026 * be valid, a race or a smaller-file mapped into a larger space may
5027 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
5028 * check the return value.
5029 *
5030 * This function only works with pager buffers.
5031 */
5032int
5033vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
5034{
5035 vm_prot_t prot;
5036 int pidx;
5037
5038 MPASS((bp->b_flags & B_MAXPHYS) != 0);
5039 prot = VM_PROT_READ;
5040 if (bp->b_iocmd == BIO_READ)
5041 prot |= VM_PROT_WRITE; /* Less backwards than it looks */
5042 pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
5043 (vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES);
5044 if (pidx < 0)
5045 return (-1);
5046 bp->b_bufsize = len;
5047 bp->b_npages = pidx;
5048 bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK;
5049 if (mapbuf || !unmapped_buf_allowed) {
5050 pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
5051 bp->b_data = bp->b_kvabase + bp->b_offset;
5052 } else
5053 bp->b_data = unmapped_buf;
5054 return (0);
5055}
5056
5057/*
5058 * Free the io map PTEs associated with this IO operation.
5059 * We also invalidate the TLB entries and restore the original b_addr.
5060 *
5061 * This function only works with pager buffers.
5062 */
5063void
5064vunmapbuf(struct buf *bp)
5065{
5066 int npages;
5067
5068 npages = bp->b_npages;
5069 if (buf_mapped(bp))
5070 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
5071 vm_page_unhold_pages(bp->b_pages, npages);
5072
5073 bp->b_data = unmapped_buf;
5074}
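
/*
 * Illustrative sketch (not from the original source): wiring a user buffer
 * into a pager buffer around an I/O, with the mandatory return-value check
 * called out in the vmapbuf() comment.  The pager buffer is assumed to come
 * from pbuf_zone, so B_MAXPHYS is set; names are hypothetical.
 */
static int
example_user_io(struct buf *bp, void *uaddr, size_t len)
{

	if (vmapbuf(bp, uaddr, len, 1) < 0)
		return (EFAULT);	/* user address range was not usable */
	/* ... run the transfer against bp->b_data / bp->b_pages ... */
	vunmapbuf(bp);
	return (0);
}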
5075
5076void
5077bdone(struct buf *bp)
5078{
5079 struct mtx *mtxp;
5080
5081 mtxp = mtx_pool_find(mtxpool_sleep, bp);
5082 mtx_lock(mtxp);
5083 bp->b_flags |= B_DONE;
5084 wakeup(bp);
5085 mtx_unlock(mtxp);
5086}
5087
5088void
5089bwait(struct buf *bp, u_char pri, const char *wchan)
5090{
5091 struct mtx *mtxp;
5092
5093 mtxp = mtx_pool_find(mtxpool_sleep, bp);
5094 mtx_lock(mtxp);
5095 while ((bp->b_flags & B_DONE) == 0)
5096 msleep(bp, mtxp, pri, wchan, 0);
5097 mtx_unlock(mtxp);
5098}
5099
5100int
5101bufsync(struct bufobj *bo, int waitfor)
5102{
5103
5104 return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
5105}
5106
5107void
5108bufstrategy(struct bufobj *bo, struct buf *bp)
5109{
5110 int i __unused;
5111 struct vnode *vp;
5112
5113 vp = bp->b_vp;
5114 KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
5115 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
5116 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
5117 i = VOP_STRATEGY(vp, bp);
5118 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
5119}
5120
5121/*
5122 * Initialize a struct bufobj before use. Memory is assumed zero filled.
5123 */
5124void
5125bufobj_init(struct bufobj *bo, void *private)
5126{
5127 static volatile int bufobj_cleanq;
5128
5129 bo->bo_domain =
5130 atomic_fetchadd_int(&bufobj_cleanq, 1) % buf_domains;
5131 rw_init(BO_LOCKPTR(bo), "bufobj interlock");
5132 bo->bo_private = private;
5133 TAILQ_INIT(&bo->bo_clean.bv_hd);
5134 TAILQ_INIT(&bo->bo_dirty.bv_hd);
5135}
5136
5137void
5138bufobj_wrefl(struct bufobj *bo)
5139{
5140
5141 KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5142 ASSERT_BO_WLOCKED(bo);
5143 bo->bo_numoutput++;
5144}
5145
5146void
5147bufobj_wref(struct bufobj *bo)
5148{
5149
5150 KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5151 BO_LOCK(bo);
5152 bo->bo_numoutput++;
5153 BO_UNLOCK(bo);
5154}
5155
5156void
5157bufobj_wdrop(struct bufobj *bo)
5158{
5159
5160 KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
5161 BO_LOCK(bo);
5162 KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
5163 if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
5164 bo->bo_flag &= ~BO_WWAIT;
5165 wakeup(&bo->bo_numoutput);
5166 }
5167 BO_UNLOCK(bo);
5168}
5169
5170int
5171bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
5172{
5173 int error;
5174
5175 KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
5176 ASSERT_BO_WLOCKED(bo);
5177 error = 0;
5178 while (bo->bo_numoutput) {
5179 bo->bo_flag |= BO_WWAIT;
5180 error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
5181 slpflag | (PRIBIO + 1), "bo_wwait", timeo);
5182 if (error)
5183 break;
5184 }
5185 return (error);
5186}
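
/*
 * Illustrative sketch (not from the original source): the write-counting
 * protocol.  I/O issuers bracket each write with bufobj_wref() and
 * bufobj_wdrop(); a syncer drains them with bufobj_wwait() under the
 * bufobj lock.  The function name is hypothetical.
 */
static int
example_drain_writes(struct bufobj *bo)
{
	int error;

	BO_LOCK(bo);
	error = bufobj_wwait(bo, 0, 0);	/* sleeps until bo_numoutput == 0 */
	BO_UNLOCK(bo);
	return (error);
}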
5187
5188/*
5189 * Set bio_data or bio_ma for struct bio from the struct buf.
5190 */
5191void
5192bdata2bio(struct buf *bp, struct bio *bip)
5193{
5194
5195 if (!buf_mapped(bp)) {
5196 KASSERT(unmapped_buf_allowed, ("unmapped"));
5197 bip->bio_ma = bp->b_pages;
5198 bip->bio_ma_n = bp->b_npages;
5199 bip->bio_data = unmapped_buf;
5200 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
5201 bip->bio_flags |= BIO_UNMAPPED;
5202 KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
5203 PAGE_SIZE == bp->b_npages,
5204 ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
5205 (long long)bip->bio_length, bip->bio_ma_n));
5206 } else {
5207 bip->bio_data = bp->b_data;
5208 bip->bio_ma = NULL;
5209 }
5210}
5211
5212/*
5213 * The MIPS pmap code currently doesn't handle aliased pages.
5214 * The VIPT caches may not handle page aliasing themselves, leading
5215 * to data corruption.
5216 *
5217 * As such, this code makes a system extremely unhappy if said
5218 * system doesn't support unaliasing the above situation in hardware.
5219 * Some "recent" systems (eg some mips24k/mips74k cores) don't enable
5220 * this feature at build time, so it has to be handled in software.
5221 *
5222 * Once the MIPS pmap/cache code grows to support this function on
5223 * earlier chips, it should be flipped back off.
5224 */
5225#ifdef __mips__
5226static int buf_pager_relbuf = 1;
5227#else
5228static int buf_pager_relbuf = 0;
5229#endif
5230SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
5231 &buf_pager_relbuf, 0,
5232 "Make buffer pager release buffers after reading");
5233
5234/*
5235 * The buffer pager. It uses buffer reads to validate pages.
5236 *
5237 * In contrast to the generic local pager from vm/vnode_pager.c, this
5238 * pager correctly and easily handles volumes where the underlying
5239 * device block size is greater than the machine page size. The
5240 * buffer cache transparently extends the requested page run to be
5241 * aligned at the block boundary, and does the necessary bogus page
5242 * replacements in the addends to avoid obliterating already valid
5243 * pages.
5244 *
5245 * The only non-trivial issue is that the exclusive busy state for
5246 * pages, which is assumed by the vm_pager_getpages() interface, is
5247 * incompatible with the VMIO buffer cache's desire to share-busy the
5248 * pages. This function performs a trivial downgrade of the pages'
5249 * state before reading buffers, and a less trivial upgrade from the
5250 * shared-busy to excl-busy state after the read.
5251 */
5252int
5253vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
5254 int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
5255 vbg_get_blksize_t get_blksize)
5256{
5257 vm_page_t m;
5258 vm_object_t object;
5259 struct buf *bp;
5260 struct mount *mp;
5261 daddr_t lbn, lbnp;
5262 vm_ooffset_t la, lb, poff, poffe;
5263 long bo_bs, bsize;
5264 int br_flags, error, i, pgsin, pgsin_a, pgsin_b;
5265 bool redo, lpart;
5266
5267 object = vp->v_object;
5268 mp = vp->v_mount;
5269 error = 0;
5270 la = IDX_TO_OFF(ma[count - 1]->pindex);
5271 if (la >= object->un_pager.vnp.vnp_size)
5272 return (VM_PAGER_BAD);
5273
5274 /*
5275 * Change the meaning of la from where the last requested page starts
5276 * to where it ends, because that's the end of the requested region
5277 * and the start of the potential read-ahead region.
5278 */
5279 la += PAGE_SIZE;
5280 lpart = la > object->un_pager.vnp.vnp_size;
5281 error = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)),
5282 &bo_bs);
5283 if (error != 0)
5284 return (VM_PAGER_ERROR);
5285
5286 /*
5287 * Calculate read-ahead, behind and total pages.
5288 */
5289 pgsin = count;
5290 lb = IDX_TO_OFF(ma[0]->pindex);
5291 pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
5292 pgsin += pgsin_b;
5293 if (rbehind != NULL)
5294 *rbehind = pgsin_b;
5295 pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
5296 if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
5297 pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
5298 PAGE_SIZE) - la);
5299 pgsin += pgsin_a;
5300 if (rahead != NULL)
5301 *rahead = pgsin_a;
5302 VM_CNT_INC(v_vnodein);
5303 VM_CNT_ADD(v_vnodepgsin, pgsin);
5304
5305 br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
5306 != 0) ? GB_UNMAPPED : 0;
5307again:
5308 for (i = 0; i < count; i++) {
5309 if (ma[i] != bogus_page)
5310 vm_page_busy_downgrade(ma[i]);
5311 }
5312
5313 lbnp = -1;
5314 for (i = 0; i < count; i++) {
5315 m = ma[i];
5316 if (m == bogus_page)
5317 continue;
5318
5319 /*
5320 * Pages are shared busy and the object lock is not
5321 * owned, which together allow for the pages'
5322 * invalidation. The racy test for validity avoids
5323 * useless creation of the buffer for the most typical
5324 * case when invalidation is not used in redo or for
5325 * parallel read. The shared->excl upgrade loop at
5326 * the end of the function catches the race in a
5327 * reliable way (protected by the object lock).
5328 */
5329 if (vm_page_all_valid(m))
5330 continue;
5331
5332 poff = IDX_TO_OFF(m->pindex);
5333 poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
5334 for (; poff < poffe; poff += bsize) {
5335 lbn = get_lblkno(vp, poff);
5336 if (lbn == lbnp)
5337 goto next_page;
5338 lbnp = lbn;
5339
5340 error = get_blksize(vp, lbn, &bsize);
5341 if (error == 0)
5342 error = bread_gb(vp, lbn, bsize,
5343 curthread->td_ucred, br_flags, &bp);
5344 if (error != 0)
5345 goto end_pages;
5346 if (bp->b_rcred == curthread->td_ucred) {
5347 crfree(bp->b_rcred);
5348 bp->b_rcred = NOCRED;
5349 }
5350 if (LIST_EMPTY(&bp->b_dep)) {
5351 /*
5352 * Invalidation clears m->valid, but
5353 * may leave B_CACHE flag if the
5354 * buffer existed at the invalidation
5355 * time. In this case, recycle the
5356 * buffer to do real read on next
5357 * bread() after redo.
5358 *
5359 * Otherwise B_RELBUF is not strictly
5360				 * necessary; enable it to reduce buf
5361 * cache pressure.
5362 */
5363 if (buf_pager_relbuf ||
5364 !vm_page_all_valid(m))
5365 bp->b_flags |= B_RELBUF;
5366
5367 bp->b_flags &= ~B_NOCACHE;
5368 brelse(bp);
5369 } else {
5370 bqrelse(bp);
5371 }
5372 }
5373 KASSERT(1 /* racy, enable for debugging */ ||
5374 vm_page_all_valid(m) || i == count - 1,
5375 ("buf %d %p invalid", i, m));
5376 if (i == count - 1 && lpart) {
5377 if (!vm_page_none_valid(m) &&
5378 !vm_page_all_valid(m))
5379 vm_page_zero_invalid(m, TRUE);
5380 }
5381next_page:;
5382 }
5383end_pages:
5384
5385 redo = false;
5386 for (i = 0; i < count; i++) {
5387 if (ma[i] == bogus_page)
5388 continue;
5389 if (vm_page_busy_tryupgrade(ma[i]) == 0) {
5390 vm_page_sunbusy(ma[i]);
5391 ma[i] = vm_page_grab_unlocked(object, ma[i]->pindex,
5392 VM_ALLOC_NORMAL);
5393 }
5394
5395 /*
5396 * Since the pages were only sbusy while neither the
5397 * buffer nor the object lock was held by us, or
5398 * reallocated while vm_page_grab() slept for busy
5399		 * relinquish, they could have been invalidated.
5400 * Recheck the valid bits and re-read as needed.
5401 *
5402 * Note that the last page is made fully valid in the
5403 * read loop, and partial validity for the page at
5404 * index count - 1 could mean that the page was
5405 * invalidated or removed, so we must restart for
5406 * safety as well.
5407 */
5408 if (!vm_page_all_valid(ma[i]))
5409 redo = true;
5410 }
5411 if (redo && error == 0)
5412 goto again;
5413 return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
5414}
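
/*
 * Illustrative sketch (not from the original source): how a filesystem with
 * a fixed block size might implement VOP_GETPAGES on top of
 * vfs_bio_getpages().  The two callbacks map a file offset to a logical
 * block number and report that block's size; all names below are
 * hypothetical and the bo_bsize-based math is an assumption.
 */
static daddr_t
example_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

	return (off / vp->v_bufobj.bo_bsize);
}

static int
example_gbp_getblksz(struct vnode *vp, daddr_t lbn __unused, long *sz)
{

	*sz = vp->v_bufobj.bo_bsize;
	return (0);
}

static int
example_getpages(struct vop_getpages_args *ap)
{

	return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, example_gbp_getblkno,
	    example_gbp_getblksz));
}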
5415
5416#include "opt_ddb.h"
5417#ifdef DDB
5418#include <ddb/ddb.h>
5419
5420/* DDB command to show buffer data */
5421DB_SHOW_COMMAND(buffer, db_show_buffer)
5422{
5423 /* get args */
5424 struct buf *bp = (struct buf *)addr;
5425#ifdef FULL_BUF_TRACKING
5426 uint32_t i, j;
5427#endif
5428
5429 if (!have_addr) {
5430 db_printf("usage: show buffer <addr>\n");
5431 return;
5432 }
5433
5434 db_printf("buf at %p\n", bp);
5435 db_printf("b_flags = 0x%b, b_xflags=0x%b\n",
5436 (u_int)bp->b_flags, PRINT_BUF_FLAGS,
5437 (u_int)bp->b_xflags, PRINT_BUF_XFLAGS);
5438	db_printf("b_vflags=0x%b b_ioflags=0x%b\n",
5439 (u_int)bp->b_vflags, PRINT_BUF_VFLAGS,
5440 (u_int)bp->b_ioflags, PRINT_BIO_FLAGS);
5441 db_printf(
5442 "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
5443 "b_bufobj = (%p), b_data = %p\n, b_blkno = %jd, b_lblkno = %jd, "
5444 "b_vp = %p, b_dep = %p\n",
5445 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
5446 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
5447 (intmax_t)bp->b_lblkno, bp->b_vp, bp->b_dep.lh_first);
5448 db_printf("b_kvabase = %p, b_kvasize = %d\n",
5449 bp->b_kvabase, bp->b_kvasize);
5450 if (bp->b_npages) {
5451 int i;
5452 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
5453 for (i = 0; i < bp->b_npages; i++) {
5454 vm_page_t m;
5455 m = bp->b_pages[i];
5456 if (m != NULL)
5457 db_printf("(%p, 0x%lx, 0x%lx)", m->object,
5458 (u_long)m->pindex,
5459 (u_long)VM_PAGE_TO_PHYS(m));
5460 else
5461 db_printf("( ??? )");
5462 if ((i + 1) < bp->b_npages)
5463 db_printf(",");
5464 }
5465 db_printf("\n");
5466 }
5467 BUF_LOCKPRINTINFO(bp);
5468#if defined(FULL_BUF_TRACKING)
5469 db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
5470
5471 i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
5472 for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
5473 if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
5474 continue;
5475 db_printf(" %2u: %s\n", j,
5476 bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
5477 }
5478#elif defined(BUF_TRACKING)
5479 db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
5480#endif
5481 db_printf(" ");
5482}
5483
5484DB_SHOW_COMMAND(bufqueues, bufqueues)
5485{
5486 struct bufdomain *bd;
5487 struct buf *bp;
5488 long total;
5489 int i, j, cnt;
5490
5491 db_printf("bqempty: %d\n", bqempty.bq_len);
5492
5493 for (i = 0; i < buf_domains; i++) {
5494 bd = &bdomain[i];
5495 db_printf("Buf domain %d\n", i);
5496 db_printf("\tfreebufs\t%d\n", bd->bd_freebuffers);
5497 db_printf("\tlofreebufs\t%d\n", bd->bd_lofreebuffers);
5498 db_printf("\thifreebufs\t%d\n", bd->bd_hifreebuffers);
5499 db_printf("\n");
5500 db_printf("\tbufspace\t%ld\n", bd->bd_bufspace);
5501 db_printf("\tmaxbufspace\t%ld\n", bd->bd_maxbufspace);
5502 db_printf("\thibufspace\t%ld\n", bd->bd_hibufspace);
5503 db_printf("\tlobufspace\t%ld\n", bd->bd_lobufspace);
5504 db_printf("\tbufspacethresh\t%ld\n", bd->bd_bufspacethresh);
5505 db_printf("\n");
5506 db_printf("\tnumdirtybuffers\t%d\n", bd->bd_numdirtybuffers);
5507 db_printf("\tlodirtybuffers\t%d\n", bd->bd_lodirtybuffers);
5508 db_printf("\thidirtybuffers\t%d\n", bd->bd_hidirtybuffers);
5509 db_printf("\tdirtybufthresh\t%d\n", bd->bd_dirtybufthresh);
5510 db_printf("\n");
5511 total = 0;
5512 TAILQ_FOREACH(bp, &bd->bd_cleanq->bq_queue, b_freelist)
5513 total += bp->b_bufsize;
5514 db_printf("\tcleanq count\t%d (%ld)\n",
5515 bd->bd_cleanq->bq_len, total);
5516 total = 0;
5517 TAILQ_FOREACH(bp, &bd->bd_dirtyq.bq_queue, b_freelist)
5518 total += bp->b_bufsize;
5519 db_printf("\tdirtyq count\t%d (%ld)\n",
5520 bd->bd_dirtyq.bq_len, total);
5521 db_printf("\twakeup\t\t%d\n", bd->bd_wanted);
5522 db_printf("\tlim\t\t%d\n", bd->bd_lim);
5523 db_printf("\tCPU ");
5524 for (j = 0; j <= mp_maxid; j++)
5525 db_printf("%d, ", bd->bd_subq[j].bq_len);
5526 db_printf("\n");
5527 cnt = 0;
5528 total = 0;
5529 for (j = 0; j < nbuf; j++) {
5530 bp = nbufp(j);
5531 if (bp->b_domain == i && BUF_ISLOCKED(bp)) {
5532 cnt++;
5533 total += bp->b_bufsize;
5534 }
5535 }
5536 db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
5537 cnt = 0;
5538 total = 0;
5539 for (j = 0; j < nbuf; j++) {
5540 bp = nbufp(j);
5541 if (bp->b_domain == i) {
5542 cnt++;
5543 total += bp->b_bufsize;
5544 }
5545 }
5546 db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
5547 }
5548}
5549
5550DB_SHOW_COMMAND(lockedbufs, lockedbufs)
5551{
5552 struct buf *bp;
5553 int i;
5554
5555 for (i = 0; i < nbuf; i++) {
5556 bp = nbufp(i);
5557 if (BUF_ISLOCKED(bp)) {
5558 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5559 db_printf("\n");
5560 if (db_pager_quit)
5561 break;
5562 }
5563 }
5564}
5565
5566DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
5567{
5568 struct vnode *vp;
5569 struct buf *bp;
5570
5571 if (!have_addr) {
5572 db_printf("usage: show vnodebufs <addr>\n");
5573 return;
5574 }
5575 vp = (struct vnode *)addr;
5576 db_printf("Clean buffers:\n");
5577 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
5578 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5579 db_printf("\n");
5580 }
5581 db_printf("Dirty buffers:\n");
5582 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
5583 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5584 db_printf("\n");
5585 }
5586}
5587
5588DB_COMMAND(countfreebufs, db_coundfreebufs)
5589{
5590 struct buf *bp;
5591 int i, used = 0, nfree = 0;
5592
5593 if (have_addr) {
5594 db_printf("usage: countfreebufs\n");
5595 return;
5596 }
5597
5598 for (i = 0; i < nbuf; i++) {
5599 bp = nbufp(i);
5600 if (bp->b_qindex == QUEUE_EMPTY)
5601 nfree++;
5602 else
5603 used++;
5604 }
5605
5606 db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5607 nfree + used);
5608 db_printf("numfreebuffers is %d\n", numfreebuffers);
5609}
5610#endif /* DDB */
int * count
Definition: cpufreq_if.m:63
TAILQ_HEAD(note_info_list, note_info)
int bootverbose
Definition: init_main.c:131
void kthread_exit(void)
Definition: kern_kthread.c:328
void kproc_start(const void *udata)
Definition: kern_kthread.c:62
int kthread_add(void(*func)(void *), void *arg, struct proc *p, struct thread **newtdp, int flags, int pages, const char *fmt,...)
Definition: kern_kthread.c:255
void *() malloc(size_t size, struct malloc_type *mtp, int flags)
Definition: kern_malloc.c:632
void free(void *addr, struct malloc_type *mtp)
Definition: kern_malloc.c:907
struct mtx_pool __read_mostly * mtxpool_sleep
Definition: kern_mtxpool.c:84
struct mtx * mtx_pool_find(struct mtx_pool *pool, void *ptr)
Definition: kern_mtxpool.c:101
struct ucred * crhold(struct ucred *cr)
Definition: kern_prot.c:2014
void crfree(struct ucred *cr)
Definition: kern_prot.c:2035
int __read_mostly dumping
static int show_busybufs
void panic(const char *fmt,...)
void kern_yield(int prio)
Definition: kern_synch.c:660
void mi_switch(int flags)
Definition: kern_synch.c:491
void maybe_yield(void)
Definition: kern_synch.c:652
void wakeup(const void *ident)
Definition: kern_synch.c:349
int sysctl_handle_long(SYSCTL_HANDLER_ARGS)
Definition: kern_sysctl.c:1700
int sysctl_handle_int(SYSCTL_HANDLER_ARGS)
Definition: kern_sysctl.c:1644
void *** start
Definition: linker_if.m:98
caddr_t value
Definition: linker_if.m:63
struct iommu_domain ** domain
Definition: msi_if.m:96
uint64_t * addr
Definition: msi_if.m:89
long bd_maxbufspace
Definition: vfs_bio.c:125
int bd_wanted
Definition: vfs_bio.c:136
int bd_lodirtybuffers
Definition: vfs_bio.c:132
long bd_hibufspace
Definition: vfs_bio.c:126
bool bd_shutdown
Definition: vfs_bio.c:137
struct mtx_padalign bd_run_lock
Definition: vfs_bio.c:123
struct bufqueue * bd_cleanq
Definition: vfs_bio.c:122
int bd_lofreebuffers
Definition: vfs_bio.c:130
struct bufqueue bd_dirtyq
Definition: vfs_bio.c:121
struct bufqueue bd_subq[MAXCPU+1]
Definition: vfs_bio.c:120
long bd_bufspacethresh
Definition: vfs_bio.c:128
int bd_lim
Definition: vfs_bio.c:134
long bd_lobufspace
Definition: vfs_bio.c:127
int bd_hidirtybuffers
Definition: vfs_bio.c:131
int bd_hifreebuffers
Definition: vfs_bio.c:129
int bd_dirtybufthresh
Definition: vfs_bio.c:133
struct mtx_padalign bq_lock
Definition: vfs_bio.c:107
int mask
Definition: subr_acl_nfs4.c:70
static bool kasan_enabled __read_mostly
Definition: subr_asan.c:95
counter_u64_t counter_u64_alloc(int flags)
Definition: subr_counter.c:61
void devstat_end_transaction_bio(struct devstat *ds, const struct bio *bp)
Definition: subr_devstat.c:348
int nswbuf
Definition: subr_param.c:98
u_long maxbcache
Definition: subr_param.c:101
int bio_transient_maxcnt
Definition: subr_param.c:96
int hz
Definition: subr_param.c:85
int nbuf
Definition: subr_param.c:95
u_long maxphys
Definition: subr_param.c:103
int printf(const char *fmt,...)
Definition: subr_prf.c:397
u_int mp_maxid
Definition: subr_smp.c:77
int mp_ncpus
Definition: subr_smp.c:72
uint16_t flags
Definition: subr_stats.c:2
int vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
Definition: subr_vmem.c:1326
void vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
Definition: subr_vmem.c:1224
void vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
Definition: subr_vmem.c:1466
struct mtx mtx
Definition: uipc_ktls.c:0
int bd_dirtybufthresh
Definition: vfs_bio.c:13
struct bufdomainset bdlodirty
Definition: vfs_bio.c:389
#define BUF_CHECK_UNMAPPED(bp)
Definition: vfs_bio.c:1357
void biofinish(struct bio *bp, struct devstat *stat, int error)
Definition: vfs_bio.c:4476
static void vfs_vmio_iodone(struct buf *bp)
Definition: vfs_bio.c:2935
struct bufdomainset bdhidirty
Definition: vfs_bio.c:390
static void bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
Definition: vfs_bio.c:3841
#define BQ_ASSERT_LOCKED(bq)
Definition: vfs_bio.c:117
int dirtybufthresh
Definition: vfs_bio.c:280
static int flushbufqtarget
Definition: vfs_bio.c:313
int bufsync(struct bufobj *bo, int waitfor)
Definition: vfs_bio.c:5101
static struct bufqueue * bufqueue(struct buf *bp)
Definition: vfs_bio.c:1524
long hibufspace
Definition: vfs_bio.c:230
void runningbufwakeup(struct buf *bp)
Definition: vfs_bio.c:915
static int sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS)
Definition: vfs_bio.c:452
long bd_lobufspace
Definition: vfs_bio.c:7
static void bd_init(struct bufdomain *bd)
Definition: vfs_bio.c:1910
static void bq_init(struct bufqueue *bq, int qindex, int cpu, const char *lockname)
Definition: vfs_bio.c:1899
void bufstrategy(struct bufobj *bo, struct buf *bp)
Definition: vfs_bio.c:5108
void bufdone(struct buf *bp)
Definition: vfs_bio.c:4542
static int runningbufreq
Definition: vfs_bio.c:370
static int bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
Definition: vfs_bio.c:2078
static struct bufqueue * bufqueue_acquire(struct buf *bp)
Definition: vfs_bio.c:1548
#define BUF_DOMAINS
Definition: vfs_bio.c:387
bool inmem(struct vnode *vp, daddr_t blkno)
Definition: vfs_bio.c:3693
#define BD_LOCK(bd)
Definition: vfs_bio.c:145
void bufobj_wrefl(struct bufobj *bo)
Definition: vfs_bio.c:5138
uma_zone_t buf_zone
Definition: vfs_bio.c:402
#define BD_LOCKPTR(bd)
Definition: vfs_bio.c:144
void bufbdflush(struct bufobj *bo, struct buf *bp)
Definition: vfs_bio.c:2376
#define BUF_CHECK_MAPPED(bp)
Definition: vfs_bio.c:1356
static int lofreebuffers
Definition: vfs_bio.c:288
static char * buf
Definition: vfs_bio.c:153
caddr_t __read_mostly unmapped_buf
Definition: vfs_bio.c:161
static counter_u64_t numbufallocfails
Definition: vfs_bio.c:310
static void bpmap_qenter(struct buf *bp)
Definition: vfs_bio.c:1501
void bremfree(struct buf *bp)
Definition: vfs_bio.c:1869
static int bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
Definition: vfs_bio.c:668
static int buf_import(void *, void **, int, int, int)
Definition: vfs_bio.c:1650
static void bd_set(struct bufdomain *bd)
Definition: vfs_bio.c:557
int inflight_transient_maps
void biodone(struct bio *bp)
Definition: vfs_bio.c:4412
static int buf_recycle(struct bufdomain *, bool kva)
Definition: vfs_bio.c:1774
#define QUEUE_CLEAN
Definition: vfs_bio.c:383
static void bd_clear(struct bufdomain *bd)
Definition: vfs_bio.c:539
int bufwait(struct buf *bp)
Definition: vfs_bio.c:4505
long runningbufspace
Definition: vfs_bio.c:206
static void bufspace_adjust(struct buf *bp, int bufsize)
Definition: vfs_bio.c:639
static void buf_daemon_shutdown(void *arg __unused, int howto __unused)
Definition: vfs_bio.c:3415
int bbarrierwrite(struct buf *bp)
Definition: vfs_bio.c:2614
static void bufspace_daemon(void *arg)
Definition: vfs_bio.c:786
int breadn_flags(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt, struct ucred *cred, int flags, void(*ckhashfunc)(struct buf *), struct buf **bpp)
Definition: vfs_bio.c:2203
static struct mtx_padalign __exclusive_cache_line rbreqlock
Definition: vfs_bio.c:338
static long lobufspace
Definition: vfs_bio.c:225
static int getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
Definition: vfs_bio.c:3297
static int lodirtybuffers
Definition: vfs_bio.c:270
void bufobj_init(struct bufobj *bo, void *private)
Definition: vfs_bio.c:5125
static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
Definition: vfs_bio.c:4676
struct bufqueue bd_dirtyq
Definition: vfs_bio.c:1
void waitrunningbufspace(void)
Definition: vfs_bio.c:950
long bd_maxbufspace
Definition: vfs_bio.c:5
#define QUEUE_NONE
Definition: vfs_bio.c:380
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
Definition: vfs_bio.c:4647
static int buf_flush(struct vnode *vp, struct bufdomain *, int)
Definition: vfs_bio.c:3396
int maxbcachebuf
Definition: vfs_bio.c:325
static struct mtx_padalign __exclusive_cache_line bdlock
Definition: vfs_bio.c:332
#define QUEUE_DIRTY
Definition: vfs_bio.c:382
static void bdirtysub(struct buf *bp)
Definition: vfs_bio.c:575
static void vfs_vmio_invalidate(struct buf *bp)
Definition: vfs_bio.c:3006
void bufshutdown(int show_busybufs)
Definition: vfs_bio.c:1373
#define BD_RUN_UNLOCK(bd)
Definition: vfs_bio.c:150
#define BD_DOMAIN(bd)
Definition: vfs_bio.c:151
int bufwrite(struct buf *bp)
Definition: vfs_bio.c:2286
static struct buf * getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
Definition: vfs_bio.c:3330
struct bufqueue bd_subq[MAXCPU+1]
Definition: vfs_bio.c:0
static int sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS)
Definition: vfs_bio.c:433
static counter_u64_t getnewbufrestarts
Definition: vfs_bio.c:301
#define QUEUE_SENTINEL
Definition: vfs_bio.c:384
static int hifreebuffers
Definition: vfs_bio.c:293
int bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
Definition: vfs_bio.c:5171
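Usage sketch (not from this file): draining the writes accounted by bufobj_wref()/bufobj_wdrop(). bufobj_wwait() expects the bufobj lock to be held, as in the vinvalbuf()/bufobj_invalbuf() path; the function name is illustrative.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bufobj.h>
#include <sys/vnode.h>

/*
 * Sketch only: wait until every write in flight on this vnode's buffer
 * object has completed.
 */
static int
example_drain_writes(struct vnode *vp)
{
	struct bufobj *bo;
	int error;

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
	error = bufobj_wwait(bo, 0, 0);
	BO_UNLOCK(bo);
	return (error);
}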
static int bd_speedupreq
Definition: vfs_bio.c:363
static void bufkva_reclaim(vmem_t *, int)
Definition: vfs_bio.c:2120
void bdone(struct buf *bp)
Definition: vfs_bio.c:5077
int vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
Definition: vfs_bio.c:5033
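Usage sketch (not from this file): the physio(9)-style bracket around a transfer into a user-space buffer. example_transfer() is a hypothetical stand-in for the driver hand-off (strategy plus wait); the other names are illustrative too.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>

static int example_transfer(struct buf *bp);	/* hypothetical hand-off */

/*
 * Sketch only: wire and map a user buffer into "bp" before the transfer,
 * and tear the mapping down afterwards.  The final argument asks for a
 * mapped transfer; 0 would leave the pages held but unmapped.
 */
static int
example_user_io(struct buf *bp, void *uaddr, size_t len)
{
	int error;

	if (vmapbuf(bp, uaddr, len, 1) < 0)
		return (EFAULT);
	error = example_transfer(bp);
	vunmapbuf(bp);
	return (error);
}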
static int isbufbusy(struct buf *bp)
Definition: vfs_bio.c:1361
static int sysctl_runningspace(SYSCTL_HANDLER_ARGS)
Definition: vfs_bio.c:405
SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD, NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers")
static struct buf * nbufp(unsigned i)
Definition: vfs_bio.c:155
void bdirty(struct buf *bp)
Definition: vfs_bio.c:2521
static void bdirtywakeup(void)
Definition: vfs_bio.c:522
BITSET_DEFINE(bufdomainset, BUF_DOMAINS)
void vfs_bio_set_flags(struct buf *bp, int ioflag)
Definition: vfs_bio.c:4953
void babarrierwrite(struct buf *bp)
Definition: vfs_bio.c:2597
static void binsfree(struct buf *bp, int qindex)
Definition: vfs_bio.c:1577
void vfs_unbusy_pages(struct buf *bp)
Definition: vfs_bio.c:4608
void bd_speedup(void)
Definition: vfs_bio.c:1025
bool bd_shutdown
Definition: vfs_bio.c:17
int vmiodirenable
Definition: vfs_bio.c:203
struct proc * bufdaemonproc
Definition: vfs_bio.c:164
static int sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS)
Definition: vfs_bio.c:505
static counter_u64_t buffreekvacnt
Definition: vfs_bio.c:240
int bd_hidirtybuffers
Definition: vfs_bio.c:11
void bufobj_wdrop(struct bufobj *bo)
Definition: vfs_bio.c:5157
static int buf_pager_relbuf
Definition: vfs_bio.c:5228
void bawrite(struct buf *bp)
Definition: vfs_bio.c:2580
static void vfs_setdirty_range(struct buf *bp)
Definition: vfs_bio.c:3784
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
Definition: vfs_bio.c:4965
void vunmapbuf(struct buf *bp)
Definition: vfs_bio.c:5064
#define BQ_LOCKPTR(bq)
Definition: vfs_bio.c:114
struct buf * incore(struct bufobj *bo, daddr_t blkno)
Definition: vfs_bio.c:3682
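Usage sketch (not from this file): incore() answers whether a buffer for a logical block is currently hanging off a bufobj, which makes it useful as a read-ahead hint. Nothing keeps the answer stable after it returns, so it is only a heuristic; the function name is illustrative.

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/vnode.h>

/*
 * Sketch only: schedule a speculative read of the next logical block only
 * if no buffer for it is already cached.
 */
static bool
example_want_readahead(struct vnode *vp, daddr_t lbn)
{
	return (incore(&vp->v_bufobj, lbn + 1) == NULL);
}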
struct bufqueue __exclusive_cache_line bqempty
Definition: vfs_bio.c:397
void bufinit(void)
Definition: vfs_bio.c:1190
static int bdirtywait
Definition: vfs_bio.c:375
static int hidirtybuffers
Definition: vfs_bio.c:275
void vfs_bio_bzero_buf(struct buf *bp, int base, int size)
Definition: vfs_bio.c:4895
static int bd_flushall(struct bufdomain *bd)
Definition: vfs_bio.c:1978
#define BQ_LOCK(bq)
Definition: vfs_bio.c:115
struct bio_ops bioops
Definition: vfs_bio.c:96
#define QUEUE_EMPTY
Definition: vfs_bio.c:381
__FBSDID("$FreeBSD$")
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0, "Use the VM system for directory writes")
static __inline void bd_wakeup(void)
Definition: vfs_bio.c:987
void vfs_busy_pages_acquire(struct buf *bp)
Definition: vfs_bio.c:4708
int biowait(struct bio *bp, const char *wmesg)
Definition: vfs_bio.c:4459
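Usage sketch (not from this file): the struct bio counterpart of bufwait(). A GEOM consumer that leaves bio_done NULL can sleep in biowait() until the provider side calls biodone(); cp is assumed to be an already attached and opened consumer, and the function name is illustrative.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <geom/geom.h>

/*
 * Sketch only: synchronous read through a GEOM consumer.
 */
static int
example_geom_read(struct g_consumer *cp, off_t offset, void *data, off_t length)
{
	struct bio *bp;
	int error;

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = data;
	bp->bio_done = NULL;		/* NULL bio_done: biowait() applies */
	g_io_request(bp, cp);
	error = biowait(bp, "exbiord");
	g_destroy_bio(bp);
	return (error);
}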
int allocbuf(struct buf *bp, int size)
Definition: vfs_bio.c:4359
static __inline void vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, vm_page_t m)
Definition: vfs_bio.c:969
static void buf_free(struct buf *bp)
Definition: vfs_bio.c:1617
static int recursiveflushes
Definition: vfs_bio.c:263
struct buf_ops buf_ops_bio
Definition: vfs_bio.c:98
struct buf * getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo, int flags)
Definition: vfs_bio.c:3896
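Usage sketch (not from this file): the classic modify-and-delayed-write cycle around getblk(). The vnode is assumed to be locked by the caller and the names are illustrative.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/vnode.h>

/*
 * Sketch only: obtain (or create) the buffer for logical block "lbn",
 * overwrite its contents and schedule a delayed write.
 */
static int
example_rewrite_block(struct vnode *vp, daddr_t lbn, int bsize,
    const void *src, int len)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0, 0);
	if (bp == NULL)
		return (EIO);
	/* If the old contents mattered, the caller would bread() instead. */
	bcopy(src, bp->b_data, len);
	bdwrite(bp);		/* mark dirty and release; flushed later */
	return (0);
}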
static void bd_flush(struct bufdomain *bd, struct bufqueue *bq)
Definition: vfs_bio.c:1953
static void vfs_vmio_truncate(struct buf *bp, int npages)
Definition: vfs_bio.c:3059
static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int, void(*)(struct buf *))
Definition: vfs_bio.c:2143
int bd_hifreebuffers
Definition: vfs_bio.c:9
static void bq_remove(struct bufqueue *bq, struct buf *bp)
Definition: vfs_bio.c:1930
void bufobj_wref(struct bufobj *bo)
Definition: vfs_bio.c:5147
static void bufspace_release(struct bufdomain *bd, int size)
Definition: vfs_bio.c:697
struct bufdomain __exclusive_cache_line bdomain[BUF_DOMAINS]
Definition: vfs_bio.c:396
struct bufqueue __aligned(CACHE_LINE_SIZE)
static int __read_mostly buf_domains
Definition: vfs_bio.c:393
void vfs_busy_pages(struct buf *bp, int clear_modify)
Definition: vfs_bio.c:4738
static void runningwakeup(void)
Definition: vfs_bio.c:898
static void bufkva_free(struct buf *)
Definition: vfs_bio.c:2049
static struct mtx_padalign __exclusive_cache_line bdirtylock
Definition: vfs_bio.c:343
void bdata2bio(struct buf *bp, struct bio *bip)
Definition: vfs_bio.c:5192
#define BD_RUN_LOCK(bd)
Definition: vfs_bio.c:149
int altbufferflushes
Definition: vfs_bio.c:260
int bd_lodirtybuffers
Definition: vfs_bio.c:12
static void b_io_dismiss(struct buf *bp, int ioflag, bool release)
Definition: vfs_bio.c:4925
void vfs_bio_clrbuf(struct buf *bp)
Definition: vfs_bio.c:4853
void bundirty(struct buf *bp)
Definition: vfs_bio.c:2551
long bufspacethresh
Definition: vfs_bio.c:235
static long maxbufmallocspace
Definition: vfs_bio.c:222
long bd_hibufspace
Definition: vfs_bio.c:6
int getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag, int slptimeo, int flags, struct buf **bpp)
Definition: vfs_bio.c:3952
static struct kproc_desc buf_kp
Definition: vfs_bio.c:3388
void bremfreef(struct buf *bp)
Definition: vfs_bio.c:1889
static long bufmallocspace
Definition: vfs_bio.c:219
SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0, "Amount of presently outstanding async buffer io")
static int flushwithdeps
Definition: vfs_bio.c:3539
static const char buf_wmesg[]
Definition: vfs_bio.c:1186
static counter_u64_t notbufdflushes
Definition: vfs_bio.c:316
static int bd_request
Definition: vfs_bio.c:355
static long hirunningspace
Definition: vfs_bio.c:250
static counter_u64_t getnewbufcalls
Definition: vfs_bio.c:298
void bwait(struct buf *bp, u_char pri, const char *wchan)
Definition: vfs_bio.c:5089
static void bufmallocadjust(struct buf *bp, int bufsize)
Definition: vfs_bio.c:877
int bdwriteskip
Definition: vfs_bio.c:257
static counter_u64_t mappingrestarts
Definition: vfs_bio.c:305
struct buf * geteblk(int size, int flags)
Definition: vfs_bio.c:4253
static struct bufdomain * bufdomain(struct buf *)
Definition: vfs_bio.c:1517
static int flushbufqueues(struct vnode *, struct bufdomain *, int, int)
Definition: vfs_bio.c:3545
static counter_u64_t bufdefragcnt
Definition: vfs_bio.c:243
void vfs_bio_set_valid(struct buf *bp, int base, int size)
Definition: vfs_bio.c:4805
static void buf_daemon(void)
Definition: vfs_bio.c:3430
#define BD_UNLOCK(bd)
Definition: vfs_bio.c:146
static void buf_release(void *, void **, int)
Definition: vfs_bio.c:1674
#define BQ_UNLOCK(bq)
Definition: vfs_bio.c:116
void vfs_busy_pages_release(struct buf *bp)
Definition: vfs_bio.c:4717
static void maxbcachebuf_adjust(void)
Definition: vfs_bio.c:1002
static void bufspace_daemon_wakeup(struct bufdomain *bd)
Definition: vfs_bio.c:618
int dirtybufferflushes
Definition: vfs_bio.c:254
static long lorunningspace
Definition: vfs_bio.c:246
#define BD_RUN_LOCKPTR(bd)
Definition: vfs_bio.c:148
caddr_t kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
Definition: vfs_bio.c:1053
void bwillwrite(void)
Definition: vfs_bio.c:2631
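Usage sketch (not from this file): bwillwrite() blocks while the system-wide dirty buffer count is excessive, so it must be called at the top of a write path before any vnode locks are taken, as vn_write() does for regular files. The function name is illustrative.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/vnode.h>

/*
 * Sketch only: throttle before starting a write that will dirty buffers.
 */
static void
example_write_prologue(struct vnode *vp)
{
	if (vp->v_type == VREG)
		bwillwrite();
	/* ... lock the vnode and dirty buffers afterwards ... */
}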
static void bufspace_daemon_shutdown(void *arg, int howto __unused)
Definition: vfs_bio.c:763
static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
Definition: vfs_bio.c:2001
static struct bio_queue nondump_bios
Definition: vfs_bio.c:4409
void bqrelse(struct buf *bp)
Definition: vfs_bio.c:2872
static long maxbufspace
Definition: vfs_bio.c:214
void bdwrite(struct buf *bp)
Definition: vfs_bio.c:2428
void vfs_bio_brelse(struct buf *bp, int ioflag)
Definition: vfs_bio.c:4946
static void vfs_clean_pages_dirty_buf(struct buf *bp)
Definition: vfs_bio.c:3755
static void vfs_nonvmio_extend(struct buf *bp, int newbsize)
Definition: vfs_bio.c:4297
static void vfs_vmio_extend(struct buf *bp, int npages, int size)
Definition: vfs_bio.c:3104
int vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno, vbg_get_blksize_t get_blksize)
Definition: vfs_bio.c:5253
static void bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags, int slpflag, int slptimeo)
Definition: vfs_bio.c:711
static struct buf * buf_alloc(struct bufdomain *bd)
Definition: vfs_bio.c:1699
static int numfreebuffers
Definition: vfs_bio.c:285
SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
int vfs_bio_awrite(struct buf *bp)
Definition: vfs_bio.c:3229
int bd_lofreebuffers
Definition: vfs_bio.c:10
static long barrierwrites
Definition: vfs_bio.c:319
void brelse(struct buf *bp)
Definition: vfs_bio.c:2663
int buf_dirty_count_severe(void)
Definition: vfs_bio.c:2649
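Usage sketch (not from this file): the disposition idiom used by several filesystems. Under heavy dirty-buffer pressure an async write starts the I/O immediately instead of parking yet another delayed-write buffer; the function name is illustrative.

#include <sys/param.h>
#include <sys/buf.h>

/*
 * Sketch only: choose between delayed and async write based on pressure.
 */
static void
example_finish_block(struct buf *bp)
{
	if (buf_dirty_count_severe())
		bawrite(bp);
	else
		bdwrite(bp);
}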
static counter_u64_t bufkvaspace
Definition: vfs_bio.c:211
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, "Kernel virtual memory used for buffers")
static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer")
static int sysctl_bufspace(SYSCTL_HANDLER_ARGS)
Definition: vfs_bio.c:492
static void vfs_nonvmio_truncate(struct buf *bp, int newbsize)
Definition: vfs_bio.c:4274
long bd_bufspacethresh
Definition: vfs_bio.c:8
static void vm_hold_free_pages(struct buf *bp, int newbsize)
Definition: vfs_bio.c:4997
static int vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
Definition: vfs_bio.c:3186
#define TRANSIENT_DENOM
Definition: vfs_bio.c:1043
static void bdirtyadd(struct buf *bp)
Definition: vfs_bio.c:595
int cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len, int gbflags)
Definition: vfs_cluster.c:811
struct mntlist mountlist
Definition: vfs_mount.c:121
struct buf * gbincore_unlocked(struct bufobj *bo, daddr_t lblkno)
Definition: vfs_subr.c:2486
void bgetvp(struct vnode *vp, struct buf *bp)
Definition: vfs_subr.c:2501
void vfs_unmountall(void)
Definition: vfs_subr.c:4768
void vn_printf(struct vnode *vp, const char *fmt,...)
Definition: vfs_subr.c:4134
void brelvp(struct buf *bp)
Definition: vfs_subr.c:2526
bool vn_isdisk(struct vnode *vp)
Definition: vfs_subr.c:5215
void reassignbuf(struct buf *bp)
Definition: vfs_subr.c:2855
struct buf * gbincore(struct bufobj *bo, daddr_t lblkno)
Definition: vfs_subr.c:2468
int kern_sync(struct thread *td)
Definition: vfs_syscalls.c:139
int vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
Definition: vfs_vnops.c:1901
void vn_finished_write(struct mount *mp)
Definition: vfs_vnops.c:2009
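Usage sketch (not from this file): the standard bracket around an operation that will dirty a vnode. vn_start_write() accounts the write against the mount and blocks if the filesystem is suspending; vn_finished_write() releases that accounting. The function name is illustrative.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * Sketch only: suspend-aware modification of a vnode.
 */
static int
example_modify(struct vnode *vp)
{
	struct mount *mp;
	int error;

	error = vn_start_write(vp, &mp, V_WAIT);
	if (error != 0)
		return (error);
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		/* ... modify the vnode, e.g. via VOP calls ... */
		VOP_UNLOCK(vp);
	}
	vn_finished_write(mp);
	return (error);
}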