45#include <sys/devctl.h>
46#include <sys/eventhandler.h>
49#include <sys/kernel.h>
51#include <sys/libkern.h>
52#include <sys/limits.h>
53#include <sys/malloc.h>
59#include <sys/filedesc.h>
60#include <sys/reboot.h>
62#include <sys/syscallsubr.h>
63#include <sys/sysproto.h>
65#include <sys/sysctl.h>
66#include <sys/sysent.h>
68#include <sys/taskqueue.h>
74#include <machine/stdarg.h>
76#include <security/audit/audit.h>
77#include <security/mac/mac_framework.h>
/*
 * NOTE(review): this chunk is a lossy extraction of FreeBSD's
 * sys/kern/vfs_mount.c.  The digits fused to the start of many lines
 * (e.g. "79", "81") are line numbers from the original file left behind
 * by the extraction tool, not program text — confirm against the
 * upstream source before building.
 */
/*
 * Upper bound (64 KiB) on the total memory consumed by the option
 * name/value iovecs passed in from nmount(2); enforced while building
 * the vfsoptlist (see the memused accumulation in vfs_buildopts).
 */
79#define VFS_MOUNTARG_SIZE_MAX (1024 * 64)
/*
 * Forward declaration: perform a new mount or an MNT_UPDATE of the
 * filesystem type 'fstype' at path 'fspath' with flags 'fsflags'.
 * *optlist carries the parsed mount options; vfs_donmount frees the
 * list afterwards if it is still non-NULL (see the cleanup near the
 * end of vfs_donmount in this file).
 */
81static int vfs_domount(
struct thread *td,
const char *fstype,
char *fspath,
82 uint64_t fsflags,
struct vfsoptlist **optlist);
87 "Unprivileged users may mount and unmount file systems");
91 "Retry failed r/w mount as r/o if no explicit ro/rw option is specified");
96 " when a file system is forcibly unmounted");
99 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"deferred unmount controls");
102SYSCTL_UINT(_vfs_deferred_unmount, OID_AUTO, retry_limit, CTLFLAG_RW,
104 "Maximum number of retries for deferred unmount failure");
107SYSCTL_INT(_vfs_deferred_unmount, OID_AUTO, retry_delay_hz, CTLFLAG_RW,
109 "Delay in units of [1/kern.hz]s when retrying a failed deferred unmount");
112SYSCTL_INT(_vfs_deferred_unmount, OID_AUTO, total_retries, CTLFLAG_RD,
114 "Total number of retried deferred unmounts");
135 STAILQ_HEAD_INITIALIZER(deferred_unmount_list);
143static const char *global_opts[] = {
159 mp = (
struct mount *)mem;
160 mtx_init(&mp->mnt_mtx,
"struct mount mtx", NULL, MTX_DEF);
161 mtx_init(&mp->mnt_listmtx,
"struct mount vlist mtx", NULL, MTX_DEF);
162 lockinit(&mp->mnt_explock, PVFS,
"explock", 0, 0);
163 mp->mnt_pcpu = uma_zalloc_pcpu(
pcpu_zone_16, M_WAITOK | M_ZERO);
166 mp->mnt_rootvnode = NULL;
175 mp = (
struct mount *)mem;
178 mtx_destroy(&mp->mnt_listmtx);
179 mtx_destroy(&mp->mnt_mtx);
188 mount_zone = uma_zcreate(
"Mountpoints",
sizeof(
struct mount), NULL,
204 TAILQ_REMOVE(opts, opt, link);
205 free(opt->name, M_MOUNT);
206 if (opt->value != NULL)
207 free(opt->value, M_MOUNT);
217 while (!TAILQ_EMPTY(opts)) {
218 opt = TAILQ_FIRST(opts);
227 struct vfsopt *opt, *temp;
231 TAILQ_FOREACH_SAFE(opt, opts, link, temp) {
232 if (strcmp(opt->name,
name) == 0)
241 if (strcmp(opt,
"ro") == 0 || strcmp(opt,
"rdonly") == 0 ||
242 strcmp(opt,
"norw") == 0)
251 if (strcmp(opt,
"rw") == 0 || strcmp(opt,
"noro") == 0)
265 if (strcmp(opt1, opt2) == 0)
268 if (strncmp(opt1,
"no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
271 if (strncmp(opt2,
"no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
273 while ((p = strchr(opt1,
'.')) != NULL &&
274 !strncmp(opt1, opt2, ++p - opt1)) {
278 if (strncmp(opt1,
"no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
281 if (strncmp(opt2,
"no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
299 struct vfsopt *opt, *opt2, *tmp;
301 TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
302 opt2 = TAILQ_PREV(opt, vfsoptlist, link);
303 while (opt2 != NULL) {
305 tmp = TAILQ_PREV(opt2, vfsoptlist, link);
309 opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
321 struct vfsoptlist *opts;
323 size_t memused, namelen, optlen;
324 unsigned int i, iovcnt;
327 opts =
malloc(
sizeof(
struct vfsoptlist), M_MOUNT, M_WAITOK);
330 iovcnt = auio->uio_iovcnt;
331 for (i = 0; i < iovcnt; i += 2) {
332 namelen = auio->uio_iov[i].iov_len;
333 optlen = auio->uio_iov[i + 1].iov_len;
334 memused +=
sizeof(
struct vfsopt) + optlen + namelen;
346 opt =
malloc(
sizeof(
struct vfsopt), M_MOUNT, M_WAITOK);
347 opt->name =
malloc(namelen, M_MOUNT, M_WAITOK);
357 TAILQ_INSERT_TAIL(opts, opt, link);
359 if (auio->uio_segflg == UIO_SYSSPACE) {
360 bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
362 error = copyin(auio->uio_iov[i].iov_base, opt->name,
368 if (namelen == 0 || opt->name[namelen - 1] !=
'\0') {
374 opt->value =
malloc(optlen, M_MOUNT, M_WAITOK);
375 if (auio->uio_segflg == UIO_SYSSPACE) {
376 bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
379 error = copyin(auio->uio_iov[i + 1].iov_base,
405 struct vfsopt *opt, *
new;
407 TAILQ_FOREACH(opt, oldopts, link) {
408 new =
malloc(
sizeof(
struct vfsopt), M_MOUNT, M_WAITOK);
409 new->name = strdup(opt->name, M_MOUNT);
411 new->value =
malloc(opt->len, M_MOUNT, M_WAITOK);
412 bcopy(opt->value, new->value, opt->len);
416 new->seen = opt->seen;
417 TAILQ_INSERT_HEAD(toopts,
new, link);
425#ifndef _SYS_SYSPROTO_H_
447 AUDIT_ARG_FFLAGS(
flags);
448 CTR4(KTR_VFS,
"%s: iovp %p with iovcnt %d and flags %d", __func__,
458 flags &= ~MNT_ROOTFS;
465 if ((iovcnt & 1) || (iovcnt < 4)) {
466 CTR2(KTR_VFS,
"%s: failed for invalid iovcnt %d", __func__,
473 CTR2(KTR_VFS,
"%s: failed for invalid uio op with %d errno",
499 struct mount_pcpu *mpcpu;
501 mp = atomic_load_ptr(&vp->v_mount);
502 if (__predict_false(mp == NULL)) {
505 if (vfs_op_thread_enter(mp, mpcpu)) {
506 if (__predict_true(mp == vp->v_mount)) {
507 vfs_mp_count_add_pcpu(mpcpu, ref, 1);
508 vfs_op_thread_exit(mp, mpcpu);
510 vfs_op_thread_exit(mp, mpcpu);
515 if (mp == vp->v_mount) {
529 struct mount_pcpu *mpcpu;
531 CTR2(KTR_VFS,
"%s: mp %p", __func__, mp);
532 if (vfs_op_thread_enter(mp, mpcpu)) {
533 vfs_mp_count_add_pcpu(mpcpu, ref, 1);
534 vfs_op_thread_exit(mp, mpcpu);
557 struct mount_upper_node *upper)
561 mp = atomic_load_ptr(&vp->v_mount);
565 if (mp != vp->v_mount ||
566 ((mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_RECURSE)) != 0)) {
570 KASSERT(ump != mp, (
"upper and lower mounts are identical"));
573 TAILQ_INSERT_TAIL(&mp->mnt_uppers, upper, mnt_upper_link);
590 struct mount_upper_node *upper)
594 TAILQ_INSERT_TAIL(&mp->mnt_notify, upper, mnt_upper_link);
601 mtx_assert(MNT_MTX(mp), MA_OWNED);
602 while (mp->mnt_upper_pending != 0) {
603 mp->mnt_kern_flag |= MNTK_UPPER_WAITER;
604 msleep(&mp->mnt_uppers, MNT_MTX(mp), 0,
"mntupw", 0);
615 struct mount_upper_node *upper)
619 TAILQ_REMOVE(&mp->mnt_notify, upper, mnt_upper_link);
631 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0,
632 (
"registered upper with pending unmount"));
634 TAILQ_REMOVE(&mp->mnt_uppers, upper, mnt_upper_link);
635 if ((mp->mnt_kern_flag & MNTK_TASKQUEUE_WAITER) != 0 &&
636 TAILQ_EMPTY(&mp->mnt_uppers)) {
637 mp->mnt_kern_flag &= ~MNTK_TASKQUEUE_WAITER;
638 wakeup(&mp->mnt_taskqueue_link);
647 struct mount_pcpu *mpcpu;
649 CTR2(KTR_VFS,
"%s: mp %p", __func__, mp);
650 if (vfs_op_thread_enter(mp, mpcpu)) {
651 vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
652 vfs_op_thread_exit(mp, mpcpu);
671 bzero(&mp->mnt_startzero,
672 __rangeof(
struct mount, mnt_startzero, mnt_endzero));
673 mp->mnt_kern_flag = 0;
675 mp->mnt_rootvnode = NULL;
676 mp->mnt_vnodecovered = NULL;
679 TAILQ_INIT(&mp->mnt_nvnodelist);
680 mp->mnt_nvnodelistsize = 0;
681 TAILQ_INIT(&mp->mnt_lazyvnodelist);
682 mp->mnt_lazyvnodelistsize = 0;
683 if (mp->mnt_ref != 0 || mp->mnt_lockref != 0 ||
684 mp->mnt_writeopcount != 0)
685 panic(
"%s: non-zero counters on new mp %p\n", __func__, mp);
686 if (mp->mnt_vfs_ops != 1)
687 panic(
"%s: vfs_ops should be 1 but %d found\n", __func__,
690 atomic_add_acq_int(&vfsp->vfc_refcount, 1);
691 mp->mnt_op = vfsp->vfc_vfsops;
693 mp->mnt_stat.f_type = vfsp->vfc_typenum;
695 strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
696 mp->mnt_vnodecovered = vp;
697 mp->mnt_cred =
crdup(cred);
698 mp->mnt_stat.f_owner = cred->cr_uid;
699 strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
700 mp->mnt_iosize_max = DFLTPHYS;
703 mac_mount_create(cred, mp);
705 arc4rand(&mp->mnt_hashseed,
sizeof mp->mnt_hashseed, 0);
706 mp->mnt_upper_pending = 0;
707 TAILQ_INIT(&mp->mnt_uppers);
708 TAILQ_INIT(&mp->mnt_notify);
709 mp->mnt_taskqueue_flags = 0;
710 mp->mnt_unmount_retries = 0;
721 if (mp->mnt_vfs_ops == 0)
722 panic(
"%s: entered with zero vfs_ops\n", __func__);
724 vfs_assert_mount_counters(mp);
727 mp->mnt_kern_flag |= MNTK_REFEXPIRE;
728 if (mp->mnt_kern_flag & MNTK_MWAIT) {
729 mp->mnt_kern_flag &= ~MNTK_MWAIT;
733 msleep(mp, MNT_MTX(mp), PVFS,
"mntref", 0);
734 KASSERT(mp->mnt_ref == 0,
735 (
"%s: invalid refcount in the drain path @ %s:%d", __func__,
736 __FILE__, __LINE__));
737 if (mp->mnt_writeopcount != 0)
738 panic(
"vfs_mount_destroy: nonzero writeopcount");
739 if (mp->mnt_secondary_writes != 0)
740 panic(
"vfs_mount_destroy: nonzero secondary_writes");
741 atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
742 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
745 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
747 panic(
"unmount: dangling vnode");
749 KASSERT(mp->mnt_upper_pending == 0, (
"mnt_upper_pending"));
750 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), (
"mnt_uppers"));
751 KASSERT(TAILQ_EMPTY(&mp->mnt_notify), (
"mnt_notify"));
752 if (mp->mnt_nvnodelistsize != 0)
753 panic(
"vfs_mount_destroy: nonzero nvnodelistsize");
754 if (mp->mnt_lazyvnodelistsize != 0)
755 panic(
"vfs_mount_destroy: nonzero lazyvnodelistsize");
756 if (mp->mnt_lockref != 0)
757 panic(
"vfs_mount_destroy: nonzero lock refcount");
760 if (mp->mnt_vfs_ops != 1)
761 panic(
"%s: vfs_ops should be 1 but %d found\n", __func__,
764 if (mp->mnt_rootvnode != NULL)
765 panic(
"%s: mount point still has a root vnode %p\n", __func__,
768 if (mp->mnt_vnodecovered != NULL)
769 vrele(mp->mnt_vnodecovered);
771 mac_mount_destroy(mp);
773 if (mp->mnt_opt != NULL)
783 if ((fsflags & MNT_UPDATE) != 0)
786 if ((fsflags & MNT_RDONLY) != 0)
805vfs_donmount(
struct thread *td, uint64_t fsflags,
struct uio *fsoptions)
807 struct vfsoptlist *optlist;
808 struct vfsopt *opt, *tmp_opt;
809 char *fstype, *fspath, *errmsg;
810 int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;
813 errmsg = fspath = NULL;
814 errmsg_len = fspathlen = 0;
822 if (
vfs_getopt(optlist,
"errmsg", (
void **)&errmsg, &errmsg_len) == 0)
831 error =
vfs_getopt(optlist,
"fstype", (
void **)&fstype, &fstypelen);
832 if (error || fstypelen <= 0 || fstype[fstypelen - 1] !=
'\0') {
835 strncpy(errmsg,
"Invalid fstype", errmsg_len);
839 error =
vfs_getopt(optlist,
"fspath", (
void **)&fspath, &fspathlen);
840 if (error || fspathlen <= 0 || fspath[fspathlen - 1] !=
'\0') {
843 strncpy(errmsg,
"Invalid fspath", errmsg_len);
853 TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
856 if (strcmp(opt->name,
"update") == 0) {
857 fsflags |= MNT_UPDATE;
860 else if (strcmp(opt->name,
"async") == 0)
861 fsflags |= MNT_ASYNC;
862 else if (strcmp(opt->name,
"force") == 0) {
863 fsflags |= MNT_FORCE;
866 else if (strcmp(opt->name,
"reload") == 0) {
867 fsflags |= MNT_RELOAD;
870 else if (strcmp(opt->name,
"multilabel") == 0)
871 fsflags |= MNT_MULTILABEL;
872 else if (strcmp(opt->name,
"noasync") == 0)
873 fsflags &= ~MNT_ASYNC;
874 else if (strcmp(opt->name,
"noatime") == 0)
875 fsflags |= MNT_NOATIME;
876 else if (strcmp(opt->name,
"atime") == 0) {
877 free(opt->name, M_MOUNT);
878 opt->name = strdup(
"nonoatime", M_MOUNT);
880 else if (strcmp(opt->name,
"noclusterr") == 0)
881 fsflags |= MNT_NOCLUSTERR;
882 else if (strcmp(opt->name,
"clusterr") == 0) {
883 free(opt->name, M_MOUNT);
884 opt->name = strdup(
"nonoclusterr", M_MOUNT);
886 else if (strcmp(opt->name,
"noclusterw") == 0)
887 fsflags |= MNT_NOCLUSTERW;
888 else if (strcmp(opt->name,
"clusterw") == 0) {
889 free(opt->name, M_MOUNT);
890 opt->name = strdup(
"nonoclusterw", M_MOUNT);
892 else if (strcmp(opt->name,
"noexec") == 0)
893 fsflags |= MNT_NOEXEC;
894 else if (strcmp(opt->name,
"exec") == 0) {
895 free(opt->name, M_MOUNT);
896 opt->name = strdup(
"nonoexec", M_MOUNT);
898 else if (strcmp(opt->name,
"nosuid") == 0)
899 fsflags |= MNT_NOSUID;
900 else if (strcmp(opt->name,
"suid") == 0) {
901 free(opt->name, M_MOUNT);
902 opt->name = strdup(
"nonosuid", M_MOUNT);
904 else if (strcmp(opt->name,
"nosymfollow") == 0)
905 fsflags |= MNT_NOSYMFOLLOW;
906 else if (strcmp(opt->name,
"symfollow") == 0) {
907 free(opt->name, M_MOUNT);
908 opt->name = strdup(
"nonosymfollow", M_MOUNT);
910 else if (strcmp(opt->name,
"noro") == 0) {
911 fsflags &= ~MNT_RDONLY;
914 else if (strcmp(opt->name,
"rw") == 0) {
915 fsflags &= ~MNT_RDONLY;
918 else if (strcmp(opt->name,
"ro") == 0) {
919 fsflags |= MNT_RDONLY;
922 else if (strcmp(opt->name,
"rdonly") == 0) {
923 free(opt->name, M_MOUNT);
924 opt->name = strdup(
"ro", M_MOUNT);
925 fsflags |= MNT_RDONLY;
928 else if (strcmp(opt->name,
"autoro") == 0) {
932 else if (strcmp(opt->name,
"suiddir") == 0)
933 fsflags |= MNT_SUIDDIR;
934 else if (strcmp(opt->name,
"sync") == 0)
935 fsflags |= MNT_SYNCHRONOUS;
936 else if (strcmp(opt->name,
"union") == 0)
937 fsflags |= MNT_UNION;
938 else if (strcmp(opt->name,
"automounted") == 0) {
939 fsflags |= MNT_AUTOMOUNTED;
941 }
else if (strcmp(opt->name,
"nocover") == 0) {
942 fsflags |= MNT_NOCOVER;
944 }
else if (strcmp(opt->name,
"cover") == 0) {
945 fsflags &= ~MNT_NOCOVER;
947 }
else if (strcmp(opt->name,
"emptydir") == 0) {
948 fsflags |= MNT_EMPTYDIR;
950 }
else if (strcmp(opt->name,
"noemptydir") == 0) {
951 fsflags &= ~MNT_EMPTYDIR;
963 if (fstypelen > MFSNAMELEN || fspathlen > MNAMELEN) {
964 error = ENAMETOOLONG;
968 error =
vfs_domount(td, fstype, fspath, fsflags, &optlist);
969 if (error == ENOENT) {
972 strncpy(errmsg,
"Invalid fstype", errmsg_len);
983 printf(
"%s: R/W mount failed, possibly R/O media,"
984 " trying R/O mount\n", __func__);
985 fsflags |= MNT_RDONLY;
986 error =
vfs_domount(td, fstype, fspath, fsflags, &optlist);
990 if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
991 && errmsg_len > 0 && errmsg != NULL) {
992 if (fsoptions->uio_segflg == UIO_SYSSPACE) {
994 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
995 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
998 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
999 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
1003 if (optlist != NULL)
1011#ifndef _SYS_SYSPROTO_H_
1025 struct mntarg *ma = NULL;
1036 AUDIT_ARG_FFLAGS(
flags);
1045 flags &= ~MNT_ROOTFS;
1047 fstype =
malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
1048 error = copyinstr(uap->
type, fstype, MFSNAMELEN, NULL);
1050 free(fstype, M_TEMP);
1054 AUDIT_ARG_TEXT(fstype);
1056 free(fstype, M_TEMP);
1059 if (((vfsp->vfc_flags & VFCF_SBDRY) != 0 &&
1060 vfsp->vfc_vfsops_sd->vfs_cmount == NULL) ||
1061 ((vfsp->vfc_flags & VFCF_SBDRY) == 0 &&
1062 vfsp->vfc_vfsops->vfs_cmount == NULL))
1063 return (EOPNOTSUPP);
1071 if ((vfsp->vfc_flags & VFCF_SBDRY) != 0)
1072 return (vfsp->vfc_vfsops_sd->vfs_cmount(ma, uap->
data,
flags));
1073 return (vfsp->vfc_vfsops->vfs_cmount(ma, uap->
data,
flags));
1086 struct vfsoptlist **optlist
1091 struct vnode *newdp, *rootvp;
1095 ASSERT_VOP_ELOCKED(vp, __func__);
1096 KASSERT((fsflags & MNT_UPDATE) == 0, (
"MNT_UPDATE shouldn't be here"));
1102 if (jailed(td->td_ucred) && (!
prison_allow(td->td_ucred,
1103 vfsp->vfc_prison_flag) || vp == td->td_ucred->cr_prison->pr_root)) {
1112 error = VOP_GETATTR(vp, &va, td->td_ucred);
1113 if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
1117 if (error == 0 && vp->v_type != VDIR)
1119 if (error == 0 && (fsflags & MNT_EMPTYDIR) != 0)
1123 if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
1124 vp->v_iflag |= VI_MOUNT;
1139 mp->mnt_optnew = *optlist;
1141 mp->mnt_flag = (fsflags &
1142 (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY | MNT_FORCE));
1151 if ((error = VFS_MOUNT(mp)) != 0 ||
1152 (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 ||
1153 (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) {
1158 if (rootvp != NULL) {
1164 mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_UNMOUNTF;
1167 error = VFS_UNMOUNT(mp, 0);
1171 "failed post-mount (%d): rollback unmount returned %d\n",
1178 mp->mnt_vnodecovered = NULL;
1184 vp->v_iflag &= ~VI_MOUNT;
1186 if (rootvp != NULL) {
1197 if (mp->mnt_opt != NULL)
1199 mp->mnt_opt = mp->mnt_optnew;
1205 mp->mnt_optnew = NULL;
1208 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1209 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1210 mp->mnt_kern_flag |= MNTK_ASYNC;
1212 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1217 vp->v_mountedhere = mp;
1230 vp->v_iflag &= ~VI_MOUNT;
1234 TAILQ_INSERT_TAIL(&
mountlist, mp, mnt_list);
1238 EVENTHANDLER_DIRECT_INVOKE(vfs_mounted, mp, newdp, td);
1245 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1260 struct vfsoptlist **optlist
1263 struct export_args export;
1264 struct o2export_args o2export;
1265 struct vnode *rootvp;
1268 int error, export_error, i, len;
1272 ASSERT_VOP_ELOCKED(vp, __func__);
1273 KASSERT((fsflags & MNT_UPDATE) != 0, (
"MNT_UPDATE should be here"));
1276 if ((vp->v_vflag & VV_ROOT) == 0) {
1277 if (
vfs_copyopt(*optlist,
"export", &export,
sizeof(export))
1290 flag = mp->mnt_flag;
1291 if ((fsflags & MNT_RELOAD) != 0 && (
flag & MNT_RDONLY) == 0) {
1293 return (EOPNOTSUPP);
1309 if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
1315 vp->v_iflag |= VI_MOUNT;
1324 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
1329 mp->mnt_flag &= ~MNT_UPDATEMASK;
1330 mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
1331 MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
1332 if ((mp->mnt_flag & MNT_ASYNC) == 0)
1333 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1336 mp->mnt_optnew = *optlist;
1344 error = VFS_MOUNT(mp);
1348 if (error == 0 &&
vfs_getopt(mp->mnt_optnew,
"export", &bufp,
1352 case (
sizeof(
struct oexport_args)):
1353 bzero(&o2export,
sizeof(o2export));
1355 case (
sizeof(o2export)):
1356 bcopy(bufp, &o2export, len);
1357 export.ex_flags = (uint64_t)o2export.ex_flags;
1358 export.ex_root = o2export.ex_root;
1359 export.ex_uid = o2export.ex_anon.cr_uid;
1360 export.ex_groups = NULL;
1361 export.ex_ngroups = o2export.ex_anon.cr_ngroups;
1362 if (export.ex_ngroups > 0) {
1363 if (export.ex_ngroups <= XU_NGROUPS) {
1364 export.ex_groups =
malloc(
1365 export.ex_ngroups *
sizeof(gid_t),
1367 for (i = 0; i < export.ex_ngroups; i++)
1368 export.ex_groups[i] =
1369 o2export.ex_anon.cr_groups[i];
1371 export_error = EINVAL;
1372 }
else if (export.ex_ngroups < 0)
1373 export_error = EINVAL;
1374 export.ex_addr = o2export.ex_addr;
1375 export.ex_addrlen = o2export.ex_addrlen;
1376 export.ex_mask = o2export.ex_mask;
1377 export.ex_masklen = o2export.ex_masklen;
1378 export.ex_indexfile = o2export.ex_indexfile;
1379 export.ex_numsecflavors = o2export.ex_numsecflavors;
1380 if (export.ex_numsecflavors < MAXSECFLAVORS) {
1381 for (i = 0; i < export.ex_numsecflavors; i++)
1382 export.ex_secflavors[i] =
1383 o2export.ex_secflavors[i];
1385 export_error = EINVAL;
1386 if (export_error == 0)
1388 free(export.ex_groups, M_TEMP);
1390 case (
sizeof(export)):
1391 bcopy(bufp, &export, len);
1393 if (export.ex_ngroups > 0) {
1394 if (export.ex_ngroups <= NGROUPS_MAX) {
1395 grps =
malloc(export.ex_ngroups *
1396 sizeof(gid_t), M_TEMP, M_WAITOK);
1397 export_error = copyin(export.ex_groups,
1398 grps, export.ex_ngroups *
1400 if (export_error == 0)
1401 export.ex_groups = grps;
1403 export_error = EINVAL;
1404 }
else if (export.ex_ngroups == 0)
1405 export.ex_groups = NULL;
1407 export_error = EINVAL;
1408 if (export_error == 0)
1413 export_error = EINVAL;
1420 mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
1430 mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (
flag & ~MNT_QUOTA);
1432 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1433 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1434 mp->mnt_kern_flag |= MNTK_ASYNC;
1436 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1443 if (mp->mnt_opt != NULL)
1445 mp->mnt_opt = mp->mnt_optnew;
1447 (void)VFS_STATFS(mp, &mp->mnt_stat);
1452 mp->mnt_optnew = NULL;
1454 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1460 if (rootvp != NULL) {
1467 vp->v_iflag &= ~VI_MOUNT;
1470 return (error != 0 ? error : export_error);
1482 struct vfsoptlist **optlist
1486 struct nameidata nd;
1496 if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
1497 return (ENAMETOOLONG);
1499 if (jailed(td->td_ucred) ||
usermount == 0) {
1500 if ((error =
priv_check(td, PRIV_VFS_MOUNT)) != 0)
1507 if (fsflags & MNT_EXPORTED) {
1508 error =
priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
1512 if (fsflags & MNT_SUIDDIR) {
1513 error =
priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
1520 if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
1521 if (
priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
1522 fsflags |= MNT_NOSUID | MNT_USER;
1527 if ((fsflags & MNT_UPDATE) == 0) {
1529 if (fsflags & MNT_ROOTFS) {
1541 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, UIO_SYSSPACE,
1546 NDFREE(&nd, NDF_ONLY_PNBUF);
1548 if ((fsflags & MNT_UPDATE) == 0) {
1549 if ((vp->v_vflag & VV_ROOT) != 0 &&
1550 (fsflags & MNT_NOCOVER) != 0) {
1554 pathbuf =
malloc(MNAMELEN, M_TEMP, M_WAITOK);
1555 strcpy(pathbuf, fspath);
1561 free(pathbuf, M_TEMP);
1574#ifndef _SYS_SYSPROTO_H_
1591 struct nameidata nd;
1593 char *fsidbuf, *pathbuf;
1597 AUDIT_ARG_VALUE(
flags);
1598 if (jailed(td->td_ucred) ||
usermount == 0) {
1604 if (
flags & MNT_BYFSID) {
1605 fsidbuf =
malloc(MNAMELEN, M_TEMP, M_WAITOK);
1606 error = copyinstr(
path, fsidbuf, MNAMELEN, NULL);
1608 free(fsidbuf, M_TEMP);
1612 AUDIT_ARG_TEXT(fsidbuf);
1614 if (
sscanf(fsidbuf,
"FSID:%d:%d", &fsid.val[0], &fsid.val[1]) != 2) {
1615 free(fsidbuf, M_TEMP);
1620 free(fsidbuf, M_TEMP);
1625 pathbuf =
malloc(MNAMELEN, M_TEMP, M_WAITOK);
1626 error = copyinstr(
path, pathbuf, MNAMELEN, NULL);
1628 free(pathbuf, M_TEMP);
1635 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1636 UIO_SYSSPACE, pathbuf);
1637 if (
namei(&nd) == 0) {
1638 NDFREE(&nd, NDF_ONLY_PNBUF);
1645 TAILQ_FOREACH_REVERSE(mp, &
mountlist, mntlist, mnt_list) {
1646 if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
1652 free(pathbuf, M_TEMP);
1667 if (mp->mnt_flag & MNT_ROOTFS) {
1685 struct vnode *vp, *mvp;
1687 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1688 if ((vp->v_vflag & VV_ROOT) == 0 && vp->v_type != VNON &&
1689 vp->v_usecount != 0) {
1691 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1704 mtx_assert(MNT_MTX(mp), MA_OWNED);
1705 mp->mnt_kern_flag &= ~mntkflags;
1706 if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
1707 mp->mnt_kern_flag &= ~MNTK_MWAIT;
1712 if (coveredvp != NULL) {
1713 VOP_UNLOCK(coveredvp);
1729 struct mount_pcpu *mpcpu;
1734 if (mp->mnt_vfs_ops > 1) {
1740 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1742 mp->mnt_ref += mpcpu->mntp_ref;
1743 mpcpu->mntp_ref = 0;
1745 mp->mnt_lockref += mpcpu->mntp_lockref;
1746 mpcpu->mntp_lockref = 0;
1748 mp->mnt_writeopcount += mpcpu->mntp_writeopcount;
1749 mpcpu->mntp_writeopcount = 0;
1751 if (mp->mnt_ref <= 0 || mp->mnt_lockref < 0 || mp->mnt_writeopcount < 0)
1752 panic(
"%s: invalid count(s) on mp %p: ref %d lockref %d writeopcount %d\n",
1753 __func__, mp, mp->mnt_ref, mp->mnt_lockref, mp->mnt_writeopcount);
1755 vfs_assert_mount_counters(mp);
1762 mtx_assert(MNT_MTX(mp), MA_OWNED);
1764 if (mp->mnt_vfs_ops <= 0)
1765 panic(
"%s: invalid vfs_ops count %d for mp %p\n",
1766 __func__, mp->mnt_vfs_ops, mp);
1781 struct smp_rendezvous_cpus_retry_arg
srcra;
1793 if (!vfs_op_thread_entered(mp))
1802 struct mount_pcpu *mpcpu;
1807 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1808 while (atomic_load_int(&mpcpu->mntp_thread_in_ops))
1829vfs_assert_mount_counters(
struct mount *
mp)
1831 struct mount_pcpu *mpcpu;
1834 if (mp->mnt_vfs_ops == 0)
1838 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1839 if (mpcpu->mntp_ref != 0 ||
1840 mpcpu->mntp_lockref != 0 ||
1841 mpcpu->mntp_writeopcount != 0)
1842 vfs_dump_mount_counters(mp);
1847vfs_dump_mount_counters(
struct mount *mp)
1849 struct mount_pcpu *mpcpu;
1850 int ref, lockref, writeopcount;
1853 printf(
"%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops);
1858 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1859 printf(
"%d ", mpcpu->mntp_ref);
1860 ref += mpcpu->mntp_ref;
1864 lockref = mp->mnt_lockref;
1866 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1867 printf(
"%d ", mpcpu->mntp_lockref);
1868 lockref += mpcpu->mntp_lockref;
1871 printf(
"writeopcount: ");
1872 writeopcount = mp->mnt_writeopcount;
1874 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1875 printf(
"%d ", mpcpu->mntp_writeopcount);
1876 writeopcount += mpcpu->mntp_writeopcount;
1880 printf(
"counter struct total\n");
1881 printf(
"ref %-5d %-5d\n", mp->mnt_ref, ref);
1882 printf(
"lockref %-5d %-5d\n", mp->mnt_lockref, lockref);
1883 printf(
"writeopcount %-5d %-5d\n", mp->mnt_writeopcount, writeopcount);
1885 panic(
"invalid counts on struct mount");
1892 struct mount_pcpu *mpcpu;
1899 case MNT_COUNT_LOCKREF:
1900 sum = mp->mnt_lockref;
1902 case MNT_COUNT_WRITEOPCOUNT:
1903 sum = mp->mnt_writeopcount;
1908 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1911 sum += mpcpu->mntp_ref;
1913 case MNT_COUNT_LOCKREF:
1914 sum += mpcpu->mntp_lockref;
1916 case MNT_COUNT_WRITEOPCOUNT:
1917 sum += mpcpu->mntp_writeopcount;
1932 if ((mp->mnt_taskqueue_flags & MNT_DEFERRED) == 0 || requeue) {
1933 mp->mnt_taskqueue_flags =
flags | MNT_DEFERRED;
1934 STAILQ_INSERT_TAIL(&deferred_unmount_list, mp,
1935 mnt_taskqueue_link);
1956 struct mount *mp, *tmp;
1958 unsigned int retries;
1961 STAILQ_INIT(&local_unmounts);
1963 STAILQ_CONCAT(&local_unmounts, &deferred_unmount_list);
1966 STAILQ_FOREACH_SAFE(mp, &local_unmounts, mnt_taskqueue_link, tmp) {
1967 flags = mp->mnt_taskqueue_flags;
1968 KASSERT((
flags & MNT_DEFERRED) != 0,
1969 (
"taskqueue unmount without MNT_DEFERRED"));
1973 unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0);
1981 retries = (mp->mnt_unmount_retries)++;
1988 printf(
"giving up on deferred unmount "
1989 "of %s after %d retries, error %d\n",
1990 mp->mnt_stat.f_mntonname, retries, error);
2004 struct mount_upper_node *upper;
2005 struct vnode *coveredvp, *rootvp;
2007 uint64_t async_flag;
2009 unsigned int retries;
2011 KASSERT((
flags & MNT_DEFERRED) == 0 ||
2012 (
flags & (MNT_RECURSE | MNT_FORCE)) == (MNT_RECURSE | MNT_FORCE),
2013 (
"MNT_DEFERRED requires MNT_RECURSE | MNT_FORCE"));
2024 if ((
flags & MNT_DEFERRED) != 0 &&
2028 return (EINPROGRESS);
2043 KASSERT((
flags & MNT_DEFERRED) == 0,
2044 (
"taskqueue unmount with insufficient privilege"));
2050 flags |= MNT_RECURSE;
2052 if ((
flags & MNT_RECURSE) != 0) {
2053 KASSERT((
flags & MNT_FORCE) != 0,
2054 (
"MNT_RECURSE requires MNT_FORCE"));
2064 mp->mnt_kern_flag |= MNTK_RECURSE;
2065 mp->mnt_upper_pending++;
2066 TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) {
2067 retries = upper->mp->mnt_unmount_retries;
2080 mp->mnt_upper_pending--;
2081 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 &&
2082 mp->mnt_upper_pending == 0) {
2083 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER;
2093 if ((
flags & MNT_DEFERRED) == 0) {
2094 while (error == 0 && !TAILQ_EMPTY(&mp->mnt_uppers)) {
2095 mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER;
2096 error = msleep(&mp->mnt_taskqueue_link,
2097 MNT_MTX(mp), PCATCH,
"umntqw", 0);
2104 }
else if (!TAILQ_EMPTY(&mp->mnt_uppers)) {
2111 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), (
"mnt_uppers not empty"));
2115 if ((
flags & MNT_DEFERRED) != 0)
2118 if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
2119 mnt_gen_r = mp->mnt_gen;
2122 vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
2127 if (coveredvp->v_mountedhere != mp ||
2128 coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
2129 VOP_UNLOCK(coveredvp);
2140 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
2141 (mp->mnt_flag & MNT_UPDATE) != 0 ||
2142 !TAILQ_EMPTY(&mp->mnt_uppers)) {
2146 mp->mnt_kern_flag |= MNTK_UNMOUNT;
2148 if (coveredvp != NULL)
2150 if (
flags & MNT_NONBUSY) {
2157 if (rootvp != NULL) {
2165 if (
flags & MNT_FORCE) {
2166 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
2176 if (mp->mnt_lockref) {
2177 mp->mnt_kern_flag |= MNTK_DRAINING;
2178 error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
2182 KASSERT(mp->mnt_lockref == 0,
2183 (
"%s: invalid lock refcount in the drain path @ %s:%d",
2184 __func__, __FILE__, __LINE__));
2186 (
"%s: invalid return value for msleep in the drain path @ %s:%d",
2187 __func__, __FILE__, __LINE__));
2194 if (rootvp != NULL) {
2199 if (mp->mnt_flag & MNT_EXPUBLIC)
2204 async_flag = mp->mnt_flag & MNT_ASYNC;
2205 mp->mnt_flag &= ~MNT_ASYNC;
2206 mp->mnt_kern_flag &= ~MNTK_ASYNC;
2209 error = VFS_UNMOUNT(mp,
flags);
2217 if (error && error != ENXIO) {
2219 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
2224 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
2225 mp->mnt_flag |= async_flag;
2226 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
2227 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
2228 mp->mnt_kern_flag |= MNTK_ASYNC;
2229 if (mp->mnt_kern_flag & MNTK_MWAIT) {
2230 mp->mnt_kern_flag &= ~MNTK_MWAIT;
2237 VOP_UNLOCK(coveredvp);
2240 if (rootvp != NULL) {
2250 EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td);
2251 if (coveredvp != NULL) {
2254 coveredvp->v_mountedhere = NULL;
2256 VI_UNLOCK(coveredvp);
2257 VOP_UNLOCK(coveredvp);
2261 if (rootvp != NULL) {
2272 if ((
flags & MNT_DEFERRED) != 0)
2284 struct vfsoptlist *moptlist = mp->mnt_optnew;
2289 error =
vfs_getopt(moptlist,
"errmsg", (
void **)&errmsg, &len);
2290 if (error || errmsg == NULL || len <= 0)
2294 vsnprintf(errmsg, (
size_t)len, fmt, ap);
2305 error =
vfs_getopt(opts,
"errmsg", (
void **)&errmsg, &len);
2306 if (error || errmsg == NULL || len <= 0)
2310 vsnprintf(errmsg, (
size_t)len, fmt, ap);
2327 const char **t, *p, *q;
2330 TAILQ_FOREACH(opt, opts, link) {
2333 if (p[0] ==
'n' && p[1] ==
'o')
2335 for(t = global_opts; *t != NULL; t++) {
2336 if (strcmp(*t, p) == 0)
2339 if (strcmp(*t, q) == 0)
2345 for(t = legal; *t != NULL; t++) {
2346 if (strcmp(*t, p) == 0)
2349 if (strcmp(*t, q) == 0)
2356 "mount option <%s> is unknown", p);
2360 TAILQ_FOREACH(opt, opts, link) {
2361 if (strcmp(opt->name,
"errmsg") == 0) {
2362 strncpy((
char *)opt->value, errmsg, opt->len);
2385 KASSERT(opts != NULL, (
"vfs_getopt: caller passed 'opts' as NULL"));
2387 TAILQ_FOREACH(opt, opts, link) {
2388 if (strcmp(
name, opt->name) == 0) {
2408 TAILQ_FOREACH(opt, opts, link) {
2409 if (strcmp(
name, opt->name) == 0) {
2420 char *opt_value, *vtp;
2427 if (opt_len == 0 || opt_value == NULL)
2429 if (opt_value[0] ==
'\0' || opt_value[opt_len - 1] !=
'\0')
2431 iv = strtoq(opt_value, &vtp, 0);
2432 if (vtp == opt_value || (vtp[0] !=
'\0' && vtp[1] !=
'\0'))
2464 TAILQ_FOREACH(opt, opts, link) {
2465 if (strcmp(
name, opt->name) != 0)
2468 if (opt->len == 0 ||
2469 ((
char *)opt->value)[opt->len - 1] !=
'\0') {
2473 return (opt->value);
2485 TAILQ_FOREACH(opt, opts, link) {
2486 if (strcmp(
name, opt->name) == 0) {
2505 KASSERT(opts != NULL, (
"vfs_getopt: caller passed 'opts' as NULL"));
2507 TAILQ_FOREACH(opt, opts, link) {
2508 if (strcmp(
name, opt->name) != 0)
2511 if (opt->len == 0 || opt->value == NULL)
2513 if (((
char *)opt->value)[opt->len - 1] !=
'\0')
2516 ret =
vsscanf(opt->value, fmt, ap);
2528 TAILQ_FOREACH(opt, opts, link) {
2529 if (strcmp(
name, opt->name) != 0)
2532 if (opt->value == NULL)
2535 if (opt->len != len)
2537 bcopy(
value, opt->value, len);
2549 TAILQ_FOREACH(opt, opts, link) {
2550 if (strcmp(
name, opt->name) != 0)
2553 if (opt->value == NULL)
2559 bcopy(
value, opt->value, len);
2571 TAILQ_FOREACH(opt, opts, link) {
2572 if (strcmp(
name, opt->name) != 0)
2575 if (opt->value == NULL)
2576 opt->len = strlen(
value) + 1;
2577 else if (strlcpy(opt->value,
value, opt->len) >= opt->len)
2597 KASSERT(opts != NULL, (
"vfs_copyopt: caller passed 'opts' as NULL"));
2599 TAILQ_FOREACH(opt, opts, link) {
2600 if (strcmp(
name, opt->name) == 0) {
2602 if (len != opt->len)
2604 bcopy(opt->value, dest, opt->len);
2619 if (sbp != &mp->mnt_stat)
2620 memcpy(sbp, &mp->mnt_stat,
sizeof(*sbp));
2625 sbp->f_version = STATFS_VERSION;
2626 sbp->f_namemax = NAME_MAX;
2627 sbp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
2629 return (mp->mnt_op->vfs_statfs(mp, sbp));
2636 bzero(mp->mnt_stat.f_mntfromname,
sizeof mp->mnt_stat.f_mntfromname);
2637 strlcpy(mp->mnt_stat.f_mntfromname, from,
2638 sizeof mp->mnt_stat.f_mntfromname);
2675 KASSERT(
name[0] ==
'n' &&
name[1] ==
'o',
2676 (
"mount_argb(...,%s): name must start with 'no'",
name));
2693 ma =
malloc(
sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
2694 SLIST_INIT(&ma->list);
2701 ma->
v[ma->
len].iov_base = (
void *)(uintptr_t)
name;
2702 ma->
v[ma->
len].iov_len = strlen(
name) + 1;
2705 sb = sbuf_new_auto();
2711 maa =
malloc(
sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
2712 SLIST_INSERT_HEAD(&ma->list, maa, next);
2716 ma->
v[ma->
len].iov_base = maa + 1;
2717 ma->
v[ma->
len].iov_len = len;
2735 ma =
malloc(
sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
2736 SLIST_INIT(&ma->list);
2740 maa =
malloc(
sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
2741 SLIST_INSERT_HEAD(&ma->list, maa, next);
2742 tbuf = (
void *)(maa + 1);
2743 ma->
error = copyinstr(val, tbuf, len, NULL);
2757 ma =
malloc(
sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
2758 SLIST_INIT(&ma->list);
2765 ma->
v[ma->
len].iov_base = (
void *)(uintptr_t)
name;
2766 ma->
v[ma->
len].iov_len = strlen(
name) + 1;
2769 ma->
v[ma->
len].iov_base = (
void *)(uintptr_t)val;
2771 ma->
v[ma->
len].iov_len = strlen(val) + 1;
2786 while (!SLIST_EMPTY(&ma->list)) {
2787 maa = SLIST_FIRST(&ma->list);
2788 SLIST_REMOVE_HEAD(&ma->list, next);
2791 free(ma->
v, M_MOUNT);
2804 KASSERT(ma != NULL, (
"kernel_mount NULL ma"));
2805 KASSERT(ma->
v != NULL, (
"kernel_mount NULL ma->v"));
2806 KASSERT(!(ma->
len & 1), (
"kernel_mount odd ma->len (%d)", ma->
len));
2808 auio.uio_iov = ma->
v;
2809 auio.uio_iovcnt = ma->
len;
2810 auio.uio_segflg = UIO_SYSSPACE;
2824#define DEVCTL_LEN 1024
2829 struct mntoptnames *fp;
2831 struct statfs *sfp = &mp->mnt_stat;
2845 cp = (
const uint8_t *)&sfp->f_fsid.val[0];
2846 for (
int i = 0; i <
sizeof(sfp->f_fsid); i++)
2848 sbuf_printf(&sb,
" owner=%u flags=\"", sfp->f_owner);
2849 for (fp =
optnames; fp->o_opt != 0; fp++) {
2850 if ((mp->mnt_flag & fp->o_opt) != 0) {
2883 struct vfsoptlist *opts;
2885 struct vnode *vp_covered, *rootvp;
2888 KASSERT(mp->mnt_lockref > 0,
2889 (
"vfs_remount_ro: mp %p is not busied", mp));
2890 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0,
2891 (
"vfs_remount_ro: mp %p is being unmounted (and busy?)", mp));
2894 vp_covered = mp->mnt_vnodecovered;
2895 error =
vget(vp_covered, LK_EXCLUSIVE | LK_NOWAIT);
2898 VI_LOCK(vp_covered);
2899 if ((vp_covered->v_iflag & VI_MOUNT) != 0) {
2900 VI_UNLOCK(vp_covered);
2904 vp_covered->v_iflag |= VI_MOUNT;
2905 VI_UNLOCK(vp_covered);
2910 if ((mp->mnt_flag & MNT_RDONLY) != 0) {
2915 mp->mnt_flag |= MNT_UPDATE | MNT_FORCE | MNT_RDONLY;
2919 opts =
malloc(
sizeof(
struct vfsoptlist), M_MOUNT, M_WAITOK | M_ZERO);
2921 opt =
malloc(
sizeof(
struct vfsopt), M_MOUNT, M_WAITOK | M_ZERO);
2922 opt->name = strdup(
"ro", M_MOUNT);
2924 TAILQ_INSERT_TAIL(opts, opt, link);
2926 mp->mnt_optnew = opts;
2928 error = VFS_MOUNT(mp);
2932 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE);
2935 if (mp->mnt_opt != NULL)
2937 mp->mnt_opt = mp->mnt_optnew;
2940 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE | MNT_RDONLY);
2944 mp->mnt_optnew = NULL;
2948 VI_LOCK(vp_covered);
2949 vp_covered->v_iflag &= ~VI_MOUNT;
2950 VI_UNLOCK(vp_covered);
2953 if (rootvp != NULL) {
2976 TAILQ_FOREACH_REVERSE(mp, &
mountlist, mntlist, mnt_list) {
2977 error =
vfs_busy(mp, MBF_MNTLSTLOCK | MBF_NOWAIT);
2980 if ((mp->mnt_flag & (MNT_RDONLY | MNT_LOCAL)) != MNT_LOCAL ||
2981 (mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
2989 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0);
2990 mp->mnt_kern_flag |= MNTK_SUSPEND_ALL;
2994 printf(
"suspend of %s failed, error %d\n",
2995 mp->mnt_stat.f_mntonname, error);
3009 TAILQ_FOREACH(mp, &
mountlist, mnt_list) {
3010 if ((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0)
3014 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) != 0);
3015 mp->mnt_kern_flag &= ~MNTK_SUSPEND_ALL;
device_property_type_t type
void mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
SLIST_HEAD(et_eventtimers_list, eventtimer)
TASKQUEUE_DEFINE_THREAD(kqueue_ctx)
int prison_allow(struct ucred *cred, unsigned flag)
void lockdestroy(struct lock *lk)
void lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
void *() malloc(size_t size, struct malloc_type *mtp, int flags)
void * realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
void free(void *addr, struct malloc_type *mtp)
int priv_check_cred(struct ucred *cred, int priv)
int priv_check(struct thread *td, int priv)
struct ucred * crdup(struct ucred *cr)
void crfree(struct ucred *cr)
void panic(const char *fmt,...)
void wakeup(const void *ident)
struct smp_rendezvous_cpus_retry_arg srcra
void devctl_notify(const char *system, const char *subsystem, const char *type, const char *data)
Send a 'notification' to userland, using standard ways.
void devctl_safe_quote_sb(struct sbuf *sb, const char *src)
safely quotes strings that might have double quotes in them.
int vsnprintf(char *str, size_t size, const char *format, va_list ap)
int printf(const char *fmt,...)
int snprintf(char *str, size_t size, const char *format,...)
int sbuf_finish(struct sbuf *s)
int sbuf_putc(struct sbuf *s, int c)
void sbuf_delete(struct sbuf *s)
int sbuf_printf(struct sbuf *s, const char *fmt,...)
int sbuf_vprintf(struct sbuf *s, const char *fmt, va_list ap)
ssize_t sbuf_len(struct sbuf *s)
char * sbuf_data(struct sbuf *s)
int sbuf_error(const struct sbuf *s)
int sbuf_cpy(struct sbuf *s, const char *str)
struct sbuf * sbuf_new(struct sbuf *s, char *buf, int length, int flags)
int sbuf_cat(struct sbuf *s, const char *str)
int sscanf(const char *ibuf, const char *fmt,...)
int vsscanf(const char *inp, char const *fmt0, va_list ap)
void smp_no_rendezvous_barrier(void *dummy)
void smp_rendezvous_cpus_retry(cpuset_t map, void(*setup_func)(void *), void(*action_func)(void *), void(*teardown_func)(void *), void(*wait_func)(void *, int), struct smp_rendezvous_cpus_retry_arg *arg)
void smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
int taskqueue_member(struct taskqueue *queue, struct thread *td)
int taskqueue_enqueue_timeout(struct taskqueue *queue, struct timeout_task *ttask, int ticks)
int copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
int vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, u_int pathlen)
void cache_purge(struct vnode *vp)
int vfs_export(struct mount *mp, struct export_args *argp)
int vfs_setpublicfs(struct mount *mp, struct netexport *nep, struct export_args *argp)
struct vfsconfhead vfsconf
struct vfsconf * vfs_byname_kld(const char *fstype, struct thread *td, int *error)
struct vfsconf * vfs_byname(const char *name)
void() NDFREE(struct nameidata *ndp, const u_int flags)
int namei(struct nameidata *ndp)
struct mntarg * mount_argf(struct mntarg *ma, const char *name, const char *fmt,...)
void vfs_mount_error(struct mount *mp, const char *fmt,...)
void vfs_freeopts(struct vfsoptlist *opts)
static int vfs_isopt_ro(const char *opt)
static void vfs_drain_upper_locked(struct mount *mp)
void vfs_opterror(struct vfsoptlist *opts, const char *fmt,...)
struct mtx_padalign __exclusive_cache_line mountlist_mtx
static bool default_autoro
static void vfs_deferred_unmount(void *arg, int pending)
#define VFS_MOUNTARG_SIZE_MAX
static bool recursive_forced_unmount
MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure")
static void dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
struct mount * vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath, struct ucred *cred)
int vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt,...)
static bool vfs_should_downgrade_to_ro_mount(uint64_t fsflags, int error)
int vfs_copyopt(struct vfsoptlist *opts, const char *name, void *dest, int len)
static SYSCTL_NODE(_vfs, OID_AUTO, deferred_unmount, CTLFLAG_RD|CTLFLAG_MPSAFE, 0, "deferred unmount controls")
int __vfs_statfs(struct mount *mp, struct statfs *sbp)
void vfs_unregister_for_notification(struct mount *mp, struct mount_upper_node *upper)
char * vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
static int vfs_equalopts(const char *opt1, const char *opt2)
static int vfs_check_usecounts(struct mount *mp)
static struct mtx deferred_unmount_lock
int vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value)
static void vfs_op_wait_func(void *arg, int cpu)
int vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
static struct mntoptnames optnames[]
static void mount_devctl_event(const char *type, struct mount *mp, bool donew)
static void vfs_mount_init(void *dummy __unused)
int vfs_remount_ro(struct mount *mp)
void vfs_mountedfrom(struct mount *mp, const char *from)
static void vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
int vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w, uint64_t val)
int vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
void vfs_deleteopt(struct vfsoptlist *opts, const char *name)
static void vfs_op_action_func(void *arg)
static void vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
static void free_mntarg(struct mntarg *ma)
int vfs_filteropt(struct vfsoptlist *opts, const char **legal)
SYSCTL_BOOL(_vfs, OID_AUTO, default_autoro, CTLFLAG_RW, &default_autoro, 0, "Retry failed r/w mount as r/o if no explicit ro/rw option is specified")
void vfs_op_enter(struct mount *mp)
int sys_mount(struct thread *td, struct mount_args *uap)
int vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
EVENTHANDLER_LIST_DEFINE(vfs_mounted)
int kern_unmount(struct thread *td, const char *path, int flags)
static void mount_fini(void *mem, int size)
void vfs_op_barrier_wait(struct mount *mp)
MTX_SYSINIT(deferred_unmount, &deferred_unmount_lock, "deferred_unmount", MTX_DEF)
static void vfs_sanitizeopts(struct vfsoptlist *opts)
int dounmount(struct mount *mp, uint64_t flags, struct thread *td)
static int vfs_domount_first(struct thread *td, struct vfsconf *vfsp, char *fspath, struct vnode *vp, uint64_t fsflags, struct vfsoptlist **optlist)
void vfs_rel(struct mount *mp)
void vfs_op_exit_locked(struct mount *mp)
int vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
static int deferred_unmount_total_retries
int sys_unmount(struct thread *td, struct unmount_args *uap)
static uma_zone_t mount_zone
int vfs_getopt(struct vfsoptlist *opts, const char *name, void **buf, int *len)
struct mount * vfs_ref_from_vp(struct vnode *vp)
static STAILQ_HEAD(mount)
SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "Unprivileged users may mount and unmount file systems")
int kernel_mount(struct mntarg *ma, uint64_t flags)
int sys_nmount(struct thread *td, struct nmount_args *uap)
struct mount * vfs_register_upper_from_vp(struct vnode *vp, struct mount *ump, struct mount_upper_node *upper)
static int deferred_unmount_retry_delay_hz
void vfs_mount_destroy(struct mount *mp)
void vfs_op_exit(struct mount *mp)
static int mount_init(void *mem, int size, int flags)
SYSCTL_UINT(_vfs_deferred_unmount, OID_AUTO, retry_limit, CTLFLAG_RW, &deferred_unmount_retry_limit, 0, "Maximum number of retries for deferred unmount failure")
static int vfs_domount_update(struct thread *td, struct vnode *vp, uint64_t fsflags, struct vfsoptlist **optlist)
void vfs_ref(struct mount *mp)
int vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which)
static int vfs_isopt_rw(const char *opt)
static int vfs_domount(struct thread *td, const char *fstype, char *fspath, uint64_t fsflags, struct vfsoptlist **optlist)
void vfs_register_for_notification(struct mount *mp, struct mount *ump, struct mount_upper_node *upper)
void suspend_all_fs(void)
int vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
struct mntarg * mount_argb(struct mntarg *ma, int flag, const char *name)
static unsigned int deferred_unmount_retry_limit
static bool deferred_unmount_enqueue(struct mount *mp, uint64_t flags, bool requeue, int timeout_ticks)
SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL)
void vfs_unregister_upper(struct mount *mp, struct mount_upper_node *upper)
struct mntarg * mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
static struct timeout_task deferred_unmount_task
int vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions)
struct mntarg * mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
void vhold(struct vnode *vp)
void vfs_allocate_syncvnode(struct mount *mp)
void vfs_periodic(struct mount *mp, int flags)
void vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
void vn_printf(struct vnode *vp, const char *fmt,...)
void vn_irflag_unset_locked(struct vnode *vp, short tounset)
void vn_seqc_write_begin(struct vnode *vp)
void vfs_unbusy(struct mount *mp)
void vn_irflag_set_locked(struct vnode *vp, short toset)
int vfs_suser(struct mount *mp, struct thread *td)
void vrele(struct vnode *vp)
struct mount * vfs_getvfs(fsid_t *fsid)
void vn_seqc_write_end_locked(struct vnode *vp)
void vput(struct vnode *vp)
struct vnode * vfs_cache_root_clear(struct mount *mp)
int vfs_busy(struct mount *mp, int flags)
int vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
int vfs_emptydir(struct vnode *vp)
void vn_seqc_write_end(struct vnode *vp)
void vdrop(struct vnode *vp)
void vfs_deallocate_syncvnode(struct mount *mp)
int vget(struct vnode *vp, int flags)
int vfs_write_suspend(struct mount *mp, int flags)
int vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
void vn_lock_pair(struct vnode *vp1, bool vp1_locked, struct vnode *vp2, bool vp2_locked)
void vn_finished_write(struct mount *mp)
void vfs_write_resume(struct mount *mp, int flags)