#include "opt_debug_lockf.h"

#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/unistd.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
static int lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
struct owner_vertex_list;

#define NOLOCKF (struct lockf_entry *)0
static int       lf_hash_owner(caddr_t, struct vnode *, struct flock *, int);
static struct lockf_entry *
                 lf_alloc_lock(struct lock_owner *);
static int       lf_clearlock(struct lockf *, struct lockf_entry *);
static int       lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int       lf_blocks(struct lockf_entry *, struct lockf_entry *);
static struct lockf_edge *
                 lf_alloc_edge(void);
static int       lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static int       lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
                     int);
static struct lockf_entry *
                 lf_getblock(struct lockf *, struct lockf_entry *);
static int       lf_getlock(struct lockf *, struct lockf_entry *,
                     struct flock *);
static void      lf_update_dependancies(struct lockf *, struct lockf_entry *,
                     int all, struct lockf_entry_list *);
static void      lf_set_start(struct lockf *, struct lockf_entry *, off_t,
                     struct lockf_entry_list *);
static void      lf_set_end(struct lockf *, struct lockf_entry *, off_t,
                     struct lockf_entry_list *);
static int       lf_setlock(struct lockf *, struct lockf_entry *,
                     struct vnode *, void **cookiep);
static int       lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void      lf_split(struct lockf *, struct lockf_entry *,
                     struct lockf_entry *, struct lockf_entry_list *);
static int       graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
                     struct owner_vertex_list *path);
static void      graph_check(struct owner_graph *g, int checkorder);
static void      graph_print_vertices(struct owner_vertex_list *set);
static int       graph_delta_forward(struct owner_graph *g,
                     struct owner_vertex *x, struct owner_vertex *y,
                     struct owner_vertex_list *delta);
static int       graph_delta_backward(struct owner_graph *g,
                     struct owner_vertex *x, struct owner_vertex *y,
                     struct owner_vertex_list *delta);
static int       graph_add_indices(int *indices, int n,
                     struct owner_vertex_list *set);
static int       graph_assign_indices(struct owner_graph *g, int *indices,
                     int nextunused, struct owner_vertex_list *set);
static void      lf_print(char *, struct lockf_entry *);
static void      lf_printlist(char *, struct lockf_entry *);
static void      lf_print_owner(struct lock_owner *);
#define LOCK_OWNER_HASH_SIZE    256

/* ... */

        struct owner_edge_list v_outedges;
        struct owner_edge_list v_inedges;
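/*
 * lf_hash_owner() (fragment below) buckets lock owners by whichever
 * identity is stable for the lock style: remote owners hash on
 * (l_pid, l_sysid), flock-style owners on the file pointer passed as
 * the owner id, and local POSIX owners on the vnode.
 */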
static int
lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
{
        uint32_t h;

        if (flags & F_REMOTE) {
                h = HASHSTEP(0, fl->l_pid);
                h = HASHSTEP(h, fl->l_sysid);
        } else if (flags & F_FLOCK) {
                h = ((uintptr_t)id) >> 7;
        } else {
                h = ((uintptr_t)vp) >> 7;
        }

        return (h % LOCK_OWNER_HASH_SIZE);
}
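/*
 * lf_owner_matches() (below) applies the same identity rules when
 * searching a hash chain: remote owners compare (pid, sysid), local
 * owners compare the opaque owner id.
 */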
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{

        if (flags & F_REMOTE) {
                return lo->lo_pid == fl->l_pid
                        && lo->lo_sysid == fl->l_sysid;
        } else {
                return lo->lo_id == id;
        }
}
static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
        struct lockf_entry *lf;

        lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
        /* ... */
                printf("Allocated lock %p\n", lf);
        /* ... */
        return (lf);
}
static int
lf_free_lock(struct lockf_entry *lock)
{
        struct sx *chainlock;

        KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
        if (--lock->lf_refs > 0)
                return (0);
        /* ... */
        KASSERT(LIST_EMPTY(&lock->lf_outedges),
            ("freeing lock with dependencies"));
        KASSERT(LIST_EMPTY(&lock->lf_inedges),
            ("freeing lock with dependants"));
        /* ... */
        KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
        /* ... */
        if (lo->lo_refs == 0) {
                printf("lf_free_lock: freeing lock owner %p\n", lo);
                /* ... */
                LIST_REMOVE(lo, lo_link);
                /* ... */
                printf("Freed lock owner %p\n", lo);
        }
        /* ... */
        sx_unlock(chainlock);
        /* ... */
        if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
                vrele(lock->lf_vnode);
                lock->lf_vnode = NULL;
        }
        /* ... */
        printf("Freed lock %p\n", lock);
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
        struct flock *fl = ap->a_fl;
        struct lockf_entry *lock;
        struct vnode *vp = ap->a_vp;
        caddr_t id = ap->a_id;
        int flags = ap->a_flags;
        /* ... */
        off_t start, end, oadd;
        if (ap->a_op == F_UNLCKSYS) {
                /* ... */
        }

        switch (fl->l_whence) {
        /* ... */
        case SEEK_END:
                if (size > OFF_MAX ||
                    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
                        return (EOVERFLOW);
                start = size + fl->l_start;
                break;
        /* ... */
        }
        /* ... */
        if (fl->l_len < 0) {
                /* ... */
        } else if (fl->l_len == 0) {
                /* ... */
        } else {
                oadd = fl->l_len - 1;
                if (oadd > OFF_MAX - start)
                        return (EOVERFLOW);
                /* ... */
        }
        /* ... */
        if (ap->a_op != F_SETLK && (*statep) == NULL) {
                /* ... */
                if ((*statep) == NULL) {
                        fl->l_type = F_UNLCK;
                        /* ... */
                }
                /* ... */
        }
                /* ... */
                printf("Allocated lock owner %p\n", lo);
                /* ... */
                lo->lo_flags = flags;
                /* ... */
                if (flags & F_REMOTE) {
                        lo->lo_pid = fl->l_pid;
                        lo->lo_sysid = fl->l_sysid;
                } else if (flags & F_FLOCK) {
                        /* ... */
                } else {
                        struct proc *p = (struct proc *)id;
                        lo->lo_pid = p->p_pid;
                        /* ... */
                }
                lo->lo_vertex = NULL;

                if (lockf_debug & 1) {
                        printf("lf_advlockasync: new lock owner %p ", lo);
                        /* ... */
                }
        lock->lf_start = start;
        /* ... */
        if (flags & F_REMOTE) {
                /* ... */
        }
        /* ... */
        lock->lf_inode = (struct inode *)0;
        lock->lf_type = fl->l_type;
        LIST_INIT(&lock->lf_outedges);
        LIST_INIT(&lock->lf_inedges);
        lock->lf_async_task = ap->a_task;
        lock->lf_flags = ap->a_flags;
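/*
 * A note on the two edge lists initialized above: lf_outedges holds
 * edges to the active locks this entry is blocked by, and lf_inedges
 * holds edges from pending entries blocked by it.  A pending lock is
 * granted once its lf_outedges list drains (see
 * lf_update_dependancies() below).
 */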
        if (VN_IS_DOOMED(vp)) {
                /* ... */
        }
        /* ... */
        ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
        sx_init(&ls->ls_lock, "ls_lock");
        LIST_INIT(&ls->ls_active);
        LIST_INIT(&ls->ls_pending);
        /* ... */
        if (VN_IS_DOOMED(vp)) {
                /* ... */
                LIST_REMOVE(ls, ls_link);
                /* ... */
        }
        /* ... */
        if ((*statep) == NULL) {
                state = *statep = ls;
                /* ... */
        }
        /* ... */
        MPASS(state->ls_threads >= 0);
        /* ... */
        LIST_REMOVE(ls, ls_link);
        /* ... */
        MPASS(state->ls_threads >= 0);
        /* ... */
        sx_xlock(&state->ls_lock);
        /* ... */
        if (VN_IS_DOOMED(vp)) {
                /* ... */
                MPASS(state->ls_threads > 0);
                /* ... */
                sx_xunlock(&state->ls_lock);
                /* ... */
        }
        /* ... */
        switch (ap->a_op) {
        case F_SETLK:
                /* ... */
                error = lf_setlock(state, lock, vp, ap->a_cookiep);
                break;
        /* ... */
        case F_CANCEL:
                /* ... */
                error = lf_cancel(state, lock, *ap->a_cookiep);
                break;
        /* ... */
        }
        LIST_FOREACH(lock, &state->ls_active, lf_link) {
                struct lockf_entry *lf;
                if (LIST_NEXT(lock, lf_link))
                        KASSERT((lock->lf_start
                            <= LIST_NEXT(lock, lf_link)->lf_start),
                            ("locks disordered"));
                LIST_FOREACH(lf, &state->ls_active, lf_link) {
                        /* ... */
                        KASSERT(!lf_blocks(lock, lf),
                            ("two conflicting active locks"));
                        if (lock->lf_owner == lf->lf_owner)
                                KASSERT(!lf_overlaps(lock, lf),
                                    ("two overlapping locks from same owner"));
                }
        }
        LIST_FOREACH(lock, &state->ls_pending, lf_link) {
                KASSERT(!LIST_EMPTY(&lock->lf_outedges),
                    ("pending lock which should be active"));
        }
        /* ... */
        sx_xunlock(&state->ls_lock);
        /* ... */
        MPASS(state->ls_threads > 0);
        /* ... */
        if (state->ls_threads != 0) {
                /* ... */
        }
        /* ... */
        if (error == EDOOFUS) {
                KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
                /* ... */
        }
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
        struct vop_advlockasync_args a;

        /* ... */
        a.a_flags = ap->a_flags;
        /* ... */

        return (lf_advlockasync(&a, statep, size));
}
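/*
 * Illustrative sketch (not part of this file): a filesystem's
 * VOP_ADVLOCK implementation typically forwards to lf_advlock() with
 * the vnode's embedded lock list (v_lockf, used elsewhere in this
 * file) and the current file size.  The name example_advlock and the
 * size lookup via VOP_GETATTR are assumptions for illustration;
 * error handling for doomed vnodes is omitted.
 */
static int
example_advlock(struct vop_advlock_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vattr vattr;
        int error;

        /* lf_advlock() needs the file size to resolve SEEK_END ranges. */
        vn_lock(vp, LK_SHARED | LK_RETRY);
        error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
        VOP_UNLOCK(vp);
        if (error)
                return (error);

        return (lf_advlock(ap, &vp->v_lockf, vattr.va_size));
}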
void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
        struct lockf *state;
        struct lockf_entry *lock, *nlock;
        /* ... */

        KASSERT(VN_IS_DOOMED(vp),
            ("lf_purgelocks: vp %p has not vgone yet", vp));
        /* ... */
        if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
                KASSERT(LIST_EMPTY(&state->ls_pending),
                    ("freeing state with pending locks"));
                /* ... */
        }
        MPASS(state->ls_threads >= 0);
        /* ... */
        sx_xlock(&state->ls_lock);
        /* ... */
        LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
                LIST_REMOVE(lock, lf_link);
                /* ... */
                if (lock->lf_async_task) {
                        /* ... */
                        lock->lf_flags |= F_INTR;
                        /* ... */
                }
        }
        sx_xunlock(&state->ls_lock);
        /* ... */
        while (state->ls_threads > 1)
                msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
        /* ... */
        KASSERT(LIST_EMPTY(&state->ls_pending),
            ("lock pending for %p", state));
        LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
                LIST_REMOVE(lock, lf_link);
                /* ... */
        }
        LIST_REMOVE(state, ls_link);
        /* ... */
        free(state, M_LOCKF);
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

        return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}

static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

        return x->lf_owner != y->lf_owner
                && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
                && lf_overlaps(x, y);
}
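/*
 * A note on the two predicates above: lock extents are closed byte
 * ranges [lf_start, lf_end], so two ranges overlap exactly when each
 * starts at or before the other ends; for example [0, 9] and [9, 20]
 * overlap at byte 9, while [0, 9] and [10, 20] do not.  Two locks
 * block each other only when the owners differ, at least one lock is
 * exclusive (F_WRLCK), and the ranges overlap.
 */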
static struct lockf_edge *
lf_alloc_edge(void)
{

        return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

static void
lf_alloc_vertex(struct lockf_entry *lock)
{

        if (!lock->lf_owner->lo_vertex)
                lock->lf_owner->lo_vertex =
                        graph_alloc_vertex(&lf_owner_graph, lock->lf_owner);
}

static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
        struct lockf_edge *e;
        /* ... */

        LIST_FOREACH(e, &x->lf_outedges, le_outlink)
                KASSERT(e->le_to != y, ("adding lock edge twice"));
        /* ... */
        error = graph_add_edge(&lf_owner_graph, x->lf_owner->lo_vertex,
            y->lf_owner->lo_vertex);
        /* ... */
        LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
        LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
static void
lf_remove_edge(struct lockf_edge *e)
{
        struct lockf_entry *x = e->le_from;
        struct lockf_entry *y = e->le_to;
        /* ... */

        LIST_REMOVE(e, le_outlink);
        LIST_REMOVE(e, le_inlink);
        /* ... */
}

static void
lf_remove_outgoing(struct lockf_entry *x)
{
        struct lockf_edge *e;

        while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
                /* ... */
        }
}

static void
lf_remove_incoming(struct lockf_entry *x)
{
        struct lockf_edge *e;

        while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
                /* ... */
        }
}
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *overlap;
        /* ... */

        LIST_FOREACH(overlap, &state->ls_active, lf_link) {
                /* ... */
                if (overlap->lf_start > lock->lf_end)
                        break;
                /* ... */
        }
        /* ... */
        LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
                /* ... */
        }

static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *overlap;

        sx_assert(&state->ls_lock, SX_XLOCKED);
        if (LIST_EMPTY(&state->ls_pending))
                return (0);
        /* ... */
        LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *lf, *lfprev;

        if (LIST_EMPTY(&state->ls_active)) {
                LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
                return;
        }

        lfprev = NULL;
        LIST_FOREACH(lf, &state->ls_active, lf_link) {
                if (lf->lf_start > lock->lf_start) {
                        LIST_INSERT_BEFORE(lf, lock, lf_link);
                        return;
                }
                lfprev = lf;
        }
        LIST_INSERT_AFTER(lfprev, lock, lf_link);
}
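/*
 * lf_insert_lock() keeps ls_active sorted by lf_start.  The
 * DIAGNOSTIC "locks disordered" assertion in lf_advlockasync() and
 * the early exits on overlap->lf_start > lock->lf_end in the scan
 * loops both rely on this ordering.
 */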
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

        /* ... */
        LIST_REMOVE(wakelock, lf_link);
        /* ... */
        if (lockf_debug & 1)
                lf_print("lf_wakeup_lock: awakening", wakelock);
        /* ... */
        if (wakelock->lf_async_task) {
                /* ... */
        }
}

static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
    struct lockf_entry_list *granted)
{
        struct lockf_edge *e, *ne;
        struct lockf_entry *deplock;

        LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
                deplock = e->le_from;
                /* ... */
                if (LIST_EMPTY(&deplock->lf_outedges)) {
                        /* ... */
                        LIST_INSERT_HEAD(granted, deplock, lf_link);
                }
        }
}
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
    struct lockf_entry_list *granted)
{

        KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
        lock->lf_start = new_start;
        LIST_REMOVE(lock, lf_link);
        /* ... */
}

static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
    struct lockf_entry_list *granted)
{

        KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
        lock->lf_end = new_end;
        /* ... */
}
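/*
 * Shrinking a lock's extent can unblock waiters whose ranges no
 * longer conflict, so both helpers above re-run the dependency check
 * via lf_update_dependancies() and collect newly grantable entries on
 * the caller's granted list.
 */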
static void
lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *overlap, *lf;
        struct lockf_entry_list granted;
        /* ... */

        LIST_INIT(&granted);
        LIST_INSERT_HEAD(&granted, lock, lf_link);

        while (!LIST_EMPTY(&granted)) {
                lock = LIST_FIRST(&granted);
                LIST_REMOVE(lock, lf_link);
                /* ... */
                overlap = LIST_FIRST(&state->ls_active);
                /* ... */
                if (ovcase && (lockf_debug & 2)) {
                        printf("lf_setlock: overlap %d", ovcase);
                        lf_print("", overlap);
                }
                /* ... */
                        LIST_REMOVE(overlap, lf_link);
                        /* ... */
                        lf_split(state, overlap, lock, &granted);
                        /* ... */
                        lf = LIST_NEXT(overlap, lf_link);
                        LIST_REMOVE(overlap, lf_link);
                        /* ... */
                        lf_set_end(state, overlap, lock->lf_start - 1,
                            &granted);
                        overlap = LIST_NEXT(overlap, lf_link);
                /* ... */
                if (lockf_debug & 1) {
                        if (lock->lf_type != F_UNLCK)
                                lf_print("lf_activate_lock: activated", lock);
                        else
                                lf_print("lf_activate_lock: unlocked", lock);
                        lf_printlist("lf_activate_lock", lock);
                }
                /* ... */
                if (lock->lf_type != F_UNLCK)
                        lf_insert_lock(state, lock);
        }
static void
lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry_list granted;
        /* ... */

        LIST_REMOVE(lock, lf_link);
        /* ... */
        LIST_INIT(&granted);
        /* ... */
        while (!LIST_EMPTY(&granted)) {
                lock = LIST_FIRST(&granted);
                LIST_REMOVE(lock, lf_link);
                /* ... */
        }
static int
lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
    void **cookiep)
{
        static char lockstr[] = "lockf";
        int error, priority, stops_deferred;

        /* ... */
        if (lockf_debug & 1)
                lf_print("lf_setlock", lock);
        /* ... */
        priority = PLOCK;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        if (!(lock->lf_flags & F_NOINTR))
                priority |= PCATCH;
        /* ... */
        if ((lock->lf_flags & F_WAIT) == 0
            && lock->lf_async_task == NULL) {
                /* ... */
        }
        /* ... */
        if ((lock->lf_flags & F_FLOCK) &&
            lock->lf_type == F_WRLCK) {
                lock->lf_type = F_UNLCK;
                lf_activate_lock(state, lock);
                lock->lf_type = F_WRLCK;
        }
        /* ... */
        if (lockf_debug & 1)
                lf_print("lf_setlock: deadlock", lock);
        /* ... */
        LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
        /* ... */
        if (lockf_debug & 1) {
                struct lockf_edge *e;

                LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
                        lf_print("lf_setlock: blocking on", e->le_to);
                        lf_printlist("lf_setlock", e->le_to);
                }
        }
        /* ... */
        if ((lock->lf_flags & F_WAIT) == 0) {
                /* ... */
                *cookiep = (void *)lock;
                error = EINPROGRESS;
                /* ... */
        }
        /* ... */
        stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
        error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
        sigallowstop(stops_deferred);
        /* ... */
        if (lock->lf_flags & F_INTR) {
                /* ... */
        }
        if (LIST_EMPTY(&lock->lf_outedges)) {
                /* ... */
        }
        /* ... */
        if (lockf_debug & 1) {
                lf_print("lf_setlock: granted", lock);
        }
        /* ... */
        if (lockf_debug & 1)
                lf_print("lf_setlock: deadlock", lock);
static int
lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
{
        struct lockf_entry *overlap;

        overlap = LIST_FIRST(&state->ls_active);
        /* ... */
        if (unlock->lf_type != F_UNLCK)
                panic("lf_clearlock: bad type");
        if (lockf_debug & 1)
                lf_print("lf_clearlock", unlock);
static int
lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
{
        struct lockf_entry *block;

        /* ... */
        if (lockf_debug & 1)
                lf_print("lf_getlock", lock);
        /* ... */
        if ((block = lf_getblock(state, lock))) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == OFF_MAX)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                fl->l_pid = block->lf_owner->lo_pid;
                fl->l_sysid = block->lf_owner->lo_sysid;
        } else {
                fl->l_type = F_UNLCK;
        }
        /* ... */
        return (0);
}
static int
lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
{
        struct lockf_entry *reallock;

        /* ... */
        LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
                if ((void *)reallock == cookie) {
                        /* ... */
                        if (!(reallock->lf_vnode == lock->lf_vnode
                            && reallock->lf_start == lock->lf_start
                            && reallock->lf_end == lock->lf_end)) {
                                return (ENOENT);
                        }
                        /* ... */
                        if (!reallock->lf_async_task) {
                                return (ENOENT);
                        }
                        /* ... */
                }
        }
static struct lockf_entry *
lf_getblock(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *overlap;

        LIST_FOREACH(overlap, &state->ls_active, lf_link) {
                /*
                 * The active list is sorted by lf_start, so the scan
                 * can stop at the first entry past the range.
                 */
                if (overlap->lf_start > lock->lf_end)
                        break;
                if (!lf_blocks(lock, overlap))
                        continue;
                return (overlap);
        }

        return (NOLOCKF);
}
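/*
 * lf_findoverlap() (below) classifies how an existing lock lf relates
 * to the range being probed.  The debug printfs name the cases: no
 * overlap, overlap == lock, overlap contains lock, lock contains
 * overlap, overlap starts before lock, and overlap ends after lock.
 * The caller dispatches on the returned case number (see the ovcase
 * handling in lf_activate_lock() above).
 */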
static int
lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock,
    int type)
{
        struct lockf_entry *lf;
        /* ... */

        if (lockf_debug & 2)
                lf_print("lf_findoverlap: looking for overlap in", lock);
        /* ... */
        start = lock->lf_start;
        end = lock->lf_end;
        /* ... */
                if (lf->lf_start > end)
                        break;
                if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
                    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
                        *overlap = LIST_NEXT(lf, lf_link);
                        continue;
                }
                if (lockf_debug & 2)
                        lf_print("\tchecking", lf);
                /* ... */
                if (start > lf->lf_end) {
                        /* Case 0: no overlap. */
                        if (lockf_debug & 2)
                                printf("no overlap\n");
                        *overlap = LIST_NEXT(lf, lf_link);
                        continue;
                }
                if (lf->lf_start == start && lf->lf_end == end) {
                        /* Case 1: overlap == lock. */
                        if (lockf_debug & 2)
                                printf("overlap == lock\n");
                        /* ... */
                }
                if (lf->lf_start <= start && lf->lf_end >= end) {
                        /* Case 2: overlap contains lock. */
                        if (lockf_debug & 2)
                                printf("overlap contains lock\n");
                        /* ... */
                }
                if (start <= lf->lf_start && end >= lf->lf_end) {
                        /* Case 3: lock contains overlap. */
                        if (lockf_debug & 2)
                                printf("lock contains overlap\n");
                        /* ... */
                }
                if (lf->lf_start < start && lf->lf_end >= start) {
                        /* Case 4: overlap starts before lock. */
                        if (lockf_debug & 2)
                                printf("overlap starts before lock\n");
                        /* ... */
                }
                if (lf->lf_start > start && lf->lf_end > end) {
                        /* Case 5: overlap ends after lock. */
                        if (lockf_debug & 2)
                                printf("overlap ends after lock\n");
                        /* ... */
                }
                panic("lf_findoverlap: default");
static void
lf_split(struct lockf *state, struct lockf_entry *lock1,
    struct lockf_entry *lock2, struct lockf_entry_list *granted)
{
        struct lockf_entry *splitlock;

        /* ... */
        if (lockf_debug & 2) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
        /* ... */
        if (lock1->lf_start == lock2->lf_start) {
                lf_set_start(state, lock1, lock2->lf_end + 1, granted);
                return;
        }
        if (lock1->lf_end == lock2->lf_end) {
                lf_set_end(state, lock1, lock2->lf_start - 1, granted);
                return;
        }
        /* ... */
        splitlock = lf_alloc_lock(lock1->lf_owner);
        memcpy(splitlock, lock1, sizeof *splitlock);
        splitlock->lf_refs = 1;
        if (splitlock->lf_flags & F_REMOTE)
                vref(splitlock->lf_vnode);
        /* ... */
        splitlock->lf_start = lock2->lf_end + 1;
        LIST_INIT(&splitlock->lf_outedges);
        LIST_INIT(&splitlock->lf_inedges);
        /* ... */
        lf_set_end(state, lock1, lock2->lf_start - 1, granted);
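/*
 * The three outcomes above: when lock2 shares lock1's start, lock1 is
 * trimmed upward; when it shares the end, lock1 is trimmed downward;
 * otherwise lock2 sits strictly inside lock1, so a cloned entry keeps
 * the bytes above lock2 while lock1 keeps the bytes below it.
 */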
int
lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
{
        struct lockf_entry *lf;
        struct lockdesc *ldesc;
        struct lockdesclist locks;
        /* ... */

        STAILQ_INIT(&locks);
        /* ... */
        sx_xlock(&ls->ls_lock);
        LIST_FOREACH(lf, &ls->ls_active, lf_link) {
                if (lf->lf_owner->lo_sysid != sysid)
                        continue;
                /* ... */
                ldesc->vp = lf->lf_vnode;
                /* ... */
                ldesc->fl.l_start = lf->lf_start;
                if (lf->lf_end == OFF_MAX)
                        ldesc->fl.l_len = 0;
                else
                        ldesc->fl.l_len =
                            lf->lf_end - lf->lf_start + 1;
                ldesc->fl.l_whence = SEEK_SET;
                ldesc->fl.l_type = F_UNLCK;
                ldesc->fl.l_pid = lf->lf_owner->lo_pid;
                ldesc->fl.l_sysid = sysid;
                STAILQ_INSERT_TAIL(&locks, ldesc, link);
        }
        sx_xunlock(&ls->ls_lock);
        /* ... */
        while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
                STAILQ_REMOVE_HEAD(&locks, link);
                /* ... */
                error = fn(ldesc->vp, &ldesc->fl, arg);
                /* ... */
                free(ldesc, M_LOCKF);
        }
int
lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
{
        struct lockf_entry *lf;
        struct lockdesc *ldesc;
        struct lockdesclist locks;
        /* ... */

        STAILQ_INIT(&locks);
        /* ... */
        MPASS(ls->ls_threads >= 0);
        /* ... */
        sx_xlock(&ls->ls_lock);
        LIST_FOREACH(lf, &ls->ls_active, lf_link) {
                /* ... */
                ldesc->vp = lf->lf_vnode;
                /* ... */
                ldesc->fl.l_start = lf->lf_start;
                if (lf->lf_end == OFF_MAX)
                        ldesc->fl.l_len = 0;
                else
                        ldesc->fl.l_len =
                            lf->lf_end - lf->lf_start + 1;
                ldesc->fl.l_whence = SEEK_SET;
                ldesc->fl.l_type = F_UNLCK;
                ldesc->fl.l_pid = lf->lf_owner->lo_pid;
                ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
                STAILQ_INSERT_TAIL(&locks, ldesc, link);
        }
        sx_xunlock(&ls->ls_lock);
        /* ... */
        MPASS(ls->ls_threads > 0);
        /* ... */
        while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
                STAILQ_REMOVE_HEAD(&locks, link);
                /* ... */
                error = fn(ldesc->vp, &ldesc->fl, arg);
                /* ... */
                free(ldesc, M_LOCKF);
        }
static int
lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
{

        VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
        return (0);
}

void
lf_clearremotesys(int sysid)
{

        KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
        lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
}

int
lf_countlocks(int sysid)
{
        /* ... */
                        if (lo->lo_sysid == sysid)
                                count += lo->lo_refs;
        /* ... */
}
static int
graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path)
{
        struct owner_edge *e;

        if (x == y) {
                if (path)
                        TAILQ_INSERT_HEAD(path, x, v_link);
                return (1);
        }
        LIST_FOREACH(e, &x->v_outedges, e_outlink) {
                if (graph_reaches(e->e_to, y, path)) {
                        if (path)
                                TAILQ_INSERT_HEAD(path, x, v_link);
                        return (1);
                }
        }
        return (0);
}
static void
graph_check(struct owner_graph *g, int checkorder)
{
        int i, j;

        for (i = 0; i < g->g_size; i++) {
                /* ... */
                KASSERT(g->g_vertices[i]->v_order == i,
                    ("lock graph vertices disordered"));
                /* ... */
                for (j = 0; j < i; j++) {
                        /* ... */
                        KASSERT(g->g_vertices[j]->v_order <
                            g->g_vertices[i]->v_order,
                            ("lock graph vertices disordered"));
                }
        }
}
static void
graph_print_vertices(struct owner_vertex_list *set)
{
        struct owner_vertex *v;

        TAILQ_FOREACH(v, set, v_link) {
                printf("%d:", v->v_order);
                lf_print_owner(v->v_owner);
                if (TAILQ_NEXT(v, v_link))
                        printf(", ");
        }
        /* ... */
}
static int
graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{
        /* ... */
        TAILQ_INSERT_TAIL(delta, y, v_link);
        /* ... */
        while (v) {
                LIST_FOREACH(e, &v->v_outedges, e_outlink) {
                        /* ... */
                        if (e->e_to->v_order < x->v_order
                            && e->e_to->v_gen != gen) {
                                e->e_to->v_gen = gen;
                                TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
                        }
                }
                v = TAILQ_NEXT(v, v_link);
        }
static int
graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{
        /* ... */
        TAILQ_INSERT_TAIL(delta, x, v_link);
        /* ... */
        while (v) {
                LIST_FOREACH(e, &v->v_inedges, e_inlink) {
                        if (e->e_from->v_order > y->v_order
                            && e->e_from->v_gen != gen) {
                                e->e_from->v_gen = gen;
                                TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
                        }
                }
                v = TAILQ_PREV(v, owner_vertex_list, v_link);
        }
static int
graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
{
        struct owner_vertex *v;
        int i, j;

        TAILQ_FOREACH(v, set, v_link) {
                /* Insertion sort: find the slot, shift, insert. */
                for (i = n;
                     i > 0 && indices[i - 1] > v->v_order; i--)
                        ;
                for (j = n - 1; j >= i; j--)
                        indices[j + 1] = indices[j];
                indices[i] = v->v_order;
                /* ... */
        }
static int
graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
    struct owner_vertex_list *set)
{
        struct owner_vertex *v, *vlowest;

        while (!TAILQ_EMPTY(set)) {
                vlowest = NULL;
                TAILQ_FOREACH(v, set, v_link) {
                        if (!vlowest || v->v_order < vlowest->v_order)
                                vlowest = v;
                }
                TAILQ_REMOVE(set, vlowest, v_link);
                vlowest->v_order = indices[nextunused];
                /* ... */
                nextunused++;
        }

        return (nextunused);
}
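/*
 * A note on the reordering machinery above: when graph_add_edge()
 * (below) inserts an edge x -> y with y->v_order < x->v_order, the
 * affected region of the topological order is recomputed.  deltaF
 * collects the vertices reachable forward from y within the region,
 * deltaB those reaching x backward; merging their old order indices
 * (graph_add_indices) and handing them back out in ascending order
 * (graph_assign_indices) restores a valid topological order without
 * touching the rest of the graph.  This resembles the standard
 * incremental topological-ordering approach; a cycle, i.e. a
 * deadlock, is reported when x is already reachable from y.
 */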
static int
graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
        struct owner_vertex_list deltaF, deltaB;
        /* ... */

        LIST_FOREACH(e, &x->v_outedges, e_outlink) {
                if (e->e_to == y) {
                        e->e_refs++;
                        return (0);
                }
        }

        if (lockf_debug & 8) {
                printf("adding edge %d:", x->v_order);
                lf_print_owner(x->v_owner);
                printf(" -> %d:", y->v_order);
                lf_print_owner(y->v_owner);
                printf("\n");
        }

        if (y->v_order < x->v_order) {
                /* ... */
                if (g->g_gen == 0) {
                        /* ... */
                        for (vi = 0; vi < g->g_size; vi++) {
                                /* ... */
                        }
                }
                /* ... */
                if (lockf_debug & 8) {
                        struct owner_vertex_list path;

                        TAILQ_INIT(&path);
                        graph_reaches(y, x, &path);
                        graph_print_vertices(&path);
                }
                /* ... */
                if (lockf_debug & 8) {
                        printf("re-ordering graph vertices\n");
                        printf("deltaF = ");
                        graph_print_vertices(&deltaF);
                }
                /* ... */
                if (lockf_debug & 8) {
                        printf("deltaB = ");
                        graph_print_vertices(&deltaB);
                }
                /* ... */
                if (lockf_debug & 8) {
                        struct owner_vertex_list set;

                        TAILQ_INIT(&set);
                        for (i = 0; i < nB + nF; i++)
                                TAILQ_INSERT_TAIL(&set,
                                    g->g_vertices[indices[i]], v_link);
                        printf("new ordering = ");
                        graph_print_vertices(&set);
                }

                KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
        }

        if (lockf_debug & 8) {
                graph_check(g, TRUE);
        }
        /* ... */
        LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
        LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
static void
graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
        struct owner_edge *e;

        LIST_FOREACH(e, &x->v_outedges, e_outlink) {
                if (e->e_to == y)
                        break;
        }
        KASSERT(e, ("Removing non-existent edge from deadlock graph"));

        e->e_refs--;
        if (e->e_refs == 0) {
                if (lockf_debug & 8) {
                        printf("removing edge %d:", x->v_order);
                        lf_print_owner(x->v_owner);
                        printf(" -> %d:", y->v_order);
                        lf_print_owner(y->v_owner);
                        printf("\n");
                }
                LIST_REMOVE(e, e_outlink);
                LIST_REMOVE(e, e_inlink);
                free(e, M_LOCKF);
        }
}
static struct owner_vertex *
graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
{
        /* ... */
        v->v_gen = g->g_gen;
        /* ... */
        LIST_INIT(&v->v_outedges);
        LIST_INIT(&v->v_inedges);
        /* ... */
}

static void
graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
{
        /* ... */
        KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
        KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
        /* ... */
        for (i = v->v_order + 1; i < g->g_size; i++) {
                /* ... */
        }
        /* ... */
}
static void
lf_print_owner(struct lock_owner *lo)
{

        if (lo->lo_flags & F_REMOTE) {
                printf("remote pid %d, system %d",
                    lo->lo_pid, lo->lo_sysid);
        } else if (lo->lo_flags & F_FLOCK) {
                printf("file %p", lo->lo_id);
        } else {
                printf("local pid %d", lo->lo_pid);
        }
}
static void
lf_print(char *tag, struct lockf_entry *lock)
{

        printf("%s: lock %p for ", tag, (void *)lock);
        lf_print_owner(lock->lf_owner);
        if (lock->lf_inode != (struct inode *)0)
                printf(" in ino %ju on dev <%s>,",
                    (uintmax_t)lock->lf_inode->i_number,
                    devtoname(ITODEV(lock->lf_inode)));
        printf(" %s, start %jd, end ",
            lock->lf_type == F_RDLCK ? "shared" :
            lock->lf_type == F_WRLCK ? "exclusive" :
            lock->lf_type == F_UNLCK ? "unlock" : "unknown",
            (intmax_t)lock->lf_start);
        if (lock->lf_end == OFF_MAX)
                printf("EOF");
        else
                printf("%jd", (intmax_t)lock->lf_end);
        if (!LIST_EMPTY(&lock->lf_outedges))
                printf(" block %p\n",
                    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
        else
                printf("\n");
}
static void
lf_printlist(char *tag, struct lockf_entry *lock)
{
        struct lockf_entry *lf, *blk;
        struct lockf_edge *e;

        if (lock->lf_inode == (struct inode *)0)
                return;

        printf("%s: Lock list for ino %ju on dev <%s>:\n",
            tag, (uintmax_t)lock->lf_inode->i_number,
            devtoname(ITODEV(lock->lf_inode)));
        LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
                printf("\tlock %p for ", (void *)lf);
                lf_print_owner(lock->lf_owner);
                printf(", %s, start %jd, end %jd",
                    lf->lf_type == F_RDLCK ? "shared" :
                    lf->lf_type == F_WRLCK ? "exclusive" :
                    lf->lf_type == F_UNLCK ? "unlock" :
                    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
                LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
                        blk = e->le_to;
                        printf("\n\t\tlock request %p for ", (void *)blk);
                        lf_print_owner(blk->lf_owner);
                        printf(", %s, start %jd, end %jd",
                            blk->lf_type == F_RDLCK ? "shared" :
                            blk->lf_type == F_WRLCK ? "exclusive" :
                            blk->lf_type == F_UNLCK ? "unlock" :
                            "unknown", (intmax_t)blk->lf_start,
                            (intmax_t)blk->lf_end);
                        if (!LIST_EMPTY(&blk->lf_inedges))
                                panic("lf_printlist: bad list");
                }
                printf("\n");
        }
}