kern_lockf.c
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
5 * Authors: Doug Rabson <dfr@rabson.org>
6 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29/*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Scooter Morris at Genentech Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
61 */
62
63#include <sys/cdefs.h>
64__FBSDID("$FreeBSD$");
65
66#include "opt_debug_lockf.h"
67
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/hash.h>
71#include <sys/kernel.h>
72#include <sys/limits.h>
73#include <sys/lock.h>
74#include <sys/mount.h>
75#include <sys/mutex.h>
76#include <sys/proc.h>
77#include <sys/sx.h>
78#include <sys/unistd.h>
79#include <sys/vnode.h>
80#include <sys/malloc.h>
81#include <sys/fcntl.h>
82#include <sys/lockf.h>
83#include <sys/taskqueue.h>
84
85#ifdef LOCKF_DEBUG
86#include <sys/sysctl.h>
87
88#include <ufs/ufs/extattr.h>
89#include <ufs/ufs/quota.h>
90#include <ufs/ufs/ufsmount.h>
91#include <ufs/ufs/inode.h>
92
93static int lockf_debug = 0; /* control debug output */
94SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
95#endif
96
97static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
98
99struct owner_edge;
100struct owner_vertex;
101struct owner_vertex_list;
102struct owner_graph;
103
104#define NOLOCKF (struct lockf_entry *)0
105#define SELF 0x1
106#define OTHERS 0x2
107static void lf_init(void *);
108static int lf_hash_owner(caddr_t, struct vnode *, struct flock *, int);
109static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
110 int);
111static struct lockf_entry *
112 lf_alloc_lock(struct lock_owner *);
113static int lf_free_lock(struct lockf_entry *);
114static int lf_clearlock(struct lockf *, struct lockf_entry *);
115static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
116static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
117static void lf_free_edge(struct lockf_edge *);
118static struct lockf_edge *
119 lf_alloc_edge(void);
120static void lf_alloc_vertex(struct lockf_entry *);
121static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
122static void lf_remove_edge(struct lockf_edge *);
123static void lf_remove_outgoing(struct lockf_entry *);
124static void lf_remove_incoming(struct lockf_entry *);
125static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
126static int lf_add_incoming(struct lockf *, struct lockf_entry *);
127static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
128 int);
129static struct lockf_entry *
130 lf_getblock(struct lockf *, struct lockf_entry *);
131static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
132static void lf_insert_lock(struct lockf *, struct lockf_entry *);
133static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
134static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
135 int all, struct lockf_entry_list *);
136static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
137 struct lockf_entry_list*);
138static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
139 struct lockf_entry_list*);
140static int lf_setlock(struct lockf *, struct lockf_entry *,
141 struct vnode *, void **cookiep);
142static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
143static void lf_split(struct lockf *, struct lockf_entry *,
144 struct lockf_entry *, struct lockf_entry_list *);
145#ifdef LOCKF_DEBUG
146static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
147 struct owner_vertex_list *path);
148static void graph_check(struct owner_graph *g, int checkorder);
149static void graph_print_vertices(struct owner_vertex_list *set);
150#endif
151static int graph_delta_forward(struct owner_graph *g,
152 struct owner_vertex *x, struct owner_vertex *y,
153 struct owner_vertex_list *delta);
154static int graph_delta_backward(struct owner_graph *g,
155 struct owner_vertex *x, struct owner_vertex *y,
156 struct owner_vertex_list *delta);
157static int graph_add_indices(int *indices, int n,
158 struct owner_vertex_list *set);
159static int graph_assign_indices(struct owner_graph *g, int *indices,
160 int nextunused, struct owner_vertex_list *set);
161static int graph_add_edge(struct owner_graph *g,
162 struct owner_vertex *x, struct owner_vertex *y);
163static void graph_remove_edge(struct owner_graph *g,
164 struct owner_vertex *x, struct owner_vertex *y);
165static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
166 struct lock_owner *lo);
167static void graph_free_vertex(struct owner_graph *g,
168 struct owner_vertex *v);
169static struct owner_graph * graph_init(struct owner_graph *g);
170#ifdef LOCKF_DEBUG
171static void lf_print(char *, struct lockf_entry *);
172static void lf_printlist(char *, struct lockf_entry *);
173static void lf_print_owner(struct lock_owner *);
174#endif
175
176/*
177 * This structure is used to keep track of both local and remote lock
178 * owners. The lf_owner field of the struct lockf_entry points back at
179 * the lock owner structure. Each possible lock owner (local proc for
180 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
181 * pair for remote locks) is represented by a unique instance of
182 * struct lock_owner.
183 *
184 * If a lock owner has a lock that blocks some other lock or a lock
185 * that is waiting for some other lock, it also has a vertex in the
186 * owner_graph below.
187 *
188 * Locks:
189 * (s) locked by state->ls_lock
190 * (S) locked by lf_lock_states_lock
191 * (g) locked by lf_owner_graph_lock
192 * (c) const until freeing
193 */
194#define LOCK_OWNER_HASH_SIZE 256
195
196struct lock_owner {
197 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
198 int lo_refs; /* (l) Number of locks referring to this */
199 int lo_flags; /* (c) Flags passed to lf_advlock */
200 caddr_t lo_id; /* (c) Id value passed to lf_advlock */
201 pid_t lo_pid; /* (c) Process Id of the lock owner */
202 int lo_sysid; /* (c) System Id of the lock owner */
203 int lo_hash; /* (c) Used to lock the appropriate chain */
204 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
205};
206
207LIST_HEAD(lock_owner_list, lock_owner);
208
209struct lock_owner_chain {
210 struct sx lock;
211 struct lock_owner_list list;
212};
213
214static struct sx lf_lock_states_lock;
215static struct lockf_list lf_lock_states; /* (S) */
216static struct lock_owner_chain lf_lock_owners[LOCK_OWNER_HASH_SIZE];
217
218/*
219 * Structures for deadlock detection.
220 *
221 * We have two types of directed graph, the first is the set of locks,
222 * both active and pending on a vnode. Within this graph, active locks
223 * are terminal nodes in the graph (i.e. have no out-going
224 * edges). Pending locks have out-going edges to each blocking active
225 * lock that prevents the lock from being granted and also to each
226 * older pending lock that would block them if it was active. The
227 * graph for each vnode is naturally acyclic; new edges are only ever
228 * added to or from new nodes (either new pending locks which only add
229 * out-going edges or new active locks which only add in-coming edges)
230 * therefore they cannot create loops in the lock graph.
231 *
232 * The second graph is a global graph of lock owners. Each lock owner
233 * is a vertex in that graph and an edge is added to the graph
234 * whenever an edge is added to a vnode graph, with end points
235 * corresponding to owner of the new pending lock and the owner of the
236 * lock upon which it waits. In order to prevent deadlock, we only add
237 * an edge to this graph if the new edge would not create a cycle.
238 *
239 * The lock owner graph is topologically sorted, i.e. if a node has
240 * any outgoing edges, then it has an order strictly less than any
241 * node to which it has an outgoing edge. We preserve this ordering
242 * (and detect cycles) on edge insertion using Algorithm PK from the
243 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
244 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
245 * No. 1.7)
246 */
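/*
 * For illustration: suppose owners A, B and C currently have
 * topological orders 0, 1 and 2. Adding the edge A -> C is already
 * consistent with that order and needs no work. Adding C -> A forces
 * the affected region [A..C] to be re-ordered (e.g. to C, A, B) before
 * the edge is recorded. If A could already reach C, the new edge
 * C -> A would close a cycle, so the edge is rejected and the error is
 * reported back to the lock requester as EDEADLK.
 */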
247struct owner_vertex;
248
249struct owner_edge {
250 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
251 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
252 int e_refs; /* (g) number of times added */
253 struct owner_vertex *e_from; /* (c) out-going from here */
254 struct owner_vertex *e_to; /* (c) in-coming to here */
255};
256LIST_HEAD(owner_edge_list, owner_edge);
257
258struct owner_vertex {
259 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
260 uint32_t v_gen; /* (g) workspace for edge insertion */
261 int v_order; /* (g) order of vertex in graph */
262 struct owner_edge_list v_outedges;/* (g) list of out-edges */
263 struct owner_edge_list v_inedges; /* (g) list of in-edges */
264 struct lock_owner *v_owner; /* (c) corresponding lock owner */
265};
266TAILQ_HEAD(owner_vertex_list, owner_vertex);
267
268struct owner_graph {
269 struct owner_vertex** g_vertices; /* (g) pointers to vertices */
270 int g_size; /* (g) number of vertices */
271 int g_space; /* (g) space allocated for vertices */
272 int *g_indexbuf; /* (g) workspace for loop detection */
273 uint32_t g_gen; /* (g) increment when re-ordering */
274};
275
276static struct sx lf_owner_graph_lock;
277static struct owner_graph lf_owner_graph;
278
279/*
280 * Initialise various structures and locks.
281 */
282static void
283lf_init(void *dummy)
284{
285 int i;
286
287 sx_init(&lf_lock_states_lock, "lock states lock");
288 LIST_INIT(&lf_lock_states);
289
290 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
291 sx_init(&lf_lock_owners[i].lock, "lock owners lock");
292 LIST_INIT(&lf_lock_owners[i].list);
293 }
294
295 sx_init(&lf_owner_graph_lock, "owner graph lock");
296 graph_init(&lf_owner_graph);
297}
298SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
299
300/*
301 * Generate a hash value for a lock owner.
302 */
303static int
304lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
305{
306 uint32_t h;
307
308 if (flags & F_REMOTE) {
309 h = HASHSTEP(0, fl->l_pid);
310 h = HASHSTEP(h, fl->l_sysid);
311 } else if (flags & F_FLOCK) {
312 h = ((uintptr_t) id) >> 7;
313 } else {
314 h = ((uintptr_t) vp) >> 7;
315 }
316
317 return (h % LOCK_OWNER_HASH_SIZE);
318}
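/*
 * For example: remote (<pid, sysid>) owners hash on the pid/sysid pair
 * from the flock argument, flock(2)-style owners hash on the file
 * pointer passed as 'id', and local POSIX (fcntl) owners hash on the
 * vnode, so all POSIX owners taking locks on the same vnode share a
 * hash chain.
 */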
319
320/*
321 * Return true if a lock owner matches the details passed to
322 * lf_advlock.
323 */
324static int
325lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
326 int flags)
327{
328 if (flags & F_REMOTE) {
329 return lo->lo_pid == fl->l_pid
330 && lo->lo_sysid == fl->l_sysid;
331 } else {
332 return lo->lo_id == id;
333 }
334}
335
336static struct lockf_entry *
337lf_alloc_lock(struct lock_owner *lo)
338{
339 struct lockf_entry *lf;
340
341 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
342
343#ifdef LOCKF_DEBUG
344 if (lockf_debug & 4)
345 printf("Allocated lock %p\n", lf);
346#endif
347 if (lo) {
348 sx_xlock(&lf_lock_owners[lo->lo_hash].lock);
349 lo->lo_refs++;
350 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock);
351 lf->lf_owner = lo;
352 }
353
354 return (lf);
355}
356
357static int
358lf_free_lock(struct lockf_entry *lock)
359{
360 struct sx *chainlock;
361
362 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
363 if (--lock->lf_refs > 0)
364 return (0);
365 /*
366 * Adjust the lock_owner reference count and
367 * reclaim the entry if this is the last lock
368 * for that owner.
369 */
370 struct lock_owner *lo = lock->lf_owner;
371 if (lo) {
372 KASSERT(LIST_EMPTY(&lock->lf_outedges),
373 ("freeing lock with dependencies"));
374 KASSERT(LIST_EMPTY(&lock->lf_inedges),
375 ("freeing lock with dependants"));
376 chainlock = &lf_lock_owners[lo->lo_hash].lock;
377 sx_xlock(chainlock);
378 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
379 lo->lo_refs--;
380 if (lo->lo_refs == 0) {
381#ifdef LOCKF_DEBUG
382 if (lockf_debug & 1)
383 printf("lf_free_lock: freeing lock owner %p\n",
384 lo);
385#endif
386 if (lo->lo_vertex) {
387 sx_xlock(&lf_owner_graph_lock);
388 graph_free_vertex(&lf_owner_graph,
389 lo->lo_vertex);
390 sx_xunlock(&lf_owner_graph_lock);
391 }
392 LIST_REMOVE(lo, lo_link);
393 free(lo, M_LOCKF);
394#ifdef LOCKF_DEBUG
395 if (lockf_debug & 4)
396 printf("Freed lock owner %p\n", lo);
397#endif
398 }
399 sx_unlock(chainlock);
400 }
401 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
402 vrele(lock->lf_vnode);
403 lock->lf_vnode = NULL;
404 }
405#ifdef LOCKF_DEBUG
406 if (lockf_debug & 4)
407 printf("Freed lock %p\n", lock);
408#endif
409 free(lock, M_LOCKF);
410 return (1);
411}
412
413/*
414 * Advisory record locking support
415 */
416int
417lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
418 u_quad_t size)
419{
420 struct lockf *state;
421 struct flock *fl = ap->a_fl;
422 struct lockf_entry *lock;
423 struct vnode *vp = ap->a_vp;
424 caddr_t id = ap->a_id;
425 int flags = ap->a_flags;
426 int hash;
427 struct lock_owner *lo;
428 off_t start, end, oadd;
429 int error;
430
431 /*
432 * Handle the F_UNLCKSYS case first - no need to mess about
433 * creating a lock owner for this one.
434 */
435 if (ap->a_op == F_UNLCKSYS) {
436 lf_clearremotesys(fl->l_sysid);
437 return (0);
438 }
439
440 /*
441 * Convert the flock structure into a start and end.
442 */
443 switch (fl->l_whence) {
444 case SEEK_SET:
445 case SEEK_CUR:
446 /*
447 * Caller is responsible for adding any necessary offset
448 * when SEEK_CUR is used.
449 */
450 start = fl->l_start;
451 break;
452
453 case SEEK_END:
454 if (size > OFF_MAX ||
455 (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
456 return (EOVERFLOW);
457 start = size + fl->l_start;
458 break;
459
460 default:
461 return (EINVAL);
462 }
463 if (start < 0)
464 return (EINVAL);
465 if (fl->l_len < 0) {
466 if (start == 0)
467 return (EINVAL);
468 end = start - 1;
469 start += fl->l_len;
470 if (start < 0)
471 return (EINVAL);
472 } else if (fl->l_len == 0) {
473 end = OFF_MAX;
474 } else {
475 oadd = fl->l_len - 1;
476 if (oadd > OFF_MAX - start)
477 return (EOVERFLOW);
478 end = start + oadd;
479 }
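/*
 * For example: with l_whence = SEEK_SET, l_start = 100 and l_len = 10
 * the resulting inclusive byte range is [100 .. 109]; l_len = 0
 * extends the range to [100 .. OFF_MAX]; and a negative length such as
 * l_len = -10 locks the bytes immediately before l_start, i.e.
 * [90 .. 99].
 */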
480
481retry_setlock:
482
483 /*
484 * Avoid the common case of unlocking when inode has no locks.
485 */
486 if (ap->a_op != F_SETLK && (*statep) == NULL) {
487 VI_LOCK(vp);
488 if ((*statep) == NULL) {
489 fl->l_type = F_UNLCK;
490 VI_UNLOCK(vp);
491 return (0);
492 }
493 VI_UNLOCK(vp);
494 }
495
496 /*
497 * Map our arguments to an existing lock owner or create one
498 * if this is the first time we have seen this owner.
499 */
500 hash = lf_hash_owner(id, vp, fl, flags);
501 sx_xlock(&lf_lock_owners[hash].lock);
502 LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link)
503 if (lf_owner_matches(lo, id, fl, flags))
504 break;
505 if (!lo) {
506 /*
507 * We initialise the lock owner with a reference
508 * count which matches the new lockf_entry
509 * structure created below.
510 */
511 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
512 M_WAITOK|M_ZERO);
513#ifdef LOCKF_DEBUG
514 if (lockf_debug & 4)
515 printf("Allocated lock owner %p\n", lo);
516#endif
517
518 lo->lo_refs = 1;
519 lo->lo_flags = flags;
520 lo->lo_id = id;
521 lo->lo_hash = hash;
522 if (flags & F_REMOTE) {
523 lo->lo_pid = fl->l_pid;
524 lo->lo_sysid = fl->l_sysid;
525 } else if (flags & F_FLOCK) {
526 lo->lo_pid = -1;
527 lo->lo_sysid = 0;
528 } else {
529 struct proc *p = (struct proc *) id;
530 lo->lo_pid = p->p_pid;
531 lo->lo_sysid = 0;
532 }
533 lo->lo_vertex = NULL;
534
535#ifdef LOCKF_DEBUG
536 if (lockf_debug & 1) {
537 printf("lf_advlockasync: new lock owner %p ", lo);
538 lf_print_owner(lo);
539 printf("\n");
540 }
541#endif
542
543 LIST_INSERT_HEAD(&lf_lock_owners[hash].list, lo, lo_link);
544 } else {
545 /*
546 * We have seen this lock owner before, increase its
547 * reference count to account for the new lockf_entry
548 * structure we create below.
549 */
550 lo->lo_refs++;
551 }
552 sx_xunlock(&lf_lock_owners[hash].lock);
553
554 /*
555 * Create the lockf structure. We initialise the lf_owner
556 * field here instead of in lf_alloc_lock() to avoid paying
557 * the lock owner hash chain lock tax twice.
558 */
559 lock = lf_alloc_lock(NULL);
560 lock->lf_refs = 1;
561 lock->lf_start = start;
562 lock->lf_end = end;
563 lock->lf_owner = lo;
564 lock->lf_vnode = vp;
565 if (flags & F_REMOTE) {
566 /*
567 * For remote locks, the caller may release its ref to
568 * the vnode at any time - we have to ref it here to
569 * prevent it from being recycled unexpectedly.
570 */
571 vref(vp);
572 }
573
574 /*
575 * XXX The problem is that VTOI is ufs specific, so it will
576 * break LOCKF_DEBUG for all other FS's other than UFS because
577 * it casts the vnode->data ptr to struct inode *.
578 */
579/* lock->lf_inode = VTOI(ap->a_vp); */
580 lock->lf_inode = (struct inode *)0;
581 lock->lf_type = fl->l_type;
582 LIST_INIT(&lock->lf_outedges);
583 LIST_INIT(&lock->lf_inedges);
584 lock->lf_async_task = ap->a_task;
585 lock->lf_flags = ap->a_flags;
586
587 /*
588 * Do the requested operation. First find our state structure
589 * and create a new one if necessary - the caller's *statep
590 * variable and the state's ls_threads count is protected by
591 * the vnode interlock.
592 */
593 VI_LOCK(vp);
594 if (VN_IS_DOOMED(vp)) {
595 VI_UNLOCK(vp);
596 lf_free_lock(lock);
597 return (ENOENT);
598 }
599
600 /*
601 * Allocate a state structure if necessary.
602 */
603 state = *statep;
604 if (state == NULL) {
605 struct lockf *ls;
606
607 VI_UNLOCK(vp);
608
609 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
610 sx_init(&ls->ls_lock, "ls_lock");
611 LIST_INIT(&ls->ls_active);
612 LIST_INIT(&ls->ls_pending);
613 ls->ls_threads = 1;
614
615 sx_xlock(&lf_lock_states_lock);
616 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
617 sx_xunlock(&lf_lock_states_lock);
618
619 /*
620 * Cope if we lost a race with some other thread while
621 * trying to allocate memory.
622 */
623 VI_LOCK(vp);
624 if (VN_IS_DOOMED(vp)) {
625 VI_UNLOCK(vp);
626 sx_xlock(&lf_lock_states_lock);
627 LIST_REMOVE(ls, ls_link);
628 sx_xunlock(&lf_lock_states_lock);
629 sx_destroy(&ls->ls_lock);
630 free(ls, M_LOCKF);
631 lf_free_lock(lock);
632 return (ENOENT);
633 }
634 if ((*statep) == NULL) {
635 state = *statep = ls;
636 VI_UNLOCK(vp);
637 } else {
638 state = *statep;
639 MPASS(state->ls_threads >= 0);
640 state->ls_threads++;
641 VI_UNLOCK(vp);
642
643 sx_xlock(&lf_lock_states_lock);
644 LIST_REMOVE(ls, ls_link);
645 sx_xunlock(&lf_lock_states_lock);
646 sx_destroy(&ls->ls_lock);
647 free(ls, M_LOCKF);
648 }
649 } else {
650 MPASS(state->ls_threads >= 0);
651 state->ls_threads++;
652 VI_UNLOCK(vp);
653 }
654
655 sx_xlock(&state->ls_lock);
656 /*
657 * Recheck the doomed vnode after state->ls_lock is
658 * locked. lf_purgelocks() requires that no new threads add
659 * pending locks when vnode is marked by VIRF_DOOMED flag.
660 */
661 if (VN_IS_DOOMED(vp)) {
662 VI_LOCK(vp);
663 MPASS(state->ls_threads > 0);
664 state->ls_threads--;
665 wakeup(state);
666 VI_UNLOCK(vp);
667 sx_xunlock(&state->ls_lock);
668 lf_free_lock(lock);
669 return (ENOENT);
670 }
671
672 switch (ap->a_op) {
673 case F_SETLK:
674 error = lf_setlock(state, lock, vp, ap->a_cookiep);
675 break;
676
677 case F_UNLCK:
678 error = lf_clearlock(state, lock);
679 lf_free_lock(lock);
680 break;
681
682 case F_GETLK:
683 error = lf_getlock(state, lock, fl);
684 lf_free_lock(lock);
685 break;
686
687 case F_CANCEL:
688 if (ap->a_cookiep)
689 error = lf_cancel(state, lock, *ap->a_cookiep);
690 else
691 error = EINVAL;
692 lf_free_lock(lock);
693 break;
694
695 default:
696 lf_free_lock(lock);
697 error = EINVAL;
698 break;
699 }
700
701#ifdef DIAGNOSTIC
702 /*
703 * Check for some can't happen stuff. In this case, the active
704 * lock list becoming disordered or containing mutually
705 * blocking locks. We also check the pending list for locks
706 * which should be active (i.e. have no out-going edges).
707 */
708 LIST_FOREACH(lock, &state->ls_active, lf_link) {
709 struct lockf_entry *lf;
710 if (LIST_NEXT(lock, lf_link))
711 KASSERT((lock->lf_start
712 <= LIST_NEXT(lock, lf_link)->lf_start),
713 ("locks disordered"));
714 LIST_FOREACH(lf, &state->ls_active, lf_link) {
715 if (lock == lf)
716 break;
717 KASSERT(!lf_blocks(lock, lf),
718 ("two conflicting active locks"));
719 if (lock->lf_owner == lf->lf_owner)
720 KASSERT(!lf_overlaps(lock, lf),
721 ("two overlapping locks from same owner"));
722 }
723 }
724 LIST_FOREACH(lock, &state->ls_pending, lf_link) {
725 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
726 ("pending lock which should be active"));
727 }
728#endif
729 sx_xunlock(&state->ls_lock);
730
731 VI_LOCK(vp);
732 MPASS(state->ls_threads > 0);
733 state->ls_threads--;
734 if (state->ls_threads != 0) {
735 wakeup(state);
736 }
737 VI_UNLOCK(vp);
738
739 if (error == EDOOFUS) {
740 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
741 goto retry_setlock;
742 }
743 return (error);
744}
745
746int
747lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
748{
749 struct vop_advlockasync_args a;
750
751 a.a_vp = ap->a_vp;
752 a.a_id = ap->a_id;
753 a.a_op = ap->a_op;
754 a.a_fl = ap->a_fl;
755 a.a_flags = ap->a_flags;
756 a.a_task = NULL;
757 a.a_cookiep = NULL;
758
759 return (lf_advlockasync(&a, statep, size));
760}
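/*
 * A filesystem's VOP_ADVLOCK implementation typically just forwards to
 * lf_advlock() with the address of its per-vnode lock list head and
 * the current file size, e.g. (sketch, mirroring vop_stdadvlock()):
 *
 *	return (lf_advlock(ap, &vp->v_lockf, size));
 */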
761
762void
763lf_purgelocks(struct vnode *vp, struct lockf **statep)
764{
765 struct lockf *state;
766 struct lockf_entry *lock, *nlock;
767
768 /*
769 * For this to work correctly, the caller must ensure that no
770 * other threads enter the locking system for this vnode,
771 * e.g. by checking VIRF_DOOMED. We wake up any threads that are
772 * sleeping waiting for locks on this vnode and then free all
773 * the remaining locks.
774 */
775 VI_LOCK(vp);
776 KASSERT(VN_IS_DOOMED(vp),
777 ("lf_purgelocks: vp %p has not vgone yet", vp));
778 state = *statep;
779 if (state == NULL) {
780 VI_UNLOCK(vp);
781 return;
782 }
783 *statep = NULL;
784 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
785 KASSERT(LIST_EMPTY(&state->ls_pending),
786 ("freeing state with pending locks"));
787 VI_UNLOCK(vp);
788 goto out_free;
789 }
790 MPASS(state->ls_threads >= 0);
791 state->ls_threads++;
792 VI_UNLOCK(vp);
793
794 sx_xlock(&state->ls_lock);
795 sx_xlock(&lf_owner_graph_lock);
796 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
797 LIST_REMOVE(lock, lf_link);
798 lf_remove_outgoing(lock);
799 lf_remove_incoming(lock);
800
801 /*
802 * If it's an async lock, we can just free it
803 * here, otherwise we let the sleeping thread
804 * free it.
805 */
806 if (lock->lf_async_task) {
807 lf_free_lock(lock);
808 } else {
809 lock->lf_flags |= F_INTR;
810 wakeup(lock);
811 }
812 }
813 sx_xunlock(&lf_owner_graph_lock);
814 sx_xunlock(&state->ls_lock);
815
816 /*
817 * Wait for all other threads, sleeping and otherwise
818 * to leave.
819 */
820 VI_LOCK(vp);
821 while (state->ls_threads > 1)
822 msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
823 VI_UNLOCK(vp);
824
825 /*
826 * We can just free all the active locks since they
827 * will have no dependencies (we removed them all
828 * above). We don't need to bother locking since we
829 * are the last thread using this state structure.
830 */
831 KASSERT(LIST_EMPTY(&state->ls_pending),
832 ("lock pending for %p", state));
833 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
834 LIST_REMOVE(lock, lf_link);
835 lf_free_lock(lock);
836 }
837out_free:
838 sx_xlock(&lf_lock_states_lock);
839 LIST_REMOVE(state, ls_link);
840 sx_xunlock(&lf_lock_states_lock);
841 sx_destroy(&state->ls_lock);
842 free(state, M_LOCKF);
843}
844
845/*
846 * Return non-zero if locks 'x' and 'y' overlap.
847 */
848static int
849lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
850{
851
852 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
853}
854
855/*
856 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
857 */
858static int
859lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
860{
861
862 return x->lf_owner != y->lf_owner
863 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
864 && lf_overlaps(x, y);
865}
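/*
 * In other words, two locks conflict only when they belong to
 * different owners, overlap in range and at least one of them is a
 * write lock. For example, shared (F_RDLCK) locks on the same range
 * never block each other, while an F_WRLCK on [0..9] blocks any
 * overlapping lock, shared or exclusive, from another owner.
 */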
866
867/*
868 * Allocate a lock edge from the free list
869 */
870static struct lockf_edge *
871lf_alloc_edge(void)
872{
873
874 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
875}
876
877/*
878 * Free a lock edge.
879 */
880static void
881lf_free_edge(struct lockf_edge *e)
882{
883
884 free(e, M_LOCKF);
885}
886
887/*
888 * Ensure that the lock's owner has a corresponding vertex in the
889 * owner graph.
890 */
891static void
892lf_alloc_vertex(struct lockf_entry *lock)
893{
894 struct owner_graph *g = &lf_owner_graph;
895
896 if (!lock->lf_owner->lo_vertex)
897 lock->lf_owner->lo_vertex =
898 graph_alloc_vertex(g, lock->lf_owner);
899}
900
901/*
902 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
903 * the new edge would cause a cycle in the owner graph.
904 */
905static int
906lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
907{
908 struct owner_graph *g = &lf_owner_graph;
909 struct lockf_edge *e;
910 int error;
911
912#ifdef DIAGNOSTIC
913 LIST_FOREACH(e, &x->lf_outedges, le_outlink)
914 KASSERT(e->le_to != y, ("adding lock edge twice"));
915#endif
916
917 /*
918 * Make sure the two owners have entries in the owner graph.
919 */
920 lf_alloc_vertex(x);
921 lf_alloc_vertex(y);
922
923 error = graph_add_edge(g, x->lf_owner->lo_vertex,
924 y->lf_owner->lo_vertex);
925 if (error)
926 return (error);
927
928 e = lf_alloc_edge();
929 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
930 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
931 e->le_from = x;
932 e->le_to = y;
933
934 return (0);
935}
936
937/*
938 * Remove an edge from the lock graph.
939 */
940static void
941lf_remove_edge(struct lockf_edge *e)
942{
943 struct owner_graph *g = &lf_owner_graph;
944 struct lockf_entry *x = e->le_from;
945 struct lockf_entry *y = e->le_to;
946
947 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
948 LIST_REMOVE(e, le_outlink);
949 LIST_REMOVE(e, le_inlink);
950 e->le_from = NULL;
951 e->le_to = NULL;
952 lf_free_edge(e);
953}
954
955/*
956 * Remove all out-going edges from lock x.
957 */
958static void
959lf_remove_outgoing(struct lockf_entry *x)
960{
961 struct lockf_edge *e;
962
963 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
964 lf_remove_edge(e);
965 }
966}
967
968/*
969 * Remove all in-coming edges from lock x.
970 */
971static void
972lf_remove_incoming(struct lockf_entry *x)
973{
974 struct lockf_edge *e;
975
976 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
977 lf_remove_edge(e);
978 }
979}
980
981/*
982 * Walk the list of locks for the file and create an out-going edge
983 * from lock to each blocking lock.
984 */
985static int
986lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
987{
988 struct lockf_entry *overlap;
989 int error;
990
991 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
992 /*
993 * We may assume that the active list is sorted by
994 * lf_start.
995 */
996 if (overlap->lf_start > lock->lf_end)
997 break;
998 if (!lf_blocks(lock, overlap))
999 continue;
1000
1001 /*
1002 * We've found a blocking lock. Add the corresponding
1003 * edge to the graphs and see if it would cause a
1004 * deadlock.
1005 */
1006 error = lf_add_edge(lock, overlap);
1007
1008 /*
1009 * The only error that lf_add_edge returns is EDEADLK.
1010 * Remove any edges we added and return the error.
1011 */
1012 if (error) {
1013 lf_remove_outgoing(lock);
1014 return (error);
1015 }
1016 }
1017
1018 /*
1019 * We also need to add edges to sleeping locks that block
1020 * us. This ensures that lf_wakeup_lock cannot grant two
1021 * mutually blocking locks simultaneously and also enforces a
1022 * 'first come, first served' fairness model. Note that this
1023 * only happens if we are blocked by at least one active lock
1024 * due to the call to lf_getblock in lf_setlock below.
1025 */
1026 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1027 if (!lf_blocks(lock, overlap))
1028 continue;
1029 /*
1030 * We've found a blocking lock. Add the corresponding
1031 * edge to the graphs and see if it would cause a
1032 * deadlock.
1033 */
1034 error = lf_add_edge(lock, overlap);
1035
1036 /*
1037 * The only error that lf_add_edge returns is EDEADLK.
1038 * Remove any edges we added and return the error.
1039 */
1040 if (error) {
1041 lf_remove_outgoing(lock);
1042 return (error);
1043 }
1044 }
1045
1046 return (0);
1047}
1048
1049/*
1050 * Walk the list of pending locks for the file and create an in-coming
1051 * edge from lock to each blocking lock.
1052 */
1053static int
1054lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
1055{
1056 struct lockf_entry *overlap;
1057 int error;
1058
1059 sx_assert(&state->ls_lock, SX_XLOCKED);
1060 if (LIST_EMPTY(&state->ls_pending))
1061 return (0);
1062
1063 error = 0;
1064 sx_xlock(&lf_owner_graph_lock);
1065 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1066 if (!lf_blocks(lock, overlap))
1067 continue;
1068
1069 /*
1070 * We've found a blocking lock. Add the corresponding
1071 * edge to the graphs and see if it would cause a
1072 * deadlock.
1073 */
1074 error = lf_add_edge(overlap, lock);
1075
1076 /*
1077 * The only error that lf_add_edge returns is EDEADLK.
1078 * Remove any edges we added and return the error.
1079 */
1080 if (error) {
1081 lf_remove_incoming(lock);
1082 break;
1083 }
1084 }
1085 sx_xunlock(&lf_owner_graph_lock);
1086 return (error);
1087}
1088
1089/*
1090 * Insert lock into the active list, keeping list entries ordered by
1091 * increasing values of lf_start.
1092 */
1093static void
1094lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
1095{
1096 struct lockf_entry *lf, *lfprev;
1097
1098 if (LIST_EMPTY(&state->ls_active)) {
1099 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
1100 return;
1101 }
1102
1103 lfprev = NULL;
1104 LIST_FOREACH(lf, &state->ls_active, lf_link) {
1105 if (lf->lf_start > lock->lf_start) {
1106 LIST_INSERT_BEFORE(lf, lock, lf_link);
1107 return;
1108 }
1109 lfprev = lf;
1110 }
1111 LIST_INSERT_AFTER(lfprev, lock, lf_link);
1112}
1113
1114/*
1115 * Wake up a sleeping lock and remove it from the pending list now
1116 * that all its dependencies have been resolved. The caller should
1117 * arrange for the lock to be added to the active list, adjusting any
1118 * existing locks for the same owner as needed.
1119 */
1120static void
1121lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
1122{
1123
1124 /*
1125 * Remove from ls_pending list and wake up the caller
1126 * or start the async notification, as appropriate.
1127 */
1128 LIST_REMOVE(wakelock, lf_link);
1129#ifdef LOCKF_DEBUG
1130 if (lockf_debug & 1)
1131 lf_print("lf_wakeup_lock: awakening", wakelock);
1132#endif /* LOCKF_DEBUG */
1133 if (wakelock->lf_async_task) {
1134 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1135 } else {
1136 wakeup(wakelock);
1137 }
1138}
1139
1140/*
1141 * Re-check all dependent locks and remove edges to locks that we no
1142 * longer block. If 'all' is non-zero, the lock has been removed and
1143 * we must remove all the dependencies, otherwise it has simply been
1144 * reduced but remains active. Any pending locks which have been
1145 * unblocked are added to 'granted'.
1146 */
1147static void
1148lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1149 struct lockf_entry_list *granted)
1150{
1151 struct lockf_edge *e, *ne;
1152 struct lockf_entry *deplock;
1153
1154 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1155 deplock = e->le_from;
1156 if (all || !lf_blocks(lock, deplock)) {
1157 sx_xlock(&lf_owner_graph_lock);
1158 lf_remove_edge(e);
1159 sx_xunlock(&lf_owner_graph_lock);
1160 if (LIST_EMPTY(&deplock->lf_outedges)) {
1161 lf_wakeup_lock(state, deplock);
1162 LIST_INSERT_HEAD(granted, deplock, lf_link);
1163 }
1164 }
1165 }
1166}
1167
1168/*
1169 * Set the start of an existing active lock, updating dependencies and
1170 * adding any newly woken locks to 'granted'.
1171 */
1172static void
1173lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1174 struct lockf_entry_list *granted)
1175{
1176
1177 KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1178 lock->lf_start = new_start;
1179 LIST_REMOVE(lock, lf_link);
1180 lf_insert_lock(state, lock);
1181 lf_update_dependancies(state, lock, FALSE, granted);
1182}
1183
1184/*
1185 * Set the end of an existing active lock, updating dependencies and
1186 * adding any newly woken locks to 'granted'.
1187 */
1188static void
1189lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1190 struct lockf_entry_list *granted)
1191{
1192
1193 KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1194 lock->lf_end = new_end;
1195 lf_update_dependancies(state, lock, FALSE, granted);
1196}
1197
1198/*
1199 * Add a lock to the active list, updating or removing any current
1200 * locks owned by the same owner and processing any pending locks that
1201 * become unblocked as a result. This code is also used for unlock
1202 * since the logic for updating existing locks is identical.
1203 *
1204 * As a result of processing the new lock, we may unblock existing
1205 * pending locks as a result of downgrading/unlocking. We simply
1206 * activate the newly granted locks by looping.
1207 *
1208 * Since the new lock already has its dependencies set up, we always
1209 * add it to the list (unless it's an unlock request). This may
1210 * fragment the lock list in some pathological cases but it's probably
1211 * not a real problem.
1212 */
1213static void
1214lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1215{
1216 struct lockf_entry *overlap, *lf;
1217 struct lockf_entry_list granted;
1218 int ovcase;
1219
1220 LIST_INIT(&granted);
1221 LIST_INSERT_HEAD(&granted, lock, lf_link);
1222
1223 while (!LIST_EMPTY(&granted)) {
1224 lock = LIST_FIRST(&granted);
1225 LIST_REMOVE(lock, lf_link);
1226
1227 /*
1228 * Skip over locks owned by other processes. Handle
1229 * any locks that overlap and are owned by ourselves.
1230 */
1231 overlap = LIST_FIRST(&state->ls_active);
1232 for (;;) {
1233 ovcase = lf_findoverlap(&overlap, lock, SELF);
1234
1235#ifdef LOCKF_DEBUG
1236 if (ovcase && (lockf_debug & 2)) {
1237 printf("lf_setlock: overlap %d", ovcase);
1238 lf_print("", overlap);
1239 }
1240#endif
1241 /*
1242 * Six cases:
1243 * 0) no overlap
1244 * 1) overlap == lock
1245 * 2) overlap contains lock
1246 * 3) lock contains overlap
1247 * 4) overlap starts before lock
1248 * 5) overlap ends after lock
1249 */
1250 switch (ovcase) {
1251 case 0: /* no overlap */
1252 break;
1253
1254 case 1: /* overlap == lock */
1255 /*
1256 * We have already setup the
1257 * dependants for the new lock, taking
1258 * into account a possible downgrade
1259 * or unlock. Remove the old lock.
1260 */
1261 LIST_REMOVE(overlap, lf_link);
1262 lf_update_dependancies(state, overlap, TRUE,
1263 &granted);
1264 lf_free_lock(overlap);
1265 break;
1266
1267 case 2: /* overlap contains lock */
1268 /*
1269 * Just split the existing lock.
1270 */
1271 lf_split(state, overlap, lock, &granted);
1272 break;
1273
1274 case 3: /* lock contains overlap */
1275 /*
1276 * Delete the overlap and advance to
1277 * the next entry in the list.
1278 */
1279 lf = LIST_NEXT(overlap, lf_link);
1280 LIST_REMOVE(overlap, lf_link);
1281 lf_update_dependancies(state, overlap, TRUE,
1282 &granted);
1283 lf_free_lock(overlap);
1284 overlap = lf;
1285 continue;
1286
1287 case 4: /* overlap starts before lock */
1288 /*
1289 * Just update the overlap end and
1290 * move on.
1291 */
1292 lf_set_end(state, overlap, lock->lf_start - 1,
1293 &granted);
1294 overlap = LIST_NEXT(overlap, lf_link);
1295 continue;
1296
1297 case 5: /* overlap ends after lock */
1298 /*
1299 * Change the start of overlap and
1300 * re-insert.
1301 */
1302 lf_set_start(state, overlap, lock->lf_end + 1,
1303 &granted);
1304 break;
1305 }
1306 break;
1307 }
1308#ifdef LOCKF_DEBUG
1309 if (lockf_debug & 1) {
1310 if (lock->lf_type != F_UNLCK)
1311 lf_print("lf_activate_lock: activated", lock);
1312 else
1313 lf_print("lf_activate_lock: unlocked", lock);
1314 lf_printlist("lf_activate_lock", lock);
1315 }
1316#endif /* LOCKF_DEBUG */
1317 if (lock->lf_type != F_UNLCK)
1318 lf_insert_lock(state, lock);
1319 }
1320}
1321
1322/*
1323 * Cancel a pending lock request, either as a result of a signal or a
1324 * cancel request for an async lock.
1325 */
1326static void
1327lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1328{
1329 struct lockf_entry_list granted;
1330
1331 /*
1332 * Note it is theoretically possible that cancelling this lock
1333 * may allow some other pending lock to become
1334 * active. Consider this case:
1335 *
1336 * Owner Action Result Dependencies
1337 *
1338 * A: lock [0..0] succeeds
1339 * B: lock [2..2] succeeds
1340 * C: lock [1..2] blocked C->B
1341 * D: lock [0..1] blocked C->B,D->A,D->C
1342 * A: unlock [0..0] C->B,D->C
1343 * C: cancel [1..2]
1344 */
1345
1346 LIST_REMOVE(lock, lf_link);
1347
1348 /*
1349 * Removing out-going edges is simple.
1350 */
1351 sx_xlock(&lf_owner_graph_lock);
1352 lf_remove_outgoing(lock);
1353 sx_xunlock(&lf_owner_graph_lock);
1354
1355 /*
1356 * Removing in-coming edges may allow some other lock to
1357 * become active - we use lf_update_dependancies to figure
1358 * this out.
1359 */
1360 LIST_INIT(&granted);
1361 lf_update_dependancies(state, lock, TRUE, &granted);
1362 lf_free_lock(lock);
1363
1364 /*
1365 * Feed any newly active locks to lf_activate_lock.
1366 */
1367 while (!LIST_EMPTY(&granted)) {
1368 lock = LIST_FIRST(&granted);
1369 LIST_REMOVE(lock, lf_link);
1370 lf_activate_lock(state, lock);
1371 }
1372}
1373
1374/*
1375 * Set a byte-range lock.
1376 */
1377static int
1378lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1379 void **cookiep)
1380{
1381 static char lockstr[] = "lockf";
1382 int error, priority, stops_deferred;
1383
1384#ifdef LOCKF_DEBUG
1385 if (lockf_debug & 1)
1386 lf_print("lf_setlock", lock);
1387#endif /* LOCKF_DEBUG */
1388
1389 /*
1390 * Set the priority
1391 */
1392 priority = PLOCK;
1393 if (lock->lf_type == F_WRLCK)
1394 priority += 4;
1395 if (!(lock->lf_flags & F_NOINTR))
1396 priority |= PCATCH;
1397 /*
1398 * Scan lock list for this file looking for locks that would block us.
1399 */
1400 if (lf_getblock(state, lock)) {
1401 /*
1402 * Free the structure and return if nonblocking.
1403 */
1404 if ((lock->lf_flags & F_WAIT) == 0
1405 && lock->lf_async_task == NULL) {
1406 lf_free_lock(lock);
1407 error = EAGAIN;
1408 goto out;
1409 }
1410
1411 /*
1412 * For flock type locks, we must first remove
1413 * any shared locks that we hold before we sleep
1414 * waiting for an exclusive lock.
1415 */
1416 if ((lock->lf_flags & F_FLOCK) &&
1417 lock->lf_type == F_WRLCK) {
1418 lock->lf_type = F_UNLCK;
1419 lf_activate_lock(state, lock);
1420 lock->lf_type = F_WRLCK;
1421 }
1422
1423 /*
1424 * We are blocked. Create edges to each blocking lock,
1425 * checking for deadlock using the owner graph. For
1426 * simplicity, we run deadlock detection for all
1427 * locks, posix and otherwise.
1428 */
1429 sx_xlock(&lf_owner_graph_lock);
1430 error = lf_add_outgoing(state, lock);
1431 sx_xunlock(&lf_owner_graph_lock);
1432
1433 if (error) {
1434#ifdef LOCKF_DEBUG
1435 if (lockf_debug & 1)
1436 lf_print("lf_setlock: deadlock", lock);
1437#endif
1438 lf_free_lock(lock);
1439 goto out;
1440 }
1441
1442 /*
1443 * We have added edges to everything that blocks
1444 * us. Sleep until they all go away.
1445 */
1446 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1447#ifdef LOCKF_DEBUG
1448 if (lockf_debug & 1) {
1449 struct lockf_edge *e;
1450 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1451 lf_print("lf_setlock: blocking on", e->le_to);
1452 lf_printlist("lf_setlock", e->le_to);
1453 }
1454 }
1455#endif /* LOCKF_DEBUG */
1456
1457 if ((lock->lf_flags & F_WAIT) == 0) {
1458 /*
1459 * The caller requested async notification -
1460 * this callback happens when the blocking
1461 * lock is released, allowing the caller to
1462 * make another attempt to take the lock.
1463 */
1464 *cookiep = (void *) lock;
1465 error = EINPROGRESS;
1466 goto out;
1467 }
1468
1469 lock->lf_refs++;
1470 stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
1471 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1472 sigallowstop(stops_deferred);
1473 if (lf_free_lock(lock)) {
1474 error = EDOOFUS;
1475 goto out;
1476 }
1477
1478 /*
1479 * We may have been awakened by a signal and/or by a
1480 * debugger continuing us (in which cases we must
1481 * remove our lock graph edges) and/or by another
1482 * process releasing a lock (in which case our edges
1483 * have already been removed and we have been moved to
1484 * the active list). We may also have been woken by
1485 * lf_purgelocks which we report to the caller as
1486 * EINTR. In that case, lf_purgelocks will have
1487 * removed our lock graph edges.
1488 *
1489 * Note that it is possible to receive a signal after
1490 * we were successfully woken (and moved to the active
1491 * list) but before we resumed execution. In this
1492 * case, our lf_outedges list will be clear. We
1493 * pretend there was no error.
1494 *
1495 * Note also, if we have been sleeping long enough, we
1496 * may now have incoming edges from some newer lock
1497 * which is waiting behind us in the queue.
1498 */
1499 if (lock->lf_flags & F_INTR) {
1500 error = EINTR;
1501 lf_free_lock(lock);
1502 goto out;
1503 }
1504 if (LIST_EMPTY(&lock->lf_outedges)) {
1505 error = 0;
1506 } else {
1507 lf_cancel_lock(state, lock);
1508 goto out;
1509 }
1510#ifdef LOCKF_DEBUG
1511 if (lockf_debug & 1) {
1512 lf_print("lf_setlock: granted", lock);
1513 }
1514#endif
1515 goto out;
1516 }
1517 /*
1518 * It looks like we are going to grant the lock. First add
1519 * edges from any currently pending lock that the new lock
1520 * would block.
1521 */
1522 error = lf_add_incoming(state, lock);
1523 if (error) {
1524#ifdef LOCKF_DEBUG
1525 if (lockf_debug & 1)
1526 lf_print("lf_setlock: deadlock", lock);
1527#endif
1528 lf_free_lock(lock);
1529 goto out;
1530 }
1531
1532 /*
1533 * No blocks!! Add the lock. Note that we will
1534 * downgrade or upgrade any overlapping locks this
1535 * process already owns.
1536 */
1537 lf_activate_lock(state, lock);
1538 error = 0;
1539out:
1540 return (error);
1541}
1542
1543/*
1544 * Remove a byte-range lock on an inode.
1545 *
1546 * Generally, find the lock (or an overlap to that lock)
1547 * and remove it (or shrink it), then wakeup anyone we can.
1548 */
1549static int
1550lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1551{
1552 struct lockf_entry *overlap;
1553
1554 overlap = LIST_FIRST(&state->ls_active);
1555
1556 if (overlap == NOLOCKF)
1557 return (0);
1558#ifdef LOCKF_DEBUG
1559 if (unlock->lf_type != F_UNLCK)
1560 panic("lf_clearlock: bad type");
1561 if (lockf_debug & 1)
1562 lf_print("lf_clearlock", unlock);
1563#endif /* LOCKF_DEBUG */
1564
1565 lf_activate_lock(state, unlock);
1566
1567 return (0);
1568}
1569
1570/*
1571 * Check whether there is a blocking lock, and if so return its
1572 * details in '*fl'.
1573 */
1574static int
1575lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1576{
1577 struct lockf_entry *block;
1578
1579#ifdef LOCKF_DEBUG
1580 if (lockf_debug & 1)
1581 lf_print("lf_getlock", lock);
1582#endif /* LOCKF_DEBUG */
1583
1584 if ((block = lf_getblock(state, lock))) {
1585 fl->l_type = block->lf_type;
1586 fl->l_whence = SEEK_SET;
1587 fl->l_start = block->lf_start;
1588 if (block->lf_end == OFF_MAX)
1589 fl->l_len = 0;
1590 else
1591 fl->l_len = block->lf_end - block->lf_start + 1;
1592 fl->l_pid = block->lf_owner->lo_pid;
1593 fl->l_sysid = block->lf_owner->lo_sysid;
1594 } else {
1595 fl->l_type = F_UNLCK;
1596 }
1597 return (0);
1598}
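/*
 * For example, if another owner holds an exclusive lock on bytes
 * [100 .. 199], an F_GETLK request overlapping that range comes back
 * with l_type = F_WRLCK, l_start = 100, l_len = 100 and the blocking
 * owner's pid (plus sysid for remote owners); if nothing blocks the
 * request, only l_type is changed, to F_UNLCK.
 */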
1599
1600/*
1601 * Cancel an async lock request.
1602 */
1603static int
1604lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1605{
1606 struct lockf_entry *reallock;
1607
1608 /*
1609 * We need to match this request with an existing lock
1610 * request.
1611 */
1612 LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1613 if ((void *) reallock == cookie) {
1614 /*
1615 * Double-check that this lock looks right
1616 * (maybe use a rolling ID for the cancel
1617 * cookie instead?)
1618 */
1619 if (!(reallock->lf_vnode == lock->lf_vnode
1620 && reallock->lf_start == lock->lf_start
1621 && reallock->lf_end == lock->lf_end)) {
1622 return (ENOENT);
1623 }
1624
1625 /*
1626 * Make sure this lock was async and then just
1627 * remove it from its wait lists.
1628 */
1629 if (!reallock->lf_async_task) {
1630 return (ENOENT);
1631 }
1632
1633 /*
1634 * Note that since any other thread must take
1635 * state->ls_lock before it can possibly
1636 * trigger the async callback, we are safe
1637 * from a race with lf_wakeup_lock, i.e. we
1638 * can free the lock (actually our caller does
1639 * this).
1640 */
1641 lf_cancel_lock(state, reallock);
1642 return (0);
1643 }
1644 }
1645
1646 /*
1647 * We didn't find a matching lock - not much we can do here.
1648 */
1649 return (ENOENT);
1650}
1651
1652/*
1653 * Walk the list of locks for an inode and
1654 * return the first blocking lock.
1655 */
1656static struct lockf_entry *
1657lf_getblock(struct lockf *state, struct lockf_entry *lock)
1658{
1659 struct lockf_entry *overlap;
1660
1661 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1662 /*
1663 * We may assume that the active list is sorted by
1664 * lf_start.
1665 */
1666 if (overlap->lf_start > lock->lf_end)
1667 break;
1668 if (!lf_blocks(lock, overlap))
1669 continue;
1670 return (overlap);
1671 }
1672 return (NOLOCKF);
1673}
1674
1675/*
1676 * Walk the list of locks for an inode to find an overlapping lock (if
1677 * any) and return a classification of that overlap.
1678 *
1679 * Arguments:
1680 * *overlap The place in the lock list to start looking
1681 * lock The lock which is being tested
1682 * type Pass 'SELF' to test only locks with the same
1683 * owner as lock, or 'OTHERS' to test only locks
1684 * with a different owner
1685 *
1686 * Returns one of six values:
1687 * 0) no overlap
1688 * 1) overlap == lock
1689 * 2) overlap contains lock
1690 * 3) lock contains overlap
1691 * 4) overlap starts before lock
1692 * 5) overlap ends after lock
1693 *
1694 * If there is an overlapping lock, '*overlap' is set to point at the
1695 * overlapping lock.
1696 *
1697 * NOTE: this returns only the FIRST overlapping lock. There
1698 * may be more than one.
1699 */
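/*
 * For example, with lock = [10 .. 20]: an existing lock [10 .. 20] is
 * case 1, [5 .. 25] is case 2, [12 .. 15] is case 3, [5 .. 15] is
 * case 4, [15 .. 25] is case 5, and ranges such as [0 .. 5] or
 * [30 .. 40] do not overlap at all (case 0).
 */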
1700static int
1701lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1702{
1703 struct lockf_entry *lf;
1704 off_t start, end;
1705 int res;
1706
1707 if ((*overlap) == NOLOCKF) {
1708 return (0);
1709 }
1710#ifdef LOCKF_DEBUG
1711 if (lockf_debug & 2)
1712 lf_print("lf_findoverlap: looking for overlap in", lock);
1713#endif /* LOCKF_DEBUG */
1714 start = lock->lf_start;
1715 end = lock->lf_end;
1716 res = 0;
1717 while (*overlap) {
1718 lf = *overlap;
1719 if (lf->lf_start > end)
1720 break;
1721 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1722 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1723 *overlap = LIST_NEXT(lf, lf_link);
1724 continue;
1725 }
1726#ifdef LOCKF_DEBUG
1727 if (lockf_debug & 2)
1728 lf_print("\tchecking", lf);
1729#endif /* LOCKF_DEBUG */
1730 /*
1731 * OK, check for overlap
1732 *
1733 * Six cases:
1734 * 0) no overlap
1735 * 1) overlap == lock
1736 * 2) overlap contains lock
1737 * 3) lock contains overlap
1738 * 4) overlap starts before lock
1739 * 5) overlap ends after lock
1740 */
1741 if (start > lf->lf_end) {
1742 /* Case 0 */
1743#ifdef LOCKF_DEBUG
1744 if (lockf_debug & 2)
1745 printf("no overlap\n");
1746#endif /* LOCKF_DEBUG */
1747 *overlap = LIST_NEXT(lf, lf_link);
1748 continue;
1749 }
1750 if (lf->lf_start == start && lf->lf_end == end) {
1751 /* Case 1 */
1752#ifdef LOCKF_DEBUG
1753 if (lockf_debug & 2)
1754 printf("overlap == lock\n");
1755#endif /* LOCKF_DEBUG */
1756 res = 1;
1757 break;
1758 }
1759 if (lf->lf_start <= start && lf->lf_end >= end) {
1760 /* Case 2 */
1761#ifdef LOCKF_DEBUG
1762 if (lockf_debug & 2)
1763 printf("overlap contains lock\n");
1764#endif /* LOCKF_DEBUG */
1765 res = 2;
1766 break;
1767 }
1768 if (start <= lf->lf_start && end >= lf->lf_end) {
1769 /* Case 3 */
1770#ifdef LOCKF_DEBUG
1771 if (lockf_debug & 2)
1772 printf("lock contains overlap\n");
1773#endif /* LOCKF_DEBUG */
1774 res = 3;
1775 break;
1776 }
1777 if (lf->lf_start < start && lf->lf_end >= start) {
1778 /* Case 4 */
1779#ifdef LOCKF_DEBUG
1780 if (lockf_debug & 2)
1781 printf("overlap starts before lock\n");
1782#endif /* LOCKF_DEBUG */
1783 res = 4;
1784 break;
1785 }
1786 if (lf->lf_start > start && lf->lf_end > end) {
1787 /* Case 5 */
1788#ifdef LOCKF_DEBUG
1789 if (lockf_debug & 2)
1790 printf("overlap ends after lock\n");
1791#endif /* LOCKF_DEBUG */
1792 res = 5;
1793 break;
1794 }
1795 panic("lf_findoverlap: default");
1796 }
1797 return (res);
1798}
1799
1800/*
1801 * Split the existing 'lock1', based on the extent of the lock
1802 * described by 'lock2'. The existing lock should cover 'lock2'
1803 * entirely.
1804 *
1805 * Any pending locks which have been unblocked are added to
1806 * 'granted'.
1807 */
1808static void
1809lf_split(struct lockf *state, struct lockf_entry *lock1,
1810 struct lockf_entry *lock2, struct lockf_entry_list *granted)
1811{
1812 struct lockf_entry *splitlock;
1813
1814#ifdef LOCKF_DEBUG
1815 if (lockf_debug & 2) {
1816 lf_print("lf_split", lock1);
1817 lf_print("splitting from", lock2);
1818 }
1819#endif /* LOCKF_DEBUG */
1820 /*
1821 * Check to see if we don't need to split at all.
1822 */
1823 if (lock1->lf_start == lock2->lf_start) {
1824 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1825 return;
1826 }
1827 if (lock1->lf_end == lock2->lf_end) {
1828 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1829 return;
1830 }
1831 /*
1832 * Make a new lock consisting of the last part of
1833 * the encompassing lock.
1834 */
1835 splitlock = lf_alloc_lock(lock1->lf_owner);
1836 memcpy(splitlock, lock1, sizeof *splitlock);
1837 splitlock->lf_refs = 1;
1838 if (splitlock->lf_flags & F_REMOTE)
1839 vref(splitlock->lf_vnode);
1840
1841 /*
1842 * This cannot cause a deadlock since any edges we would add
1843 * to splitlock already exist in lock1. We must be sure to add
1844 * necessary dependencies to splitlock before we reduce lock1
1845 * otherwise we may accidentally grant a pending lock that
1846 * was blocked by the tail end of lock1.
1847 */
1848 splitlock->lf_start = lock2->lf_end + 1;
1849 LIST_INIT(&splitlock->lf_outedges);
1850 LIST_INIT(&splitlock->lf_inedges);
1851 lf_add_incoming(state, splitlock);
1852
1853 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1854
1855 /*
1856 * OK, now link it in
1857 */
1858 lf_insert_lock(state, splitlock);
1859}
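/*
 * For example, unlocking bytes [40 .. 60] out of an active lock
 * covering [0 .. 100] leaves the original entry shrunk to [0 .. 39]
 * and a newly allocated entry covering [61 .. 100]; if the unlock had
 * started at byte 0 or ended at byte 100, the existing entry would
 * simply have been trimmed rather than split.
 */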
1860
1861struct lockdesc {
1862 STAILQ_ENTRY(lockdesc) link;
1863 struct vnode *vp;
1864 struct flock fl;
1865};
1866STAILQ_HEAD(lockdesclist, lockdesc);
1867
1868int
1869lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1870{
1871 struct lockf *ls;
1872 struct lockf_entry *lf;
1873 struct lockdesc *ldesc;
1874 struct lockdesclist locks;
1875 int error;
1876
1877 /*
1878 * In order to keep the locking simple, we iterate over the
1879 * active lock lists to build a list of locks that need
1880 * releasing. We then call the iterator for each one in turn.
1881 *
1882 * We take an extra reference to the vnode for the duration to
1883 * make sure it doesn't go away before we are finished.
1884 */
1885 STAILQ_INIT(&locks);
1886 sx_xlock(&lf_lock_states_lock);
1887 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1888 sx_xlock(&ls->ls_lock);
1889 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1890 if (lf->lf_owner->lo_sysid != sysid)
1891 continue;
1892
1893 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1894 M_WAITOK);
1895 ldesc->vp = lf->lf_vnode;
1896 vref(ldesc->vp);
1897 ldesc->fl.l_start = lf->lf_start;
1898 if (lf->lf_end == OFF_MAX)
1899 ldesc->fl.l_len = 0;
1900 else
1901 ldesc->fl.l_len =
1902 lf->lf_end - lf->lf_start + 1;
1903 ldesc->fl.l_whence = SEEK_SET;
1904 ldesc->fl.l_type = F_UNLCK;
1905 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1906 ldesc->fl.l_sysid = sysid;
1907 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1908 }
1909 sx_xunlock(&ls->ls_lock);
1910 }
1911 sx_xunlock(&lf_lock_states_lock);
1912
1913 /*
1914 * Call the iterator function for each lock in turn. If the
1915 * iterator returns an error code, just free the rest of the
1916 * lockdesc structures.
1917 */
1918 error = 0;
1919 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1920 STAILQ_REMOVE_HEAD(&locks, link);
1921 if (!error)
1922 error = fn(ldesc->vp, &ldesc->fl, arg);
1923 vrele(ldesc->vp);
1924 free(ldesc, M_LOCKF);
1925 }
1926
1927 return (error);
1928}
1929
1930int
1931lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1932{
1933 struct lockf *ls;
1934 struct lockf_entry *lf;
1935 struct lockdesc *ldesc;
1936 struct lockdesclist locks;
1937 int error;
1938
1939 /*
1940 * In order to keep the locking simple, we iterate over the
1941 * active lock lists to build a list of locks that need
1942 * releasing. We then call the iterator for each one in turn.
1943 *
1944 * We take an extra reference to the vnode for the duration to
1945 * make sure it doesn't go away before we are finished.
1946 */
1947 STAILQ_INIT(&locks);
1948 VI_LOCK(vp);
1949 ls = vp->v_lockf;
1950 if (!ls) {
1951 VI_UNLOCK(vp);
1952 return (0);
1953 }
1954 MPASS(ls->ls_threads >= 0);
1955 ls->ls_threads++;
1956 VI_UNLOCK(vp);
1957
1958 sx_xlock(&ls->ls_lock);
1959 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1960 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1961 M_WAITOK);
1962 ldesc->vp = lf->lf_vnode;
1963 vref(ldesc->vp);
1964 ldesc->fl.l_start = lf->lf_start;
1965 if (lf->lf_end == OFF_MAX)
1966 ldesc->fl.l_len = 0;
1967 else
1968 ldesc->fl.l_len =
1969 lf->lf_end - lf->lf_start + 1;
1970 ldesc->fl.l_whence = SEEK_SET;
1971 ldesc->fl.l_type = F_UNLCK;
1972 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1973 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1974 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1975 }
1976 sx_xunlock(&ls->ls_lock);
1977 VI_LOCK(vp);
1978 MPASS(ls->ls_threads > 0);
1979 ls->ls_threads--;
1980 wakeup(ls);
1981 VI_UNLOCK(vp);
1982
1983 /*
1984 * Call the iterator function for each lock in turn. If the
1985 * iterator returns an error code, just free the rest of the
1986 * lockdesc structures.
1987 */
1988 error = 0;
1989 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1990 STAILQ_REMOVE_HEAD(&locks, link);
1991 if (!error)
1992 error = fn(ldesc->vp, &ldesc->fl, arg);
1993 vrele(ldesc->vp);
1994 free(ldesc, M_LOCKF);
1995 }
1996
1997 return (error);
1998}
1999
2000static int
2001lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
2002{
2003
2004 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
2005 return (0);
2006}
2007
2008void
2009lf_clearremotesys(int sysid)
2010{
2011
2012	KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2013	lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2014}
2015
2016int
2017lf_countlocks(int sysid)
2018{
2019 int i;
2020 struct lock_owner *lo;
2021 int count;
2022
2023 count = 0;
2024 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
2025 sx_xlock(&lf_lock_owners[i].lock);
2026 LIST_FOREACH(lo, &lf_lock_owners[i].list, lo_link)
2027 if (lo->lo_sysid == sysid)
2028 count += lo->lo_refs;
2029 sx_xunlock(&lf_lock_owners[i].lock);
2030 }
2031
2032 return (count);
2033}
2034
2035#ifdef LOCKF_DEBUG
2036
2037/*
2038 * Return non-zero if y is reachable from x using a brute force
2039 * search. If reachable and path is non-null, return the route taken
2040 * in path.
2041 */
2042static int
2043graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2044 struct owner_vertex_list *path)
2045{
2046 struct owner_edge *e;
2047
2048 if (x == y) {
2049 if (path)
2050 TAILQ_INSERT_HEAD(path, x, v_link);
2051 return 1;
2052 }
2053
2054 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2055 if (graph_reaches(e->e_to, y, path)) {
2056 if (path)
2057 TAILQ_INSERT_HEAD(path, x, v_link);
2058 return 1;
2059 }
2060 }
2061 return 0;
2062}
2063
2064/*
2065 * Perform consistency checks on the graph. Make sure the values of
2066 * v_order are correct. If checkorder is non-zero, check no vertex can
2067 * reach any other vertex with a smaller order.
2068 */
2069static void
2070graph_check(struct owner_graph *g, int checkorder)
2071{
2072 int i, j;
2073
2074 for (i = 0; i < g->g_size; i++) {
2075 if (!g->g_vertices[i]->v_owner)
2076 continue;
2077 KASSERT(g->g_vertices[i]->v_order == i,
2078 ("lock graph vertices disordered"));
2079 if (checkorder) {
2080 for (j = 0; j < i; j++) {
2081 if (!g->g_vertices[j]->v_owner)
2082 continue;
2083 KASSERT(!graph_reaches(g->g_vertices[i],
2084 g->g_vertices[j], NULL),
2085 ("lock graph vertices disordered"));
2086 }
2087 }
2088 }
2089}
2090
2091static void
2092graph_print_vertices(struct owner_vertex_list *set)
2093{
2094 struct owner_vertex *v;
2095
2096 printf("{ ");
2097 TAILQ_FOREACH(v, set, v_link) {
2098 printf("%d:", v->v_order);
2099 lf_print_owner(v->v_owner);
2100 if (TAILQ_NEXT(v, v_link))
2101 printf(", ");
2102 }
2103 printf(" }\n");
2104}
2105
2106#endif
2107
2108/*
2109 * Calculate the sub-set of vertices v from the affected region [y..x]
2110 * where v is reachable from y. Return -1 if a loop was detected
2111 * (i.e. x is reachable from y); otherwise return the number of
2112 * vertices in this subset.
2113 */
2114static int
2115graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2116    struct owner_vertex *y, struct owner_vertex_list *delta)
2117{
2118 uint32_t gen;
2119 struct owner_vertex *v;
2120 struct owner_edge *e;
2121 int n;
2122
2123 /*
2124 * We start with a set containing just y. Then for each vertex
2125 * v in the set so far unprocessed, we add each vertex that v
2126 * has an out-edge to and that is within the affected region
2127 * [y..x]. If we see the vertex x on our travels, stop
2128 * immediately.
2129 */
2130 TAILQ_INIT(delta);
2131 TAILQ_INSERT_TAIL(delta, y, v_link);
2132 v = y;
2133 n = 1;
2134 gen = g->g_gen;
2135 while (v) {
2136 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2137 if (e->e_to == x)
2138 return -1;
2139 if (e->e_to->v_order < x->v_order
2140 && e->e_to->v_gen != gen) {
2141 e->e_to->v_gen = gen;
2142 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2143 n++;
2144 }
2145 }
2146 v = TAILQ_NEXT(v, v_link);
2147 }
2148
2149 return (n);
2150}
2151
2152/*
2153 * Calculate the sub-set of vertices v from the affected region [y..x]
2154 * where v reaches x. Return the number of vertices in this subset.
2155 */
2156static int
2157graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2158    struct owner_vertex *y, struct owner_vertex_list *delta)
2159{
2160 uint32_t gen;
2161 struct owner_vertex *v;
2162 struct owner_edge *e;
2163 int n;
2164
2165 /*
2166 * We start with a set containing just x. Then for each vertex
2167 * v in the set so far unprocessed, we add each vertex that v
2168 * has an in-edge from and that is within the affected region
2169 * [y..x].
2170 */
2171 TAILQ_INIT(delta);
2172 TAILQ_INSERT_TAIL(delta, x, v_link);
2173 v = x;
2174 n = 1;
2175 gen = g->g_gen;
2176 while (v) {
2177 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2178 if (e->e_from->v_order > y->v_order
2179 && e->e_from->v_gen != gen) {
2180 e->e_from->v_gen = gen;
2181 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2182 n++;
2183 }
2184 }
2185 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2186 }
2187
2188 return (n);
2189}
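/*
 * Editorial sketch (toy numbers, not kernel data): suppose the vertices
 * are currently ordered 0..5 with existing edges 1->3 and 2->5, and a
 * new edge is requested from x (order 5) to y (order 1).  The affected
 * region is then [y..x] = orders 1..5:
 *
 *	graph_delta_forward() collects deltaF = {1, 3}, the vertices
 *	reachable from y inside the region (it returns -1 only if the
 *	walk runs into x, i.e. the new edge would close a cycle);
 *
 *	graph_delta_backward() collects deltaB = {5, 2}, the vertices
 *	inside the region from which x can be reached.
 *
 * Both walks use v_gen as a "visited" mark so each vertex is queued at
 * most once, and the delta TAILQ itself doubles as the work list.
 */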
2190
2191static int
2192graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2193{
2194 struct owner_vertex *v;
2195 int i, j;
2196
2197 TAILQ_FOREACH(v, set, v_link) {
2198 for (i = n;
2199 i > 0 && indices[i - 1] > v->v_order; i--)
2200 ;
2201 for (j = n - 1; j >= i; j--)
2202 indices[j + 1] = indices[j];
2203 indices[i] = v->v_order;
2204 n++;
2205 }
2206
2207 return (n);
2208}
2209
2210static int
2211graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2212 struct owner_vertex_list *set)
2213{
2214 struct owner_vertex *v, *vlowest;
2215
2216 while (!TAILQ_EMPTY(set)) {
2217 vlowest = NULL;
2218 TAILQ_FOREACH(v, set, v_link) {
2219 if (!vlowest || v->v_order < vlowest->v_order)
2220 vlowest = v;
2221 }
2222 TAILQ_REMOVE(set, vlowest, v_link);
2223 vlowest->v_order = indices[nextunused];
2224 g->g_vertices[vlowest->v_order] = vlowest;
2225 nextunused++;
2226 }
2227
2228 return (nextunused);
2229}
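/*
 * Editorial sketch, continuing the toy example above: with
 * deltaF = {1, 3} and deltaB = {5, 2}, graph_add_indices() merges the
 * old order numbers into one sorted pool {1, 2, 3, 5} by insertion
 * sort.  graph_assign_indices() then hands the pool out in ascending
 * order, deltaB first and deltaF second (see graph_add_edge() below),
 * so the old orders 2, 5, 1, 3 become 1, 2, 3, 5 and every vertex that
 * reaches x now sorts before every vertex reachable from y.
 */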
2230
2231static int
2232graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2233    struct owner_vertex *y)
2234{
2235 struct owner_edge *e;
2236 struct owner_vertex_list deltaF, deltaB;
2237 int nF, n, vi, i;
2238 int *indices;
2239 int nB __unused;
2240
2241 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2242
2243 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2244 if (e->e_to == y) {
2245 e->e_refs++;
2246 return (0);
2247 }
2248 }
2249
2250#ifdef LOCKF_DEBUG
2251 if (lockf_debug & 8) {
2252 printf("adding edge %d:", x->v_order);
2253 lf_print_owner(x->v_owner);
2254 printf(" -> %d:", y->v_order);
2255 lf_print_owner(y->v_owner);
2256 printf("\n");
2257 }
2258#endif
2259 if (y->v_order < x->v_order) {
2260 /*
2261 * The new edge violates the order. First find the set
2262 * of affected vertices reachable from y (deltaF) and
2263		 * the set of affected vertices that reach x
2264 * (deltaB), using the graph generation number to
2265 * detect whether we have visited a given vertex
2266 * already. We re-order the graph so that each vertex
2267 * in deltaB appears before each vertex in deltaF.
2268 *
2269 * If x is a member of deltaF, then the new edge would
2270 * create a cycle. Otherwise, we may assume that
2271 * deltaF and deltaB are disjoint.
2272 */
2273 g->g_gen++;
2274 if (g->g_gen == 0) {
2275 /*
2276 * Generation wrap.
2277 */
2278 for (vi = 0; vi < g->g_size; vi++) {
2279 g->g_vertices[vi]->v_gen = 0;
2280 }
2281 g->g_gen++;
2282 }
2283 nF = graph_delta_forward(g, x, y, &deltaF);
2284 if (nF < 0) {
2285#ifdef LOCKF_DEBUG
2286 if (lockf_debug & 8) {
2287 struct owner_vertex_list path;
2288 printf("deadlock: ");
2289 TAILQ_INIT(&path);
2290 graph_reaches(y, x, &path);
2291 graph_print_vertices(&path);
2292 }
2293#endif
2294 return (EDEADLK);
2295 }
2296
2297#ifdef LOCKF_DEBUG
2298 if (lockf_debug & 8) {
2299 printf("re-ordering graph vertices\n");
2300 printf("deltaF = ");
2301 graph_print_vertices(&deltaF);
2302 }
2303#endif
2304
2305 nB = graph_delta_backward(g, x, y, &deltaB);
2306
2307#ifdef LOCKF_DEBUG
2308 if (lockf_debug & 8) {
2309 printf("deltaB = ");
2310 graph_print_vertices(&deltaB);
2311 }
2312#endif
2313
2314 /*
2315 * We first build a set of vertex indices (vertex
2316 * order values) that we may use, then we re-assign
2317 * orders first to those vertices in deltaB, then to
2318 * deltaF. Note that the contents of deltaF and deltaB
2319 * may be partially disordered - we perform an
2320 * insertion sort while building our index set.
2321 */
2322 indices = g->g_indexbuf;
2323 n = graph_add_indices(indices, 0, &deltaF);
2324 graph_add_indices(indices, n, &deltaB);
2325
2326 /*
2327 * We must also be sure to maintain the relative
2328 * ordering of deltaF and deltaB when re-assigning
2329 * vertices. We do this by iteratively removing the
2330 * lowest ordered element from the set and assigning
2331 * it the next value from our new ordering.
2332 */
2333 i = graph_assign_indices(g, indices, 0, &deltaB);
2334 graph_assign_indices(g, indices, i, &deltaF);
2335
2336#ifdef LOCKF_DEBUG
2337 if (lockf_debug & 8) {
2338 struct owner_vertex_list set;
2339 TAILQ_INIT(&set);
2340 for (i = 0; i < nB + nF; i++)
2341 TAILQ_INSERT_TAIL(&set,
2342 g->g_vertices[indices[i]], v_link);
2343 printf("new ordering = ");
2344 graph_print_vertices(&set);
2345 }
2346#endif
2347 }
2348
2349 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2350
2351#ifdef LOCKF_DEBUG
2352 if (lockf_debug & 8) {
2353 graph_check(g, TRUE);
2354 }
2355#endif
2356
2357 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2358
2359 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2360 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2361 e->e_refs = 1;
2362 e->e_from = x;
2363 e->e_to = y;
2364
2365 return (0);
2366}
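/*
 * Editorial summary (not part of the kernel sources): graph_add_edge()
 * only re-orders vertices when the new edge x->y runs against the
 * current topological order (y->v_order < x->v_order).  In that case
 * it computes deltaF and deltaB as sketched above, fails with EDEADLK
 * if x is forward-reachable from y (the edge would close a cycle), and
 * otherwise re-assigns the pooled order numbers to deltaB then deltaF
 * so the ordering is once again topological.  The EDEADLK propagates
 * back to the blocked lock request, which is how a waiting
 * fcntl(F_SETLKW) caller would ultimately see deadlock reported.
 */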
2367
2368/*
2369 * Remove an edge x->y from the graph.
2370 */
2371static void
2372graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2373    struct owner_vertex *y)
2374{
2375 struct owner_edge *e;
2376
2377 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2378
2379 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2380 if (e->e_to == y)
2381 break;
2382 }
2383 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2384
2385 e->e_refs--;
2386 if (e->e_refs == 0) {
2387#ifdef LOCKF_DEBUG
2388 if (lockf_debug & 8) {
2389 printf("removing edge %d:", x->v_order);
2390 lf_print_owner(x->v_owner);
2391 printf(" -> %d:", y->v_order);
2392 lf_print_owner(y->v_owner);
2393 printf("\n");
2394 }
2395#endif
2396 LIST_REMOVE(e, e_outlink);
2397 LIST_REMOVE(e, e_inlink);
2398 free(e, M_LOCKF);
2399 }
2400}
2401
2402/*
2403 * Allocate a new vertex for the given lock owner and add it to the
2404 * graph, growing the vertex array and index buffer if necessary.
2405 */
2406static struct owner_vertex *
2407graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2408{
2409 struct owner_vertex *v;
2410
2411 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2412
2413 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2414 if (g->g_size == g->g_space) {
2415		g->g_vertices = realloc(g->g_vertices,
2416		    2 * g->g_space * sizeof(struct owner_vertex *),
2417 M_LOCKF, M_WAITOK);
2418 free(g->g_indexbuf, M_LOCKF);
2419 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2420 M_LOCKF, M_WAITOK);
2421 g->g_space = 2 * g->g_space;
2422 }
2423 v->v_order = g->g_size;
2424 v->v_gen = g->g_gen;
2425 g->g_vertices[g->g_size] = v;
2426 g->g_size++;
2427
2428 LIST_INIT(&v->v_outedges);
2429 LIST_INIT(&v->v_inedges);
2430 v->v_owner = lo;
2431
2432 return (v);
2433}
2434
2435static void
2436graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2437{
2438 struct owner_vertex *w;
2439 int i;
2440
2441 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2442
2443 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2444 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2445
2446 /*
2447 * Remove from the graph's array and close up the gap,
2448 * renumbering the other vertices.
2449 */
2450 for (i = v->v_order + 1; i < g->g_size; i++) {
2451 w = g->g_vertices[i];
2452 w->v_order--;
2453 g->g_vertices[i - 1] = w;
2454 }
2455 g->g_size--;
2456
2457 free(v, M_LOCKF);
2458}
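/*
 * Editorial example (toy numbers): freeing the vertex at v_order 2
 * from a graph of size 5 shifts the vertices at orders 3 and 4 down to
 * 2 and 3 and decrements g_size to 4, preserving the invariant that a
 * live vertex's v_order equals its index in g_vertices[].
 */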
2459
2460static struct owner_graph *
2461graph_init(struct owner_graph *g)
2462{
2463
2464 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2465 M_LOCKF, M_WAITOK);
2466 g->g_size = 0;
2467 g->g_space = 10;
2468 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2469 g->g_gen = 0;
2470
2471 return (g);
2472}
2473
2474#ifdef LOCKF_DEBUG
2475/*
2476 * Print description of a lock owner
2477 */
2478static void
2479lf_print_owner(struct lock_owner *lo)
2480{
2481
2482 if (lo->lo_flags & F_REMOTE) {
2483 printf("remote pid %d, system %d",
2484 lo->lo_pid, lo->lo_sysid);
2485 } else if (lo->lo_flags & F_FLOCK) {
2486 printf("file %p", lo->lo_id);
2487 } else {
2488 printf("local pid %d", lo->lo_pid);
2489 }
2490}
2491
2492/*
2493 * Print out a lock.
2494 */
2495static void
2496lf_print(char *tag, struct lockf_entry *lock)
2497{
2498
2499 printf("%s: lock %p for ", tag, (void *)lock);
2500 lf_print_owner(lock->lf_owner);
2501 if (lock->lf_inode != (struct inode *)0)
2502 printf(" in ino %ju on dev <%s>,",
2503 (uintmax_t)lock->lf_inode->i_number,
2504 devtoname(ITODEV(lock->lf_inode)));
2505 printf(" %s, start %jd, end ",
2506 lock->lf_type == F_RDLCK ? "shared" :
2507 lock->lf_type == F_WRLCK ? "exclusive" :
2508 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2509 (intmax_t)lock->lf_start);
2510 if (lock->lf_end == OFF_MAX)
2511 printf("EOF");
2512 else
2513 printf("%jd", (intmax_t)lock->lf_end);
2514 if (!LIST_EMPTY(&lock->lf_outedges))
2515 printf(" block %p\n",
2516 (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2517 else
2518 printf("\n");
2519}
2520
2521static void
2522lf_printlist(char *tag, struct lockf_entry *lock)
2523{
2524 struct lockf_entry *lf, *blk;
2525 struct lockf_edge *e;
2526
2527 if (lock->lf_inode == (struct inode *)0)
2528 return;
2529
2530 printf("%s: Lock list for ino %ju on dev <%s>:\n",
2531 tag, (uintmax_t)lock->lf_inode->i_number,
2532 devtoname(ITODEV(lock->lf_inode)));
2533 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2534 printf("\tlock %p for ",(void *)lf);
2535 lf_print_owner(lock->lf_owner);
2536 printf(", %s, start %jd, end %jd",
2537 lf->lf_type == F_RDLCK ? "shared" :
2538 lf->lf_type == F_WRLCK ? "exclusive" :
2539 lf->lf_type == F_UNLCK ? "unlock" :
2540 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2541 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2542 blk = e->le_to;
2543 printf("\n\t\tlock request %p for ", (void *)blk);
2544 lf_print_owner(blk->lf_owner);
2545 printf(", %s, start %jd, end %jd",
2546 blk->lf_type == F_RDLCK ? "shared" :
2547 blk->lf_type == F_WRLCK ? "exclusive" :
2548 blk->lf_type == F_UNLCK ? "unlock" :
2549 "unknown", (intmax_t)blk->lf_start,
2550 (intmax_t)blk->lf_end);
2551 if (!LIST_EMPTY(&blk->lf_inedges))
2552 panic("lf_printlist: bad list");
2553 }
2554 printf("\n");
2555 }
2556}
2557#endif /* LOCKF_DEBUG */