kern_lockf.c (177371) kern_lockf.c (177633)
1/*-
1/*-
2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
3 * Authors: Doug Rabson <dfr@rabson.org>
4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27/*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Scooter Morris at Genentech Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions

--- 18 unchanged lines hidden ---

28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
33 */
34
35#include <sys/cdefs.h>
28 * Copyright (c) 1982, 1986, 1989, 1993
29 * The Regents of the University of California. All rights reserved.
30 *
31 * This code is derived from software contributed to Berkeley by
32 * Scooter Morris at Genentech Inc.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions

--- 18 unchanged lines hidden (view full) ---

54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
59 */
60
61#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/kern/kern_lockf.c 177371 2008-03-19 07:13:24Z jeff $");
62__FBSDID("$FreeBSD: head/sys/kern/kern_lockf.c 177633 2008-03-26 15:23:12Z dfr $");
37
38#include "opt_debug_lockf.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
63
64#include "opt_debug_lockf.h"
65
66#include <sys/param.h>
67#include <sys/systm.h>
68#include <sys/hash.h>
42#include <sys/kernel.h>
43#include <sys/limits.h>
44#include <sys/lock.h>
45#include <sys/mount.h>
46#include <sys/mutex.h>
47#include <sys/proc.h>
69#include <sys/kernel.h>
70#include <sys/limits.h>
71#include <sys/lock.h>
72#include <sys/mount.h>
73#include <sys/mutex.h>
74#include <sys/proc.h>
75#include <sys/sx.h>
48#include <sys/unistd.h>
49#include <sys/vnode.h>
50#include <sys/malloc.h>
51#include <sys/fcntl.h>
52#include <sys/lockf.h>
76#include <sys/unistd.h>
77#include <sys/vnode.h>
78#include <sys/malloc.h>
79#include <sys/fcntl.h>
80#include <sys/lockf.h>
81#include <sys/taskqueue.h>
53
82
54/*
55 * This variable controls the maximum number of processes that will
56 * be checked in doing deadlock detection.
57 */
58static int maxlockdepth = MAXDEPTH;
59
60#ifdef LOCKF_DEBUG
61#include <sys/sysctl.h>
62
63#include <ufs/ufs/quota.h>
64#include <ufs/ufs/inode.h>
65
83#ifdef LOCKF_DEBUG
84#include <sys/sysctl.h>
85
86#include <ufs/ufs/quota.h>
87#include <ufs/ufs/inode.h>
88
66
67static int lockf_debug = 0;
89static int lockf_debug = 0; /* control debug output */
68SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
69#endif
70
71MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
72
90SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
91#endif
92
93MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
94
73#define NOLOCKF (struct lockf *)0
95struct owner_edge;
96struct owner_vertex;
97struct owner_vertex_list;
98struct owner_graph;
99
100#define NOLOCKF (struct lockf_entry *)0
74#define SELF 0x1
75#define OTHERS 0x2
101#define SELF 0x1
102#define OTHERS 0x2
76static int lf_clearlock(struct lockf *, struct lockf **);
77static int lf_findoverlap(struct lockf *,
78 struct lockf *, int, struct lockf ***, struct lockf **);
79static struct lockf *
80 lf_getblock(struct lockf *);
81static int lf_getlock(struct lockf *, struct flock *);
82static int lf_setlock(struct lockf *, struct vnode *, struct lockf **);
83static void lf_split(struct lockf *, struct lockf *, struct lockf **);
84static void lf_wakelock(struct lockf *);
103static void lf_init(void *);
104static int lf_hash_owner(caddr_t, struct flock *, int);
105static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
106 int);
107static struct lockf_entry *
108 lf_alloc_lock(struct lock_owner *);
109static void lf_free_lock(struct lockf_entry *);
110static int lf_clearlock(struct lockf *, struct lockf_entry *);
111static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
112static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
113static void lf_free_edge(struct lockf_edge *);
114static struct lockf_edge *
115 lf_alloc_edge(void);
116static void lf_alloc_vertex(struct lockf_entry *);
117static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
118static void lf_remove_edge(struct lockf_edge *);
119static void lf_remove_outgoing(struct lockf_entry *);
120static void lf_remove_incoming(struct lockf_entry *);
121static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
122static int lf_add_incoming(struct lockf *, struct lockf_entry *);
123static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
124 int);
125static struct lockf_entry *
126 lf_getblock(struct lockf *, struct lockf_entry *);
127static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
128static void lf_insert_lock(struct lockf *, struct lockf_entry *);
129static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
130static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
131 int all, struct lockf_entry_list *);
132static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
133 struct lockf_entry_list*);
134static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
135 struct lockf_entry_list*);
136static int lf_setlock(struct lockf *, struct lockf_entry *,
137 struct vnode *, void **cookiep);
138static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
139static void lf_split(struct lockf *, struct lockf_entry *,
140 struct lockf_entry *, struct lockf_entry_list *);
85#ifdef LOCKF_DEBUG
141#ifdef LOCKF_DEBUG
86static void lf_print(char *, struct lockf *);
87static void lf_printlist(char *, struct lockf *);
142static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
143 struct owner_vertex_list *path);
144static void graph_check(struct owner_graph *g, int checkorder);
145static void graph_print_vertices(struct owner_vertex_list *set);
88#endif
146#endif
147static int graph_delta_forward(struct owner_graph *g,
148 struct owner_vertex *x, struct owner_vertex *y,
149 struct owner_vertex_list *delta);
150static int graph_delta_backward(struct owner_graph *g,
151 struct owner_vertex *x, struct owner_vertex *y,
152 struct owner_vertex_list *delta);
153static int graph_add_indices(int *indices, int n,
154 struct owner_vertex_list *set);
155static int graph_assign_indices(struct owner_graph *g, int *indices,
156 int nextunused, struct owner_vertex_list *set);
157static int graph_add_edge(struct owner_graph *g,
158 struct owner_vertex *x, struct owner_vertex *y);
159static void graph_remove_edge(struct owner_graph *g,
160 struct owner_vertex *x, struct owner_vertex *y);
161static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
162 struct lock_owner *lo);
163static void graph_free_vertex(struct owner_graph *g,
164 struct owner_vertex *v);
165static struct owner_graph * graph_init(struct owner_graph *g);
166#ifdef LOCKF_DEBUG
167static void lf_print(char *, struct lockf_entry *);
168static void lf_printlist(char *, struct lockf_entry *);
169static void lf_print_owner(struct lock_owner *);
170#endif
89
90/*
171
172/*
173 * This structure is used to keep track of both local and remote lock
174 * owners. The lf_owner field of the struct lockf_entry points back at
175 * the lock owner structure. Each possible lock owner (local proc for
176 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
177 * pair for remote locks) is represented by a unique instance of
178 * struct lock_owner.
179 *
180 * If a lock owner has a lock that blocks some other lock or a lock
181 * that is waiting for some other lock, it also has a vertex in the
182 * owner_graph below.
183 *
184 * Locks:
185 * (s) locked by state->ls_lock
186 * (S) locked by lf_lock_states_lock
187 * (l) locked by lf_lock_owners_lock
188 * (g) locked by lf_owner_graph_lock
189 * (c) const until freeing
190 */
191#define LOCK_OWNER_HASH_SIZE 256
192
193struct lock_owner {
194 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
195 int lo_refs; /* (l) Number of locks referring to this */
 196	int	lo_flags;	/* (c) Flags passed to lf_advlock */
197 caddr_t lo_id; /* (c) Id value passed to lf_advlock */
198 pid_t lo_pid; /* (c) Process Id of the lock owner */
199 int lo_sysid; /* (c) System Id of the lock owner */
200 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
201};
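/*
 * An illustrative note (added here for clarity, not part of either
 * revision): under this scheme all fcntl()-style locks taken by one
 * local process share a single lock_owner, even across different
 * files, because the owner is identified by the proc pointer passed
 * in a_id.  BSD flock() locks are instead owned by the open file
 * passed as a_id, and remote locks (F_REMOTE) are owned by their
 * <pid, sysid> pair.
 */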
202
203LIST_HEAD(lock_owner_list, lock_owner);
204
205static struct sx lf_lock_states_lock;
206static struct lockf_list lf_lock_states; /* (S) */
207static struct sx lf_lock_owners_lock;
208static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
209
210/*
211 * Structures for deadlock detection.
212 *
213 * We have two types of directed graph, the first is the set of locks,
214 * both active and pending on a vnode. Within this graph, active locks
215 * are terminal nodes in the graph (i.e. have no out-going
216 * edges). Pending locks have out-going edges to each blocking active
217 * lock that prevents the lock from being granted and also to each
218 * older pending lock that would block them if it was active. The
219 * graph for each vnode is naturally acyclic; new edges are only ever
220 * added to or from new nodes (either new pending locks which only add
221 * out-going edges or new active locks which only add in-coming edges)
222 * therefore they cannot create loops in the lock graph.
223 *
224 * The second graph is a global graph of lock owners. Each lock owner
225 * is a vertex in that graph and an edge is added to the graph
226 * whenever an edge is added to a vnode graph, with end points
227 * corresponding to owner of the new pending lock and the owner of the
228 * lock upon which it waits. In order to prevent deadlock, we only add
229 * an edge to this graph if the new edge would not create a cycle.
230 *
231 * The lock owner graph is topologically sorted, i.e. if a node has
232 * any outgoing edges, then it has an order strictly less than any
233 * node to which it has an outgoing edge. We preserve this ordering
234 * (and detect cycles) on edge insertion using Algorithm PK from the
235 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
236 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
237 * No. 1.7)
238 */
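/*
 * A worked example of the deadlock check (illustrative sketch, not
 * part of either revision): owner A holds a write lock on [0..0] and
 * owner B holds a write lock on [1..1].  If A now requests [0..1] it
 * blocks on B's lock and the owner edge A->B is added.  If B then
 * requests [0..1], the candidate edge B->A would close the cycle
 * A->B->A, so lf_add_edge() fails and the request is rejected with
 * EDEADLK instead of being queued.
 */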
239struct owner_vertex;
240
241struct owner_edge {
242 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
243 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
244 int e_refs; /* (g) number of times added */
245 struct owner_vertex *e_from; /* (c) out-going from here */
246 struct owner_vertex *e_to; /* (c) in-coming to here */
247};
248LIST_HEAD(owner_edge_list, owner_edge);
249
250struct owner_vertex {
251 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
252 uint32_t v_gen; /* (g) workspace for edge insertion */
253 int v_order; /* (g) order of vertex in graph */
254 struct owner_edge_list v_outedges;/* (g) list of out-edges */
255 struct owner_edge_list v_inedges; /* (g) list of in-edges */
256 struct lock_owner *v_owner; /* (c) corresponding lock owner */
257};
258TAILQ_HEAD(owner_vertex_list, owner_vertex);
259
260struct owner_graph {
261 struct owner_vertex** g_vertices; /* (g) pointers to vertices */
262 int g_size; /* (g) number of vertices */
263 int g_space; /* (g) space allocated for vertices */
264 int *g_indexbuf; /* (g) workspace for loop detection */
265 uint32_t g_gen; /* (g) increment when re-ordering */
266};
267
268static struct sx lf_owner_graph_lock;
269static struct owner_graph lf_owner_graph;
270
271/*
272 * Initialise various structures and locks.
273 */
274static void
275lf_init(void *dummy)
276{
277 int i;
278
279 sx_init(&lf_lock_states_lock, "lock states lock");
280 LIST_INIT(&lf_lock_states);
281
282 sx_init(&lf_lock_owners_lock, "lock owners lock");
283 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
284 LIST_INIT(&lf_lock_owners[i]);
285
286 sx_init(&lf_owner_graph_lock, "owner graph lock");
287 graph_init(&lf_owner_graph);
288}
289SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
290
291/*
292 * Generate a hash value for a lock owner.
293 */
294static int
295lf_hash_owner(caddr_t id, struct flock *fl, int flags)
296{
297 uint32_t h;
298
299 if (flags & F_REMOTE) {
300 h = HASHSTEP(0, fl->l_pid);
301 h = HASHSTEP(h, fl->l_sysid);
302 } else if (flags & F_FLOCK) {
303 h = ((uintptr_t) id) >> 7;
304 } else {
305 struct proc *p = (struct proc *) id;
306 h = HASHSTEP(0, p->p_pid);
307 h = HASHSTEP(h, 0);
308 }
309
310 return (h % LOCK_OWNER_HASH_SIZE);
311}
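/*
 * For example (illustrative): a plain local POSIX lock hashes only on
 * the owning process id, so every lf_advlock() call made by that
 * process selects the same lf_lock_owners[] bucket and will find and
 * re-use the lock_owner allocated by its first call.
 */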
312
313/*
314 * Return true if a lock owner matches the details passed to
315 * lf_advlock.
316 */
317static int
318lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
319 int flags)
320{
321 if (flags & F_REMOTE) {
322 return lo->lo_pid == fl->l_pid
323 && lo->lo_sysid == fl->l_sysid;
324 } else {
325 return lo->lo_id == id;
326 }
327}
328
329static struct lockf_entry *
330lf_alloc_lock(struct lock_owner *lo)
331{
332 struct lockf_entry *lf;
333
334 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
335
336#ifdef LOCKF_DEBUG
337 if (lockf_debug & 4)
338 printf("Allocated lock %p\n", lf);
339#endif
340 if (lo) {
341 sx_xlock(&lf_lock_owners_lock);
342 lo->lo_refs++;
343 sx_xunlock(&lf_lock_owners_lock);
344 lf->lf_owner = lo;
345 }
346
347 return (lf);
348}
349
350static void
351lf_free_lock(struct lockf_entry *lock)
352{
353 /*
354 * Adjust the lock_owner reference count and
355 * reclaim the entry if this is the last lock
356 * for that owner.
357 */
358 struct lock_owner *lo = lock->lf_owner;
359 if (lo) {
360 KASSERT(LIST_EMPTY(&lock->lf_outedges),
361 ("freeing lock with dependancies"));
362 KASSERT(LIST_EMPTY(&lock->lf_inedges),
363 ("freeing lock with dependants"));
364 sx_xlock(&lf_lock_owners_lock);
365 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
366 lo->lo_refs--;
367 if (lo->lo_refs == 0) {
368#ifdef LOCKF_DEBUG
369 if (lockf_debug & 1)
370 printf("lf_free_lock: freeing lock owner %p\n",
371 lo);
372#endif
373 if (lo->lo_vertex) {
374 sx_xlock(&lf_owner_graph_lock);
375 graph_free_vertex(&lf_owner_graph,
376 lo->lo_vertex);
377 sx_xunlock(&lf_owner_graph_lock);
378 }
379 LIST_REMOVE(lo, lo_link);
380 free(lo, M_LOCKF);
381#ifdef LOCKF_DEBUG
382 if (lockf_debug & 4)
383 printf("Freed lock owner %p\n", lo);
384#endif
385 }
386 sx_unlock(&lf_lock_owners_lock);
387 }
388 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
389 vrele(lock->lf_vnode);
390 lock->lf_vnode = NULL;
391 }
392#ifdef LOCKF_DEBUG
393 if (lockf_debug & 4)
394 printf("Freed lock %p\n", lock);
395#endif
396 free(lock, M_LOCKF);
397}
398
399/*
91 * Advisory record locking support
92 */
93int
400 * Advisory record locking support
401 */
402int
94lf_advlock(ap, head, size)
95 struct vop_advlock_args /* {
96 struct vnode *a_vp;
97 caddr_t a_id;
98 int a_op;
99 struct flock *a_fl;
100 int a_flags;
101 } */ *ap;
102 struct lockf **head;
103 u_quad_t size;
403lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
404 u_quad_t size)
104{
405{
406 struct lockf *state, *freestate = NULL;
105 struct flock *fl = ap->a_fl;
407 struct flock *fl = ap->a_fl;
106 struct lockf *lock;
408 struct lockf_entry *lock;
107 struct vnode *vp = ap->a_vp;
409 struct vnode *vp = ap->a_vp;
410 caddr_t id = ap->a_id;
411 int flags = ap->a_flags;
412 int hash;
413 struct lock_owner *lo;
108 off_t start, end, oadd;
414 off_t start, end, oadd;
109 struct lockf *clean, *n;
110 int error;
111
112 /*
415 int error;
416
417 /*
 418	 * Handle the F_UNLCKSYS case first - no need to mess about
419 * creating a lock owner for this one.
420 */
421 if (ap->a_op == F_UNLCKSYS) {
422 lf_clearremotesys(fl->l_sysid);
423 return (0);
424 }
425
426 /*
113 * Convert the flock structure into a start and end.
114 */
115 switch (fl->l_whence) {
116
117 case SEEK_SET:
118 case SEEK_CUR:
119 /*
120 * Caller is responsible for adding any necessary offset

--- 16 unchanged lines hidden ---

137 return (EINVAL);
138 if (fl->l_len < 0) {
139 if (start == 0)
140 return (EINVAL);
141 end = start - 1;
142 start += fl->l_len;
143 if (start < 0)
144 return (EINVAL);
427 * Convert the flock structure into a start and end.
428 */
429 switch (fl->l_whence) {
430
431 case SEEK_SET:
432 case SEEK_CUR:
433 /*
434 * Caller is responsible for adding any necessary offset

--- 16 unchanged lines hidden ---

451 return (EINVAL);
452 if (fl->l_len < 0) {
453 if (start == 0)
454 return (EINVAL);
455 end = start - 1;
456 start += fl->l_len;
457 if (start < 0)
458 return (EINVAL);
145 } else if (fl->l_len == 0)
146 end = -1;
147 else {
459 } else if (fl->l_len == 0) {
460 end = OFF_MAX;
461 } else {
148 oadd = fl->l_len - 1;
149 if (oadd > OFF_MAX - start)
150 return (EOVERFLOW);
151 end = start + oadd;
152 }
153 /*
154 * Avoid the common case of unlocking when inode has no locks.
155 */
462 oadd = fl->l_len - 1;
463 if (oadd > OFF_MAX - start)
464 return (EOVERFLOW);
465 end = start + oadd;
466 }
467 /*
468 * Avoid the common case of unlocking when inode has no locks.
469 */
156 if (*head == (struct lockf *)0) {
470 if ((*statep) == NULL || LIST_EMPTY(&(*statep)->ls_active)) {
157 if (ap->a_op != F_SETLK) {
158 fl->l_type = F_UNLCK;
159 return (0);
160 }
161 }
471 if (ap->a_op != F_SETLK) {
472 fl->l_type = F_UNLCK;
473 return (0);
474 }
475 }
476
162 /*
477 /*
163 * Allocate a spare structure in case we have to split.
478 * Map our arguments to an existing lock owner or create one
479 * if this is the first time we have seen this owner.
164 */
480 */
165 clean = NULL;
166 if (ap->a_op == F_SETLK || ap->a_op == F_UNLCK) {
167 MALLOC(clean, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
168 clean->lf_next = NULL;
481 hash = lf_hash_owner(id, fl, flags);
482 sx_xlock(&lf_lock_owners_lock);
483 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
484 if (lf_owner_matches(lo, id, fl, flags))
485 break;
486 if (!lo) {
487 /*
488 * We initialise the lock with a reference
489 * count which matches the new lockf_entry
490 * structure created below.
491 */
492 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
493 M_WAITOK|M_ZERO);
494#ifdef LOCKF_DEBUG
495 if (lockf_debug & 4)
496 printf("Allocated lock owner %p\n", lo);
497#endif
498
499 lo->lo_refs = 1;
500 lo->lo_flags = flags;
501 lo->lo_id = id;
502 if (flags & F_REMOTE) {
503 lo->lo_pid = fl->l_pid;
504 lo->lo_sysid = fl->l_sysid;
505 } else if (flags & F_FLOCK) {
506 lo->lo_pid = -1;
507 lo->lo_sysid = 0;
508 } else {
509 struct proc *p = (struct proc *) id;
510 lo->lo_pid = p->p_pid;
511 lo->lo_sysid = 0;
512 }
513 lo->lo_vertex = NULL;
514
515#ifdef LOCKF_DEBUG
516 if (lockf_debug & 1) {
517 printf("lf_advlockasync: new lock owner %p ", lo);
518 lf_print_owner(lo);
519 printf("\n");
520 }
521#endif
522
523 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
524 } else {
525 /*
526 * We have seen this lock owner before, increase its
527 * reference count to account for the new lockf_entry
528 * structure we create below.
529 */
530 lo->lo_refs++;
169 }
531 }
532 sx_xunlock(&lf_lock_owners_lock);
533
170 /*
534 /*
171 * Create the lockf structure
535 * Create the lockf structure. We initialise the lf_owner
536 * field here instead of in lf_alloc_lock() to avoid paying
537 * the lf_lock_owners_lock tax twice.
172 */
538 */
173 MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
539 lock = lf_alloc_lock(NULL);
174 lock->lf_start = start;
175 lock->lf_end = end;
540 lock->lf_start = start;
541 lock->lf_end = end;
176 lock->lf_id = ap->a_id;
542 lock->lf_owner = lo;
543 lock->lf_vnode = vp;
544 if (flags & F_REMOTE) {
545 /*
546 * For remote locks, the caller may release its ref to
547 * the vnode at any time - we have to ref it here to
548 * prevent it from being recycled unexpectedly.
549 */
550 vref(vp);
551 }
552
177 /*
178 * XXX The problem is that VTOI is ufs specific, so it will
179 * break LOCKF_DEBUG for all other FS's other than UFS because
180 * it casts the vnode->data ptr to struct inode *.
181 */
182/* lock->lf_inode = VTOI(ap->a_vp); */
183 lock->lf_inode = (struct inode *)0;
184 lock->lf_type = fl->l_type;
553 /*
554 * XXX The problem is that VTOI is ufs specific, so it will
555 * break LOCKF_DEBUG for all other FS's other than UFS because
556 * it casts the vnode->data ptr to struct inode *.
557 */
558/* lock->lf_inode = VTOI(ap->a_vp); */
559 lock->lf_inode = (struct inode *)0;
560 lock->lf_type = fl->l_type;
185 lock->lf_head = head;
186 lock->lf_next = (struct lockf *)0;
187 TAILQ_INIT(&lock->lf_blkhd);
561 LIST_INIT(&lock->lf_outedges);
562 LIST_INIT(&lock->lf_inedges);
563 lock->lf_async_task = ap->a_task;
188 lock->lf_flags = ap->a_flags;
564 lock->lf_flags = ap->a_flags;
565
189 /*
566 /*
190 * Do the requested operation.
567 * Do the requested operation. First find our state structure
568 * and create a new one if necessary - the caller's *statep
569 * variable and the state's ls_threads count is protected by
570 * the vnode interlock.
191 */
192 VI_LOCK(vp);
571 */
572 VI_LOCK(vp);
573
574 /*
575 * Allocate a state structure if necessary.
576 */
577 state = *statep;
578 if (state == NULL) {
579 struct lockf *ls;
580
581 VI_UNLOCK(vp);
582
583 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
584 sx_init(&ls->ls_lock, "ls_lock");
585 LIST_INIT(&ls->ls_active);
586 LIST_INIT(&ls->ls_pending);
587
588 sx_xlock(&lf_lock_states_lock);
589 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
590 sx_xunlock(&lf_lock_states_lock);
591
592 /*
593 * Cope if we lost a race with some other thread while
594 * trying to allocate memory.
595 */
596 VI_LOCK(vp);
597 if ((*statep) == NULL) {
598 (*statep) = ls;
599 } else {
600 sx_xlock(&lf_lock_states_lock);
601 LIST_REMOVE(ls, ls_link);
602 sx_xunlock(&lf_lock_states_lock);
603 sx_destroy(&ls->ls_lock);
604 free(ls, M_LOCKF);
605 }
606 }
607 state = *statep;
608 state->ls_threads++;
609
610 VI_UNLOCK(vp);
611
612 sx_xlock(&state->ls_lock);
193 switch(ap->a_op) {
194 case F_SETLK:
613 switch(ap->a_op) {
614 case F_SETLK:
195 error = lf_setlock(lock, vp, &clean);
615 error = lf_setlock(state, lock, vp, ap->a_cookiep);
196 break;
197
198 case F_UNLCK:
616 break;
617
618 case F_UNLCK:
199 error = lf_clearlock(lock, &clean);
200 lock->lf_next = clean;
201 clean = lock;
619 error = lf_clearlock(state, lock);
620 lf_free_lock(lock);
202 break;
203
204 case F_GETLK:
621 break;
622
623 case F_GETLK:
205 error = lf_getlock(lock, fl);
206 lock->lf_next = clean;
207 clean = lock;
624 error = lf_getlock(state, lock, fl);
625 lf_free_lock(lock);
208 break;
209
626 break;
627
628 case F_CANCEL:
629 if (ap->a_cookiep)
630 error = lf_cancel(state, lock, *ap->a_cookiep);
631 else
632 error = EINVAL;
633 lf_free_lock(lock);
634 break;
635
210 default:
636 default:
211 lock->lf_next = clean;
212 clean = lock;
637 lf_free_lock(lock);
213 error = EINVAL;
214 break;
215 }
638 error = EINVAL;
639 break;
640 }
641
642#ifdef INVARIANTS
643 /*
644 * Check for some can't happen stuff. In this case, the active
645 * lock list becoming disordered or containing mutually
646 * blocking locks. We also check the pending list for locks
647 * which should be active (i.e. have no out-going edges).
648 */
649 LIST_FOREACH(lock, &state->ls_active, lf_link) {
650 struct lockf_entry *lf;
651 if (LIST_NEXT(lock, lf_link))
652 KASSERT((lock->lf_start
653 <= LIST_NEXT(lock, lf_link)->lf_start),
654 ("locks disordered"));
655 LIST_FOREACH(lf, &state->ls_active, lf_link) {
656 if (lock == lf)
657 break;
658 KASSERT(!lf_blocks(lock, lf),
659 ("two conflicting active locks"));
660 if (lock->lf_owner == lf->lf_owner)
661 KASSERT(!lf_overlaps(lock, lf),
662 ("two overlapping locks from same owner"));
663 }
664 }
665 LIST_FOREACH(lock, &state->ls_pending, lf_link) {
666 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
667 ("pending lock which should be active"));
668 }
669#endif
670 sx_xunlock(&state->ls_lock);
671
672 /*
673 * If we have removed the last active lock on the vnode and
674 * this is the last thread that was in-progress, we can free
675 * the state structure. We update the caller's pointer inside
676 * the vnode interlock but call free outside.
677 *
678 * XXX alternatively, keep the state structure around until
679 * the filesystem recycles - requires a callback from the
680 * filesystem.
681 */
682 VI_LOCK(vp);
683
684 state->ls_threads--;
685 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
686 KASSERT(LIST_EMPTY(&state->ls_pending),
687 ("freeing state with pending locks"));
688 freestate = state;
689 *statep = NULL;
690 }
691
216 VI_UNLOCK(vp);
692 VI_UNLOCK(vp);
217 for (lock = clean; lock != NULL; ) {
218 n = lock->lf_next;
219 free(lock, M_LOCKF);
220 lock = n;
693
694 if (freestate) {
695 sx_xlock(&lf_lock_states_lock);
696 LIST_REMOVE(freestate, ls_link);
697 sx_xunlock(&lf_lock_states_lock);
698 sx_destroy(&freestate->ls_lock);
699 free(freestate, M_LOCKF);
221 }
222 return (error);
223}
224
700 }
701 return (error);
702}
703
704int
705lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
706{
707 struct vop_advlockasync_args a;
708
709 a.a_vp = ap->a_vp;
710 a.a_id = ap->a_id;
711 a.a_op = ap->a_op;
712 a.a_fl = ap->a_fl;
713 a.a_flags = ap->a_flags;
714 a.a_task = NULL;
715 a.a_cookiep = NULL;
716
717 return (lf_advlockasync(&a, statep, size));
718}
719
225/*
720/*
721 * Return non-zero if locks 'x' and 'y' overlap.
722 */
723static int
724lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
725{
726
727 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
728}
729
730/*
731 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
732 */
733static int
734lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
735{
736
737 return x->lf_owner != y->lf_owner
738 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
739 && lf_overlaps(x, y);
740}
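/*
 * Illustrative examples: [3..9] and [5..12] overlap because
 * 3 <= 12 && 9 >= 5, while [0..2] and [3..5] do not.  An overlapping
 * pair only blocks when the owners differ and at least one of the two
 * locks is a write lock, so shared locks from different owners
 * coexist and a lock can never block its own owner.
 */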
741
742/*
743 * Allocate a lock edge from the free list
744 */
745static struct lockf_edge *
746lf_alloc_edge(void)
747{
748
749 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
750}
751
752/*
753 * Free a lock edge.
754 */
755static void
756lf_free_edge(struct lockf_edge *e)
757{
758
759 free(e, M_LOCKF);
760}
761
762
763/*
764 * Ensure that the lock's owner has a corresponding vertex in the
765 * owner graph.
766 */
767static void
768lf_alloc_vertex(struct lockf_entry *lock)
769{
770 struct owner_graph *g = &lf_owner_graph;
771
772 if (!lock->lf_owner->lo_vertex)
773 lock->lf_owner->lo_vertex =
774 graph_alloc_vertex(g, lock->lf_owner);
775}
776
777/*
778 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
779 * the new edge would cause a cycle in the owner graph.
780 */
781static int
782lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
783{
784 struct owner_graph *g = &lf_owner_graph;
785 struct lockf_edge *e;
786 int error;
787
788#ifdef INVARIANTS
789 LIST_FOREACH(e, &x->lf_outedges, le_outlink)
790 KASSERT(e->le_to != y, ("adding lock edge twice"));
791#endif
792
793 /*
794 * Make sure the two owners have entries in the owner graph.
795 */
796 lf_alloc_vertex(x);
797 lf_alloc_vertex(y);
798
799 error = graph_add_edge(g, x->lf_owner->lo_vertex,
800 y->lf_owner->lo_vertex);
801 if (error)
802 return (error);
803
804 e = lf_alloc_edge();
805 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
806 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
807 e->le_from = x;
808 e->le_to = y;
809
810 return (0);
811}
812
813/*
814 * Remove an edge from the lock graph.
815 */
816static void
817lf_remove_edge(struct lockf_edge *e)
818{
819 struct owner_graph *g = &lf_owner_graph;
820 struct lockf_entry *x = e->le_from;
821 struct lockf_entry *y = e->le_to;
822
823 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
824 LIST_REMOVE(e, le_outlink);
825 LIST_REMOVE(e, le_inlink);
826 e->le_from = NULL;
827 e->le_to = NULL;
828 lf_free_edge(e);
829}
830
831/*
832 * Remove all out-going edges from lock x.
833 */
834static void
835lf_remove_outgoing(struct lockf_entry *x)
836{
837 struct lockf_edge *e;
838
839 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
840 lf_remove_edge(e);
841 }
842}
843
844/*
845 * Remove all in-coming edges from lock x.
846 */
847static void
848lf_remove_incoming(struct lockf_entry *x)
849{
850 struct lockf_edge *e;
851
852 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
853 lf_remove_edge(e);
854 }
855}
856
857/*
858 * Walk the list of locks for the file and create an out-going edge
859 * from lock to each blocking lock.
860 */
861static int
862lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
863{
864 struct lockf_entry *overlap;
865 int error;
866
867 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
868 /*
869 * We may assume that the active list is sorted by
870 * lf_start.
871 */
872 if (overlap->lf_start > lock->lf_end)
873 break;
874 if (!lf_blocks(lock, overlap))
875 continue;
876
877 /*
878 * We've found a blocking lock. Add the corresponding
879 * edge to the graphs and see if it would cause a
880 * deadlock.
881 */
882 error = lf_add_edge(lock, overlap);
883
884 /*
885 * The only error that lf_add_edge returns is EDEADLK.
886 * Remove any edges we added and return the error.
887 */
888 if (error) {
889 lf_remove_outgoing(lock);
890 return (error);
891 }
892 }
893
894 /*
895 * We also need to add edges to sleeping locks that block
896 * us. This ensures that lf_wakeup_lock cannot grant two
897 * mutually blocking locks simultaneously and also enforces a
898 * 'first come, first served' fairness model. Note that this
899 * only happens if we are blocked by at least one active lock
900 * due to the call to lf_getblock in lf_setlock below.
901 */
902 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
903 if (!lf_blocks(lock, overlap))
904 continue;
905 /*
906 * We've found a blocking lock. Add the corresponding
907 * edge to the graphs and see if it would cause a
908 * deadlock.
909 */
910 error = lf_add_edge(lock, overlap);
911
912 /*
913 * The only error that lf_add_edge returns is EDEADLK.
914 * Remove any edges we added and return the error.
915 */
916 if (error) {
917 lf_remove_outgoing(lock);
918 return (error);
919 }
920 }
921
922 return (0);
923}
924
925/*
926 * Walk the list of pending locks for the file and create an in-coming
927 * edge from lock to each blocking lock.
928 */
929static int
930lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
931{
932 struct lockf_entry *overlap;
933 int error;
934
935 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
936 if (!lf_blocks(lock, overlap))
937 continue;
938
939 /*
940 * We've found a blocking lock. Add the corresponding
941 * edge to the graphs and see if it would cause a
942 * deadlock.
943 */
944 error = lf_add_edge(overlap, lock);
945
946 /*
947 * The only error that lf_add_edge returns is EDEADLK.
948 * Remove any edges we added and return the error.
949 */
950 if (error) {
951 lf_remove_incoming(lock);
952 return (error);
953 }
954 }
955 return (0);
956}
957
958/*
959 * Insert lock into the active list, keeping list entries ordered by
960 * increasing values of lf_start.
961 */
962static void
963lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
964{
965 struct lockf_entry *lf, *lfprev;
966
967 if (LIST_EMPTY(&state->ls_active)) {
968 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
969 return;
970 }
971
972 lfprev = NULL;
973 LIST_FOREACH(lf, &state->ls_active, lf_link) {
974 if (lf->lf_start > lock->lf_start) {
975 LIST_INSERT_BEFORE(lf, lock, lf_link);
976 return;
977 }
978 lfprev = lf;
979 }
980 LIST_INSERT_AFTER(lfprev, lock, lf_link);
981}
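/*
 * For example (illustrative): inserting a lock with lf_start == 100
 * into an active list whose entries start at 0, 50 and 200 places it
 * between the 50 and 200 entries.  Keeping the list sorted lets
 * lf_getblock() and lf_add_outgoing() stop scanning as soon as an
 * entry starts beyond the end of the lock being tested.
 */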
982
983/*
984 * Wake up a sleeping lock and remove it from the pending list now
985 * that all its dependancies have been resolved. The caller should
986 * arrange for the lock to be added to the active list, adjusting any
987 * existing locks for the same owner as needed.
988 */
989static void
990lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
991{
992
993 /*
994 * Remove from ls_pending list and wake up the caller
995 * or start the async notification, as appropriate.
996 */
997 LIST_REMOVE(wakelock, lf_link);
998#ifdef LOCKF_DEBUG
999 if (lockf_debug & 1)
1000 lf_print("lf_wakeup_lock: awakening", wakelock);
1001#endif /* LOCKF_DEBUG */
1002 if (wakelock->lf_async_task) {
1003 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1004 } else {
1005 wakeup(wakelock);
1006 }
1007}
1008
1009/*
1010 * Re-check all dependant locks and remove edges to locks that we no
1011 * longer block. If 'all' is non-zero, the lock has been removed and
1012 * we must remove all the dependancies, otherwise it has simply been
 1013 * reduced but remains active. Any pending locks which have been
1014 * unblocked are added to 'granted'
1015 */
1016static void
1017lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1018 struct lockf_entry_list *granted)
1019{
1020 struct lockf_edge *e, *ne;
1021 struct lockf_entry *deplock;
1022
1023 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1024 deplock = e->le_from;
1025 if (all || !lf_blocks(lock, deplock)) {
1026 sx_xlock(&lf_owner_graph_lock);
1027 lf_remove_edge(e);
1028 sx_xunlock(&lf_owner_graph_lock);
1029 if (LIST_EMPTY(&deplock->lf_outedges)) {
1030 lf_wakeup_lock(state, deplock);
1031 LIST_INSERT_HEAD(granted, deplock, lf_link);
1032 }
1033 }
1034 }
1035}
1036
1037/*
1038 * Set the start of an existing active lock, updating dependancies and
1039 * adding any newly woken locks to 'granted'.
1040 */
1041static void
1042lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1043 struct lockf_entry_list *granted)
1044{
1045
1046 KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1047 lock->lf_start = new_start;
1048 LIST_REMOVE(lock, lf_link);
1049 lf_insert_lock(state, lock);
1050 lf_update_dependancies(state, lock, FALSE, granted);
1051}
1052
1053/*
1054 * Set the end of an existing active lock, updating dependancies and
1055 * adding any newly woken locks to 'granted'.
1056 */
1057static void
1058lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1059 struct lockf_entry_list *granted)
1060{
1061
1062 KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1063 lock->lf_end = new_end;
1064 lf_update_dependancies(state, lock, FALSE, granted);
1065}
1066
1067/*
1068 * Add a lock to the active list, updating or removing any current
1069 * locks owned by the same owner and processing any pending locks that
1070 * become unblocked as a result. This code is also used for unlock
1071 * since the logic for updating existing locks is identical.
1072 *
1073 * As a result of processing the new lock, we may unblock existing
1074 * pending locks as a result of downgrading/unlocking. We simply
1075 * activate the newly granted locks by looping.
1076 *
1077 * Since the new lock already has its dependancies set up, we always
 1078 * add it to the list (unless it's an unlock request). This may
 1079 * fragment the lock list in some pathological cases but it's probably
1080 * not a real problem.
1081 */
1082static void
1083lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1084{
1085 struct lockf_entry *overlap, *lf;
1086 struct lockf_entry_list granted;
1087 int ovcase;
1088
1089 LIST_INIT(&granted);
1090 LIST_INSERT_HEAD(&granted, lock, lf_link);
1091
1092 while (!LIST_EMPTY(&granted)) {
1093 lock = LIST_FIRST(&granted);
1094 LIST_REMOVE(lock, lf_link);
1095
1096 /*
1097 * Skip over locks owned by other processes. Handle
1098 * any locks that overlap and are owned by ourselves.
1099 */
1100 overlap = LIST_FIRST(&state->ls_active);
1101 for (;;) {
1102 ovcase = lf_findoverlap(&overlap, lock, SELF);
1103
1104#ifdef LOCKF_DEBUG
1105 if (ovcase && (lockf_debug & 2)) {
1106 printf("lf_setlock: overlap %d", ovcase);
1107 lf_print("", overlap);
1108 }
1109#endif
1110 /*
1111 * Six cases:
1112 * 0) no overlap
1113 * 1) overlap == lock
1114 * 2) overlap contains lock
1115 * 3) lock contains overlap
1116 * 4) overlap starts before lock
1117 * 5) overlap ends after lock
1118 */
1119 switch (ovcase) {
1120 case 0: /* no overlap */
1121 break;
1122
1123 case 1: /* overlap == lock */
1124 /*
1125 * We have already setup the
1126 * dependants for the new lock, taking
1127 * into account a possible downgrade
1128 * or unlock. Remove the old lock.
1129 */
1130 LIST_REMOVE(overlap, lf_link);
1131 lf_update_dependancies(state, overlap, TRUE,
1132 &granted);
1133 lf_free_lock(overlap);
1134 break;
1135
1136 case 2: /* overlap contains lock */
1137 /*
1138 * Just split the existing lock.
1139 */
1140 lf_split(state, overlap, lock, &granted);
1141 break;
1142
1143 case 3: /* lock contains overlap */
1144 /*
1145 * Delete the overlap and advance to
1146 * the next entry in the list.
1147 */
1148 lf = LIST_NEXT(overlap, lf_link);
1149 LIST_REMOVE(overlap, lf_link);
1150 lf_update_dependancies(state, overlap, TRUE,
1151 &granted);
1152 lf_free_lock(overlap);
1153 overlap = lf;
1154 continue;
1155
1156 case 4: /* overlap starts before lock */
1157 /*
1158 * Just update the overlap end and
1159 * move on.
1160 */
1161 lf_set_end(state, overlap, lock->lf_start - 1,
1162 &granted);
1163 overlap = LIST_NEXT(overlap, lf_link);
1164 continue;
1165
1166 case 5: /* overlap ends after lock */
1167 /*
1168 * Change the start of overlap and
1169 * re-insert.
1170 */
1171 lf_set_start(state, overlap, lock->lf_end + 1,
1172 &granted);
1173 break;
1174 }
1175 break;
1176 }
1177#ifdef LOCKF_DEBUG
1178 if (lockf_debug & 1) {
1179 if (lock->lf_type != F_UNLCK)
1180 lf_print("lf_activate_lock: activated", lock);
1181 else
1182 lf_print("lf_activate_lock: unlocked", lock);
1183 lf_printlist("lf_activate_lock", lock);
1184 }
1185#endif /* LOCKF_DEBUG */
1186 if (lock->lf_type != F_UNLCK)
1187 lf_insert_lock(state, lock);
1188 }
1189}
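/*
 * A worked example of the overlap handling above (illustrative, not
 * part of either revision): an owner holding a write lock on [0..10]
 * that now sets a read lock on [3..5] hits case 2; lf_split() carves
 * the old entry into [0..2] and [6..10] write locks and the new
 * [3..5] read lock is inserted between them.  Any pending lock that
 * no longer conflicts with the shrunken pieces is re-checked through
 * lf_update_dependancies() and may be woken via lf_wakeup_lock().
 */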
1190
1191/*
1192 * Cancel a pending lock request, either as a result of a signal or a
1193 * cancel request for an async lock.
1194 */
1195static void
1196lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1197{
1198 struct lockf_entry_list granted;
1199
1200 /*
1201 * Note it is theoretically possible that cancelling this lock
1202 * may allow some other pending lock to become
1203 * active. Consider this case:
1204 *
1205 * Owner Action Result Dependancies
1206 *
1207 * A: lock [0..0] succeeds
1208 * B: lock [2..2] succeeds
1209 * C: lock [1..2] blocked C->B
1210 * D: lock [0..1] blocked C->B,D->A,D->C
1211 * A: unlock [0..0] C->B,D->C
1212 * C: cancel [1..2]
1213 */
1214
1215 LIST_REMOVE(lock, lf_link);
1216
1217 /*
1218 * Removing out-going edges is simple.
1219 */
1220 sx_xlock(&lf_owner_graph_lock);
1221 lf_remove_outgoing(lock);
1222 sx_xunlock(&lf_owner_graph_lock);
1223
1224 /*
1225 * Removing in-coming edges may allow some other lock to
1226 * become active - we use lf_update_dependancies to figure
1227 * this out.
1228 */
1229 LIST_INIT(&granted);
1230 lf_update_dependancies(state, lock, TRUE, &granted);
1231 lf_free_lock(lock);
1232
1233 /*
1234 * Feed any newly active locks to lf_activate_lock.
1235 */
1236 while (!LIST_EMPTY(&granted)) {
1237 lock = LIST_FIRST(&granted);
1238 LIST_REMOVE(lock, lf_link);
1239 lf_activate_lock(state, lock);
1240 }
1241}
1242
1243/*
226 * Set a byte-range lock.
227 */
228static int
1244 * Set a byte-range lock.
1245 */
1246static int
229lf_setlock(lock, vp, clean)
230 struct lockf *lock;
231 struct vnode *vp;
232 struct lockf **clean;
1247lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1248 void **cookiep)
233{
1249{
234 struct lockf *block;
235 struct lockf **head = lock->lf_head;
236 struct lockf **prev, *overlap, *ltmp;
1250 struct lockf_entry *block;
237 static char lockstr[] = "lockf";
1251 static char lockstr[] = "lockf";
238 int ovcase, priority, needtolink, error;
1252 int priority, error;
239
240#ifdef LOCKF_DEBUG
241 if (lockf_debug & 1)
242 lf_print("lf_setlock", lock);
243#endif /* LOCKF_DEBUG */
244
245 /*
246 * Set the priority
247 */
248 priority = PLOCK;
249 if (lock->lf_type == F_WRLCK)
250 priority += 4;
251 priority |= PCATCH;
252 /*
253 * Scan lock list for this file looking for locks that would block us.
254 */
1253
1254#ifdef LOCKF_DEBUG
1255 if (lockf_debug & 1)
1256 lf_print("lf_setlock", lock);
1257#endif /* LOCKF_DEBUG */
1258
1259 /*
1260 * Set the priority
1261 */
1262 priority = PLOCK;
1263 if (lock->lf_type == F_WRLCK)
1264 priority += 4;
1265 priority |= PCATCH;
1266 /*
1267 * Scan lock list for this file looking for locks that would block us.
1268 */
255 while ((block = lf_getblock(lock))) {
1269 while ((block = lf_getblock(state, lock))) {
256 /*
257 * Free the structure and return if nonblocking.
258 */
1270 /*
1271 * Free the structure and return if nonblocking.
1272 */
259 if ((lock->lf_flags & F_WAIT) == 0) {
260 lock->lf_next = *clean;
261 *clean = lock;
262 return (EAGAIN);
1273 if ((lock->lf_flags & F_WAIT) == 0
1274 && lock->lf_async_task == NULL) {
1275 lf_free_lock(lock);
1276 error = EAGAIN;
1277 goto out;
263 }
1278 }
1279
264 /*
1280 /*
265 * We are blocked. Since flock style locks cover
266 * the whole file, there is no chance for deadlock.
267 * For byte-range locks we must check for deadlock.
268 *
269 * Deadlock detection is done by looking through the
270 * wait channels to see if there are any cycles that
271 * involve us. MAXDEPTH is set just to make sure we
272 * do not go off into neverland.
1281 * We are blocked. Create edges to each blocking lock,
1282 * checking for deadlock using the owner graph. For
1283 * simplicity, we run deadlock detection for all
1284 * locks, posix and otherwise.
273 */
1285 */
274 if ((lock->lf_flags & F_POSIX) &&
275 (block->lf_flags & F_POSIX)) {
276 struct proc *wproc;
277 struct proc *nproc;
278 struct thread *td;
279 struct lockf *waitblock;
280 int i = 0;
1286 sx_xlock(&lf_owner_graph_lock);
1287 error = lf_add_outgoing(state, lock);
1288 sx_xunlock(&lf_owner_graph_lock);
281
1289
282 /* The block is waiting on something */
283 wproc = (struct proc *)block->lf_id;
284restart:
285 nproc = NULL;
286 PROC_LOCK(wproc);
287 FOREACH_THREAD_IN_PROC(wproc, td) {
288 thread_lock(td);
289 for (;;) {
290 if (!TD_ON_SLEEPQ(td) ||
291 td->td_wmesg != lockstr)
292 break;
293 waitblock = (struct lockf *)td->td_wchan;
294 /* Get the owner of the blocking lock */
295 if (waitblock->lf_next == NULL)
296 break;
297 waitblock = waitblock->lf_next;
298 if ((waitblock->lf_flags & F_POSIX) == 0)
299 break;
300 if (waitblock->lf_id == lock->lf_id) {
301 thread_unlock(td);
302 PROC_UNLOCK(wproc);
303 lock->lf_next = *clean;
304 *clean = lock;
305 return (EDEADLK);
306 }
307 nproc = (struct proc *)waitblock->lf_id;
308 break;
309 }
310 thread_unlock(td);
311 if (nproc)
312 break;
313 }
314 PROC_UNLOCK(wproc);
315 wproc = nproc;
316 if (++i < maxlockdepth && wproc)
317 goto restart;
1290 if (error) {
1291#ifdef LOCKF_DEBUG
1292 if (lockf_debug & 1)
1293 lf_print("lf_setlock: deadlock", lock);
1294#endif
1295 lf_free_lock(lock);
1296 goto out;
318 }
1297 }
1298
319 /*
320 * For flock type locks, we must first remove
321 * any shared locks that we hold before we sleep
322 * waiting for an exclusive lock.
323 */
324 if ((lock->lf_flags & F_FLOCK) &&
325 lock->lf_type == F_WRLCK) {
326 lock->lf_type = F_UNLCK;
1299 /*
1300 * For flock type locks, we must first remove
1301 * any shared locks that we hold before we sleep
1302 * waiting for an exclusive lock.
1303 */
1304 if ((lock->lf_flags & F_FLOCK) &&
1305 lock->lf_type == F_WRLCK) {
1306 lock->lf_type = F_UNLCK;
327 (void) lf_clearlock(lock, clean);
1307 lf_activate_lock(state, lock);
328 lock->lf_type = F_WRLCK;
329 }
330 /*
1308 lock->lf_type = F_WRLCK;
1309 }
1310 /*
331 * Add our lock to the blocked list and sleep until we're free.
332 * Remember who blocked us (for deadlock detection).
1311 * We have added edges to everything that blocks
1312 * us. Sleep until they all go away.
333 */
1313 */
334 lock->lf_next = block;
335 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
1314 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
336#ifdef LOCKF_DEBUG
337 if (lockf_debug & 1) {
1315#ifdef LOCKF_DEBUG
1316 if (lockf_debug & 1) {
338 lf_print("lf_setlock: blocking on", block);
339 lf_printlist("lf_setlock", block);
1317 struct lockf_edge *e;
1318 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1319 lf_print("lf_setlock: blocking on", e->le_to);
1320 lf_printlist("lf_setlock", e->le_to);
1321 }
340 }
341#endif /* LOCKF_DEBUG */
1322 }
1323#endif /* LOCKF_DEBUG */
342 error = msleep(lock, VI_MTX(vp), priority, lockstr, 0);
1324
1325 if ((lock->lf_flags & F_WAIT) == 0) {
1326 /*
1327 * The caller requested async notification -
1328 * this callback happens when the blocking
1329 * lock is released, allowing the caller to
1330 * make another attempt to take the lock.
1331 */
1332 *cookiep = (void *) lock;
1333 error = EINPROGRESS;
1334 goto out;
1335 }
1336
1337 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
343 /*
344 * We may have been awakened by a signal and/or by a
1338 /*
1339 * We may have been awakened by a signal and/or by a
345 * debugger continuing us (in which cases we must remove
346 * ourselves from the blocked list) and/or by another
347 * process releasing a lock (in which case we have
348 * already been removed from the blocked list and our
349 * lf_next field set to NOLOCKF).
1340 * debugger continuing us (in which cases we must
1341 * remove our lock graph edges) and/or by another
1342 * process releasing a lock (in which case our edges
1343 * have already been removed and we have been moved to
1344 * the active list).
1345 *
1346 * Note that it is possible to receive a signal after
1347 * we were successfully woken (and moved to the active
1348 * list) but before we resumed execution. In this
1349 * case, our lf_outedges list will be clear. We
1350 * pretend there was no error.
1351 *
1352 * Note also, if we have been sleeping long enough, we
1353 * may now have incoming edges from some newer lock
1354 * which is waiting behind us in the queue.
350 */
1355 */
351 if (lock->lf_next) {
352 TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
353 lock->lf_next = NOLOCKF;
1356 if (LIST_EMPTY(&lock->lf_outedges)) {
1357 error = 0;
1358 } else {
1359 lf_cancel_lock(state, lock);
1360 goto out;
354 }
1361 }
355 if (error) {
356 lock->lf_next = *clean;
357 *clean = lock;
358 return (error);
1362#ifdef LOCKF_DEBUG
1363 if (lockf_debug & 1) {
1364 lf_print("lf_setlock: granted", lock);
359 }
1365 }
1366#endif
1367 goto out;
360 }
361 /*
1368 }
1369 /*
1370 * It looks like we are going to grant the lock. First add
1371 * edges from any currently pending lock that the new lock
1372 * would block.
1373 */
1374 sx_xlock(&lf_owner_graph_lock);
1375 error = lf_add_incoming(state, lock);
1376 sx_xunlock(&lf_owner_graph_lock);
1377 if (error) {
1378#ifdef LOCKF_DEBUG
1379 if (lockf_debug & 1)
1380 lf_print("lf_setlock: deadlock", lock);
1381#endif
1382 lf_free_lock(lock);
1383 goto out;
1384 }
1385
1386 /*
362 * No blocks!! Add the lock. Note that we will
363 * downgrade or upgrade any overlapping locks this
364 * process already owns.
1387 * No blocks!! Add the lock. Note that we will
1388 * downgrade or upgrade any overlapping locks this
1389 * process already owns.
365 *
366 * Skip over locks owned by other processes.
367 * Handle any locks that overlap and are owned by ourselves.
368 */
1390 */
369 prev = head;
370 block = *head;
371 needtolink = 1;
372 for (;;) {
373 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
374 if (ovcase)
375 block = overlap->lf_next;
376 /*
377 * Six cases:
378 * 0) no overlap
379 * 1) overlap == lock
380 * 2) overlap contains lock
381 * 3) lock contains overlap
382 * 4) overlap starts before lock
383 * 5) overlap ends after lock
384 */
385 switch (ovcase) {
386 case 0: /* no overlap */
387 if (needtolink) {
388 *prev = lock;
389 lock->lf_next = overlap;
390 }
391 break;
392
393 case 1: /* overlap == lock */
394 /*
395 * If downgrading lock, others may be
396 * able to acquire it.
397 */
398 if (lock->lf_type == F_RDLCK &&
399 overlap->lf_type == F_WRLCK)
400 lf_wakelock(overlap);
401 overlap->lf_type = lock->lf_type;
402 lock->lf_next = *clean;
403 *clean = lock;
404 lock = overlap; /* for debug output below */
405 break;
406
407 case 2: /* overlap contains lock */
408 /*
409 * Check for common starting point and different types.
410 */
411 if (overlap->lf_type == lock->lf_type) {
412 lock->lf_next = *clean;
413 *clean = lock;
414 lock = overlap; /* for debug output below */
415 break;
416 }
417 if (overlap->lf_start == lock->lf_start) {
418 *prev = lock;
419 lock->lf_next = overlap;
420 overlap->lf_start = lock->lf_end + 1;
421 } else
422 lf_split(overlap, lock, clean);
423 lf_wakelock(overlap);
424 break;
425
426 case 3: /* lock contains overlap */
427 /*
428 * If downgrading lock, others may be able to
429 * acquire it, otherwise take the list.
430 */
431 if (lock->lf_type == F_RDLCK &&
432 overlap->lf_type == F_WRLCK) {
433 lf_wakelock(overlap);
434 } else {
435 while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
436 ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
437 TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
438 lf_block);
439 TAILQ_INSERT_TAIL(&lock->lf_blkhd,
440 ltmp, lf_block);
441 ltmp->lf_next = lock;
442 }
443 }
444 /*
445 * Add the new lock if necessary and delete the overlap.
446 */
447 if (needtolink) {
448 *prev = lock;
449 lock->lf_next = overlap->lf_next;
450 prev = &lock->lf_next;
451 needtolink = 0;
452 } else
453 *prev = overlap->lf_next;
454 overlap->lf_next = *clean;
455 *clean = overlap;
456 continue;
457
458 case 4: /* overlap starts before lock */
459 /*
460 * Add lock after overlap on the list.
461 */
462 lock->lf_next = overlap->lf_next;
463 overlap->lf_next = lock;
464 overlap->lf_end = lock->lf_start - 1;
465 prev = &lock->lf_next;
466 lf_wakelock(overlap);
467 needtolink = 0;
468 continue;
469
470 case 5: /* overlap ends after lock */
471 /*
472 * Add the new lock before overlap.
473 */
474 if (needtolink) {
475 *prev = lock;
476 lock->lf_next = overlap;
477 }
478 overlap->lf_start = lock->lf_end + 1;
479 lf_wakelock(overlap);
480 break;
481 }
482 break;
483 }
484#ifdef LOCKF_DEBUG
485 if (lockf_debug & 1) {
486 lf_print("lf_setlock: got the lock", lock);
487 lf_printlist("lf_setlock", lock);
488 }
489#endif /* LOCKF_DEBUG */
490 return (0);
1391 lf_activate_lock(state, lock);
1392 error = 0;
1393out:
1394 return (error);
491}
492
493/*
494 * Remove a byte-range lock on an inode.
495 *
496 * Generally, find the lock (or an overlap to that lock)
497 * and remove it (or shrink it), then wakeup anyone we can.
498 */
499static int
1395}
1396
1397/*
1398 * Remove a byte-range lock on an inode.
1399 *
1400 * Generally, find the lock (or an overlap to that lock)
1401 * and remove it (or shrink it), then wakeup anyone we can.
1402 */
1403static int
500lf_clearlock(unlock, clean)
501 struct lockf *unlock;
502 struct lockf **clean;
1404lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
503{
1405{
504 struct lockf **head = unlock->lf_head;
505 register struct lockf *lf = *head;
506 struct lockf *overlap, **prev;
507 int ovcase;
1406 struct lockf_entry *overlap;
508
1407
509 if (lf == NOLOCKF)
1408 overlap = LIST_FIRST(&state->ls_active);
1409
1410 if (overlap == NOLOCKF)
510 return (0);
511#ifdef LOCKF_DEBUG
512 if (unlock->lf_type != F_UNLCK)
513 panic("lf_clearlock: bad type");
514 if (lockf_debug & 1)
515 lf_print("lf_clearlock", unlock);
516#endif /* LOCKF_DEBUG */
1411 return (0);
1412#ifdef LOCKF_DEBUG
1413 if (unlock->lf_type != F_UNLCK)
1414 panic("lf_clearlock: bad type");
1415 if (lockf_debug & 1)
1416 lf_print("lf_clearlock", unlock);
1417#endif /* LOCKF_DEBUG */
517 prev = head;
518 while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
519 /*
520 * Wakeup the list of locks to be retried.
521 */
522 lf_wakelock(overlap);
523
1418
524 switch (ovcase) {
1419 lf_activate_lock(state, unlock);
525
1420
526 case 1: /* overlap == lock */
527 *prev = overlap->lf_next;
528 overlap->lf_next = *clean;
529 *clean = overlap;
530 break;
531
532 case 2: /* overlap contains lock: split it */
533 if (overlap->lf_start == unlock->lf_start) {
534 overlap->lf_start = unlock->lf_end + 1;
535 break;
536 }
537 lf_split(overlap, unlock, clean);
538 overlap->lf_next = unlock->lf_next;
539 break;
540
541 case 3: /* lock contains overlap */
542 *prev = overlap->lf_next;
543 lf = overlap->lf_next;
544 overlap->lf_next = *clean;
545 *clean = overlap;
546 continue;
547
548 case 4: /* overlap starts before lock */
549 overlap->lf_end = unlock->lf_start - 1;
550 prev = &overlap->lf_next;
551 lf = overlap->lf_next;
552 continue;
553
554 case 5: /* overlap ends after lock */
555 overlap->lf_start = unlock->lf_end + 1;
556 break;
557 }
558 break;
559 }
560#ifdef LOCKF_DEBUG
561 if (lockf_debug & 1)
562 lf_printlist("lf_clearlock", unlock);
563#endif /* LOCKF_DEBUG */
564 return (0);
565}
566
567/*
1421 return (0);
1422}
1423
1424/*
568 * Check whether there is a blocking lock,
569 * and if so return its process identifier.
1425 * Check whether there is a blocking lock, and if so return its
1426 * details in '*fl'.
570 */
571static int
1427 */
1428static int
572lf_getlock(lock, fl)
573 register struct lockf *lock;
574 register struct flock *fl;
1429lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
575{
1430{
576 register struct lockf *block;
1431 struct lockf_entry *block;
577
578#ifdef LOCKF_DEBUG
579 if (lockf_debug & 1)
580 lf_print("lf_getlock", lock);
581#endif /* LOCKF_DEBUG */
582
1432
1433#ifdef LOCKF_DEBUG
1434 if (lockf_debug & 1)
1435 lf_print("lf_getlock", lock);
1436#endif /* LOCKF_DEBUG */
1437
583 if ((block = lf_getblock(lock))) {
1438 if ((block = lf_getblock(state, lock))) {
584 fl->l_type = block->lf_type;
585 fl->l_whence = SEEK_SET;
586 fl->l_start = block->lf_start;
1439 fl->l_type = block->lf_type;
1440 fl->l_whence = SEEK_SET;
1441 fl->l_start = block->lf_start;
587 if (block->lf_end == -1)
1442 if (block->lf_end == OFF_MAX)
588 fl->l_len = 0;
589 else
590 fl->l_len = block->lf_end - block->lf_start + 1;
1443 fl->l_len = 0;
1444 else
1445 fl->l_len = block->lf_end - block->lf_start + 1;
591 if (block->lf_flags & F_POSIX)
592 fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
593 else
594 fl->l_pid = -1;
1446 fl->l_pid = block->lf_owner->lo_pid;
1447 fl->l_sysid = block->lf_owner->lo_sysid;
595 } else {
596 fl->l_type = F_UNLCK;
597 }
598 return (0);
599}
600
601/*
1448 } else {
1449 fl->l_type = F_UNLCK;
1450 }
1451 return (0);
1452}
1453
1454/*
1455 * Cancel an async lock request.
1456 */
1457static int
1458lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1459{
1460 struct lockf_entry *reallock;
1461
1462 /*
1463 * We need to match this request with an existing lock
1464 * request.
1465 */
1466 LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1467 if ((void *) reallock == cookie) {
1468 /*
1469 * Double-check that this lock looks right
1470 * (maybe use a rolling ID for the cancel
1471 * cookie instead?)
1472 */
1473 if (!(reallock->lf_vnode == lock->lf_vnode
1474 && reallock->lf_start == lock->lf_start
1475 && reallock->lf_end == lock->lf_end)) {
1476 return (ENOENT);
1477 }
1478
1479 /*
1480 * Make sure this lock was async and then just
1481 * remove it from its wait lists.
1482 */
1483 if (!reallock->lf_async_task) {
1484 return (ENOENT);
1485 }
1486
1487 /*
1488 * Note that since any other thread must take
1489 * state->ls_lock before it can possibly
1490 * trigger the async callback, we are safe
1491 * from a race with lf_wakeup_lock, i.e. we
1492 * can free the lock (actually our caller does
1493 * this).
1494 */
1495 lf_cancel_lock(state, reallock);
1496 return (0);
1497 }
1498 }
1499
1500 /*
1501 * We didn't find a matching lock - not much we can do here.
1502 */
1503 return (ENOENT);
1504}
1505
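The cookie-matching idea above can be pictured with a small stand-alone sketch (all names are illustrative, nothing here is a kernel API): a pending request is cancelled only if the opaque cookie still identifies an entry whose fields match, which guards against acting on a stale or recycled pointer.

#include <stdio.h>
#include <stdlib.h>

struct request {
	struct request *next;
	long start, end;
};

static struct request *pending;		/* list of outstanding requests */

static int
cancel_request(void *cookie, long start, long end)
{
	struct request **rp, *r;

	for (rp = &pending; (r = *rp) != NULL; rp = &r->next) {
		if ((void *)r != cookie)
			continue;
		if (r->start != start || r->end != end)
			return (-1);	/* cookie no longer matches */
		*rp = r->next;		/* unlink; the caller frees it */
		return (0);
	}
	return (-1);			/* no such request */
}

int
main(void)
{
	struct request *r;

	if ((r = calloc(1, sizeof(*r))) == NULL)
		return (1);
	r->start = 0;
	r->end = 99;
	r->next = pending;
	pending = r;
	printf("cancel: %d\n", cancel_request(r, 0, 99));	/* prints "cancel: 0" */
	free(r);
	return (0);
}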
1506/*
602 * Walk the list of locks for an inode and
603 * return the first blocking lock.
604 */
1507 * Walk the list of locks for an inode and
1508 * return the first blocking lock.
1509 */
605static struct lockf *
606lf_getblock(lock)
607 register struct lockf *lock;
1510static struct lockf_entry *
1511lf_getblock(struct lockf *state, struct lockf_entry *lock)
608{
1512{
609 struct lockf **prev, *overlap, *lf = *(lock->lf_head);
610 int ovcase;
1513 struct lockf_entry *overlap;
611
1514
612 prev = lock->lf_head;
613 while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
1515 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
614 /*
1516 /*
615 * We've found an overlap, see if it blocks us
1517 * We may assume that the active list is sorted by
1518 * lf_start.
616 */
1519 */
617 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
618 return (overlap);
619 /*
620 * Nope, point to the next one on the list and
621 * see if it blocks us
622 */
623 lf = overlap->lf_next;
1520 if (overlap->lf_start > lock->lf_end)
1521 break;
1522 if (!lf_blocks(lock, overlap))
1523 continue;
1524 return (overlap);
624 }
625 return (NOLOCKF);
626}
627
628/*
1525 }
1526 return (NOLOCKF);
1527}
1528
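A stand-alone sketch of the early-exit scan above, with hypothetical types and data (the real test also compares lock owners via lf_blocks()): because the active list is ordered by start offset, the walk can stop at the first entry that begins past the end of the range being checked.

#include <stdio.h>

struct range { long start, end; int exclusive; };

static const struct range *
first_conflict(const struct range *active, int n, const struct range *want)
{
	int i;

	for (i = 0; i < n; i++) {
		if (active[i].start > want->end)
			break;			/* sorted by start: nothing later overlaps */
		if (active[i].end < want->start)
			continue;		/* ends before our range starts */
		if (want->exclusive || active[i].exclusive)
			return (&active[i]);	/* shared/shared never conflicts */
	}
	return (NULL);
}

int
main(void)
{
	struct range active[] = { { 0, 9, 0 }, { 10, 19, 1 }, { 30, 39, 0 } };
	struct range want = { 15, 25, 0 };
	const struct range *c = first_conflict(active, 3, &want);

	if (c != NULL)
		printf("blocked by [%ld..%ld]\n", c->start, c->end);
	else
		printf("no conflict\n");
	return (0);
}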
1529/*
629 * Walk the list of locks for an inode to
630 * find an overlapping lock (if any).
1530 * Walk the list of locks for an inode to find an overlapping lock (if
1531 * any) and return a classification of that overlap.
631 *
1532 *
1533 * Arguments:
1534 * *overlap The place in the lock list to start looking
1535 * lock The lock which is being tested
1536 * type Pass 'SELF' to test only locks with the same
1537 * owner as lock, or 'OTHERS' to test only locks
1538 * with a different owner
1539 *
1540 * Returns one of six values:
1541 * 0) no overlap
1542 * 1) overlap == lock
1543 * 2) overlap contains lock
1544 * 3) lock contains overlap
1545 * 4) overlap starts before lock
1546 * 5) overlap ends after lock
1547 *
1548 * If there is an overlapping lock, '*overlap' is set to point at the
1549 * overlapping lock.
1550 *
632 * NOTE: this returns only the FIRST overlapping lock. There
633 * may be more than one.
634 */
635static int
1551 * NOTE: this returns only the FIRST overlapping lock. There
1552 * may be more than one.
1553 */
1554static int
636lf_findoverlap(lf, lock, type, prev, overlap)
637 register struct lockf *lf;
638 struct lockf *lock;
639 int type;
640 struct lockf ***prev;
641 struct lockf **overlap;
1555lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
642{
1556{
1557 struct lockf_entry *lf;
643 off_t start, end;
1558 off_t start, end;
1559 int res;
644
1560
645 *overlap = lf;
646 if (lf == NOLOCKF)
1561 if ((*overlap) == NOLOCKF) {
647 return (0);
1562 return (0);
1563 }
648#ifdef LOCKF_DEBUG
649 if (lockf_debug & 2)
650 lf_print("lf_findoverlap: looking for overlap in", lock);
651#endif /* LOCKF_DEBUG */
652 start = lock->lf_start;
653 end = lock->lf_end;
1564#ifdef LOCKF_DEBUG
1565 if (lockf_debug & 2)
1566 lf_print("lf_findoverlap: looking for overlap in", lock);
1567#endif /* LOCKF_DEBUG */
1568 start = lock->lf_start;
1569 end = lock->lf_end;
654 while (lf != NOLOCKF) {
655 if (((type & SELF) && lf->lf_id != lock->lf_id) ||
656 ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
657 *prev = &lf->lf_next;
658 *overlap = lf = lf->lf_next;
1570 res = 0;
1571 while (*overlap) {
1572 lf = *overlap;
1573 if (lf->lf_start > end)
1574 break;
1575 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1576 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1577 *overlap = LIST_NEXT(lf, lf_link);
659 continue;
660 }
661#ifdef LOCKF_DEBUG
662 if (lockf_debug & 2)
663 lf_print("\tchecking", lf);
664#endif /* LOCKF_DEBUG */
665 /*
666 * OK, check for overlap
667 *
668 * Six cases:
669 * 0) no overlap
670 * 1) overlap == lock
671 * 2) overlap contains lock
672 * 3) lock contains overlap
673 * 4) overlap starts before lock
674 * 5) overlap ends after lock
675 */
1578 continue;
1579 }
1580#ifdef LOCKF_DEBUG
1581 if (lockf_debug & 2)
1582 lf_print("\tchecking", lf);
1583#endif /* LOCKF_DEBUG */
1584 /*
1585 * OK, check for overlap
1586 *
1587 * Six cases:
1588 * 0) no overlap
1589 * 1) overlap == lock
1590 * 2) overlap contains lock
1591 * 3) lock contains overlap
1592 * 4) overlap starts before lock
1593 * 5) overlap ends after lock
1594 */
676 if ((lf->lf_end != -1 && start > lf->lf_end) ||
677 (end != -1 && lf->lf_start > end)) {
1595 if (start > lf->lf_end) {
678 /* Case 0 */
679#ifdef LOCKF_DEBUG
680 if (lockf_debug & 2)
681 printf("no overlap\n");
682#endif /* LOCKF_DEBUG */
1596 /* Case 0 */
1597#ifdef LOCKF_DEBUG
1598 if (lockf_debug & 2)
1599 printf("no overlap\n");
1600#endif /* LOCKF_DEBUG */
683 if ((type & SELF) && end != -1 && lf->lf_start > end)
684 return (0);
685 *prev = &lf->lf_next;
686 *overlap = lf = lf->lf_next;
1601 *overlap = LIST_NEXT(lf, lf_link);
687 continue;
688 }
1602 continue;
1603 }
689 if ((lf->lf_start == start) && (lf->lf_end == end)) {
1604 if (lf->lf_start == start && lf->lf_end == end) {
690 /* Case 1 */
691#ifdef LOCKF_DEBUG
692 if (lockf_debug & 2)
693 printf("overlap == lock\n");
694#endif /* LOCKF_DEBUG */
1605 /* Case 1 */
1606#ifdef LOCKF_DEBUG
1607 if (lockf_debug & 2)
1608 printf("overlap == lock\n");
1609#endif /* LOCKF_DEBUG */
695 return (1);
1610 res = 1;
1611 break;
696 }
1612 }
697 if ((lf->lf_start <= start) &&
698 (end != -1) &&
699 ((lf->lf_end >= end) || (lf->lf_end == -1))) {
1613 if (lf->lf_start <= start && lf->lf_end >= end) {
700 /* Case 2 */
701#ifdef LOCKF_DEBUG
702 if (lockf_debug & 2)
703 printf("overlap contains lock\n");
704#endif /* LOCKF_DEBUG */
1614 /* Case 2 */
1615#ifdef LOCKF_DEBUG
1616 if (lockf_debug & 2)
1617 printf("overlap contains lock\n");
1618#endif /* LOCKF_DEBUG */
705 return (2);
1619 res = 2;
1620 break;
706 }
1621 }
707 if (start <= lf->lf_start &&
708 (end == -1 ||
709 (lf->lf_end != -1 && end >= lf->lf_end))) {
1622 if (start <= lf->lf_start && end >= lf->lf_end) {
710 /* Case 3 */
711#ifdef LOCKF_DEBUG
712 if (lockf_debug & 2)
713 printf("lock contains overlap\n");
714#endif /* LOCKF_DEBUG */
1623 /* Case 3 */
1624#ifdef LOCKF_DEBUG
1625 if (lockf_debug & 2)
1626 printf("lock contains overlap\n");
1627#endif /* LOCKF_DEBUG */
715 return (3);
1628 res = 3;
1629 break;
716 }
1630 }
717 if ((lf->lf_start < start) &&
718 ((lf->lf_end >= start) || (lf->lf_end == -1))) {
1631 if (lf->lf_start < start && lf->lf_end >= start) {
719 /* Case 4 */
720#ifdef LOCKF_DEBUG
721 if (lockf_debug & 2)
722 printf("overlap starts before lock\n");
723#endif /* LOCKF_DEBUG */
1632 /* Case 4 */
1633#ifdef LOCKF_DEBUG
1634 if (lockf_debug & 2)
1635 printf("overlap starts before lock\n");
1636#endif /* LOCKF_DEBUG */
724 return (4);
1637 res = 4;
1638 break;
725 }
1639 }
726 if ((lf->lf_start > start) &&
727 (end != -1) &&
728 ((lf->lf_end > end) || (lf->lf_end == -1))) {
1640 if (lf->lf_start > start && lf->lf_end > end) {
729 /* Case 5 */
730#ifdef LOCKF_DEBUG
731 if (lockf_debug & 2)
732 printf("overlap ends after lock\n");
733#endif /* LOCKF_DEBUG */
1641 /* Case 5 */
1642#ifdef LOCKF_DEBUG
1643 if (lockf_debug & 2)
1644 printf("overlap ends after lock\n");
1645#endif /* LOCKF_DEBUG */
734 return (5);
1646 res = 5;
1647 break;
735 }
736 panic("lf_findoverlap: default");
737 }
1648 }
1649 panic("lf_findoverlap: default");
1650 }
738 return (0);
1651 return (res);
739}
740
741/*
1652}
1653
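The six-way classification is easiest to see on concrete numbers. A stand-alone sketch, using hypothetical closed ranges and applying the tests in the same order as the function above:

#include <stdio.h>

/*
 * Classify how an existing range [ostart..oend] overlaps a new request
 * [start..end].
 */
static int
classify(long ostart, long oend, long start, long end)
{
	if (start > oend || ostart > end)
		return (0);		/* no overlap */
	if (ostart == start && oend == end)
		return (1);		/* overlap == lock */
	if (ostart <= start && oend >= end)
		return (2);		/* overlap contains lock */
	if (start <= ostart && end >= oend)
		return (3);		/* lock contains overlap */
	if (ostart < start && oend >= start)
		return (4);		/* overlap starts before lock */
	if (ostart > start && oend > end)
		return (5);		/* overlap ends after lock */
	return (-1);			/* unreachable for well-formed ranges */
}

int
main(void)
{
	/* Existing lock covers [0..99], the new request covers [50..150]. */
	printf("case %d\n", classify(0, 99, 50, 150));	/* prints "case 4" */
	return (0);
}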
1654/*
742 * Split a lock and a contained region into
743 * two or three locks as necessary.
1655 * Split the existing 'lock1', based on the extent of the lock
1656 * described by 'lock2'. The existing lock should cover 'lock2'
1657 * entirely.
1658 *
1659 * Any pending locks which have been unblocked are added to
1660 * 'granted'
744 */
745static void
1661 */
1662static void
746lf_split(lock1, lock2, split)
747 struct lockf *lock1;
748 struct lockf *lock2;
749 struct lockf **split;
1663lf_split(struct lockf *state, struct lockf_entry *lock1,
1664 struct lockf_entry *lock2, struct lockf_entry_list *granted)
750{
1665{
751 struct lockf *splitlock;
1666 struct lockf_entry *splitlock;
752
753#ifdef LOCKF_DEBUG
754 if (lockf_debug & 2) {
755 lf_print("lf_split", lock1);
756 lf_print("splitting from", lock2);
757 }
758#endif /* LOCKF_DEBUG */
759 /*
1667
1668#ifdef LOCKF_DEBUG
1669 if (lockf_debug & 2) {
1670 lf_print("lf_split", lock1);
1671 lf_print("splitting from", lock2);
1672 }
1673#endif /* LOCKF_DEBUG */
1674 /*
760 * Check to see if spliting into only two pieces.
1675 * Check to see if we don't need to split at all.
761 */
762 if (lock1->lf_start == lock2->lf_start) {
1676 */
1677 if (lock1->lf_start == lock2->lf_start) {
763 lock1->lf_start = lock2->lf_end + 1;
764 lock2->lf_next = lock1;
1678 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
765 return;
766 }
767 if (lock1->lf_end == lock2->lf_end) {
1679 return;
1680 }
1681 if (lock1->lf_end == lock2->lf_end) {
768 lock1->lf_end = lock2->lf_start - 1;
769 lock2->lf_next = lock1->lf_next;
770 lock1->lf_next = lock2;
1682 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
771 return;
772 }
773 /*
774 * Make a new lock consisting of the last part of
1683 return;
1684 }
1685 /*
1686 * Make a new lock consisting of the last part of
775 * the encompassing lock. We use the preallocated
776 * splitlock so we don't have to block.
1687 * the encompassing lock.
777 */
1688 */
778 splitlock = *split;
779 KASSERT(splitlock != NULL, ("no split"));
780 *split = splitlock->lf_next;
781 bcopy(lock1, splitlock, sizeof *splitlock);
1689 splitlock = lf_alloc_lock(lock1->lf_owner);
1690 memcpy(splitlock, lock1, sizeof *splitlock);
1691 if (splitlock->lf_flags & F_REMOTE)
1692 vref(splitlock->lf_vnode);
1693
1694 /*
1695 * This cannot cause a deadlock since any edges we would add
1696 * to splitlock already exist in lock1. We must be sure to add
1697 * necessary dependencies to splitlock before we reduce lock1,
1698 * otherwise we may accidentally grant a pending lock that
1699 * was blocked by the tail end of lock1.
1700 */
782 splitlock->lf_start = lock2->lf_end + 1;
1701 splitlock->lf_start = lock2->lf_end + 1;
783 TAILQ_INIT(&splitlock->lf_blkhd);
784 lock1->lf_end = lock2->lf_start - 1;
1702 LIST_INIT(&splitlock->lf_outedges);
1703 LIST_INIT(&splitlock->lf_inedges);
1704 sx_xlock(&lf_owner_graph_lock);
1705 lf_add_incoming(state, splitlock);
1706 sx_xunlock(&lf_owner_graph_lock);
1707
1708 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1709
785 /*
786 * OK, now link it in
787 */
1710 /*
1711 * OK, now link it in
1712 */
788 splitlock->lf_next = lock1->lf_next;
789 lock2->lf_next = splitlock;
790 lock1->lf_next = lock2;
1713 lf_insert_lock(state, splitlock);
791}
792
1714}
1715
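A sketch of the range arithmetic behind lf_split, with hypothetical offsets: removing an inner range [istart..iend] from an enclosing lock [ostart..oend] leaves at most two pieces, [ostart..istart-1] and [iend+1..oend]; when the inner range is flush with either end only one piece remains and no new entry is needed.

#include <stdio.h>

static void
split(long ostart, long oend, long istart, long iend)
{
	if (istart > ostart)
		printf("keep [%ld..%ld]\n", ostart, istart - 1);
	if (iend < oend)
		printf("keep [%ld..%ld]\n", iend + 1, oend);
}

int
main(void)
{
	split(0, 99, 40, 59);	/* keeps [0..39] and [60..99] */
	split(0, 99, 0, 59);	/* keeps [60..99] only */
	return (0);
}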
1716struct clearlock {
1717 STAILQ_ENTRY(clearlock) link;
1718 struct vnode *vp;
1719 struct flock fl;
1720};
1721STAILQ_HEAD(clearlocklist, clearlock);
1722
1723void
1724lf_clearremotesys(int sysid)
1725{
1726 struct lockf *ls;
1727 struct lockf_entry *lf;
1728 struct clearlock *cl;
1729 struct clearlocklist locks;
1730
1731 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
1732
1733 /*
1734 * In order to keep the locking simple, we iterate over the
1735 * active lock lists to build a list of locks that need
1736 * releasing. We then call VOP_ADVLOCK for each one in turn.
1737 *
1738 * We take an extra reference to the vnode for the duration to
1739 * make sure it doesn't go away before we are finished.
1740 */
1741 STAILQ_INIT(&locks);
1742 sx_xlock(&lf_lock_states_lock);
1743 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1744 sx_xlock(&ls->ls_lock);
1745 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1746 if (lf->lf_owner->lo_sysid != sysid)
1747 continue;
1748
1749 cl = malloc(sizeof(struct clearlock), M_LOCKF,
1750 M_WAITOK);
1751 cl->vp = lf->lf_vnode;
1752 vref(cl->vp);
1753 cl->fl.l_start = lf->lf_start;
1754 if (lf->lf_end == OFF_MAX)
1755 cl->fl.l_len = 0;
1756 else
1757 cl->fl.l_len =
1758 lf->lf_end - lf->lf_start + 1;
1759 cl->fl.l_whence = SEEK_SET;
1760 cl->fl.l_type = F_UNLCK;
1761 cl->fl.l_pid = lf->lf_owner->lo_pid;
1762 cl->fl.l_sysid = sysid;
1763 STAILQ_INSERT_TAIL(&locks, cl, link);
1764 }
1765 sx_xunlock(&ls->ls_lock);
1766 }
1767 sx_xunlock(&lf_lock_states_lock);
1768
1769 while ((cl = STAILQ_FIRST(&locks)) != NULL) {
1770 STAILQ_REMOVE_HEAD(&locks, link);
1771 VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE);
1772 vrele(cl->vp);
1773 free(cl, M_LOCKF);
1774 }
1775}
1776
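The "collect under the lock, act after dropping it" pattern used above can be illustrated with a small userspace sketch (pthread mutex standing in for the sx locks, printf standing in for VOP_ADVLOCK; all names are illustrative): while the lock is held we only copy out the work to a private list, and the potentially blocking action happens with no locks held.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;
	int id;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[] = { 1, 2, 3 };	/* stands in for the active lock lists */

int
main(void)
{
	struct work *head = NULL, *w;
	size_t i;

	/* Phase 1: copy out the work while holding the lock. */
	pthread_mutex_lock(&table_lock);
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if ((w = malloc(sizeof(*w))) == NULL)
			abort();
		w->id = table[i];
		w->next = head;
		head = w;
	}
	pthread_mutex_unlock(&table_lock);

	/* Phase 2: the potentially blocking action runs with no locks held. */
	while ((w = head) != NULL) {
		head = w->next;
		printf("releasing %d\n", w->id);
		free(w);
	}
	return (0);
}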
1777int
1778lf_countlocks(int sysid)
1779{
1780 int i;
1781 struct lock_owner *lo;
1782 int count;
1783
1784 count = 0;
1785 sx_xlock(&lf_lock_owners_lock);
1786 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
1787 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
1788 if (lo->lo_sysid == sysid)
1789 count += lo->lo_refs;
1790 sx_xunlock(&lf_lock_owners_lock);
1791
1792 return (count);
1793}
1794
1795#ifdef LOCKF_DEBUG
1796
793/*
1797/*
794 * Wakeup a blocklist
1798 * Return non-zero if y is reachable from x using a brute force
1799 * search. If reachable and path is non-null, return the route taken
1800 * in path.
795 */
1801 */
1802static int
1803graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
1804 struct owner_vertex_list *path)
1805{
1806 struct owner_edge *e;
1807
1808 if (x == y) {
1809 if (path)
1810 TAILQ_INSERT_HEAD(path, x, v_link);
1811 return 1;
1812 }
1813
1814 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
1815 if (graph_reaches(e->e_to, y, path)) {
1816 if (path)
1817 TAILQ_INSERT_HEAD(path, x, v_link);
1818 return 1;
1819 }
1820 }
1821 return 0;
1822}
1823
1824/*
1825 * Perform consistency checks on the graph. Make sure the values of
1826 * v_order are correct. If checkorder is non-zero, check no vertex can
1827 * reach any other vertex with a smaller order.
1828 */
796static void
1829static void
797lf_wakelock(listhead)
798 struct lockf *listhead;
1830graph_check(struct owner_graph *g, int checkorder)
799{
1831{
800 register struct lockf *wakelock;
1832 int i, j;
801
1833
802 while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
803 wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
804 TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
805 wakelock->lf_next = NOLOCKF;
1834 for (i = 0; i < g->g_size; i++) {
1835 if (!g->g_vertices[i]->v_owner)
1836 continue;
1837 KASSERT(g->g_vertices[i]->v_order == i,
1838 ("lock graph vertices disordered"));
1839 if (checkorder) {
1840 for (j = 0; j < i; j++) {
1841 if (!g->g_vertices[j]->v_owner)
1842 continue;
1843 KASSERT(!graph_reaches(g->g_vertices[i],
1844 g->g_vertices[j], NULL),
1845 ("lock graph vertices disordered"));
1846 }
1847 }
1848 }
1849}
1850
1851static void
1852graph_print_vertices(struct owner_vertex_list *set)
1853{
1854 struct owner_vertex *v;
1855
1856 printf("{ ");
1857 TAILQ_FOREACH(v, set, v_link) {
1858 printf("%d:", v->v_order);
1859 lf_print_owner(v->v_owner);
1860 if (TAILQ_NEXT(v, v_link))
1861 printf(", ");
1862 }
1863 printf(" }\n");
1864}
1865
1866#endif
1867
1868/*
1869 * Calculate the sub-set of vertices v from the affected region [y..x]
1870 * where v is reachable from y. Return -1 if a loop was detected
1871 * (i.e. x is reachable from y); otherwise the number of vertices in
1872 * this subset.
1873 */
1874static int
1875graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
1876 struct owner_vertex *y, struct owner_vertex_list *delta)
1877{
1878 uint32_t gen;
1879 struct owner_vertex *v;
1880 struct owner_edge *e;
1881 int n;
1882
1883 /*
1884 * We start with a set containing just y. Then for each vertex
1885 * v in the set so far unprocessed, we add each vertex that v
1886 * has an out-edge to and that is within the affected region
1887 * [y..x]. If we see the vertex x on our travels, stop
1888 * immediately.
1889 */
1890 TAILQ_INIT(delta);
1891 TAILQ_INSERT_TAIL(delta, y, v_link);
1892 v = y;
1893 n = 1;
1894 gen = g->g_gen;
1895 while (v) {
1896 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
1897 if (e->e_to == x)
1898 return -1;
1899 if (e->e_to->v_order < x->v_order
1900 && e->e_to->v_gen != gen) {
1901 e->e_to->v_gen = gen;
1902 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
1903 n++;
1904 }
1905 }
1906 v = TAILQ_NEXT(v, v_link);
1907 }
1908
1909 return (n);
1910}
1911
1912/*
1913 * Calculate the sub-set of vertices v from the affected region [y..x]
1914 * where v reaches x. Return the number of vertices in this subset.
1915 */
1916static int
1917graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
1918 struct owner_vertex *y, struct owner_vertex_list *delta)
1919{
1920 uint32_t gen;
1921 struct owner_vertex *v;
1922 struct owner_edge *e;
1923 int n;
1924
1925 /*
1926 * We start with a set containing just x. Then for each vertex
1927 * v in the set so far unprocessed, we add each vertex that v
1928 * has an in-edge from and that is within the affected region
1929 * [y..x].
1930 */
1931 TAILQ_INIT(delta);
1932 TAILQ_INSERT_TAIL(delta, x, v_link);
1933 v = x;
1934 n = 1;
1935 gen = g->g_gen;
1936 while (v) {
1937 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
1938 if (e->e_from->v_order > y->v_order
1939 && e->e_from->v_gen != gen) {
1940 e->e_from->v_gen = gen;
1941 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
1942 n++;
1943 }
1944 }
1945 v = TAILQ_PREV(v, owner_vertex_list, v_link);
1946 }
1947
1948 return (n);
1949}
1950
1951static int
1952graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
1953{
1954 struct owner_vertex *v;
1955 int i, j;
1956
1957 TAILQ_FOREACH(v, set, v_link) {
1958 for (i = n;
1959 i > 0 && indices[i - 1] > v->v_order; i--)
1960 ;
1961 for (j = n - 1; j >= i; j--)
1962 indices[j + 1] = indices[j];
1963 indices[i] = v->v_order;
1964 n++;
1965 }
1966
1967 return (n);
1968}
1969
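A small stand-alone sketch of this merging step, using hypothetical order values: the scratch array receives first the orders of the deltaF vertices and then those of deltaB, with each value slotted into place by insertion sort so the result is one sorted pool of reusable order numbers.

#include <stdio.h>

static int
add_indices(int *indices, int n, const int *orders, int m)
{
	int i, j, k;

	for (k = 0; k < m; k++) {
		for (i = n; i > 0 && indices[i - 1] > orders[k]; i--)
			;
		for (j = n - 1; j >= i; j--)
			indices[j + 1] = indices[j];
		indices[i] = orders[k];
		n++;
	}
	return (n);
}

int
main(void)
{
	int indices[8], deltaF[] = { 5, 7, 6 }, deltaB[] = { 2, 4 }, n, i;

	n = add_indices(indices, 0, deltaF, 3);
	n = add_indices(indices, n, deltaB, 2);
	for (i = 0; i < n; i++)
		printf("%d ", indices[i]);	/* prints "2 4 5 6 7" */
	printf("\n");
	return (0);
}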
1970static int
1971graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
1972 struct owner_vertex_list *set)
1973{
1974 struct owner_vertex *v, *vlowest;
1975
1976 while (!TAILQ_EMPTY(set)) {
1977 vlowest = NULL;
1978 TAILQ_FOREACH(v, set, v_link) {
1979 if (!vlowest || v->v_order < vlowest->v_order)
1980 vlowest = v;
1981 }
1982 TAILQ_REMOVE(set, vlowest, v_link);
1983 vlowest->v_order = indices[nextunused];
1984 g->g_vertices[vlowest->v_order] = vlowest;
1985 nextunused++;
1986 }
1987
1988 return (nextunused);
1989}
1990
1991static int
1992graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
1993 struct owner_vertex *y)
1994{
1995 struct owner_edge *e;
1996 struct owner_vertex_list deltaF, deltaB;
1997 int nF, nB, n, vi, i;
1998 int *indices;
1999
2000 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2001
2002 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2003 if (e->e_to == y) {
2004 e->e_refs++;
2005 return (0);
2006 }
2007 }
2008
806#ifdef LOCKF_DEBUG
2009#ifdef LOCKF_DEBUG
807 if (lockf_debug & 2)
808 lf_print("lf_wakelock: awakening", wakelock);
809#endif /* LOCKF_DEBUG */
810 wakeup(wakelock);
2010 if (lockf_debug & 8) {
2011 printf("adding edge %d:", x->v_order);
2012 lf_print_owner(x->v_owner);
2013 printf(" -> %d:", y->v_order);
2014 lf_print_owner(y->v_owner);
2015 printf("\n");
811 }
2016 }
2017#endif
2018 if (y->v_order < x->v_order) {
2019 /*
2020 * The new edge violates the order. First find the set
2021 * of affected vertices reachable from y (deltaF) and
2022 * the set of affected vertices that reach x
2023 * (deltaB), using the graph generation number to
2024 * detect whether we have visited a given vertex
2025 * already. We re-order the graph so that each vertex
2026 * in deltaB appears before each vertex in deltaF.
2027 *
2028 * If x is a member of deltaF, then the new edge would
2029 * create a cycle. Otherwise, we may assume that
2030 * deltaF and deltaB are disjoint.
2031 */
2032 g->g_gen++;
2033 if (g->g_gen == 0) {
2034 /*
2035 * Generation wrap.
2036 */
2037 for (vi = 0; vi < g->g_size; vi++) {
2038 g->g_vertices[vi]->v_gen = 0;
2039 }
2040 g->g_gen++;
2041 }
2042 nF = graph_delta_forward(g, x, y, &deltaF);
2043 if (nF < 0) {
2044#ifdef LOCKF_DEBUG
2045 if (lockf_debug & 8) {
2046 struct owner_vertex_list path;
2047 printf("deadlock: ");
2048 TAILQ_INIT(&path);
2049 graph_reaches(y, x, &path);
2050 graph_print_vertices(&path);
2051 }
2052#endif
2053 return (EDEADLK);
2054 }
2055
2056#ifdef LOCKF_DEBUG
2057 if (lockf_debug & 8) {
2058 printf("re-ordering graph vertices\n");
2059 printf("deltaF = ");
2060 graph_print_vertices(&deltaF);
2061 }
2062#endif
2063
2064 nB = graph_delta_backward(g, x, y, &deltaB);
2065
2066#ifdef LOCKF_DEBUG
2067 if (lockf_debug & 8) {
2068 printf("deltaB = ");
2069 graph_print_vertices(&deltaB);
2070 }
2071#endif
2072
2073 /*
2074 * We first build a set of vertex indices (vertex
2075 * order values) that we may use, then we re-assign
2076 * orders first to those vertices in deltaB, then to
2077 * deltaF. Note that the contents of deltaF and deltaB
2078 * may be partially disordered - we perform an
2079 * insertion sort while building our index set.
2080 */
2081 indices = g->g_indexbuf;
2082 n = graph_add_indices(indices, 0, &deltaF);
2083 graph_add_indices(indices, n, &deltaB);
2084
2085 /*
2086 * We must also be sure to maintain the relative
2087 * ordering of deltaF and deltaB when re-assigning
2088 * vertices. We do this by iteratively removing the
2089 * lowest ordered element from the set and assigning
2090 * it the next value from our new ordering.
2091 */
2092 i = graph_assign_indices(g, indices, 0, &deltaB);
2093 graph_assign_indices(g, indices, i, &deltaF);
2094
2095#ifdef LOCKF_DEBUG
2096 if (lockf_debug & 8) {
2097 struct owner_vertex_list set;
2098 TAILQ_INIT(&set);
2099 for (i = 0; i < nB + nF; i++)
2100 TAILQ_INSERT_TAIL(&set,
2101 g->g_vertices[indices[i]], v_link);
2102 printf("new ordering = ");
2103 graph_print_vertices(&set);
2104 }
2105#endif
2106 }
2107
2108 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2109
2110#ifdef LOCKF_DEBUG
2111 if (lockf_debug & 8) {
2112 graph_check(g, TRUE);
2113 }
2114#endif
2115
2116 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2117
2118 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2119 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2120 e->e_refs = 1;
2121 e->e_from = x;
2122 e->e_to = y;
2123
2124 return (0);
812}
813
2125}
2126
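A concrete, hypothetical trace of the re-ordering above: suppose the current order is v0 < v1 < ... < v7 and an edge from x = v6 to y = v2 is added, violating the order. Within the affected region [2..6], say v2 reaches v4 and v5 but not v6, so deltaF = {v2, v4, v5} with orders {2, 4, 5}, while only v3 and v6 itself reach v6, so deltaB = {v3, v6} with orders {3, 6}. The merged index pool is {2, 3, 4, 5, 6}; handing those values out first to deltaB in increasing current order (v3 gets 2, v6 gets 3) and then to deltaF (v2 gets 4, v4 gets 5, v5 gets 6) leaves x with order 3 and y with order 4, so the new edge now respects the ordering while the relative order inside each delta set is preserved.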
2127/*
2128 * Remove an edge x->y from the graph.
2129 */
2130static void
2131graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2132 struct owner_vertex *y)
2133{
2134 struct owner_edge *e;
2135
2136 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2137
2138 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2139 if (e->e_to == y)
2140 break;
2141 }
2142 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2143
2144 e->e_refs--;
2145 if (e->e_refs == 0) {
814#ifdef LOCKF_DEBUG
2146#ifdef LOCKF_DEBUG
2147 if (lockf_debug & 8) {
2148 printf("removing edge %d:", x->v_order);
2149 lf_print_owner(x->v_owner);
2150 printf(" -> %d:", y->v_order);
2151 lf_print_owner(y->v_owner);
2152 printf("\n");
2153 }
2154#endif
2155 LIST_REMOVE(e, e_outlink);
2156 LIST_REMOVE(e, e_inlink);
2157 free(e, M_LOCKF);
2158 }
2159}
2160
815/*
2161/*
2162 * Allocate a vertex for a new lock owner, growing the graph's vertex
2163 * array and scratch index buffer if necessary.
2164 */
2165static struct owner_vertex *
2166graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2167{
2168 struct owner_vertex *v;
2169
2170 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2171
2172 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2173 if (g->g_size == g->g_space) {
2174 g->g_vertices = realloc(g->g_vertices,
2175 2 * g->g_space * sizeof(struct owner_vertex *),
2176 M_LOCKF, M_WAITOK);
2177 free(g->g_indexbuf, M_LOCKF);
2178 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2179 M_LOCKF, M_WAITOK);
2180 g->g_space = 2 * g->g_space;
2181 }
2182 v->v_order = g->g_size;
2183 v->v_gen = g->g_gen;
2184 g->g_vertices[g->g_size] = v;
2185 g->g_size++;
2186
2187 LIST_INIT(&v->v_outedges);
2188 LIST_INIT(&v->v_inedges);
2189 v->v_owner = lo;
2190
2191 return (v);
2192}
2193
2194static void
2195graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2196{
2197 struct owner_vertex *w;
2198 int i;
2199
2200 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2201
2202 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2203 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2204
2205 /*
2206 * Remove from the graph's array and close up the gap,
2207 * renumbering the other vertices.
2208 */
2209 for (i = v->v_order + 1; i < g->g_size; i++) {
2210 w = g->g_vertices[i];
2211 w->v_order--;
2212 g->g_vertices[i - 1] = w;
2213 }
2214 g->g_size--;
2215
2216 free(v, M_LOCKF);
2217}
2218
2219static struct owner_graph *
2220graph_init(struct owner_graph *g)
2221{
2222
2223 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2224 M_LOCKF, M_WAITOK);
2225 g->g_size = 0;
2226 g->g_space = 10;
2227 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2228 g->g_gen = 0;
2229
2230 return (g);
2231}
2232
2233#ifdef LOCKF_DEBUG
2234/*
2235 * Print description of a lock owner
2236 */
2237static void
2238lf_print_owner(struct lock_owner *lo)
2239{
2240
2241 if (lo->lo_flags & F_REMOTE) {
2242 printf("remote pid %d, system %d",
2243 lo->lo_pid, lo->lo_sysid);
2244 } else if (lo->lo_flags & F_FLOCK) {
2245 printf("file %p", lo->lo_id);
2246 } else {
2247 printf("local pid %d", lo->lo_pid);
2248 }
2249}
2250
2251/*
816 * Print out a lock.
817 */
818static void
2252 * Print out a lock.
2253 */
2254static void
819lf_print(tag, lock)
820 char *tag;
821 register struct lockf *lock;
2255lf_print(char *tag, struct lockf_entry *lock)
822{
823
824 printf("%s: lock %p for ", tag, (void *)lock);
2256{
2257
2258 printf("%s: lock %p for ", tag, (void *)lock);
825 if (lock->lf_flags & F_POSIX)
826 printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
827 else
828 printf("id %p", (void *)lock->lf_id);
2259 lf_print_owner(lock->lf_owner);
829 if (lock->lf_inode != (struct inode *)0)
2260 if (lock->lf_inode != (struct inode *)0)
830 printf(" in ino %ju on dev <%s>, %s, start %jd, end %jd",
2261 printf(" in ino %ju on dev <%s>,",
831 (uintmax_t)lock->lf_inode->i_number,
2262 (uintmax_t)lock->lf_inode->i_number,
832 devtoname(lock->lf_inode->i_dev),
833 lock->lf_type == F_RDLCK ? "shared" :
834 lock->lf_type == F_WRLCK ? "exclusive" :
835 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
836 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
2263 devtoname(lock->lf_inode->i_dev));
2264 printf(" %s, start %jd, end ",
2265 lock->lf_type == F_RDLCK ? "shared" :
2266 lock->lf_type == F_WRLCK ? "exclusive" :
2267 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2268 (intmax_t)lock->lf_start);
2269 if (lock->lf_end == OFF_MAX)
2270 printf("EOF");
837 else
2271 else
838 printf(" %s, start %jd, end %jd",
839 lock->lf_type == F_RDLCK ? "shared" :
840 lock->lf_type == F_WRLCK ? "exclusive" :
841 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
842 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
843 if (!TAILQ_EMPTY(&lock->lf_blkhd))
844 printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
2272 printf("%jd", (intmax_t)lock->lf_end);
2273 if (!LIST_EMPTY(&lock->lf_outedges))
2274 printf(" block %p\n",
2275 (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
845 else
846 printf("\n");
847}
848
849static void
2276 else
2277 printf("\n");
2278}
2279
2280static void
850lf_printlist(tag, lock)
851 char *tag;
852 struct lockf *lock;
2281lf_printlist(char *tag, struct lockf_entry *lock)
853{
2282{
854 register struct lockf *lf, *blk;
2283 struct lockf_entry *lf, *blk;
2284 struct lockf_edge *e;
855
856 if (lock->lf_inode == (struct inode *)0)
857 return;
858
859 printf("%s: Lock list for ino %ju on dev <%s>:\n",
860 tag, (uintmax_t)lock->lf_inode->i_number,
861 devtoname(lock->lf_inode->i_dev));
2285
2286 if (lock->lf_inode == (struct inode *)0)
2287 return;
2288
2289 printf("%s: Lock list for ino %ju on dev <%s>:\n",
2290 tag, (uintmax_t)lock->lf_inode->i_number,
2291 devtoname(lock->lf_inode->i_dev));
862 for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
2292 LIST_FOREACH(lf, &lock->lf_inode->i_lockf->ls_active, lf_link) {
863 printf("\tlock %p for ",(void *)lf);
2293 printf("\tlock %p for ",(void *)lf);
864 if (lf->lf_flags & F_POSIX)
865 printf("proc %ld",
866 (long)((struct proc *)lf->lf_id)->p_pid);
867 else
868 printf("id %p", (void *)lf->lf_id);
2294 lf_print_owner(lock->lf_owner);
869 printf(", %s, start %jd, end %jd",
870 lf->lf_type == F_RDLCK ? "shared" :
871 lf->lf_type == F_WRLCK ? "exclusive" :
872 lf->lf_type == F_UNLCK ? "unlock" :
873 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2295 printf(", %s, start %jd, end %jd",
2296 lf->lf_type == F_RDLCK ? "shared" :
2297 lf->lf_type == F_WRLCK ? "exclusive" :
2298 lf->lf_type == F_UNLCK ? "unlock" :
2299 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
874 TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
2300 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2301 blk = e->le_to;
875 printf("\n\t\tlock request %p for ", (void *)blk);
2302 printf("\n\t\tlock request %p for ", (void *)blk);
876 if (blk->lf_flags & F_POSIX)
877 printf("proc %ld",
878 (long)((struct proc *)blk->lf_id)->p_pid);
879 else
880 printf("id %p", (void *)blk->lf_id);
2303 lf_print_owner(blk->lf_owner);
881 printf(", %s, start %jd, end %jd",
882 blk->lf_type == F_RDLCK ? "shared" :
883 blk->lf_type == F_WRLCK ? "exclusive" :
884 blk->lf_type == F_UNLCK ? "unlock" :
885 "unknown", (intmax_t)blk->lf_start,
886 (intmax_t)blk->lf_end);
2304 printf(", %s, start %jd, end %jd",
2305 blk->lf_type == F_RDLCK ? "shared" :
2306 blk->lf_type == F_WRLCK ? "exclusive" :
2307 blk->lf_type == F_UNLCK ? "unlock" :
2308 "unknown", (intmax_t)blk->lf_start,
2309 (intmax_t)blk->lf_end);
887 if (!TAILQ_EMPTY(&blk->lf_blkhd))
2310 if (!LIST_EMPTY(&blk->lf_inedges))
888 panic("lf_printlist: bad list");
889 }
890 printf("\n");
891 }
892}
893#endif /* LOCKF_DEBUG */
2311 panic("lf_printlist: bad list");
2312 }
2313 printf("\n");
2314 }
2315}
2316#endif /* LOCKF_DEBUG */