/*-
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_lockf.c 313729 2017-02-14 13:45:38Z avg $");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0; /* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

struct owner_edge;
struct owner_vertex;
struct owner_vertex_list;
struct owner_graph;

#define NOLOCKF (struct lockf_entry *)0
#define SELF	0x1
#define OTHERS	0x2
static void	 lf_init(void *);
static int	 lf_hash_owner(caddr_t, struct flock *, int);
static int	 lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
    int);
static struct lockf_entry *
		 lf_alloc_lock(struct lock_owner *);
static int	 lf_free_lock(struct lockf_entry *);
static int	 lf_clearlock(struct lockf *, struct lockf_entry *);
static int	 lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int	 lf_blocks(struct lockf_entry *, struct lockf_entry *);
static void	 lf_free_edge(struct lockf_edge *);
static struct lockf_edge *
		 lf_alloc_edge(void);
static void	 lf_alloc_vertex(struct lockf_entry *);
static int	 lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static void	 lf_remove_edge(struct lockf_edge *);
static void	 lf_remove_outgoing(struct lockf_entry *);
static void	 lf_remove_incoming(struct lockf_entry *);
static int	 lf_add_outgoing(struct lockf *, struct lockf_entry *);
static int	 lf_add_incoming(struct lockf *, struct lockf_entry *);
static int	 lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
    int);
static struct lockf_entry *
		 lf_getblock(struct lockf *, struct lockf_entry *);
static int	 lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
static void	 lf_insert_lock(struct lockf *, struct lockf_entry *);
static void	 lf_wakeup_lock(struct lockf *, struct lockf_entry *);
static void	 lf_update_dependancies(struct lockf *, struct lockf_entry *,
    int all, struct lockf_entry_list *);
static void	 lf_set_start(struct lockf *, struct lockf_entry *, off_t,
    struct lockf_entry_list*);
static void	 lf_set_end(struct lockf *, struct lockf_entry *, off_t,
    struct lockf_entry_list*);
static int	 lf_setlock(struct lockf *, struct lockf_entry *,
    struct vnode *, void **cookiep);
static int	 lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void	 lf_split(struct lockf *, struct lockf_entry *,
    struct lockf_entry *, struct lockf_entry_list *);
#ifdef LOCKF_DEBUG
static int	 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path);
static void	 graph_check(struct owner_graph *g, int checkorder);
static void	 graph_print_vertices(struct owner_vertex_list *set);
#endif
static int	 graph_delta_forward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_delta_backward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_add_indices(int *indices, int n,
    struct owner_vertex_list *set);
static int	 graph_assign_indices(struct owner_graph *g, int *indices,
    int nextunused, struct owner_vertex_list *set);
static int	 graph_add_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static void	 graph_remove_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
    struct lock_owner *lo);
static void	 graph_free_vertex(struct owner_graph *g,
    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf_entry *);
static void	 lf_printlist(char *, struct lockf_entry *);
static void	 lf_print_owner(struct lock_owner *);
#endif

/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner_graph below.
 *
 * Locks:
 * (s)		locked by state->ls_lock
 * (S)		locked by lf_lock_states_lock
 * (l)		locked by lf_lock_owners_lock
 * (g)		locked by lf_owner_graph_lock
 * (c)		const until freeing
 */
#define	LOCK_OWNER_HASH_SIZE	256

struct lock_owner {
	LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
	int	lo_refs;	    /* (l) Number of locks referring to this */
	int	lo_flags;	    /* (c) Flags passed to lf_advlock */
	caddr_t	lo_id;		    /* (c) Id value passed to lf_advlock */
	pid_t	lo_pid;		    /* (c) Process Id of the lock owner */
	int	lo_sysid;	    /* (c) System Id of the lock owner */
	struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
};

LIST_HEAD(lock_owner_list, lock_owner);

static struct sx		lf_lock_states_lock;
static struct lockf_list	lf_lock_states; /* (S) */
static struct sx		lf_lock_owners_lock;
static struct lock_owner_list	lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */

/*
 * Structures for deadlock detection.
 *
 * We have two types of directed graph, the first is the set of locks,
 * both active and pending on a vnode. Within this graph, active locks
 * are terminal nodes in the graph (i.e. have no out-going
 * edges). Pending locks have out-going edges to each blocking active
 * lock that prevents the lock from being granted and also to each
 * older pending lock that would block them if it was active. The
 * graph for each vnode is naturally acyclic; new edges are only ever
 * added to or from new nodes (either new pending locks which only add
 * out-going edges or new active locks which only add in-coming edges)
 * therefore they cannot create loops in the lock graph.
 *
 * The second graph is a global graph of lock owners. Each lock owner
 * is a vertex in that graph and an edge is added to the graph
 * whenever an edge is added to a vnode graph, with end points
 * corresponding to owner of the new pending lock and the owner of the
 * lock upon which it waits. In order to prevent deadlock, we only add
 * an edge to this graph if the new edge would not create a cycle.
 *
 * The lock owner graph is topologically sorted, i.e. if a node has
 * any outgoing edges, then it has an order strictly less than any
 * node to which it has an outgoing edge. We preserve this ordering
 * (and detect cycles) on edge insertion using Algorithm PK from the
 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
 * No. 1.7)
 */
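
/*
 * A small worked example of the owner graph may help (illustrative
 * only; the owners and orders below are hypothetical, not taken from
 * the code). Suppose owners A, B and C have topological orders 1, 2
 * and 3 and the graph already contains the edges A->B and B->C.
 * Adding C->A would complete the cycle A->B->C->A, so the insertion
 * is refused and the waiter sees EDEADLK. Adding A->C instead needs
 * no re-ordering at all, since order(A) < order(C) already holds;
 * Algorithm PK only re-numbers vertices when a new edge runs from a
 * higher-order vertex to a lower-order one.
 */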
struct owner_vertex;

struct owner_edge {
	LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
	LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
	int	e_refs;			  /* (g) number of times added */
	struct owner_vertex *e_from;	  /* (c) out-going from here */
	struct owner_vertex *e_to;	  /* (c) in-coming to here */
};
LIST_HEAD(owner_edge_list, owner_edge);

struct owner_vertex {
	TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
	uint32_t v_gen;			  /* (g) workspace for edge insertion */
	int	v_order;		  /* (g) order of vertex in graph */
	struct owner_edge_list v_outedges;/* (g) list of out-edges */
	struct owner_edge_list v_inedges; /* (g) list of in-edges */
	struct lock_owner *v_owner;	  /* (c) corresponding lock owner */
};
TAILQ_HEAD(owner_vertex_list, owner_vertex);

struct owner_graph {
	struct owner_vertex** g_vertices; /* (g) pointers to vertices */
	int	g_size;			  /* (g) number of vertices */
	int	g_space;		  /* (g) space allocated for vertices */
	int	*g_indexbuf;		  /* (g) workspace for loop detection */
	uint32_t g_gen;			  /* (g) increment when re-ordering */
};

static struct sx		lf_owner_graph_lock;
static struct owner_graph	lf_owner_graph;

/*
 * Initialise various structures and locks.
 */
static void
lf_init(void *dummy)
{
	int i;

	sx_init(&lf_lock_states_lock, "lock states lock");
	LIST_INIT(&lf_lock_states);

	sx_init(&lf_lock_owners_lock, "lock owners lock");
	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
		LIST_INIT(&lf_lock_owners[i]);

	sx_init(&lf_owner_graph_lock, "owner graph lock");
	graph_init(&lf_owner_graph);
}
SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);

/*
 * Generate a hash value for a lock owner.
 */
static int
lf_hash_owner(caddr_t id, struct flock *fl, int flags)
{
	uint32_t h;

	if (flags & F_REMOTE) {
		h = HASHSTEP(0, fl->l_pid);
		h = HASHSTEP(h, fl->l_sysid);
	} else if (flags & F_FLOCK) {
		h = ((uintptr_t) id) >> 7;
	} else {
		struct proc *p = (struct proc *) id;
		h = HASHSTEP(0, p->p_pid);
		h = HASHSTEP(h, 0);
	}

	return (h % LOCK_OWNER_HASH_SIZE);
}
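
/*
 * Concrete example of the three owner identities (illustrative; the
 * pid value is made up). A POSIX fcntl() lock placed by a process
 * with pid 42 uses the proc pointer as lo_id and hashes on the pid,
 * so all fcntl() locks of that process share one lock_owner. A BSD
 * flock() lock uses the struct file pointer instead, so locks on two
 * different open files get distinct owners even within a single
 * process, while dup'ed descriptors of one open file share an owner.
 * A remote (F_REMOTE) lock is identified purely by the <l_pid,
 * l_sysid> pair supplied in the struct flock.
 */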
/*
 * Return true if a lock owner matches the details passed to
 * lf_advlock.
 */
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{
	if (flags & F_REMOTE) {
		return lo->lo_pid == fl->l_pid
			&& lo->lo_sysid == fl->l_sysid;
	} else {
		return lo->lo_id == id;
	}
}

static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
	struct lockf_entry *lf;

	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Allocated lock %p\n", lf);
#endif
	if (lo) {
		sx_xlock(&lf_lock_owners_lock);
		lo->lo_refs++;
		sx_xunlock(&lf_lock_owners_lock);
		lf->lf_owner = lo;
	}

	return (lf);
}

static int
lf_free_lock(struct lockf_entry *lock)
{

	KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
	if (--lock->lf_refs > 0)
		return (0);
	/*
	 * Adjust the lock_owner reference count and
	 * reclaim the entry if this is the last lock
	 * for that owner.
	 */
	struct lock_owner *lo = lock->lf_owner;
	if (lo) {
		KASSERT(LIST_EMPTY(&lock->lf_outedges),
		    ("freeing lock with dependencies"));
		KASSERT(LIST_EMPTY(&lock->lf_inedges),
		    ("freeing lock with dependants"));
		sx_xlock(&lf_lock_owners_lock);
		KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
		lo->lo_refs--;
		if (lo->lo_refs == 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				printf("lf_free_lock: freeing lock owner %p\n",
				    lo);
#endif
			if (lo->lo_vertex) {
				sx_xlock(&lf_owner_graph_lock);
				graph_free_vertex(&lf_owner_graph,
				    lo->lo_vertex);
				sx_xunlock(&lf_owner_graph_lock);
			}
			LIST_REMOVE(lo, lo_link);
			free(lo, M_LOCKF);
#ifdef LOCKF_DEBUG
			if (lockf_debug & 4)
				printf("Freed lock owner %p\n", lo);
#endif
		}
		sx_unlock(&lf_lock_owners_lock);
	}
	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
		vrele(lock->lf_vnode);
		lock->lf_vnode = NULL;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Freed lock %p\n", lock);
#endif
	free(lock, M_LOCKF);
	return (1);
}

/*
 * Advisory record locking support
 */
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
	struct lockf *state, *freestate = NULL;
	struct flock *fl = ap->a_fl;
	struct lockf_entry *lock;
	struct vnode *vp = ap->a_vp;
	caddr_t id = ap->a_id;
	int flags = ap->a_flags;
	int hash;
	struct lock_owner *lo;
	off_t start, end, oadd;
	int error;

	/*
	 * Handle the F_UNLCKSYS case first - no need to mess about
	 * creating a lock owner for this one.
	 */
	if (ap->a_op == F_UNLCKSYS) {
		lf_clearremotesys(fl->l_sysid);
		return (0);
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
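
	/*
	 * Worked example of the conversion above (illustrative
	 * offsets): with l_whence == SEEK_SET, l_start == 100 and
	 * l_len == 10 the lock covers the range [100..109]. A
	 * negative length counts backwards from l_start, so
	 * l_start == 100, l_len == -10 yields [90..99]. A zero
	 * length extends the lock to the end of any possible file,
	 * i.e. [100..OFF_MAX].
	 */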

retry_setlock:

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	VI_LOCK(vp);
	if ((*statep) == NULL) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			VI_UNLOCK(vp);
			return (0);
		}
	}
	VI_UNLOCK(vp);

	/*
	 * Map our arguments to an existing lock owner or create one
	 * if this is the first time we have seen this owner.
	 */
	hash = lf_hash_owner(id, fl, flags);
	sx_xlock(&lf_lock_owners_lock);
	LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
		if (lf_owner_matches(lo, id, fl, flags))
			break;
	if (!lo) {
		/*
		 * We initialise the lock with a reference
		 * count which matches the new lockf_entry
		 * structure created below.
		 */
		lo = malloc(sizeof(struct lock_owner), M_LOCKF,
		    M_WAITOK|M_ZERO);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 4)
			printf("Allocated lock owner %p\n", lo);
#endif

		lo->lo_refs = 1;
		lo->lo_flags = flags;
		lo->lo_id = id;
		if (flags & F_REMOTE) {
			lo->lo_pid = fl->l_pid;
			lo->lo_sysid = fl->l_sysid;
		} else if (flags & F_FLOCK) {
			lo->lo_pid = -1;
			lo->lo_sysid = 0;
		} else {
			struct proc *p = (struct proc *) id;
			lo->lo_pid = p->p_pid;
			lo->lo_sysid = 0;
		}
		lo->lo_vertex = NULL;

#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			printf("lf_advlockasync: new lock owner %p ", lo);
			lf_print_owner(lo);
			printf("\n");
		}
#endif

		LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
	} else {
		/*
		 * We have seen this lock owner before, increase its
		 * reference count to account for the new lockf_entry
		 * structure we create below.
		 */
		lo->lo_refs++;
	}
	sx_xunlock(&lf_lock_owners_lock);

	/*
	 * Create the lockf structure. We initialise the lf_owner
	 * field here instead of in lf_alloc_lock() to avoid paying
	 * the lf_lock_owners_lock tax twice.
	 */
	lock = lf_alloc_lock(NULL);
	lock->lf_refs = 1;
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_owner = lo;
	lock->lf_vnode = vp;
	if (flags & F_REMOTE) {
		/*
		 * For remote locks, the caller may release its ref to
		 * the vnode at any time - we have to ref it here to
		 * prevent it from being recycled unexpectedly.
		 */
		vref(vp);
	}

	/*
	 * XXX The problem is that VTOI is ufs specific, so it will
	 * break LOCKF_DEBUG for all FS's other than UFS because
	 * it casts the vnode->data ptr to struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	LIST_INIT(&lock->lf_outedges);
	LIST_INIT(&lock->lf_inedges);
	lock->lf_async_task = ap->a_task;
	lock->lf_flags = ap->a_flags;

	/*
	 * Do the requested operation. First find our state structure
	 * and create a new one if necessary - the caller's *statep
	 * variable and the state's ls_threads count is protected by
	 * the vnode interlock.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		lf_free_lock(lock);
		return (ENOENT);
	}

	/*
	 * Allocate a state structure if necessary.
	 */
	state = *statep;
	if (state == NULL) {
		struct lockf *ls;

		VI_UNLOCK(vp);

		ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
		sx_init(&ls->ls_lock, "ls_lock");
		LIST_INIT(&ls->ls_active);
		LIST_INIT(&ls->ls_pending);
		ls->ls_threads = 1;

		sx_xlock(&lf_lock_states_lock);
		LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
		sx_xunlock(&lf_lock_states_lock);

		/*
		 * Cope if we lost a race with some other thread while
		 * trying to allocate memory.
		 */
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
			lf_free_lock(lock);
			return (ENOENT);
		}
		if ((*statep) == NULL) {
			state = *statep = ls;
			VI_UNLOCK(vp);
		} else {
			state = *statep;
			state->ls_threads++;
			VI_UNLOCK(vp);

			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
		}
	} else {
		state->ls_threads++;
		VI_UNLOCK(vp);
	}

	sx_xlock(&state->ls_lock);
	/*
	 * Recheck the doomed vnode after state->ls_lock is
	 * locked. lf_purgelocks() requires that no new threads add
	 * pending locks when the vnode is marked with the VI_DOOMED
	 * flag.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		state->ls_threads--;
		wakeup(state);
		VI_UNLOCK(vp);
		sx_xunlock(&state->ls_lock);
		lf_free_lock(lock);
		return (ENOENT);
	}
	VI_UNLOCK(vp);

	switch (ap->a_op) {
	case F_SETLK:
		error = lf_setlock(state, lock, vp, ap->a_cookiep);
		break;

	case F_UNLCK:
		error = lf_clearlock(state, lock);
		lf_free_lock(lock);
		break;

	case F_GETLK:
		error = lf_getlock(state, lock, fl);
		lf_free_lock(lock);
		break;

	case F_CANCEL:
		if (ap->a_cookiep)
			error = lf_cancel(state, lock, *ap->a_cookiep);
		else
			error = EINVAL;
		lf_free_lock(lock);
		break;

	default:
		lf_free_lock(lock);
		error = EINVAL;
		break;
	}

#ifdef DIAGNOSTIC
	/*
	 * Check for some can't happen stuff. In this case, the active
	 * lock list becoming disordered or containing mutually
	 * blocking locks. We also check the pending list for locks
	 * which should be active (i.e. have no out-going edges).
	 */
	LIST_FOREACH(lock, &state->ls_active, lf_link) {
		struct lockf_entry *lf;
		if (LIST_NEXT(lock, lf_link))
			KASSERT((lock->lf_start
			    <= LIST_NEXT(lock, lf_link)->lf_start),
			    ("locks disordered"));
		LIST_FOREACH(lf, &state->ls_active, lf_link) {
			if (lock == lf)
				break;
			KASSERT(!lf_blocks(lock, lf),
			    ("two conflicting active locks"));
			if (lock->lf_owner == lf->lf_owner)
				KASSERT(!lf_overlaps(lock, lf),
				    ("two overlapping locks from same owner"));
		}
	}
	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
		    ("pending lock which should be active"));
	}
#endif
	sx_xunlock(&state->ls_lock);

	/*
	 * If we have removed the last active lock on the vnode and
	 * this is the last thread that was in-progress, we can free
	 * the state structure. We update the caller's pointer inside
	 * the vnode interlock but call free outside.
	 *
	 * XXX alternatively, keep the state structure around until
	 * the filesystem recycles - requires a callback from the
	 * filesystem.
	 */
	VI_LOCK(vp);

	state->ls_threads--;
	wakeup(state);
	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("freeing state with pending locks"));
		freestate = state;
		*statep = NULL;
	}

	VI_UNLOCK(vp);

	if (freestate != NULL) {
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(freestate, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&freestate->ls_lock);
		free(freestate, M_LOCKF);
		freestate = NULL;
	}

	if (error == EDOOFUS) {
		KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
		goto retry_setlock;
	}
	return (error);
}

int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
	struct vop_advlockasync_args a;

	a.a_vp = ap->a_vp;
	a.a_id = ap->a_id;
	a.a_op = ap->a_op;
	a.a_fl = ap->a_fl;
	a.a_flags = ap->a_flags;
	a.a_task = NULL;
	a.a_cookiep = NULL;

	return (lf_advlockasync(&a, statep, size));
}

void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
	struct lockf *state;
	struct lockf_entry *lock, *nlock;

	/*
	 * For this to work correctly, the caller must ensure that no
	 * other threads enter the locking system for this vnode,
	 * e.g. by checking VI_DOOMED. We wake up any threads that are
	 * sleeping waiting for locks on this vnode and then free all
	 * the remaining locks.
	 */
	VI_LOCK(vp);
	KASSERT(vp->v_iflag & VI_DOOMED,
	    ("lf_purgelocks: vp %p has not vgone yet", vp));
	state = *statep;
	if (state) {
		*statep = NULL;
		state->ls_threads++;
		VI_UNLOCK(vp);

		sx_xlock(&state->ls_lock);
		sx_xlock(&lf_owner_graph_lock);
		LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_remove_outgoing(lock);
			lf_remove_incoming(lock);

			/*
			 * If it's an async lock, we can just free it
			 * here, otherwise we let the sleeping thread
			 * free it.
			 */
			if (lock->lf_async_task) {
				lf_free_lock(lock);
			} else {
				lock->lf_flags |= F_INTR;
				wakeup(lock);
			}
		}
		sx_xunlock(&lf_owner_graph_lock);
		sx_xunlock(&state->ls_lock);

		/*
		 * Wait for all other threads, sleeping and otherwise,
		 * to leave.
		 */
		VI_LOCK(vp);
		while (state->ls_threads > 1)
			msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
		VI_UNLOCK(vp);

		/*
		 * We can just free all the active locks since they
		 * will have no dependencies (we removed them all
		 * above). We don't need to bother locking since we
		 * are the last thread using this state structure.
		 */
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("lock pending for %p", state));
		LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_free_lock(lock);
		}
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(state, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&state->ls_lock);
		free(state, M_LOCKF);
	} else {
		VI_UNLOCK(vp);
	}
}

/*
 * Return non-zero if locks 'x' and 'y' overlap.
 */
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}

/*
 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
 */
static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

	return x->lf_owner != y->lf_owner
		&& (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
		&& lf_overlaps(x, y);
}
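
/*
 * Example of the blocking rules above (illustrative ranges): read
 * locks [0..9] and [5..14] from different owners overlap but do not
 * block each other; a write lock [5..14] blocks, and is blocked by,
 * any overlapping lock of a different owner. Locks of the same
 * owner never block each other, which is what lets an owner upgrade
 * or downgrade its own locks in place.
 */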
/*
 * Allocate a lock edge from the free list
 */
static struct lockf_edge *
lf_alloc_edge(void)
{

	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

/*
 * Free a lock edge.
 */
static void
lf_free_edge(struct lockf_edge *e)
{

	free(e, M_LOCKF);
}

/*
 * Ensure that the lock's owner has a corresponding vertex in the
 * owner graph.
 */
static void
lf_alloc_vertex(struct lockf_entry *lock)
{
	struct owner_graph *g = &lf_owner_graph;

	if (!lock->lf_owner->lo_vertex)
		lock->lf_owner->lo_vertex =
			graph_alloc_vertex(g, lock->lf_owner);
}

/*
 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
 * the new edge would cause a cycle in the owner graph.
 */
static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_edge *e;
	int error;

#ifdef DIAGNOSTIC
	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
		KASSERT(e->le_to != y, ("adding lock edge twice"));
#endif

	/*
	 * Make sure the two owners have entries in the owner graph.
	 */
	lf_alloc_vertex(x);
	lf_alloc_vertex(y);

	error = graph_add_edge(g, x->lf_owner->lo_vertex,
	    y->lf_owner->lo_vertex);
	if (error)
		return (error);

	e = lf_alloc_edge();
	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
	e->le_from = x;
	e->le_to = y;

	return (0);
}

/*
 * Remove an edge from the lock graph.
 */
static void
lf_remove_edge(struct lockf_edge *e)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_entry *x = e->le_from;
	struct lockf_entry *y = e->le_to;

	graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
	LIST_REMOVE(e, le_outlink);
	LIST_REMOVE(e, le_inlink);
	e->le_from = NULL;
	e->le_to = NULL;
	lf_free_edge(e);
}

/*
 * Remove all out-going edges from lock x.
 */
static void
lf_remove_outgoing(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Remove all in-coming edges from lock x.
 */
static void
lf_remove_incoming(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
		lf_remove_edge(e);
	}
}
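
/*
 * Example of how edges accumulate (illustrative; the owners and
 * ranges are hypothetical). With active write locks A [0..9] and B
 * [10..19] held by two other owners, a new pending write lock P
 * [5..15] gains the out-going edges P->A and P->B via
 * lf_add_outgoing below. If a write request Q [12..12] from a
 * fourth owner arrives while P still waits, Q gains Q->B (a
 * blocking active lock) and Q->P (an older pending lock), which
 * preserves the first come, first served ordering described
 * earlier.
 */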
/*
 * Walk the list of locks for the file and create an out-going edge
 * from lock to each blocking lock.
 */
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	/*
	 * We also need to add edges to sleeping locks that block
	 * us. This ensures that lf_wakeup_lock cannot grant two
	 * mutually blocking locks simultaneously and also enforces a
	 * 'first come, first served' fairness model. Note that this
	 * only happens if we are blocked by at least one active lock
	 * due to the call to lf_getblock in lf_setlock below.
	 */
	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;
		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	return (0);
}

/*
 * Walk the list of pending locks for the file and create an in-coming
 * edge from lock to each blocking lock.
 */
static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(overlap, lock);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_incoming(lock);
			return (error);
		}
	}
	return (0);
}

/*
 * Insert lock into the active list, keeping list entries ordered by
 * increasing values of lf_start.
 */
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *lfprev;

	if (LIST_EMPTY(&state->ls_active)) {
		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
		return;
	}

	lfprev = NULL;
	LIST_FOREACH(lf, &state->ls_active, lf_link) {
		if (lf->lf_start > lock->lf_start) {
			LIST_INSERT_BEFORE(lf, lock, lf_link);
			return;
		}
		lfprev = lf;
	}
	LIST_INSERT_AFTER(lfprev, lock, lf_link);
}

/*
 * Wake up a sleeping lock and remove it from the pending list now
 * that all its dependencies have been resolved. The caller should
 * arrange for the lock to be added to the active list, adjusting any
 * existing locks for the same owner as needed.
 */
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

	/*
	 * Remove from ls_pending list and wake up the caller
	 * or start the async notification, as appropriate.
	 */
	LIST_REMOVE(wakelock, lf_link);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_wakeup_lock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
	if (wakelock->lf_async_task) {
		taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
	} else {
		wakeup(wakelock);
	}
}
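
/*
 * Example for lf_update_dependancies below (illustrative ranges):
 * owner A holds a write lock [0..99] and owner B sleeps on a write
 * lock [50..59], giving the edge B->A. If A shrinks its lock to
 * [0..49] via lf_set_end, the update runs with all == FALSE, sees
 * that A no longer blocks B, removes the edge and, since B then has
 * no remaining out-going edges, wakes B and places it on 'granted'.
 * A full unlock instead passes all == TRUE, dropping every
 * in-coming edge unconditionally.
 */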
/*
 * Re-check all dependent locks and remove edges to locks that we no
 * longer block. If 'all' is non-zero, the lock has been removed and
 * we must remove all the dependencies, otherwise it has simply been
 * reduced but remains active. Any pending locks which have been
 * unblocked are added to 'granted'.
 */
static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
    struct lockf_entry_list *granted)
{
	struct lockf_edge *e, *ne;
	struct lockf_entry *deplock;

	LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
		deplock = e->le_from;
		if (all || !lf_blocks(lock, deplock)) {
			sx_xlock(&lf_owner_graph_lock);
			lf_remove_edge(e);
			sx_xunlock(&lf_owner_graph_lock);
			if (LIST_EMPTY(&deplock->lf_outedges)) {
				lf_wakeup_lock(state, deplock);
				LIST_INSERT_HEAD(granted, deplock, lf_link);
			}
		}
	}
}

/*
 * Set the start of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
    struct lockf_entry_list *granted)
{

	KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
	lock->lf_start = new_start;
	LIST_REMOVE(lock, lf_link);
	lf_insert_lock(state, lock);
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Set the end of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
    struct lockf_entry_list *granted)
{

	KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
	lock->lf_end = new_end;
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Add a lock to the active list, updating or removing any current
 * locks owned by the same owner and processing any pending locks that
 * become unblocked as a result. This code is also used for unlock
 * since the logic for updating existing locks is identical.
 *
 * As a result of processing the new lock, we may unblock existing
 * pending locks as a result of downgrading/unlocking. We simply
 * activate the newly granted locks by looping.
 *
 * Since the new lock already has its dependencies set up, we always
 * add it to the list (unless it's an unlock request). This may
 * fragment the lock list in some pathological cases but it's probably
 * not a real problem.
 */
static void
lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap, *lf;
	struct lockf_entry_list granted;
	int ovcase;

	LIST_INIT(&granted);
	LIST_INSERT_HEAD(&granted, lock, lf_link);

	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);

		/*
		 * Skip over locks owned by other processes. Handle
		 * any locks that overlap and are owned by ourselves.
		 */
		overlap = LIST_FIRST(&state->ls_active);
		for (;;) {
			ovcase = lf_findoverlap(&overlap, lock, SELF);

#ifdef LOCKF_DEBUG
			if (ovcase && (lockf_debug & 2)) {
				printf("lf_setlock: overlap %d", ovcase);
				lf_print("", overlap);
			}
#endif
			/*
			 * Six cases:
			 *	0) no overlap
			 *	1) overlap == lock
			 *	2) overlap contains lock
			 *	3) lock contains overlap
			 *	4) overlap starts before lock
			 *	5) overlap ends after lock
			 */
			switch (ovcase) {
			case 0: /* no overlap */
				break;

			case 1: /* overlap == lock */
				/*
				 * We have already setup the
				 * dependants for the new lock, taking
				 * into account a possible downgrade
				 * or unlock. Remove the old lock.
				 */
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
				    &granted);
				lf_free_lock(overlap);
				break;

			case 2: /* overlap contains lock */
				/*
				 * Just split the existing lock.
				 */
				lf_split(state, overlap, lock, &granted);
				break;

			case 3: /* lock contains overlap */
				/*
				 * Delete the overlap and advance to
				 * the next entry in the list.
				 */
				lf = LIST_NEXT(overlap, lf_link);
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
				    &granted);
				lf_free_lock(overlap);
				overlap = lf;
				continue;

			case 4: /* overlap starts before lock */
				/*
				 * Just update the overlap end and
				 * move on.
				 */
				lf_set_end(state, overlap, lock->lf_start - 1,
				    &granted);
				overlap = LIST_NEXT(overlap, lf_link);
				continue;

			case 5: /* overlap ends after lock */
				/*
				 * Change the start of overlap and
				 * re-insert.
				 */
				lf_set_start(state, overlap, lock->lf_end + 1,
				    &granted);
				break;
			}
			break;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			if (lock->lf_type != F_UNLCK)
				lf_print("lf_activate_lock: activated", lock);
			else
				lf_print("lf_activate_lock: unlocked", lock);
			lf_printlist("lf_activate_lock", lock);
		}
#endif /* LOCKF_DEBUG */
		if (lock->lf_type != F_UNLCK)
			lf_insert_lock(state, lock);
	}
}

/*
 * Cancel a pending lock request, either as a result of a signal or a
 * cancel request for an async lock.
 */
static void
lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry_list granted;

	/*
	 * Note it is theoretically possible that cancelling this lock
	 * may allow some other pending lock to become
	 * active. Consider this case:
	 *
	 * Owner	Action		Result		Dependencies
	 *
	 * A:		lock [0..0]	succeeds
	 * B:		lock [2..2]	succeeds
	 * C:		lock [1..2]	blocked		C->B
	 * D:		lock [0..1]	blocked		C->B,D->A,D->C
	 * A:		unlock [0..0]			C->B,D->C
	 * C:		cancel [1..2]
	 */

	LIST_REMOVE(lock, lf_link);

	/*
	 * Removing out-going edges is simple.
	 */
	sx_xlock(&lf_owner_graph_lock);
	lf_remove_outgoing(lock);
	sx_xunlock(&lf_owner_graph_lock);

	/*
	 * Removing in-coming edges may allow some other lock to
	 * become active - we use lf_update_dependancies to figure
	 * this out.
	 */
	LIST_INIT(&granted);
	lf_update_dependancies(state, lock, TRUE, &granted);
	lf_free_lock(lock);

	/*
	 * Feed any newly active locks to lf_activate_lock.
	 */
	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);
		lf_activate_lock(state, lock);
	}
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
    void **cookiep)
{
	static char lockstr[] = "lockf";
	int priority, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	if (!(lock->lf_flags & F_NOINTR))
		priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	if (lf_getblock(state, lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0
		    && lock->lf_async_task == NULL) {
			lf_free_lock(lock);
			error = EAGAIN;
			goto out;
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			lf_activate_lock(state, lock);
			lock->lf_type = F_WRLCK;
		}

		/*
		 * We are blocked. Create edges to each blocking lock,
		 * checking for deadlock using the owner graph. For
		 * simplicity, we run deadlock detection for all
		 * locks, posix and otherwise.
		 */
		sx_xlock(&lf_owner_graph_lock);
		error = lf_add_outgoing(state, lock);
		sx_xunlock(&lf_owner_graph_lock);

		if (error) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				lf_print("lf_setlock: deadlock", lock);
#endif
			lf_free_lock(lock);
			goto out;
		}

		/*
		 * We have added edges to everything that blocks
		 * us. Sleep until they all go away.
		 */
		LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			struct lockf_edge *e;
			LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
				lf_print("lf_setlock: blocking on", e->le_to);
				lf_printlist("lf_setlock", e->le_to);
			}
		}
#endif /* LOCKF_DEBUG */

		if ((lock->lf_flags & F_WAIT) == 0) {
			/*
			 * The caller requested async notification -
			 * this callback happens when the blocking
			 * lock is released, allowing the caller to
			 * make another attempt to take the lock.
			 */
			*cookiep = (void *) lock;
			error = EINPROGRESS;
			goto out;
		}

		lock->lf_refs++;
		error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
		if (lf_free_lock(lock)) {
			error = EDOOFUS;
			goto out;
		}

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must
		 * remove our lock graph edges) and/or by another
		 * process releasing a lock (in which case our edges
		 * have already been removed and we have been moved to
		 * the active list). We may also have been woken by
		 * lf_purgelocks which we report to the caller as
		 * EINTR. In that case, lf_purgelocks will have
		 * removed our lock graph edges.
1485177633Sdfr * 1486177633Sdfr * Note that it is possible to receive a signal after 1487177633Sdfr * we were successfully woken (and moved to the active 1488177633Sdfr * list) but before we resumed execution. In this 1489177633Sdfr * case, our lf_outedges list will be clear. We 1490177633Sdfr * pretend there was no error. 1491177633Sdfr * 1492177633Sdfr * Note also, if we have been sleeping long enough, we 1493177633Sdfr * may now have incoming edges from some newer lock 1494177633Sdfr * which is waiting behind us in the queue. 149548556Sbde */ 1496178243Skib if (lock->lf_flags & F_INTR) { 1497178243Skib error = EINTR; 1498178243Skib lf_free_lock(lock); 1499178243Skib goto out; 1500178243Skib } 1501177633Sdfr if (LIST_EMPTY(&lock->lf_outedges)) { 1502177633Sdfr error = 0; 1503177633Sdfr } else { 1504177633Sdfr lf_cancel_lock(state, lock); 1505177633Sdfr goto out; 15061960Sdg } 1507177633Sdfr#ifdef LOCKF_DEBUG 1508177633Sdfr if (lockf_debug & 1) { 1509177633Sdfr lf_print("lf_setlock: granted", lock); 151048556Sbde } 1511177633Sdfr#endif 1512177633Sdfr goto out; 15131960Sdg } 15141960Sdg /* 1515177633Sdfr * It looks like we are going to grant the lock. First add 1516177633Sdfr * edges from any currently pending lock that the new lock 1517177633Sdfr * would block. 1518177633Sdfr */ 1519177633Sdfr sx_xlock(&lf_owner_graph_lock); 1520177633Sdfr error = lf_add_incoming(state, lock); 1521177633Sdfr sx_xunlock(&lf_owner_graph_lock); 1522177633Sdfr if (error) { 1523177633Sdfr#ifdef LOCKF_DEBUG 1524177633Sdfr if (lockf_debug & 1) 1525177633Sdfr lf_print("lf_setlock: deadlock", lock); 1526177633Sdfr#endif 1527177633Sdfr lf_free_lock(lock); 1528177633Sdfr goto out; 1529177633Sdfr } 1530177633Sdfr 1531177633Sdfr /* 15321960Sdg * No blocks!! Add the lock. Note that we will 15331960Sdg * downgrade or upgrade any overlapping locks this 15341960Sdg * process already owns. 15351960Sdg */ 1536177633Sdfr lf_activate_lock(state, lock); 1537177633Sdfr error = 0; 1538177633Sdfrout: 1539177633Sdfr return (error); 15401960Sdg} 15411960Sdg 15421960Sdg/* 15431960Sdg * Remove a byte-range lock on an inode. 15441960Sdg * 15451960Sdg * Generally, find the lock (or an overlap to that lock) 15461960Sdg * and remove it (or shrink it), then wakeup anyone we can. 15471960Sdg */ 154812819Sphkstatic int 1549177633Sdfrlf_clearlock(struct lockf *state, struct lockf_entry *unlock) 15501960Sdg{ 1551177633Sdfr struct lockf_entry *overlap; 15521960Sdg 1553177633Sdfr overlap = LIST_FIRST(&state->ls_active); 1554177633Sdfr 1555177633Sdfr if (overlap == NOLOCKF) 15561960Sdg return (0); 15571960Sdg#ifdef LOCKF_DEBUG 15581960Sdg if (unlock->lf_type != F_UNLCK) 15591960Sdg panic("lf_clearlock: bad type"); 15601960Sdg if (lockf_debug & 1) 15611960Sdg lf_print("lf_clearlock", unlock); 15621960Sdg#endif /* LOCKF_DEBUG */ 15631960Sdg 1564177633Sdfr lf_activate_lock(state, unlock); 15651960Sdg 15661960Sdg return (0); 15671960Sdg} 15681960Sdg 15691960Sdg/* 1570177633Sdfr * Check whether there is a blocking lock, and if so return its 1571177633Sdfr * details in '*fl'. 
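 *
 * This backs fcntl(2)'s F_GETLK.  A hypothetical userland sketch of
 * the contract:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflict: pid %d holds [%jd..]\n",
 *		    (int)fl.l_pid, (intmax_t)fl.l_start);
 *
 * When there is no conflict, l_type is set back to F_UNLCK, matching
 * the else branch below.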
15721960Sdg */ 157312819Sphkstatic int 1574177633Sdfrlf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 15751960Sdg{ 1576177633Sdfr struct lockf_entry *block; 15771960Sdg 15781960Sdg#ifdef LOCKF_DEBUG 15791960Sdg if (lockf_debug & 1) 15801960Sdg lf_print("lf_getlock", lock); 15811960Sdg#endif /* LOCKF_DEBUG */ 15821960Sdg 1583177633Sdfr if ((block = lf_getblock(state, lock))) { 15841960Sdg fl->l_type = block->lf_type; 15851960Sdg fl->l_whence = SEEK_SET; 15861960Sdg fl->l_start = block->lf_start; 1587177633Sdfr if (block->lf_end == OFF_MAX) 15881960Sdg fl->l_len = 0; 15891960Sdg else 15901960Sdg fl->l_len = block->lf_end - block->lf_start + 1; 1591177633Sdfr fl->l_pid = block->lf_owner->lo_pid; 1592177633Sdfr fl->l_sysid = block->lf_owner->lo_sysid; 15931960Sdg } else { 15941960Sdg fl->l_type = F_UNLCK; 15951960Sdg } 15961960Sdg return (0); 15971960Sdg} 15981960Sdg 15991960Sdg/* 1600177633Sdfr * Cancel an async lock request. 1601177633Sdfr */ 1602177633Sdfrstatic int 1603177633Sdfrlf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1604177633Sdfr{ 1605177633Sdfr struct lockf_entry *reallock; 1606177633Sdfr 1607177633Sdfr /* 1608177633Sdfr * We need to match this request with an existing lock 1609177633Sdfr * request. 1610177633Sdfr */ 1611177633Sdfr LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1612177633Sdfr if ((void *) reallock == cookie) { 1613177633Sdfr /* 1614177633Sdfr * Double-check that this lock looks right 1615177633Sdfr * (maybe use a rolling ID for the cancel 1616177633Sdfr * cookie instead?) 1617177633Sdfr */ 1618177633Sdfr if (!(reallock->lf_vnode == lock->lf_vnode 1619177633Sdfr && reallock->lf_start == lock->lf_start 1620177633Sdfr && reallock->lf_end == lock->lf_end)) { 1621177633Sdfr return (ENOENT); 1622177633Sdfr } 1623177633Sdfr 1624177633Sdfr /* 1625177633Sdfr * Make sure this lock was async and then just 1626177633Sdfr * remove it from its wait lists. 1627177633Sdfr */ 1628177633Sdfr if (!reallock->lf_async_task) { 1629177633Sdfr return (ENOENT); 1630177633Sdfr } 1631177633Sdfr 1632177633Sdfr /* 1633177633Sdfr * Note that since any other thread must take 1634177633Sdfr * state->ls_lock before it can possibly 1635177633Sdfr * trigger the async callback, we are safe 1636177633Sdfr * from a race with lf_wakeup_lock, i.e. we 1637177633Sdfr * can free the lock (actually our caller does 1638177633Sdfr * this). 1639177633Sdfr */ 1640177633Sdfr lf_cancel_lock(state, reallock); 1641177633Sdfr return (0); 1642177633Sdfr } 1643177633Sdfr } 1644177633Sdfr 1645177633Sdfr /* 1646177633Sdfr * We didn't find a matching lock - not much we can do here. 1647177633Sdfr */ 1648177633Sdfr return (ENOENT); 1649177633Sdfr} 1650177633Sdfr 1651177633Sdfr/* 16521960Sdg * Walk the list of locks for an inode and 16531960Sdg * return the first blocking lock. 16541960Sdg */ 1655177633Sdfrstatic struct lockf_entry * 1656177633Sdfrlf_getblock(struct lockf *state, struct lockf_entry *lock) 16571960Sdg{ 1658177633Sdfr struct lockf_entry *overlap; 16591960Sdg 1660177633Sdfr LIST_FOREACH(overlap, &state->ls_active, lf_link) { 16611960Sdg /* 1662177633Sdfr * We may assume that the active list is sorted by 1663177633Sdfr * lf_start. 
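 *
 * lf_blocks(x, y), used below, is true roughly when the two entries
 * have different owners, at least one of them is an exclusive lock,
 * and their byte ranges overlap; a sketch of that predicate:
 *
 *	x->lf_owner != y->lf_owner &&
 *	    (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) &&
 *	    lf_overlaps(x, y)
 *
 * Since the list is sorted by lf_start, the loop can stop as soon as
 * an entry starts beyond the end of 'lock'.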
 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;
		return (overlap);
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to find an overlapping lock (if
 * any) and return a classification of that overlap.
 *
 * Arguments:
 *	*overlap	The place in the lock list to start looking
 *	lock		The lock which is being tested
 *	type		Pass 'SELF' to test only locks with the same
 *			owner as lock, or 'OTHERS' to test only locks
 *			with a different owner
 *
 * Returns one of six values:
 *	0) no overlap
 *	1) overlap == lock
 *	2) overlap contains lock
 *	3) lock contains overlap
 *	4) overlap starts before lock
 *	5) overlap ends after lock
 *
 * If there is an overlapping lock, '*overlap' is set to point at the
 * overlapping lock.
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
{
	struct lockf_entry *lf;
	off_t start, end;
	int res;

	if ((*overlap) == NOLOCKF) {
		return (0);
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	res = 0;
	while (*overlap) {
		lf = *overlap;
		if (lf->lf_start > end)
			break;
		if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
		    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if (start > lf->lf_end) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
		if (lf->lf_start == start && lf->lf_end == end) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			res = 1;
			break;
		}
		if (lf->lf_start <= start && lf->lf_end >= end) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			res = 2;
			break;
		}
		if (start <= lf->lf_start && end >= lf->lf_end) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			res = 3;
			break;
		}
		if (lf->lf_start < start && lf->lf_end >= start) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			res = 4;
			break;
		}
		if (lf->lf_start > start && lf->lf_end > end) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			res = 5;
			break;
		}
		panic("lf_findoverlap: default");
	}
	return (res);
}

/*
 * Split the existing 'lock1', based on the extent of the lock
 * described by 'lock2'.  The existing lock should cover 'lock2'
 * entirely.
 *
 * Any pending locks which have been unblocked are added to
 * 'granted'.
 */
static void
lf_split(struct lockf *state, struct lockf_entry *lock1,
    struct lockf_entry *lock2, struct lockf_entry_list *granted)
{
	struct lockf_entry *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check whether we need to split at all: if 'lock2' shares a
	 * boundary with 'lock1', shrinking 'lock1' is enough.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lf_set_start(state, lock1, lock2->lf_end + 1, granted);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lf_set_end(state, lock1, lock2->lf_start - 1, granted);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = lf_alloc_lock(lock1->lf_owner);
	memcpy(splitlock, lock1, sizeof *splitlock);
	splitlock->lf_refs = 1;
	if (splitlock->lf_flags & F_REMOTE)
		vref(splitlock->lf_vnode);

	/*
	 * This cannot cause a deadlock since any edges we would add
	 * to splitlock already exist in lock1.  We must be sure to add
	 * necessary dependencies to splitlock before we reduce lock1
	 * otherwise we may accidentally grant a pending lock that
	 * was blocked by the tail end of lock1.
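	 *
	 * Worked example: if lock1 covers [0..9] and lock2 covers
	 * [3..5], the code below leaves lock1 as [0..2] and links in
	 * splitlock as [6..9].  The two boundary cases handled above,
	 * where lock2 shares a start or an end with lock1, only
	 * shrink lock1 and need no new entry.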
1846177633Sdfr */ 18471960Sdg splitlock->lf_start = lock2->lf_end + 1; 1848177633Sdfr LIST_INIT(&splitlock->lf_outedges); 1849177633Sdfr LIST_INIT(&splitlock->lf_inedges); 1850177633Sdfr sx_xlock(&lf_owner_graph_lock); 1851177633Sdfr lf_add_incoming(state, splitlock); 1852177633Sdfr sx_xunlock(&lf_owner_graph_lock); 1853177633Sdfr 1854177633Sdfr lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1855177633Sdfr 18561960Sdg /* 18571960Sdg * OK, now link it in 18581960Sdg */ 1859177633Sdfr lf_insert_lock(state, splitlock); 18601960Sdg} 18611960Sdg 1862180025Sdfrstruct lockdesc { 1863180025Sdfr STAILQ_ENTRY(lockdesc) link; 1864177633Sdfr struct vnode *vp; 1865177633Sdfr struct flock fl; 1866177633Sdfr}; 1867180025SdfrSTAILQ_HEAD(lockdesclist, lockdesc); 1868177633Sdfr 1869180025Sdfrint 1870180025Sdfrlf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg) 1871177633Sdfr{ 1872177633Sdfr struct lockf *ls; 1873177633Sdfr struct lockf_entry *lf; 1874180025Sdfr struct lockdesc *ldesc; 1875180025Sdfr struct lockdesclist locks; 1876180025Sdfr int error; 1877177633Sdfr 1878177633Sdfr /* 1879177633Sdfr * In order to keep the locking simple, we iterate over the 1880177633Sdfr * active lock lists to build a list of locks that need 1881180025Sdfr * releasing. We then call the iterator for each one in turn. 1882177633Sdfr * 1883177633Sdfr * We take an extra reference to the vnode for the duration to 1884177633Sdfr * make sure it doesn't go away before we are finished. 1885177633Sdfr */ 1886177633Sdfr STAILQ_INIT(&locks); 1887177633Sdfr sx_xlock(&lf_lock_states_lock); 1888177633Sdfr LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1889177633Sdfr sx_xlock(&ls->ls_lock); 1890177633Sdfr LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1891177633Sdfr if (lf->lf_owner->lo_sysid != sysid) 1892177633Sdfr continue; 1893177633Sdfr 1894180025Sdfr ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1895177633Sdfr M_WAITOK); 1896180025Sdfr ldesc->vp = lf->lf_vnode; 1897180025Sdfr vref(ldesc->vp); 1898180025Sdfr ldesc->fl.l_start = lf->lf_start; 1899177633Sdfr if (lf->lf_end == OFF_MAX) 1900180025Sdfr ldesc->fl.l_len = 0; 1901177633Sdfr else 1902180025Sdfr ldesc->fl.l_len = 1903177633Sdfr lf->lf_end - lf->lf_start + 1; 1904180025Sdfr ldesc->fl.l_whence = SEEK_SET; 1905180025Sdfr ldesc->fl.l_type = F_UNLCK; 1906180025Sdfr ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1907180025Sdfr ldesc->fl.l_sysid = sysid; 1908180025Sdfr STAILQ_INSERT_TAIL(&locks, ldesc, link); 1909177633Sdfr } 1910177633Sdfr sx_xunlock(&ls->ls_lock); 1911177633Sdfr } 1912177633Sdfr sx_xunlock(&lf_lock_states_lock); 1913177633Sdfr 1914180025Sdfr /* 1915180025Sdfr * Call the iterator function for each lock in turn. If the 1916180025Sdfr * iterator returns an error code, just free the rest of the 1917180025Sdfr * lockdesc structures. 
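	 *
	 * An lf_iterator callback has the shape
	 *
	 *	int fn(struct vnode *vp, struct flock *fl, void *arg);
	 *
	 * lf_clearremotesys_iterator further down is a concrete
	 * instance.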
1918180025Sdfr */ 1919180025Sdfr error = 0; 1920180025Sdfr while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1921177633Sdfr STAILQ_REMOVE_HEAD(&locks, link); 1922180025Sdfr if (!error) 1923180025Sdfr error = fn(ldesc->vp, &ldesc->fl, arg); 1924180025Sdfr vrele(ldesc->vp); 1925180025Sdfr free(ldesc, M_LOCKF); 1926177633Sdfr } 1927180025Sdfr 1928180025Sdfr return (error); 1929177633Sdfr} 1930177633Sdfr 1931177633Sdfrint 1932180025Sdfrlf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg) 1933180025Sdfr{ 1934180025Sdfr struct lockf *ls; 1935180025Sdfr struct lockf_entry *lf; 1936180025Sdfr struct lockdesc *ldesc; 1937180025Sdfr struct lockdesclist locks; 1938180025Sdfr int error; 1939180025Sdfr 1940180025Sdfr /* 1941180025Sdfr * In order to keep the locking simple, we iterate over the 1942180025Sdfr * active lock lists to build a list of locks that need 1943180025Sdfr * releasing. We then call the iterator for each one in turn. 1944180025Sdfr * 1945180025Sdfr * We take an extra reference to the vnode for the duration to 1946180025Sdfr * make sure it doesn't go away before we are finished. 1947180025Sdfr */ 1948180025Sdfr STAILQ_INIT(&locks); 1949194993Skib VI_LOCK(vp); 1950180025Sdfr ls = vp->v_lockf; 1951194993Skib if (!ls) { 1952194993Skib VI_UNLOCK(vp); 1953180025Sdfr return (0); 1954194993Skib } 1955194993Skib ls->ls_threads++; 1956194993Skib VI_UNLOCK(vp); 1957180025Sdfr 1958180025Sdfr sx_xlock(&ls->ls_lock); 1959180025Sdfr LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1960180025Sdfr ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1961180025Sdfr M_WAITOK); 1962180025Sdfr ldesc->vp = lf->lf_vnode; 1963180025Sdfr vref(ldesc->vp); 1964180025Sdfr ldesc->fl.l_start = lf->lf_start; 1965180025Sdfr if (lf->lf_end == OFF_MAX) 1966180025Sdfr ldesc->fl.l_len = 0; 1967180025Sdfr else 1968180025Sdfr ldesc->fl.l_len = 1969180025Sdfr lf->lf_end - lf->lf_start + 1; 1970180025Sdfr ldesc->fl.l_whence = SEEK_SET; 1971180025Sdfr ldesc->fl.l_type = F_UNLCK; 1972180025Sdfr ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1973180025Sdfr ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; 1974180025Sdfr STAILQ_INSERT_TAIL(&locks, ldesc, link); 1975180025Sdfr } 1976180025Sdfr sx_xunlock(&ls->ls_lock); 1977194993Skib VI_LOCK(vp); 1978194993Skib ls->ls_threads--; 1979194993Skib wakeup(ls); 1980194993Skib VI_UNLOCK(vp); 1981180025Sdfr 1982180025Sdfr /* 1983180025Sdfr * Call the iterator function for each lock in turn. If the 1984180025Sdfr * iterator returns an error code, just free the rest of the 1985180025Sdfr * lockdesc structures. 
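	 *
	 * A hypothetical caller sketch, counting the active locks on
	 * a vnode (count_iter is a made-up name, not part of this
	 * file):
	 *
	 *	static int
	 *	count_iter(struct vnode *vp, struct flock *fl, void *arg)
	 *	{
	 *		(*(int *)arg)++;
	 *		return (0);
	 *	}
	 *
	 *	int count = 0;
	 *	lf_iteratelocks_vnode(vp, count_iter, &count);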
1986180025Sdfr */ 1987180025Sdfr error = 0; 1988180025Sdfr while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1989180025Sdfr STAILQ_REMOVE_HEAD(&locks, link); 1990180025Sdfr if (!error) 1991180025Sdfr error = fn(ldesc->vp, &ldesc->fl, arg); 1992180025Sdfr vrele(ldesc->vp); 1993180025Sdfr free(ldesc, M_LOCKF); 1994180025Sdfr } 1995180025Sdfr 1996180025Sdfr return (error); 1997180025Sdfr} 1998180025Sdfr 1999180025Sdfrstatic int 2000180025Sdfrlf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg) 2001180025Sdfr{ 2002180025Sdfr 2003180025Sdfr VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE); 2004180025Sdfr return (0); 2005180025Sdfr} 2006180025Sdfr 2007180025Sdfrvoid 2008180025Sdfrlf_clearremotesys(int sysid) 2009180025Sdfr{ 2010180025Sdfr 2011180025Sdfr KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 2012180025Sdfr lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL); 2013180025Sdfr} 2014180025Sdfr 2015180025Sdfrint 2016177633Sdfrlf_countlocks(int sysid) 2017177633Sdfr{ 2018177633Sdfr int i; 2019177633Sdfr struct lock_owner *lo; 2020177633Sdfr int count; 2021177633Sdfr 2022177633Sdfr count = 0; 2023177633Sdfr sx_xlock(&lf_lock_owners_lock); 2024177633Sdfr for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 2025177633Sdfr LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 2026177633Sdfr if (lo->lo_sysid == sysid) 2027177633Sdfr count += lo->lo_refs; 2028177633Sdfr sx_xunlock(&lf_lock_owners_lock); 2029177633Sdfr 2030177633Sdfr return (count); 2031177633Sdfr} 2032177633Sdfr 2033177633Sdfr#ifdef LOCKF_DEBUG 2034177633Sdfr 20351960Sdg/* 2036177633Sdfr * Return non-zero if y is reachable from x using a brute force 2037177633Sdfr * search. If reachable and path is non-null, return the route taken 2038177633Sdfr * in path. 20391960Sdg */ 2040177633Sdfrstatic int 2041177633Sdfrgraph_reaches(struct owner_vertex *x, struct owner_vertex *y, 2042177633Sdfr struct owner_vertex_list *path) 2043177633Sdfr{ 2044177633Sdfr struct owner_edge *e; 2045177633Sdfr 2046177633Sdfr if (x == y) { 2047177633Sdfr if (path) 2048177633Sdfr TAILQ_INSERT_HEAD(path, x, v_link); 2049177633Sdfr return 1; 2050177633Sdfr } 2051177633Sdfr 2052177633Sdfr LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2053177633Sdfr if (graph_reaches(e->e_to, y, path)) { 2054177633Sdfr if (path) 2055177633Sdfr TAILQ_INSERT_HEAD(path, x, v_link); 2056177633Sdfr return 1; 2057177633Sdfr } 2058177633Sdfr } 2059177633Sdfr return 0; 2060177633Sdfr} 2061177633Sdfr 2062177633Sdfr/* 2063177633Sdfr * Perform consistency checks on the graph. Make sure the values of 2064177633Sdfr * v_order are correct. If checkorder is non-zero, check no vertex can 2065177633Sdfr * reach any other vertex with a smaller order. 
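 *
 * The invariant being verified is that the owner graph stays
 * topologically sorted: for every edge e,
 *
 *	e->e_from->v_order < e->e_to->v_order
 *
 * which graph_add_edge below works to preserve.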
 */
static void
graph_check(struct owner_graph *g, int checkorder)
{
	int i, j;

	for (i = 0; i < g->g_size; i++) {
		if (!g->g_vertices[i]->v_owner)
			continue;
		KASSERT(g->g_vertices[i]->v_order == i,
		    ("lock graph vertices disordered"));
		if (checkorder) {
			for (j = 0; j < i; j++) {
				if (!g->g_vertices[j]->v_owner)
					continue;
				KASSERT(!graph_reaches(g->g_vertices[i],
				    g->g_vertices[j], NULL),
				    ("lock graph vertices disordered"));
			}
		}
	}
}

static void
graph_print_vertices(struct owner_vertex_list *set)
{
	struct owner_vertex *v;

	printf("{ ");
	TAILQ_FOREACH(v, set, v_link) {
		printf("%d:", v->v_order);
		lf_print_owner(v->v_owner);
		if (TAILQ_NEXT(v, v_link))
			printf(", ");
	}
	printf(" }\n");
}

#endif

/*
 * Calculate the subset of vertices v from the affected region [y..x]
 * where v is reachable from y.  Return -1 if a loop was detected
 * (i.e. x is reachable from y), otherwise the number of vertices in
 * this subset.
 */
static int
graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{
	uint32_t gen;
	struct owner_vertex *v;
	struct owner_edge *e;
	int n;

	/*
	 * We start with a set containing just y.  Then for each vertex
	 * v in the set so far unprocessed, we add each vertex that v
	 * has an out-edge to and that is within the affected region
	 * [y..x].  If we see the vertex x on our travels, stop
	 * immediately.
	 */
	TAILQ_INIT(delta);
	TAILQ_INSERT_TAIL(delta, y, v_link);
	v = y;
	n = 1;
	gen = g->g_gen;
	while (v) {
		LIST_FOREACH(e, &v->v_outedges, e_outlink) {
			if (e->e_to == x)
				return -1;
			if (e->e_to->v_order < x->v_order
			    && e->e_to->v_gen != gen) {
				e->e_to->v_gen = gen;
				TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
				n++;
			}
		}
		v = TAILQ_NEXT(v, v_link);
	}

	return (n);
}

/*
 * Calculate the subset of vertices v from the affected region [y..x]
 * where v reaches x.  Return the number of vertices in this subset.
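 *
 * This is the mirror image of graph_delta_forward.  No cycle check
 * is needed here: graph_add_edge only calls this after the forward
 * pass has already returned without detecting one.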
2153177633Sdfr */ 2154177633Sdfrstatic int 2155177633Sdfrgraph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 2156177633Sdfr struct owner_vertex *y, struct owner_vertex_list *delta) 2157177633Sdfr{ 2158177633Sdfr uint32_t gen; 2159177633Sdfr struct owner_vertex *v; 2160177633Sdfr struct owner_edge *e; 2161177633Sdfr int n; 2162177633Sdfr 2163177633Sdfr /* 2164177633Sdfr * We start with a set containing just x. Then for each vertex 2165177633Sdfr * v in the set so far unprocessed, we add each vertex that v 2166177633Sdfr * has an in-edge from and that is within the affected region 2167177633Sdfr * [y..x]. 2168177633Sdfr */ 2169177633Sdfr TAILQ_INIT(delta); 2170177633Sdfr TAILQ_INSERT_TAIL(delta, x, v_link); 2171177633Sdfr v = x; 2172177633Sdfr n = 1; 2173177633Sdfr gen = g->g_gen; 2174177633Sdfr while (v) { 2175177633Sdfr LIST_FOREACH(e, &v->v_inedges, e_inlink) { 2176177633Sdfr if (e->e_from->v_order > y->v_order 2177177633Sdfr && e->e_from->v_gen != gen) { 2178177633Sdfr e->e_from->v_gen = gen; 2179177633Sdfr TAILQ_INSERT_HEAD(delta, e->e_from, v_link); 2180177633Sdfr n++; 2181177633Sdfr } 2182177633Sdfr } 2183177633Sdfr v = TAILQ_PREV(v, owner_vertex_list, v_link); 2184177633Sdfr } 2185177633Sdfr 2186177633Sdfr return (n); 2187177633Sdfr} 2188177633Sdfr 2189177633Sdfrstatic int 2190177633Sdfrgraph_add_indices(int *indices, int n, struct owner_vertex_list *set) 2191177633Sdfr{ 2192177633Sdfr struct owner_vertex *v; 2193177633Sdfr int i, j; 2194177633Sdfr 2195177633Sdfr TAILQ_FOREACH(v, set, v_link) { 2196177633Sdfr for (i = n; 2197177633Sdfr i > 0 && indices[i - 1] > v->v_order; i--) 2198177633Sdfr ; 2199177633Sdfr for (j = n - 1; j >= i; j--) 2200177633Sdfr indices[j + 1] = indices[j]; 2201177633Sdfr indices[i] = v->v_order; 2202177633Sdfr n++; 2203177633Sdfr } 2204177633Sdfr 2205177633Sdfr return (n); 2206177633Sdfr} 2207177633Sdfr 2208177633Sdfrstatic int 2209177633Sdfrgraph_assign_indices(struct owner_graph *g, int *indices, int nextunused, 2210177633Sdfr struct owner_vertex_list *set) 2211177633Sdfr{ 2212177633Sdfr struct owner_vertex *v, *vlowest; 2213177633Sdfr 2214177633Sdfr while (!TAILQ_EMPTY(set)) { 2215177633Sdfr vlowest = NULL; 2216177633Sdfr TAILQ_FOREACH(v, set, v_link) { 2217177633Sdfr if (!vlowest || v->v_order < vlowest->v_order) 2218177633Sdfr vlowest = v; 2219177633Sdfr } 2220177633Sdfr TAILQ_REMOVE(set, vlowest, v_link); 2221177633Sdfr vlowest->v_order = indices[nextunused]; 2222177633Sdfr g->g_vertices[vlowest->v_order] = vlowest; 2223177633Sdfr nextunused++; 2224177633Sdfr } 2225177633Sdfr 2226177633Sdfr return (nextunused); 2227177633Sdfr} 2228177633Sdfr 2229177633Sdfrstatic int 2230177633Sdfrgraph_add_edge(struct owner_graph *g, struct owner_vertex *x, 2231177633Sdfr struct owner_vertex *y) 2232177633Sdfr{ 2233177633Sdfr struct owner_edge *e; 2234177633Sdfr struct owner_vertex_list deltaF, deltaB; 2235177633Sdfr int nF, nB, n, vi, i; 2236177633Sdfr int *indices; 2237177633Sdfr 2238177633Sdfr sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2239177633Sdfr 2240177633Sdfr LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2241177633Sdfr if (e->e_to == y) { 2242177633Sdfr e->e_refs++; 2243177633Sdfr return (0); 2244177633Sdfr } 2245177633Sdfr } 2246177633Sdfr 22471960Sdg#ifdef LOCKF_DEBUG 2248177633Sdfr if (lockf_debug & 8) { 2249177633Sdfr printf("adding edge %d:", x->v_order); 2250177633Sdfr lf_print_owner(x->v_owner); 2251177633Sdfr printf(" -> %d:", y->v_order); 2252177633Sdfr lf_print_owner(y->v_owner); 2253177633Sdfr printf("\n"); 225422521Sdyson } 
#endif
	if (y->v_order < x->v_order) {
		/*
		 * The new edge violates the order.  First find the set
		 * of affected vertices reachable from y (deltaF) and
		 * the set of affected vertices that reach x (deltaB),
		 * using the graph generation number to detect whether
		 * we have visited a given vertex already.  We re-order
		 * the graph so that each vertex in deltaB appears
		 * before each vertex in deltaF.
		 *
		 * If x is a member of deltaF, then the new edge would
		 * create a cycle.  Otherwise, we may assume that
		 * deltaF and deltaB are disjoint.
		 */
		g->g_gen++;
		if (g->g_gen == 0) {
			/*
			 * Generation wrap.
			 */
			for (vi = 0; vi < g->g_size; vi++) {
				g->g_vertices[vi]->v_gen = 0;
			}
			g->g_gen++;
		}
		nF = graph_delta_forward(g, x, y, &deltaF);
		if (nF < 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 8) {
				struct owner_vertex_list path;
				printf("deadlock: ");
				TAILQ_INIT(&path);
				graph_reaches(y, x, &path);
				graph_print_vertices(&path);
			}
#endif
			return (EDEADLK);
		}

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("re-ordering graph vertices\n");
			printf("deltaF = ");
			graph_print_vertices(&deltaF);
		}
#endif

		nB = graph_delta_backward(g, x, y, &deltaB);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("deltaB = ");
			graph_print_vertices(&deltaB);
		}
#endif

		/*
		 * We first build a set of vertex indices (vertex
		 * order values) that we may use, then we re-assign
		 * orders first to those vertices in deltaB, then to
		 * deltaF.  Note that the contents of deltaF and deltaB
		 * may be partially disordered - we perform an
		 * insertion sort while building our index set.
		 */
		indices = g->g_indexbuf;
		n = graph_add_indices(indices, 0, &deltaF);
		graph_add_indices(indices, n, &deltaB);

		/*
		 * We must also be sure to maintain the relative
		 * ordering of deltaF and deltaB when re-assigning
		 * vertices.  We do this by iteratively removing the
		 * lowest ordered element from the set and assigning
		 * it the next value from our new ordering.
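		 *
		 * Worked example with made-up owners: suppose the
		 * current order is A=0, B=1, C=2, D=3, with an
		 * existing edge B->C, and we add the edge D->B.  The
		 * affected region is [B..D]; deltaF (reachable from
		 * B) is {B, C} and deltaB (reaching D) is {D}.  Their
		 * old order values {1, 2, 3} are re-dealt with deltaB
		 * first, giving D=1, B=2, C=3, after which both D->B
		 * (1 < 2) and B->C (2 < 3) respect the topological
		 * order.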
 */
		i = graph_assign_indices(g, indices, 0, &deltaB);
		graph_assign_indices(g, indices, i, &deltaF);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			struct owner_vertex_list set;
			TAILQ_INIT(&set);
			for (i = 0; i < nB + nF; i++)
				TAILQ_INSERT_TAIL(&set,
				    g->g_vertices[indices[i]], v_link);
			printf("new ordering = ");
			graph_print_vertices(&set);
		}
#endif
	}

	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		graph_check(g, TRUE);
	}
#endif

	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);

	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
	e->e_refs = 1;
	e->e_from = x;
	e->e_to = y;

	return (0);
}

/*
 * Remove an edge x->y from the graph.
 */
static void
graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y)
			break;
	}
	KASSERT(e, ("Removing non-existent edge from deadlock graph"));

	e->e_refs--;
	if (e->e_refs == 0) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("removing edge %d:", x->v_order);
			lf_print_owner(x->v_owner);
			printf(" -> %d:", y->v_order);
			lf_print_owner(y->v_owner);
			printf("\n");
		}
#endif
		LIST_REMOVE(e, e_outlink);
		LIST_REMOVE(e, e_inlink);
		free(e, M_LOCKF);
	}
}

/*
 * Allocate a new vertex for lock owner 'lo', growing the graph's
 * vertex array and index buffer if they are full.  The allocation
 * sleeps (M_WAITOK), so this never fails.
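 *
 * The vertex array and index buffer grow geometrically from the
 * initial size of 10 set up in graph_init (10, 20, 40, ...), so
 * appending a vertex is amortized constant time.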
2402177633Sdfr */ 2403177633Sdfrstatic struct owner_vertex * 2404177633Sdfrgraph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo) 2405177633Sdfr{ 2406177633Sdfr struct owner_vertex *v; 2407177633Sdfr 2408177633Sdfr sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2409177633Sdfr 2410177633Sdfr v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK); 2411177633Sdfr if (g->g_size == g->g_space) { 2412177633Sdfr g->g_vertices = realloc(g->g_vertices, 2413177633Sdfr 2 * g->g_space * sizeof(struct owner_vertex *), 2414177633Sdfr M_LOCKF, M_WAITOK); 2415177633Sdfr free(g->g_indexbuf, M_LOCKF); 2416177633Sdfr g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), 2417177633Sdfr M_LOCKF, M_WAITOK); 2418177633Sdfr g->g_space = 2 * g->g_space; 2419177633Sdfr } 2420177633Sdfr v->v_order = g->g_size; 2421177633Sdfr v->v_gen = g->g_gen; 2422177633Sdfr g->g_vertices[g->g_size] = v; 2423177633Sdfr g->g_size++; 2424177633Sdfr 2425177633Sdfr LIST_INIT(&v->v_outedges); 2426177633Sdfr LIST_INIT(&v->v_inedges); 2427177633Sdfr v->v_owner = lo; 2428177633Sdfr 2429177633Sdfr return (v); 2430177633Sdfr} 2431177633Sdfr 2432177633Sdfrstatic void 2433177633Sdfrgraph_free_vertex(struct owner_graph *g, struct owner_vertex *v) 2434177633Sdfr{ 2435177633Sdfr struct owner_vertex *w; 2436177633Sdfr int i; 2437177633Sdfr 2438177633Sdfr sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2439177633Sdfr 2440177633Sdfr KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); 2441177633Sdfr KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); 2442177633Sdfr 2443177633Sdfr /* 2444177633Sdfr * Remove from the graph's array and close up the gap, 2445177633Sdfr * renumbering the other vertices. 2446177633Sdfr */ 2447177633Sdfr for (i = v->v_order + 1; i < g->g_size; i++) { 2448177633Sdfr w = g->g_vertices[i]; 2449177633Sdfr w->v_order--; 2450177633Sdfr g->g_vertices[i - 1] = w; 2451177633Sdfr } 2452177633Sdfr g->g_size--; 2453177633Sdfr 2454177633Sdfr free(v, M_LOCKF); 2455177633Sdfr} 2456177633Sdfr 2457177633Sdfrstatic struct owner_graph * 2458177633Sdfrgraph_init(struct owner_graph *g) 2459177633Sdfr{ 2460177633Sdfr 2461177633Sdfr g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), 2462177633Sdfr M_LOCKF, M_WAITOK); 2463177633Sdfr g->g_size = 0; 2464177633Sdfr g->g_space = 10; 2465177633Sdfr g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); 2466177633Sdfr g->g_gen = 0; 2467177633Sdfr 2468177633Sdfr return (g); 2469177633Sdfr} 2470177633Sdfr 2471177633Sdfr#ifdef LOCKF_DEBUG 2472177633Sdfr/* 2473177633Sdfr * Print description of a lock owner 2474177633Sdfr */ 2475177633Sdfrstatic void 2476177633Sdfrlf_print_owner(struct lock_owner *lo) 2477177633Sdfr{ 2478177633Sdfr 2479177633Sdfr if (lo->lo_flags & F_REMOTE) { 2480177633Sdfr printf("remote pid %d, system %d", 2481177633Sdfr lo->lo_pid, lo->lo_sysid); 2482177633Sdfr } else if (lo->lo_flags & F_FLOCK) { 2483177633Sdfr printf("file %p", lo->lo_id); 2484177633Sdfr } else { 2485177633Sdfr printf("local pid %d", lo->lo_pid); 2486177633Sdfr } 2487177633Sdfr} 2488177633Sdfr 2489177633Sdfr/* 24901960Sdg * Print out a lock. 
 */
static void
lf_print(char *tag, struct lockf_entry *lock)
{

	printf("%s: lock %p for ", tag, (void *)lock);
	lf_print_owner(lock->lf_owner);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>,",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev));
	printf(" %s, start %jd, end ",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
	    (intmax_t)lock->lf_start);
	if (lock->lf_end == OFF_MAX)
		printf("EOF");
	else
		printf("%jd", (intmax_t)lock->lf_end);
	if (!LIST_EMPTY(&lock->lf_outedges))
		printf(" block %p\n",
		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *blk;
	struct lockf_edge *e;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
		printf("\tlock %p for ", (void *)lf);
		lf_print_owner(lf->lf_owner);	/* print this entry's own owner */
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
			blk = e->le_to;
			printf("\n\t\tlock request %p for ", (void *)blk);
			lf_print_owner(blk->lf_owner);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!LIST_EMPTY(&blk->lf_inedges))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */