kern_rmlock.c revision 215399
/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rmlock.c 215399 2010-11-16 14:08:21Z cognet $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

/*
 * To support usage of rmlock in CVs and msleep(), yet another list for the
 * priority tracker would be needed.  Using this lock for cv and msleep also
 * does not seem very useful.
 */

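/*
 * Note that this is deliberately only a compiler barrier: the data it
 * orders access to (the per-cpu tracker queue and td_critnest) is only
 * ever examined from the local CPU, so no hardware memory barrier is
 * required, but the compiler must not reorder the surrounding stores.
 */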
static __inline void compiler_memory_barrier(void) {
	__asm __volatile("":::"memory");
}

static void	assert_rm(struct lock_object *lock, int what);
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#if 0
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(struct lock_object *lock, int what)
{

	panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

	panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

	panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{

	panic("owner_rm called");
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers. */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

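/*
 * IPI handler, run via smp_rendezvous_cpus() on every CPU that still
 * holds a read token when a writer arrives.  It walks the local per-cpu
 * tracker queue and moves every active reader of this lock onto
 * rm_activeReaders so that the writer can wait for those readers to
 * drain.
 */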
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;
	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

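/*
 * RM_SLEEPABLE is stored in the lock object's lo_flags, so it must fit
 * within the bits reserved for lock class flags.
 */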
CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= RM_SLEEPABLE;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
	} else
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}
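
/*
 * A minimal usage sketch (the names below are illustrative; see rmlock(9)
 * for the full interface):
 *
 *	static struct rmlock data_lock;
 *	struct rm_priotracker tracker;
 *
 *	rm_init(&data_lock, "data");
 *
 *	rm_rlock(&data_lock, &tracker);
 *	... read the shared data ...
 *	rm_runlock(&data_lock, &tracker);
 *
 *	rm_wlock(&data_lock);
 *	... modify the shared data ...
 *	rm_wunlock(&data_lock);
 */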

void
rm_destroy(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

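/*
 * Slow path for rm_rlock().  Entered when this CPU does not currently
 * hold a read token for the lock or the fast path bailed out for another
 * reason.  Re-check the cheap conditions under a critical section and,
 * failing that, fall back to acquiring the backing mutex or sx lock,
 * which synchronizes with any writer and re-arms this CPU's read token.
 */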
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;
	struct rm_queue *queue;
	struct rm_priotracker *atracker;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!(pc->pc_cpumask & rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			atracker = (struct rm_priotracker *)queue;
			if ((atracker->rmp_rmlock == rm) &&
			    (atracker->rmp_thread == tracker->rmp_thread)) {
				mtx_lock_spin(&rm_spinlock);
				LIST_INSERT_HEAD(&rm->rm_activeReaders,
				    tracker, rmp_qentry);
				tracker->rmp_flags = RMPF_ONQUEUE;
				mtx_unlock_spin(&rm_spinlock);
				rm_tracker_add(pc, tracker);
				critical_exit();
				return (1);
			}
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	rm->rm_writecpus &= ~pc->pc_cpumask;
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

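/*
 * Fast path for acquiring the read lock: publish the tracker on the
 * per-cpu queue with preemption effectively disabled (td_critnest is
 * bumped directly to avoid the overhead of critical_enter()), pin the
 * thread to this CPU, and check whether the CPU already holds a read
 * token.  If it does and no preemption is pending, the lock is acquired
 * without any atomic operations or writes to shared state; otherwise
 * fall back to _rm_rlock_hard().
 */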
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	tracker->rmp_flags  = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */

	compiler_memory_barrier();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	compiler_memory_barrier();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt | (rm->rm_writecpus & pc->pc_cpumask)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}

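/*
 * Slow path for releasing the read lock.  Entered when a preemption
 * became pending while the lock was held, or when a writer noticed this
 * reader via rm_cleanIPI() and put its tracker on rm_activeReaders.  In
 * the latter case the tracker is taken off that list and, if the writer
 * is already sleeping on the turnstile (RMPF_SIGNAL), the writer is
 * woken up.
 */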
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

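/*
 * Release a read lock: unhook the tracker from the per-cpu queue and
 * unpin the thread.  The slow path is only needed if a preemption is
 * pending or the tracker was flagged by a writer.
 */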
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

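/*
 * Acquire the write lock: take the backing mutex or sx lock, revoke all
 * outstanding read tokens and use an IPI rendezvous to collect any
 * readers that are still active on other CPUs, then wait on the
 * turnstile until every such reader has dropped its read lock.
 */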
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpumask_t readcpus;

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (rm->rm_writecpus != all_cpus) {
		/* Get all read tokens back. */

		readcpus = all_cpus & (all_cpus & ~rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	curthread->td_locks--;
	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{
	if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
		WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
		    file, line, NULL);
	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

	if (_rm_rlock(rm, tracker, trylock)) {
		LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	}

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif