audit_worker.c revision 165604
/*
 * Copyright (c) 1999-2005 Apple Computer, Inc.
 * Copyright (c) 2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/security/audit/audit_worker.c 165604 2006-12-28 22:18:43Z rwatson $
 */

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/ipc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/sysproto.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/ucred.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>
#include <bsm/audit_kevents.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

#include <security/audit/audit.h>
#include <security/audit/audit_private.h>

#include <vm/uma.h>

/*
 * Worker thread that will schedule disk I/O, etc.
 */
static struct proc		*audit_thread;

/*
 * When an audit log is rotated, the actual rotation must be performed by the
 * audit worker thread, as it may have outstanding writes on the current
 * audit log.  audit_replacement_vp holds the vnode replacing the current
 * vnode.  We can't let more than one replacement occur at a time, so if more
 * than one thread requests a replacement, only one can have the replacement
 * "in progress" at any given moment.  If a thread tries to replace the audit
 * vnode and discovers a replacement is already in progress (i.e.,
 * audit_replacement_flag != 0), then it will sleep on audit_replacement_cv
 * waiting its turn to perform a replacement.  When a replacement is
 * completed, this cv is signalled by the worker thread so a waiting thread
 * can start another replacement.  We also store a credential to perform
 * audit log write operations with.
 *
 * The current credential and vnode are thread-local to audit_worker.
 */
static struct cv		audit_replacement_cv;

static int			audit_replacement_flag;
static struct vnode		*audit_replacement_vp;
static struct ucred		*audit_replacement_cred;

/*
 * Flags related to kernel->user-space communication.
 */
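/*
 * audit_file_rotate_wait is set once a rotation trigger has been sent to the
 * audit daemon for an over-size trail, and is cleared in audit_rotate_vnode()
 * when the rotation completes, so that only one rotation request is
 * outstanding at a time.
 */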
static int			audit_file_rotate_wait;

/*
 * Write an audit record to a file, performed as the last stage after both
 * preselection and BSM conversion.  Both space management and write failures
 * are handled in this function.
 *
 * No attempt is made to deal with possible failure to deliver a trigger to
 * the audit daemon, since the message is asynchronous anyway.
 */
static void
audit_record_write(struct vnode *vp, struct ucred *cred, struct thread *td,
    void *data, size_t len)
{
	static struct timeval last_lowspace_trigger;
	static struct timeval last_fail;
	static int cur_lowspace_trigger;
	struct statfs *mnt_stat;
	int error, vfslocked;
	static int cur_fail;
	struct vattr vattr;
	long temp;

	if (vp == NULL)
		return;

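	/*
	 * Point at the mount's statfs structure and acquire Giant if the
	 * file system backing the trail is not MPSAFE.
	 */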
	mnt_stat = &vp->v_mount->mnt_stat;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);

	/*
	 * First, gather statistics on the audit log file and file system so
	 * that we know how we're doing on space.  Consider failure of these
	 * operations to indicate a future inability to write to the file.
	 */
	error = VFS_STATFS(vp->v_mount, mnt_stat, td);
	if (error)
		goto fail;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_GETATTR(vp, &vattr, cred, td);
	VOP_UNLOCK(vp, 0, td);
	if (error)
		goto fail;
	audit_fstat.af_currsz = vattr.va_size;

	/*
	 * We handle four different space-related limits:
	 *
	 * - A fixed (hard) limit on the minimum number of free blocks we
	 *   require on the file system; falling below it results in record
	 *   loss, a trigger, and possibly a fail stop due to violating
	 *   invariants.
	 *
	 * - An administrative (soft) limit which, when fallen below, results
	 *   in the kernel notifying the audit daemon of low space.
	 *
	 * - An audit trail size limit which, when exceeded, results in the
	 *   kernel notifying the audit daemon that rotation is desired.
	 *
	 * - The total size of the kernel audit record queue exceeding free
	 *   space, which can lead to a possible fail stop (with drain), in
	 *   order to prevent violating invariants.  Failure here doesn't
	 *   halt immediately, but prevents new records from being generated.
	 *
	 * Possibly, the last of these should be handled differently, always
	 * allowing a full queue to be lost, rather than trying to prevent
	 * loss.
	 *
	 * First, handle the hard limit, which generates a trigger and may
	 * fail stop.  This is handled in the same manner as ENOSPC from
	 * VOP_WRITE, and results in record loss.
	 */
	if (mnt_stat->f_bfree < AUDIT_HARD_LIMIT_FREE_BLOCKS) {
		error = ENOSPC;
		goto fail_enospc;
	}

	/*
	 * Second, handle falling below the soft limit, if defined; we send
	 * the daemon a trigger and continue processing the record.  Triggers
	 * are limited to 1/sec.
	 */
	if (audit_qctrl.aq_minfree != 0) {
		/*
		 * XXXAUDIT: Check math and block size calculations here.
		 */
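		/*
		 * temp approximates aq_minfree percent of the file system's
		 * total blocks, i.e., the soft free-space threshold.
		 */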
		temp = mnt_stat->f_blocks / (100 / audit_qctrl.aq_minfree);
		if (mnt_stat->f_bfree < temp) {
			if (ppsratecheck(&last_lowspace_trigger,
			    &cur_lowspace_trigger, 1)) {
				(void)send_trigger(AUDIT_TRIGGER_LOW_SPACE);
				printf("Warning: audit space low\n");
			}
		}
	}

	/*
	 * If the current file is getting full, generate a rotation trigger
	 * to the daemon.  This is only approximate, which is fine as more
	 * records may be generated before the daemon rotates the file.
	 */
	if ((audit_fstat.af_filesz != 0) && (audit_file_rotate_wait == 0) &&
	    (vattr.va_size >= audit_fstat.af_filesz)) {
		audit_file_rotate_wait = 1;
		(void)send_trigger(AUDIT_TRIGGER_ROTATE_KERNEL);
	}

	/*
	 * If the estimated amount of audit data in the audit event queue
	 * (plus records allocated but not yet queued) has reached the amount
	 * of free space on the disk, then we need to go into an audit fail
	 * stop state, in which we do not permit the allocation/committing of
	 * any new audit records.  We continue to process records but don't
	 * allow any activities that might generate new records.  In the
	 * future, we might want to detect when space is available again and
	 * allow operation to continue, but this behavior is sufficient to
	 * meet fail stop requirements in CAPP.
	 */
	if (audit_fail_stop) {
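		/*
		 * Estimate the worst-case size, in file system blocks, of
		 * the records queued plus those allocated but not yet
		 * queued, and compare it against the blocks still free.
		 */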
		if ((unsigned long)((audit_q_len + audit_pre_q_len + 1) *
		    MAX_AUDIT_RECORD_SIZE) / mnt_stat->f_bsize >=
		    (unsigned long)(mnt_stat->f_bfree)) {
			if (ppsratecheck(&last_fail, &cur_fail, 1))
				printf("audit_record_write: free space "
				    "below size of audit queue, failing "
				    "stop\n");
			audit_in_failure = 1;
		} else if (audit_in_failure) {
			/*
			 * Note: if we want to handle recovery, this is the
			 * spot to do it: unset audit_in_failure, and issue a
			 * wakeup on the cv.
			 */
		}
	}

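	/*
	 * Append the record to the end of the trail in a single write
	 * (IO_APPEND|IO_UNIT).
	 */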
	error = vn_rdwr(UIO_WRITE, vp, data, len, (off_t)0, UIO_SYSSPACE,
	    IO_APPEND|IO_UNIT, cred, NULL, NULL, td);
	if (error == ENOSPC)
		goto fail_enospc;
	else if (error)
		goto fail;

	/*
	 * Catch completion of a queue drain here; if we're draining and the
	 * queue is now empty, fail stop.  Note that audit_fail_stop is
	 * implicitly true here, since audit_in_failure can only be set if
	 * audit_fail_stop is set.
	 *
	 * Note: if we handle recovery from audit_in_failure, then we need to
	 * make the panic here conditional.
	 */
	if (audit_in_failure) {
		if (audit_q_len == 0 && audit_pre_q_len == 0) {
			VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
			(void)VOP_FSYNC(vp, MNT_WAIT, td);
			VOP_UNLOCK(vp, 0, td);
			panic("Audit store overflow; record queue drained.");
		}
	}

	VFS_UNLOCK_GIANT(vfslocked);
	return;

fail_enospc:
	/*
	 * ENOSPC is considered a special case with respect to failures, as
	 * this can reflect either our preemptive detection of insufficient
	 * space, or ENOSPC returned by the vnode write call.
	 */
	if (audit_fail_stop) {
		VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
		(void)VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0, td);
		panic("Audit log space exhausted and fail-stop set.");
	}
	(void)send_trigger(AUDIT_TRIGGER_NO_SPACE);
	audit_suspended = 1;

	/* FALLTHROUGH */
fail:
	/*
	 * We have failed to write to the file, so the current record is
	 * lost, which may require an immediate system halt.
	 */
	if (audit_panic_on_write_fail) {
		VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
		(void)VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0, td);
		panic("audit_worker: write error %d\n", error);
	} else if (ppsratecheck(&last_fail, &cur_fail, 1))
		printf("audit_worker: write error %d\n", error);
	VFS_UNLOCK_GIANT(vfslocked);
}

/*
 * If an appropriate signal has been received, rotate the audit log based on
 * the global replacement variables.  Signal consumers as needed that the
 * rotation has taken place.
 *
 * The global variables and CVs used to signal the audit_worker to perform a
 * rotation are essentially a message queue of depth 1.  It would be much
 * nicer to actually use a message queue.
 */
static void
audit_worker_rotate(struct ucred **audit_credp, struct vnode **audit_vpp,
    struct thread *audit_td)
{
	int do_replacement_signal, vfslocked;
	struct ucred *old_cred;
	struct vnode *old_vp;

	mtx_assert(&audit_mtx, MA_OWNED);

	do_replacement_signal = 0;
	while (audit_replacement_flag != 0) {
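		/*
		 * Take ownership of the pending replacement credential and
		 * vnode, and clear the request so that another replacement
		 * may be queued.
		 */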
		old_cred = *audit_credp;
		old_vp = *audit_vpp;
		*audit_credp = audit_replacement_cred;
		*audit_vpp = audit_replacement_vp;
		audit_replacement_cred = NULL;
		audit_replacement_vp = NULL;
		audit_replacement_flag = 0;

		audit_enabled = (*audit_vpp != NULL);

		if (old_vp != NULL) {
			AUDIT_PRINTF(("Closing old audit file\n"));
			mtx_unlock(&audit_mtx);
			vfslocked = VFS_LOCK_GIANT(old_vp->v_mount);
			vn_close(old_vp, AUDIT_CLOSE_FLAGS, old_cred,
			    audit_td);
			VFS_UNLOCK_GIANT(vfslocked);
			crfree(old_cred);
			mtx_lock(&audit_mtx);
			old_cred = NULL;
			old_vp = NULL;
			AUDIT_PRINTF(("Audit file closed\n"));
		}
		if (*audit_vpp != NULL) {
			AUDIT_PRINTF(("Opening new audit file\n"));
		}
		do_replacement_signal = 1;
	}

	/*
	 * Signal that a replacement has occurred, so as to wake up and start
	 * any other replacements requested in parallel.  We can go about our
	 * business in the meantime.  We broadcast so that new replacements
	 * can be inserted, but also so that the source(s) of the replacement
	 * can return successfully.
	 */
	if (do_replacement_signal)
		cv_broadcast(&audit_replacement_cv);
}

/*
 * Given a kernel audit record, process as required.  Kernel audit records
 * are converted to one, or possibly two, BSM records, depending on whether
 * a user audit record is also present.  Kernel records need to be converted
 * to BSM before they can be written out.  Both types will be written to
 * disk and to audit pipes.
 */
static void
audit_worker_process_record(struct vnode *audit_vp, struct ucred *audit_cred,
    struct thread *audit_td, struct kaudit_record *ar)
{
	struct au_record *bsm;
	au_class_t class;
	au_event_t event;
	au_id_t auid;
	int error, sorf;

	/*
	 * First, handle the user record, if any: commit to the system trail
	 * and audit pipes as selected.
	 */
	if ((ar->k_ar_commit & AR_COMMIT_USER) &&
	    (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL))
		audit_record_write(audit_vp, audit_cred, audit_td,
		    ar->k_udata, ar->k_ulen);

	if ((ar->k_ar_commit & AR_COMMIT_USER) &&
	    (ar->k_ar_commit & AR_PRESELECT_USER_PIPE))
		audit_pipe_submit_user(ar->k_udata, ar->k_ulen);

	if (!(ar->k_ar_commit & AR_COMMIT_KERNEL) ||
	    ((ar->k_ar_commit & AR_PRESELECT_PIPE) == 0 &&
	    (ar->k_ar_commit & AR_PRESELECT_TRAIL) == 0))
		return;

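	/*
	 * Gather the subject, event, class, and success/failure status used
	 * for audit pipe preselection below.
	 */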
	auid = ar->k_ar.ar_subj_auid;
	event = ar->k_ar.ar_event;
	class = au_event_class(event);
	if (ar->k_ar.ar_errno == 0)
		sorf = AU_PRS_SUCCESS;
	else
		sorf = AU_PRS_FAILURE;

	error = kaudit_to_bsm(ar, &bsm);
	switch (error) {
	case BSM_NOAUDIT:
		return;

	case BSM_FAILURE:
		printf("audit_worker_process_record: BSM_FAILURE\n");
		return;

	case BSM_SUCCESS:
		break;

	default:
		panic("kaudit_to_bsm returned %d", error);
	}

	if (ar->k_ar_commit & AR_PRESELECT_TRAIL)
		audit_record_write(audit_vp, audit_cred, audit_td, bsm->data,
		    bsm->len);

	if (ar->k_ar_commit & AR_PRESELECT_PIPE)
		audit_pipe_submit(auid, event, class, sorf,
		    ar->k_ar_commit & AR_PRESELECT_TRAIL, bsm->data,
		    bsm->len);

	kau_free(bsm);
}

/*
 * The audit_worker thread is responsible for watching the event queue,
 * dequeueing records, converting them to BSM format, and committing them to
 * disk.  In order to minimize lock thrashing, records are dequeued in sets
 * to a thread-local work queue.  In addition, audit_worker performs the
 * actual exchange of the audit log vnode pointer, as audit_vp is a
 * thread-local variable.
 */
static void
audit_worker(void *arg)
{
	struct kaudit_queue ar_worklist;
	struct kaudit_record *ar;
	struct ucred *audit_cred;
	struct thread *audit_td;
	struct vnode *audit_vp;
	int lowater_signal;

	AUDIT_PRINTF(("audit_worker starting\n"));

	/*
	 * These are thread-local variables requiring no synchronization.
	 */
	TAILQ_INIT(&ar_worklist);
	audit_cred = NULL;
	audit_td = curthread;
	audit_vp = NULL;

	mtx_lock(&audit_mtx);
	while (1) {
		mtx_assert(&audit_mtx, MA_OWNED);

		/*
		 * Wait for record or rotation events.
		 */
		while (!audit_replacement_flag && TAILQ_EMPTY(&audit_q)) {
			AUDIT_PRINTF(("audit_worker waiting\n"));
			cv_wait(&audit_worker_cv, &audit_mtx);
			AUDIT_PRINTF(("audit_worker woken up\n"));
			AUDIT_PRINTF(("audit_worker: new vp = %p; value of "
			    "flag %d\n", audit_replacement_vp,
			    audit_replacement_flag));
		}

		/*
		 * First priority: replace the audit log target if requested.
		 */
		audit_worker_rotate(&audit_cred, &audit_vp, audit_td);

		/*
		 * If there are records in the global audit record queue,
		 * transfer them to a thread-local queue and process them
		 * one by one.  If we cross the low watermark threshold,
		 * signal any waiting processes that they may wake up and
		 * continue generating records.
		 */
		lowater_signal = 0;
		while ((ar = TAILQ_FIRST(&audit_q))) {
			TAILQ_REMOVE(&audit_q, ar, k_q);
			audit_q_len--;
			if (audit_q_len == audit_qctrl.aq_lowater)
				lowater_signal++;
			TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q);
		}
		if (lowater_signal)
			cv_broadcast(&audit_watermark_cv);

		mtx_unlock(&audit_mtx);
		while ((ar = TAILQ_FIRST(&ar_worklist))) {
			TAILQ_REMOVE(&ar_worklist, ar, k_q);
			audit_worker_process_record(audit_vp, audit_cred,
			    audit_td, ar);
			audit_free(ar);
		}
		mtx_lock(&audit_mtx);
	}
}

/*
 * audit_rotate_vnode() is called by a user or kernel thread to configure or
 * de-configure auditing on a vnode.  The arguments are the replacement
 * credential and vnode to substitute for the current credential and vnode,
 * if any.  If either is set to NULL, both should be NULL, and this is used
 * to indicate that audit is being disabled.  The real work is done in the
 * audit_worker thread, but audit_rotate_vnode() waits synchronously for that
 * to complete.
 *
 * The vnode should be referenced and opened by the caller.  The credential
 * should be referenced.  audit_rotate_vnode() will own both references as of
 * this call, so the caller should not release either.
 *
 * XXXAUDIT: Review the synchronization and communication logic.  Really,
 * this is a message queue of depth 1.  We are essentially acquiring
 * ownership of the communications queue, inserting our message, and waiting
 * for an acknowledgement.
 */
void
audit_rotate_vnode(struct ucred *cred, struct vnode *vp)
{

	/*
	 * If other parallel log replacements have been requested, we wait
	 * until they've finished before continuing.
	 */
	mtx_lock(&audit_mtx);
	while (audit_replacement_flag != 0) {
		AUDIT_PRINTF(("audit_rotate_vnode: sleeping to wait for "
		    "flag\n"));
		cv_wait(&audit_replacement_cv, &audit_mtx);
		AUDIT_PRINTF(("audit_rotate_vnode: woken up (flag %d)\n",
		    audit_replacement_flag));
	}
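	/*
	 * Publish the replacement request; the audit_worker thread will
	 * install the new credential and vnode and clear these globals.
	 */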
	audit_replacement_cred = cred;
	audit_replacement_flag = 1;
	audit_replacement_vp = vp;

	/*
	 * Wake up the audit worker to perform the exchange once we
	 * release the mutex.
	 */
	cv_signal(&audit_worker_cv);

	/*
	 * Wait for the audit_worker to broadcast that a replacement has
	 * taken place; we know that once this has happened, our vnode has
	 * been swapped in, so we can return successfully.
	 */
	AUDIT_PRINTF(("audit_rotate_vnode: waiting for news of "
	    "replacement\n"));
	cv_wait(&audit_replacement_cv, &audit_mtx);
	AUDIT_PRINTF(("audit_rotate_vnode: change acknowledged by "
	    "audit_worker (flag now %d)\n", audit_replacement_flag));
	mtx_unlock(&audit_mtx);

	audit_file_rotate_wait = 0; /* We can now request another rotation */
}

void
audit_worker_init(void)
{
	int error;

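	/*
	 * Create the condition variable used to signal rotation completion,
	 * and start the worker thread.
	 */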
	cv_init(&audit_replacement_cv, "audit_replacement_cv");
	error = kthread_create(audit_worker, NULL, &audit_thread, RFHIGHPID,
	    0, "audit_worker");
	if (error)
		panic("audit_worker_init: kthread_create returned %d", error);
}