g_journal.c revision 328948
/*-
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/geom/journal/g_journal.c 328948 2018-02-06 19:17:40Z mckusick $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/eventhandler.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/sbuf.h>
#ifdef GJ_MEMDEBUG
#include <sys/stack.h>
#include <sys/kdb.h>
#endif
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <geom/geom.h>

#include <geom/journal/g_journal.h>

FEATURE(geom_journal, "GEOM journaling support");

/*
 * On-disk journal format:
 *
 * JH - Journal header
 * RH - Record header
 *
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 *
 */

CTASSERT(sizeof(struct g_journal_header) <= 512);
CTASSERT(sizeof(struct g_journal_record_header) <= 512);
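
/*
 * Illustrative sketch (hypothetical, not compiled): how offsets advance
 * through the layout above.  A record occupies one sector for its header,
 * followed immediately by the data blocks it describes, so the next header
 * starts right past the previous record's data.  The helper name below is
 * made up for illustration only.
 */
#if 0
static off_t
gj_example_next_header_offset(off_t rhoffset, u_int sectorsize,
    const struct g_journal_record_header *hdr)
{
	off_t offset;
	u_int i;

	offset = rhoffset + sectorsize;		/* Skip the record header. */
	for (i = 0; i < hdr->jrh_nentries; i++)	/* Skip every entry's data. */
		offset += hdr->jrh_entries[i].je_length;
	return (offset);
}
#endif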

static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
static struct mtx g_journal_cache_mtx;
MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);

const struct g_journal_desc *g_journal_filesystems[] = {
	&g_journal_ufs,
	NULL
};

SYSCTL_DECL(_kern_geom);

int g_journal_debug = 0;
TUNABLE_INT("kern.geom.journal.debug", &g_journal_debug);
static u_int g_journal_switch_time = 10;
static u_int g_journal_force_switch = 70;
static u_int g_journal_parallel_flushes = 16;
static u_int g_journal_parallel_copies = 16;
static u_int g_journal_accept_immediately = 64;
static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
static u_int g_journal_do_optimize = 1;

static SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0,
    "GEOM_JOURNAL stuff");
SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RW, &g_journal_debug, 0,
    "Debug level");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
    &g_journal_switch_time, 0, "Switch journals every N seconds");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
    &g_journal_force_switch, 0, "Force switch when journal is N% full");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
    &g_journal_parallel_flushes, 0,
    "Number of flush I/O requests to send in parallel");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
    &g_journal_accept_immediately, 0,
    "Number of I/O requests accepted immediately");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
    &g_journal_parallel_copies, 0,
    "Number of copy I/O requests to send in parallel");
static int
g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int entries;
	int error;

	entries = g_journal_record_entries;
	error = sysctl_handle_int(oidp, &entries, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	g_journal_record_entries = entries;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
    "Maximum number of entries in one journal record");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
    &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");

static u_long g_journal_cache_used = 0;
static u_long g_journal_cache_limit = 64 * 1024 * 1024;
TUNABLE_LONG("kern.geom.journal.cache.limit", &g_journal_cache_limit);
static u_int g_journal_cache_divisor = 2;
TUNABLE_INT("kern.geom.journal.cache.divisor", &g_journal_cache_divisor);
static u_int g_journal_cache_switch = 90;
static u_int g_journal_cache_misses = 0;
static u_int g_journal_cache_alloc_failures = 0;
static u_long g_journal_cache_low = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_JOURNAL cache");
SYSCTL_ULONG(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
    &g_journal_cache_used, 0, "Number of allocated bytes");
static int
g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_long limit;
	int error;

	limit = g_journal_cache_limit;
	error = sysctl_handle_long(oidp, &limit, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	g_journal_cache_limit = limit;
	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_RW, NULL, 0, g_journal_cache_limit_sysctl, "LU",
    "Maximum number of allocated bytes");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &g_journal_cache_divisor, 0,
    "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
static int
g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int cswitch;
	int error;

	cswitch = g_journal_cache_switch;
	error = sysctl_handle_int(oidp, &cswitch, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (cswitch > 100)
		return (EINVAL);
	g_journal_cache_switch = cswitch;
	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
    "Force switch when we hit this percent of cache use");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
    &g_journal_cache_misses, 0, "Number of cache misses");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
    &g_journal_cache_alloc_failures, 0, "Memory allocation failures");

static u_long g_journal_stats_bytes_skipped = 0;
static u_long g_journal_stats_combined_ios = 0;
static u_long g_journal_stats_switches = 0;
static u_long g_journal_stats_wait_for_copy = 0;
static u_long g_journal_stats_journal_full = 0;
static u_long g_journal_stats_low_mem = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
    "GEOM_JOURNAL statistics");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
    &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
    &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
    &g_journal_stats_switches, 0, "Number of journal switches");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
    &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
    &g_journal_stats_journal_full, 0,
    "Number of times journal was almost full.");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
    &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");

static g_taste_t g_journal_taste;
static g_ctl_req_t g_journal_config;
static g_dumpconf_t g_journal_dumpconf;
static g_init_t g_journal_init;
static g_fini_t g_journal_fini;

struct g_class g_journal_class = {
	.name = G_JOURNAL_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_journal_taste,
	.ctlreq = g_journal_config,
	.dumpconf = g_journal_dumpconf,
	.init = g_journal_init,
	.fini = g_journal_fini
};

static int g_journal_destroy(struct g_journal_softc *sc);
static void g_journal_metadata_update(struct g_journal_softc *sc);
static void g_journal_start_switcher(struct g_class *mp);
static void g_journal_stop_switcher(void);
static void g_journal_switch_wait(struct g_journal_softc *sc);

#define	GJ_SWITCHER_WORKING	0
#define	GJ_SWITCHER_DIE		1
#define	GJ_SWITCHER_DIED	2
static struct proc *g_journal_switcher_proc = NULL;
static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
static int g_journal_switcher_wokenup = 0;
static int g_journal_sync_requested = 0;

#ifdef GJ_MEMDEBUG
struct meminfo {
	size_t		mi_size;
	struct stack	mi_stack;
};
#endif

/*
 * We use our own malloc/realloc/free functions, so we can collect
 * statistics and force a journal switch when we're running out of cache.
 */
static void *
gj_malloc(size_t size, int flags)
{
	void *p;
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	mtx_lock(&g_journal_cache_mtx);
	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
	    g_journal_cache_used + size > g_journal_cache_low) {
		GJ_DEBUG(1, "No cache, waking up the switcher.");
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
	}
	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
	    g_journal_cache_used + size > g_journal_cache_limit) {
		mtx_unlock(&g_journal_cache_mtx);
		g_journal_cache_alloc_failures++;
		return (NULL);
	}
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	flags &= ~M_NOWAIT;
#ifndef GJ_MEMDEBUG
	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
#else
	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
	p = (u_char *)mi + sizeof(*mi);
	mi->mi_size = size;
	stack_save(&mi->mi_stack);
#endif
	return (p);
}

static void
gj_free(void *p, size_t size)
{
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	KASSERT(p != NULL, ("p=NULL"));
	KASSERT(size > 0, ("size=0"));
	mtx_lock(&g_journal_cache_mtx);
	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
	g_journal_cache_used -= size;
	mtx_unlock(&g_journal_cache_mtx);
#ifdef GJ_MEMDEBUG
	mi = p = (void *)((u_char *)p - sizeof(*mi));
	if (mi->mi_size != size) {
		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
		    mi->mi_size);
		printf("GJOURNAL: Alloc backtrace:\n");
		stack_print(&mi->mi_stack);
		printf("GJOURNAL: Free backtrace:\n");
		kdb_backtrace();
	}
#endif
	free(p, M_JOURNAL);
}

static void *
gj_realloc(void *p, size_t size, size_t oldsize)
{
	void *np;

#ifndef GJ_MEMDEBUG
	mtx_lock(&g_journal_cache_mtx);
	g_journal_cache_used -= oldsize;
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	np = realloc(p, size, M_JOURNAL, M_WAITOK);
#else
	np = gj_malloc(size, M_WAITOK);
	bcopy(p, np, MIN(oldsize, size));
	gj_free(p, oldsize);
#endif
	return (np);
}
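
/*
 * Usage sketch (hypothetical, not compiled): the allocator above keeps no
 * per-allocation bookkeeping outside of GJ_MEMDEBUG, so callers must pass
 * the exact allocation size back to gj_free() and gj_realloc(); a mismatch
 * trips the KASSERT in gj_free() (and, with GJ_MEMDEBUG, prints both the
 * allocation and free backtraces).
 */
#if 0
static void
gj_example_alloc_usage(void)
{
	u_char *buf;

	buf = gj_malloc(1024, M_WAITOK);	/* g_journal_cache_used += 1024 */
	buf = gj_realloc(buf, 2048, 1024);	/* accounting moves to 2048 */
	gj_free(buf, 2048);			/* g_journal_cache_used -= 2048 */
}
#endif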

static void
g_journal_check_overflow(struct g_journal_softc *sc)
{
	off_t length, used;

	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
		panic("Journal overflow "
		    "(id = %u joffset=%jd active=%jd inactive=%jd)",
		    (unsigned)sc->sc_id,
		    (intmax_t)sc->sc_journal_offset,
		    (intmax_t)sc->sc_active.jj_offset,
		    (intmax_t)sc->sc_inactive.jj_offset);
	}
	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
	} else {
		length = sc->sc_jend - sc->sc_active.jj_offset;
		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
		else {
			used = sc->sc_jend - sc->sc_active.jj_offset;
			used += sc->sc_journal_offset - sc->sc_jstart;
		}
	}
	/* Already woken up? */
	if (g_journal_switcher_wokenup)
		return;
	/*
	 * If the active journal takes more than g_journal_force_switch percent
	 * of the free journal space, we force a journal switch.
	 */
	KASSERT(length > 0,
	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
	    (intmax_t)length, (intmax_t)used,
	    (intmax_t)sc->sc_active.jj_offset,
	    (intmax_t)sc->sc_inactive.jj_offset,
	    (intmax_t)sc->sc_journal_offset));
	if ((used * 100) / length > g_journal_force_switch) {
		g_journal_stats_journal_full++;
		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
		    sc->sc_name, (used * 100) / length);
		mtx_lock(&g_journal_cache_mtx);
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
		mtx_unlock(&g_journal_cache_mtx);
	}
}
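
/*
 * Worked example for the wrapped case above, with hypothetical numbers:
 * jstart = 0, jend = 1000, active = 800, inactive = 200.  The space
 * available to the active journal is length = (jend - active) +
 * (inactive - jstart) = 200 + 200 = 400.  With joffset = 100 (already
 * wrapped around), used = (jend - active) + (joffset - jstart) =
 * 200 + 100 = 300, i.e. 75% -- above the default g_journal_force_switch
 * of 70, so the switcher is woken up.
 */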

static void
g_journal_orphan(struct g_consumer *cp)
{
	struct g_journal_softc *sc;
	char name[256];
	int error;

	g_topology_assert();
	sc = cp->geom->softc;
	strlcpy(name, cp->provider->name, sizeof(name));
	GJ_DEBUG(0, "Lost provider %s.", name);
	if (sc == NULL)
		return;
	error = g_journal_destroy(sc);
	if (error == 0)
		GJ_DEBUG(0, "Journal %s destroyed.", name);
	else {
		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
		    "Destroy it manually after last close.", sc->sc_name,
		    error);
	}
}

static int
g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_journal_softc *sc;
	int dcr, dcw, dce;

	g_topology_assert();
	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);

	dcr = pp->acr + acr;
	dcw = pp->acw + acw;
	dce = pp->ace + ace;

	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		else
			return (ENXIO);
	}
	if (pp->acw == 0 && dcw > 0) {
		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} /* else if (pp->acw > 0 && dcw == 0 && JEMPTY(sc)) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		sc->sc_flags |= GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} */
	return (0);
}

static void
g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
{

	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
	data += sizeof(GJ_HEADER_MAGIC);
	le32enc(data, hdr->jh_journal_id);
	data += 4;
	le32enc(data, hdr->jh_journal_next_id);
}

static int
g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
{

	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
	data += sizeof(hdr->jh_magic);
	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
		return (EINVAL);
	hdr->jh_journal_id = le32dec(data);
	data += 4;
	hdr->jh_journal_next_id = le32dec(data);
	return (0);
}
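
/*
 * Round-trip sketch (hypothetical, not compiled): encoding a header and
 * decoding it back is the identity on the id fields, an invariant worth
 * keeping in mind when touching the on-disk format.
 */
#if 0
static void
gj_example_header_roundtrip(void)
{
	struct g_journal_header in, out;
	u_char buf[512];

	bzero(buf, sizeof(buf));
	strlcpy(in.jh_magic, GJ_HEADER_MAGIC, sizeof(in.jh_magic));
	in.jh_journal_id = 0x01020304;
	in.jh_journal_next_id = 0x05060708;
	g_journal_header_encode(&in, buf);
	KASSERT(g_journal_header_decode(buf, &out) == 0 &&
	    out.jh_journal_id == in.jh_journal_id &&
	    out.jh_journal_next_id == in.jh_journal_next_id,
	    ("journal header round-trip failed"));
}
#endif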

static void
g_journal_flush_cache(struct g_journal_softc *sc)
{
	struct bintime bt;
	int error;

	if (sc->sc_bio_flush == 0)
		return;
	GJ_TIMER_START(1, &bt);
	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
		error = g_io_flush(sc->sc_jconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_jconsumer->provider->name, error);
	}
	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
		/*
		 * TODO: This could be called in parallel with the
		 *       previous call.
		 */
		error = g_io_flush(sc->sc_dconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_dconsumer->provider->name, error);
	}
	GJ_TIMER_STOP(1, &bt, "Cache flush time");
}

static int
g_journal_write_header(struct g_journal_softc *sc)
{
	struct g_journal_header hdr;
	struct g_consumer *cp;
	u_char *buf;
	int error;

	cp = sc->sc_jconsumer;
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);

	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
	hdr.jh_journal_id = sc->sc_journal_id;
	hdr.jh_journal_next_id = sc->sc_journal_next_id;
	g_journal_header_encode(&hdr, buf);
	error = g_write_data(cp, sc->sc_journal_offset, buf,
	    cp->provider->sectorsize);
	/* if (error == 0) */
	sc->sc_journal_offset += cp->provider->sectorsize;

	gj_free(buf, cp->provider->sectorsize);
	return (error);
}
/*
 * Every journal record has a header and data following it.
 * The functions below are used to encode the header to little endian before
 * storing it and to decode it back to the system's endianness after reading.
 */
static void
g_journal_record_header_encode(struct g_journal_record_header *hdr,
    u_char *data)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
	data += sizeof(GJ_RECORD_HEADER_MAGIC);
	le32enc(data, hdr->jrh_journal_id);
	data += 8;
	le16enc(data, hdr->jrh_nentries);
	data += 2;
	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		le64enc(data, ent->je_joffset);
		data += 8;
		le64enc(data, ent->je_offset);
		data += 8;
		le64enc(data, ent->je_length);
		data += 8;
	}
}

static int
g_journal_record_header_decode(const u_char *data,
    struct g_journal_record_header *hdr)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
	data += sizeof(hdr->jrh_magic);
	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
		return (EINVAL);
	hdr->jrh_journal_id = le32dec(data);
	data += 8;
	hdr->jrh_nentries = le16dec(data);
	data += 2;
	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		ent->je_joffset = le64dec(data);
		data += 8;
		ent->je_offset = le64dec(data);
		data += 8;
		ent->je_length = le64dec(data);
		data += 8;
	}
	return (0);
}
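
/*
 * The on-disk record header layout implied by the codecs above: the magic
 * string, the journal id in an 8-byte slot (only the low 4 bytes are
 * significant, the rest is reserved), a 2-byte entry count, the 8-byte
 * jrh_sum checksum, and then 24 bytes per entry (je_joffset, je_offset and
 * je_length, 8 bytes each), all multi-byte fields little endian.
 */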

/*
 * The function reads metadata from a provider (via the given consumer),
 * decodes it to the system's endianness and verifies its correctness.
 */
static int
g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = journal_metadata_decode(buf, md);
	g_free(buf);
	/* Is this a gjournal provider at all? */
	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
		return (EINVAL);
	/*
	 * Are we able to handle this version of metadata?
	 * We only maintain backward compatibility.
	 */
	if (md->md_version > G_JOURNAL_VERSION) {
		GJ_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	/* Is checksum correct? */
	if (error != 0) {
		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}
	return (0);
}

/*
 * The two functions below are responsible for updating metadata.
 * Only the metadata on the data provider is updated (we need to update
 * information about the active journal there).
 */
static void
g_journal_metadata_done(struct bio *bp)
{

	/*
	 * There is not much we can do on error except report it.
	 */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
		    bp->bio_error);
	} else {
		GJ_LOGREQ(2, bp, "Metadata updated.");
	}
	gj_free(bp->bio_data, bp->bio_length);
	g_destroy_bio(bp);
}

static void
g_journal_metadata_update(struct g_journal_softc *sc)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct bio *bp;
	u_char *sector;

	cp = sc->sc_dconsumer;
	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
	md.md_version = G_JOURNAL_VERSION;
	md.md_id = sc->sc_id;
	md.md_type = sc->sc_orig_type;
	md.md_jstart = sc->sc_jstart;
	md.md_jend = sc->sc_jend;
	md.md_joffset = sc->sc_inactive.jj_offset;
	md.md_jid = sc->sc_journal_previous_id;
	md.md_flags = 0;
	if (sc->sc_flags & GJF_DEVICE_CLEAN)
		md.md_flags |= GJ_FLAG_CLEAN;

	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
	else
		bzero(md.md_provider, sizeof(md.md_provider));
	md.md_provsize = cp->provider->mediasize;
	journal_metadata_encode(&md, sector);

	/*
	 * Flush the cache, so we know all data is on disk.
	 * We write information like "journal is consistent" here, so we need
	 * to be sure it is. Without BIO_FLUSH here, we could end up in a
	 * situation where metadata is stored on disk, but not all data.
	 */
	g_journal_flush_cache(sc);

	bp = g_alloc_bio();
	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = sector;
	bp->bio_cmd = BIO_WRITE;
	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
		bp->bio_done = g_journal_metadata_done;
		g_io_request(bp, cp);
	} else {
		bp->bio_done = NULL;
		g_io_request(bp, cp);
		biowait(bp, "gjmdu");
		g_journal_metadata_done(bp);
	}

	/*
	 * Be sure metadata reached the disk.
	 */
	g_journal_flush_cache(sc);
}

/*
 * This is where the I/O request comes from the GEOM.
 */
static void
g_journal_start(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_to->geom->softc;
	GJ_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->sc_mtx);
		bioq_insert_tail(&sc->sc_regular_queue, bp);
		wakeup(sc);
		mtx_unlock(&sc->sc_mtx);
		return;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
			bp->bio_completed = strlen(bp->bio_to->name) + 1;
			g_io_deliver(bp, 0);
			return;
		}
		/* FALLTHROUGH */
	case BIO_DELETE:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}

static void
g_journal_std_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_from->geom->softc;
	mtx_lock(&sc->sc_mtx);
	bioq_insert_tail(&sc->sc_back_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_mtx);
}

static struct bio *
g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
    int flags)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_offset = start;
	bp->bio_joffset = joffset;
	bp->bio_length = end - start;
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = g_journal_std_done;
	if (data == NULL)
		bp->bio_data = NULL;
	else {
		bp->bio_data = gj_malloc(bp->bio_length, flags);
		if (bp->bio_data != NULL)
			bcopy(data, bp->bio_data, bp->bio_length);
	}
	return (bp);
}

#define	g_journal_insert_bio(head, bp, flags)				\
	g_journal_insert((head), (bp)->bio_offset,			\
		(bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
		(bp)->bio_data, flags)
/*
 * The function below does a lot more than just insert a bio into the queue.
 * It keeps the queue sorted by offset and ensures that there is no duplicated
 * data (it combines bios where ranges overlap).
 *
 * The function returns the number of bios inserted (as a bio can be split).
 */
static int
g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
    u_char *data, int flags)
{
	struct bio *nbp, *cbp, *pbp;
	off_t cstart, cend;
	u_char *tmpdata;
	int n;

	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
	    joffset);
	n = 0;
	pbp = NULL;
	GJQ_FOREACH(*head, cbp) {
		cstart = cbp->bio_offset;
		cend = cbp->bio_offset + cbp->bio_length;

		if (nstart >= cend) {
			/*
			 *  +-------------+
			 *  |             |
			 *  |   current   |  +-------------+
			 *  |     bio     |  |             |
			 *  |             |  |     new     |
			 *  +-------------+  |     bio     |
			 *                   |             |
			 *                   +-------------+
			 */
			GJ_DEBUG(3, "INSERT(%p): 1", *head);
		} else if (nend <= cstart) {
			/*
			 *                   +-------------+
			 *                   |             |
			 *  +-------------+  |   current   |
			 *  |             |  |     bio     |
			 *  |     new     |  |             |
			 *  |     bio     |  +-------------+
			 *  |             |
			 *  +-------------+
			 */
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
			    pbp);
			goto end;
		} else if (nstart <= cstart && nend >= cend) {
			/*
			 *      +-------------+      +-------------+
			 *      | current bio |      | current bio |
			 *  +---+-------------+---+  +-------------+---+
			 *  |   |             |   |  |             |   |
			 *  |   |             |   |  |             |   |
			 *  |   +-------------+   |  +-------------+   |
			 *  |       new bio       |  |     new bio     |
			 *  +---------------------+  +-----------------+
			 *
			 *      +-------------+  +-------------+
			 *      | current bio |  | current bio |
			 *  +---+-------------+  +-------------+
			 *  |   |             |  |             |
			 *  |   |             |  |             |
			 *  |   +-------------+  +-------------+
			 *  |     new bio     |  |   new bio   |
			 *  +-----------------+  +-------------+
			 */
			g_journal_stats_bytes_skipped += cbp->bio_length;
			cbp->bio_offset = nstart;
			cbp->bio_joffset = joffset;
			cbp->bio_length = cend - nstart;
			if (cbp->bio_data != NULL) {
				gj_free(cbp->bio_data, cend - cstart);
				cbp->bio_data = NULL;
			}
			if (data != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(data, cbp->bio_data,
					    cbp->bio_length);
				}
				data += cend - nstart;
			}
			joffset += cend - nstart;
			nstart = cend;
			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend >= cend) {
			/*
			 *  +-----------------+  +-------------+
			 *  |   current bio   |  | current bio |
			 *  |   +-------------+  |   +---------+---+
			 *  |   |             |  |   |         |   |
			 *  |   |             |  |   |         |   |
			 *  +---+-------------+  +---+---------+   |
			 *      |   new bio   |      |   new bio   |
			 *      +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += cend - nstart;
			nbp = g_journal_new_bio(nstart, cend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			if (data != NULL)
				data += cend - nstart;
			joffset += cend - nstart;
			nstart = cend;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend < cend) {
			/*
			 *  +---------------------+
			 *  |     current bio     |
			 *  |   +-------------+   |
			 *  |   |             |   |
			 *  |   |             |   |
			 *  +---+-------------+---+
			 *      |   new bio   |
			 *      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			if (cbp->bio_data == NULL)
				tmpdata = NULL;
			else
				tmpdata = cbp->bio_data + nend - cstart;
			nbp = g_journal_new_bio(nend, cend,
			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
			((struct bio *)cbp->bio_next)->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			n += 2;
			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
			goto end;
		} else if (nstart <= cstart && nend < cend) {
			/*
			 *  +-----------------+      +-------------+
			 *  |   current bio   |      | current bio |
			 *  +-------------+   |  +---+---------+   |
			 *  |             |   |  |   |         |   |
			 *  |             |   |  |   |         |   |
			 *  +-------------+---+  |   +---------+---+
			 *  |   new bio   |      |   new bio   |
			 *  +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			cbp->bio_offset = nend;
			cbp->bio_length = cend - nend;
			cbp->bio_joffset += nend - cstart;
			tmpdata = cbp->bio_data;
			if (tmpdata != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(tmpdata + nend - cstart,
					    cbp->bio_data, cbp->bio_length);
				}
				gj_free(tmpdata, cend - cstart);
			}
			n++;
			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
			goto end;
		}
		if (nstart == nend)
			goto end;
		pbp = cbp;
	}
	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
	if (pbp == NULL)
		*head = nbp;
	else
		pbp->bio_next = nbp;
	nbp->bio_next = NULL;
	n++;
	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
end:
	if (g_journal_debug >= 3) {
		GJQ_FOREACH(*head, cbp) {
			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
			    (intmax_t)cbp->bio_offset,
			    (intmax_t)cbp->bio_length,
			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
		}
		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
	}
	return (n);
}
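
/*
 * Worked example with hypothetical ranges: if the queue holds one bio
 * covering [0, 100) and we insert [50, 150) with fresh data, the fourth
 * case above trims the old bio to [0, 50) and inserts a new [50, 100) bio,
 * then the tail code appends [100, 150); the function returns 2 and the
 * overlapped range ends up holding the newest data.
 */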

/*
 * The function combines neighbouring bios, trying to squeeze as much data
 * as possible into one bio.
 *
 * The function returns the number of bios combined (as a negative value).
 */
static int
g_journal_optimize(struct bio *head)
{
	struct bio *cbp, *pbp;
	int n;

	n = 0;
	pbp = NULL;
	GJQ_FOREACH(head, cbp) {
		/* Skip bios which have to be read first. */
		if (cbp->bio_data == NULL) {
			pbp = NULL;
			continue;
		}
		/* There is no previous bio yet. */
		if (pbp == NULL) {
			pbp = cbp;
			continue;
		}
		/* Is this a neighbour bio? */
		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
			/* Make sure the bio queue is sorted. */
			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
			    ("poffset=%jd plength=%jd coffset=%jd",
			    (intmax_t)pbp->bio_offset,
			    (intmax_t)pbp->bio_length,
			    (intmax_t)cbp->bio_offset));
			pbp = cbp;
			continue;
		}
		/* Make sure we don't end up with a bio that is too big. */
		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
			pbp = cbp;
			continue;
		}
		/* Ok, we can join bios. */
		GJ_LOGREQ(4, pbp, "Join: ");
		GJ_LOGREQ(4, cbp, "and: ");
		pbp->bio_data = gj_realloc(pbp->bio_data,
		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
		    cbp->bio_length);
		gj_free(cbp->bio_data, cbp->bio_length);
		pbp->bio_length += cbp->bio_length;
		pbp->bio_next = cbp->bio_next;
		g_destroy_bio(cbp);
		cbp = pbp;
		g_journal_stats_combined_ios++;
		n--;
		GJ_LOGREQ(4, pbp, "Got: ");
	}
	return (n);
}
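
/*
 * Example with hypothetical ranges: two in-memory bios covering [0, 50)
 * and [50, 100) get joined into a single [0, 100) bio and the function
 * returns -1, which the callers add to their in-progress counters.
 */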

/*
 * TODO: Update comment.
 * These are the functions responsible for copying one portion of data from
 * the journal to the destination provider.
 * The order goes like this:
 * 1. Read the header, which contains information about the data blocks
 *    following it.
 * 2. Read the data blocks from the journal.
 * 3. Write the data blocks to the data provider.
 *
 * g_journal_copy_start()
 * g_journal_copy_done() - got a finished write request, logs potential errors.
 */

/*
 * When there is no data in the cache, this function is used to read it back.
 */
static void
g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
{
	struct bio *cbp;

	/*
	 * We were short on memory, so the data was freed.
	 * In that case we need to read it back from the journal.
	 */
	cbp = g_alloc_bio();
	cbp->bio_cflags = bp->bio_cflags;
	cbp->bio_parent = bp;
	cbp->bio_offset = bp->bio_joffset;
	cbp->bio_length = bp->bio_length;
	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
	cbp->bio_cmd = BIO_READ;
	cbp->bio_done = g_journal_std_done;
	GJ_LOGREQ(4, cbp, "READ FIRST");
	g_io_request(cbp, sc->sc_jconsumer);
	g_journal_cache_misses++;
}

static void
g_journal_copy_send(struct g_journal_softc *sc)
{
	struct bio *bioq, *bp, *lbp;

	bioq = lbp = NULL;
	mtx_lock(&sc->sc_mtx);
	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
		if (bp == NULL)
			break;
		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
		sc->sc_copy_in_progress++;
		GJQ_INSERT_AFTER(bioq, bp, lbp);
		lbp = bp;
	}
	mtx_unlock(&sc->sc_mtx);
	if (g_journal_do_optimize)
		sc->sc_copy_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
		bp->bio_cflags = GJ_BIO_COPY;
		if (bp->bio_data == NULL)
			g_journal_read_first(sc, bp);
		else {
			bp->bio_joffset = 0;
			GJ_LOGREQ(4, bp, "SEND");
			g_io_request(bp, sc->sc_dconsumer);
		}
	}
}

static void
g_journal_copy_start(struct g_journal_softc *sc)
{

	/*
	 * Remember in the metadata that we're starting to copy journaled
	 * data to the data provider.
	 * In case of a power failure, we will copy this data once again on
	 * boot.
	 */
	if (!sc->sc_journal_copying) {
		sc->sc_journal_copying = 1;
		GJ_DEBUG(1, "Starting copy of journal.");
		g_journal_metadata_update(sc);
	}
	g_journal_copy_send(sc);
}

/*
 * A data block has been read from the journal provider.
 */
static int
g_journal_copy_read_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	pbp = bp->bio_parent;

	if (bp->bio_error != 0) {
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
		/*
		 * We will not be able to deliver the WRITE request either.
		 */
		gj_free(bp->bio_data, bp->bio_length);
		g_destroy_bio(pbp);
		g_destroy_bio(bp);
		sc->sc_copy_in_progress--;
		return (1);
	}
	pbp->bio_data = bp->bio_data;
	cp = sc->sc_dconsumer;
	g_io_request(pbp, cp);
	GJ_LOGREQ(4, bp, "READ DONE");
	g_destroy_bio(bp);
	return (0);
}

/*
 * A data block has been written to the data provider.
 */
static void
g_journal_copy_write_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	sc->sc_copy_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	GJQ_REMOVE(sc->sc_copy_queue, bp);
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);

	if (sc->sc_copy_in_progress == 0) {
		/*
		 * This was the last write request for this journal.
		 */
		GJ_DEBUG(1, "Data has been copied.");
		sc->sc_journal_copying = 0;
	}
}

static void g_journal_flush_done(struct bio *bp);

/*
 * Flush one record onto the active journal provider.
 */
static void
g_journal_flush(struct g_journal_softc *sc)
{
	struct g_journal_record_header hdr;
	struct g_journal_entry *ent;
	struct g_provider *pp;
	struct bio **bioq;
	struct bio *bp, *fbp, *pbp;
	off_t joffset;
	u_char *data, hash[16];
	MD5_CTX ctx;
	u_int i;

	if (sc->sc_current_count == 0)
		return;

	pp = sc->sc_jprovider;
	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
	joffset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
	    sc->sc_current_count, pp->name, (intmax_t)joffset);

	/*
	 * Store the 'journal id', so we know which journal this record
	 * belongs to.
	 */
	hdr.jrh_journal_id = sc->sc_journal_id;
	/*
	 * Could be less than g_journal_record_entries if called due to a
	 * timeout.
	 */
	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));

	bioq = &sc->sc_active.jj_queue;
	GJQ_LAST(sc->sc_flush_queue, pbp);

	fbp = g_alloc_bio();
	fbp->bio_parent = NULL;
	fbp->bio_cflags = GJ_BIO_JOURNAL;
	fbp->bio_offset = -1;
	fbp->bio_joffset = joffset;
	fbp->bio_length = pp->sectorsize;
	fbp->bio_cmd = BIO_WRITE;
	fbp->bio_done = g_journal_std_done;
	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
	pbp = fbp;
	fbp->bio_to = pp;
	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
	joffset += pp->sectorsize;
	sc->sc_flush_count++;
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
		MD5Init(&ctx);

	for (i = 0; i < hdr.jrh_nentries; i++) {
		bp = sc->sc_current_queue;
		KASSERT(bp != NULL, ("NULL bp"));
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSHED");
		sc->sc_current_queue = bp->bio_next;
		bp->bio_next = NULL;
		sc->sc_current_count--;

		/* Add to the header. */
		ent = &hdr.jrh_entries[i];
		ent->je_offset = bp->bio_offset;
		ent->je_joffset = joffset;
		ent->je_length = bp->bio_length;

		data = bp->bio_data;
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Update(&ctx, data, ent->je_length);
		bzero(bp, sizeof(*bp));
		bp->bio_cflags = GJ_BIO_JOURNAL;
		bp->bio_offset = ent->je_offset;
		bp->bio_joffset = ent->je_joffset;
		bp->bio_length = ent->je_length;
		bp->bio_data = data;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_done = g_journal_std_done;
		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
		pbp = bp;
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSH_OUT");
		joffset += bp->bio_length;
		sc->sc_flush_count++;

		/*
		 * Add the request to the active sc_journal_queue queue.
		 * This is our cache. After a journal switch we don't have to
		 * read the data from the inactive journal, because we keep
		 * it in memory.
		 */
		g_journal_insert(bioq, ent->je_offset,
		    ent->je_offset + ent->je_length, ent->je_joffset, data,
		    M_NOWAIT);
	}

	/*
	 * After all requests, store a valid header.
	 */
	data = gj_malloc(pp->sectorsize, M_WAITOK);
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
		MD5Final(hash, &ctx);
		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
	}
	g_journal_record_header_encode(&hdr, data);
	fbp->bio_data = data;

	sc->sc_journal_offset = joffset;

	g_journal_check_overflow(sc);
}

/*
 * Flush request finished.
 */
static void
g_journal_flush_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;

	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));

	cp = bp->bio_from;
	sc = cp->geom->softc;
	sc->sc_flush_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);
}

static void g_journal_release_delayed(struct g_journal_softc *sc);

static void
g_journal_flush_send(struct g_journal_softc *sc)
{
	struct g_consumer *cp;
	struct bio *bioq, *bp, *lbp;

	cp = sc->sc_jconsumer;
	bioq = lbp = NULL;
	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
		/* Send one flush request to the active journal. */
		bp = GJQ_FIRST(sc->sc_flush_queue);
		if (bp != NULL) {
			GJQ_REMOVE(sc->sc_flush_queue, bp);
			sc->sc_flush_count--;
			bp->bio_offset = bp->bio_joffset;
			bp->bio_joffset = 0;
			sc->sc_flush_in_progress++;
			GJQ_INSERT_AFTER(bioq, bp, lbp);
			lbp = bp;
		}
		/* Try to release delayed requests. */
		g_journal_release_delayed(sc);
		/* If there are no requests to flush, leave. */
		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
			break;
	}
	if (g_journal_do_optimize)
		sc->sc_flush_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJ_LOGREQ(3, bp, "Flush request sent");
		g_io_request(bp, cp);
	}
}

static void
g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
{
	int n;

	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
	sc->sc_current_count += n;
	n = g_journal_optimize(sc->sc_current_queue);
	sc->sc_current_count += n;
	/*
	 * For requests which are added to the current queue we deliver the
	 * response immediately.
	 */
	bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, 0);
	if (sc->sc_current_count >= g_journal_record_entries) {
		/*
		 * Let's flush one record onto the active journal provider.
		 */
		g_journal_flush(sc);
	}
}

static void
g_journal_release_delayed(struct g_journal_softc *sc)
{
	struct bio *bp;

	for (;;) {
		/* The flush queue is full, exit. */
		if (sc->sc_flush_count >= g_journal_accept_immediately)
			return;
		bp = bioq_takefirst(&sc->sc_delayed_queue);
		if (bp == NULL)
			return;
		sc->sc_delayed_count--;
		g_journal_add_current(sc, bp);
	}
}

/*
 * Add an I/O request to the current queue. If we have enough requests for one
 * journal record, we flush them onto the active journal provider.
 */
static void
g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
{

	/*
	 * The flush queue is full, we need to delay the request.
	 */
	if (sc->sc_delayed_count > 0 ||
	    sc->sc_flush_count >= g_journal_accept_immediately) {
		GJ_LOGREQ(4, bp, "DELAYED");
		bioq_insert_tail(&sc->sc_delayed_queue, bp);
		sc->sc_delayed_count++;
		return;
	}

	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
	    ("DELAYED queue not empty."));
	g_journal_add_current(sc, bp);
}

static void g_journal_read_done(struct bio *bp);

/*
 * Try to find the requested data in the cache.
 */
static struct bio *
g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	GJQ_FOREACH(head, bp) {
		if (bp->bio_offset == -1)
			continue;
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend) {
			if (!sorted)
				continue;
			else {
				bp = NULL;
				break;
			}
		}
		if (bp->bio_data == NULL)
			break;
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in the cache, deliver
			 * a happy message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * This function is used for collecting data on read.
 * The complexity comes from the fact that parts of the data can be stored in
 * four different places:
 * - in memory - data not yet sent to the active journal provider
 * - in the active journal
 * - in the inactive journal
 * - in the data provider
 */
static void
g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
    off_t oend)
{
	struct bio *bp, *nbp, *head;
	off_t cstart, cend;
	u_int i, sorted = 0;

	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);

	cstart = cend = -1;
	bp = NULL;
	head = NULL;
	for (i = 1; i <= 5; i++) {
		switch (i) {
		case 1:	/* Not-yet-sent data. */
			head = sc->sc_current_queue;
			sorted = 1;
			break;
		case 2: /* Skip flush queue; bios are in the active queue. */
			continue;
		case 3:	/* Active journal. */
			head = sc->sc_active.jj_queue;
			sorted = 1;
			break;
		case 4:	/* Inactive journal. */
			/*
			 * XXX: There could be a race with
			 *      g_journal_lowmem() here.
			 */
			head = sc->sc_inactive.jj_queue;
			sorted = 1;
			break;
		case 5:	/* In-flight to the data provider. */
			head = sc->sc_copy_queue;
			sorted = 0;
			break;
		default:
			panic("gjournal %s: i=%d", __func__, i);
		}
		bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
		if (bp == pbp) { /* Got the whole request. */
			GJ_DEBUG(2, "Got the whole request from %u.", i);
			return;
		} else if (bp != NULL) {
			cstart = MAX(ostart, bp->bio_offset);
			cend = MIN(oend, bp->bio_offset + bp->bio_length);
			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
			    i, (intmax_t)cstart, (intmax_t)cend);
			break;
		}
	}
	if (bp != NULL) {
		if (bp->bio_data == NULL) {
			nbp = g_duplicate_bio(pbp);
			nbp->bio_cflags = GJ_BIO_READ;
			nbp->bio_data =
			    pbp->bio_data + cstart - pbp->bio_offset;
			nbp->bio_offset =
			    bp->bio_joffset + cstart - bp->bio_offset;
			nbp->bio_length = cend - cstart;
			nbp->bio_done = g_journal_read_done;
			g_io_request(nbp, sc->sc_jconsumer);
		}
		/*
		 * If we don't have the whole request yet, call g_journal_read()
		 * recursively.
		 */
		if (ostart < cstart)
			g_journal_read(sc, pbp, ostart, cstart);
		if (oend > cend)
			g_journal_read(sc, pbp, cend, oend);
	} else {
		/*
		 * No data in memory, no data in the journal.
		 * It's time to ask the data provider.
		 */
		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
		nbp = g_duplicate_bio(pbp);
		nbp->bio_cflags = GJ_BIO_READ;
		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
		nbp->bio_offset = ostart;
		nbp->bio_length = oend - ostart;
		nbp->bio_done = g_journal_read_done;
		g_io_request(nbp, sc->sc_dconsumer);
		/* We have the whole request, return here. */
		return;
	}
}

/*
 * The function responsible for handling finished READ requests.
 * Actually, g_std_done() could be used here; the only difference is that we
 * log the error.
 */
static void
g_journal_read_done(struct bio *bp)
{
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_READ,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));

	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	pbp->bio_completed += bp->bio_length;

	if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
	}
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed &&
	    pbp->bio_completed == pbp->bio_length) {
		/* We're done. */
		g_io_deliver(pbp, 0);
	}
}

/*
 * Deactivate the current journal and activate the next one.
 */
static void
g_journal_switch(struct g_journal_softc *sc)
{
	struct g_provider *pp;

	if (JEMPTY(sc)) {
		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
		pp = LIST_FIRST(&sc->sc_geom->provider);
		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
			sc->sc_flags |= GJF_DEVICE_CLEAN;
			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
			g_journal_metadata_update(sc);
		}
	} else {
		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);

		pp = sc->sc_jprovider;

		sc->sc_journal_previous_id = sc->sc_journal_id;

		sc->sc_journal_id = sc->sc_journal_next_id;
		sc->sc_journal_next_id = arc4random();

		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);

		g_journal_write_header(sc);

		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;

		sc->sc_active.jj_offset =
		    sc->sc_journal_offset - pp->sectorsize;
		sc->sc_active.jj_queue = NULL;

		/*
		 * The switch is done; start copying data from the (now)
		 * inactive journal to the data provider.
		 */
		g_journal_copy_start(sc);
	}
	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
	mtx_unlock(&sc->sc_mtx);
}

static void
g_journal_initialize(struct g_journal_softc *sc)
{

	sc->sc_journal_id = arc4random();
	sc->sc_journal_next_id = arc4random();
	sc->sc_journal_previous_id = sc->sc_journal_id;
	sc->sc_journal_offset = sc->sc_jstart;
	sc->sc_inactive.jj_offset = sc->sc_jstart;
	g_journal_write_header(sc);
	sc->sc_active.jj_offset = sc->sc_jstart;
}

static void
g_journal_mark_as_dirty(struct g_journal_softc *sc)
{
	const struct g_journal_desc *desc;
	int i;

	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
		desc->jd_dirty(sc->sc_dconsumer);
}

/*
 * The function reads a record header from the given journal.
 * It is very similar to g_read_data(9), but it doesn't allocate memory for
 * the bio and data on every call.
 */
static int
g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
    void *data)
{
	int error;

	bzero(bp, sizeof(*bp));
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = data;
	g_io_request(bp, cp);
	error = biowait(bp, "gjs_read");
	return (error);
}

#if 0
/*
 * The function is called when we start the journal device and detect that
 * one of the journals was not fully copied.
 * The purpose of this function is to read all record headers from the
 * journal and place them in the inactive queue, so we can start the journal
 * synchronization process and the journal provider itself.
 * The design decision was to not synchronize the whole journal here, as that
 * can take too much time. Reading only the headers and delaying the
 * synchronization process until after the journal provider is started should
 * be the best choice.
 */
#endif

static void
g_journal_sync(struct g_journal_softc *sc)
{
	struct g_journal_record_header rhdr;
	struct g_journal_entry *ent;
	struct g_journal_header jhdr;
	struct g_consumer *cp;
	struct bio *bp, *fbp, *tbp;
	off_t joffset, offset;
	u_char *buf, sum[16];
	uint64_t id;
	MD5_CTX ctx;
	int error, found, i;

	found = 0;
	fbp = NULL;
	cp = sc->sc_jconsumer;
	bp = g_alloc_bio();
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);

	/*
	 * Read and decode the first journal header.
	 */
	error = g_journal_sync_read(cp, bp, offset, buf);
	if (error != 0) {
		GJ_DEBUG(0, "Error while reading journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	error = g_journal_header_decode(buf, &jhdr);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot decode journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	id = sc->sc_journal_id;
	if (jhdr.jh_journal_id != sc->sc_journal_id) {
		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
		goto end;
	}
	offset += cp->provider->sectorsize;
	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1801
1802	for (;;) {
1803		/*
1804		 * If the biggest record won't fit at this offset, wrap around
1805		 * and look for a record or journal header at the beginning.
1806		 */
1807		GJ_VALIDATE_OFFSET(offset, sc);
1808		error = g_journal_sync_read(cp, bp, offset, buf);
1809		if (error != 0) {
1810			/*
1811			 * Not good. An error while reading a header means
1812			 * that we cannot read the subsequent headers, and
1813			 * consequently we cannot find the termination.
1814			 */
1815			GJ_DEBUG(0,
1816			    "Error while reading record header from %s.",
1817			    cp->provider->name);
1818			break;
1819		}
1820
1821		error = g_journal_record_header_decode(buf, &rhdr);
1822		if (error != 0) {
1823			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1824			    (intmax_t)offset, error);
1825			/*
1826			 * This is not a record header.
1827			 * If we are lucky, this is the next journal header.
1828			 */
1829			error = g_journal_header_decode(buf, &jhdr);
1830			if (error != 0) {
1831				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1832				    (intmax_t)offset, error);
1833				/*
1834				 * Nope, this is not a journal header, which
1835				 * basically means that the journal is not
1836				 * properly terminated.
1837				 */
1838				error = ENOENT;
1839				break;
1840			}
1841			/*
1842			 * OK. This is the header of _some_ journal. Now we need
1843			 * to verify that it is the header of the _next_ journal.
1844			 */
1845			if (jhdr.jh_journal_id != id) {
1846				GJ_DEBUG(1, "Journal ID mismatch at %jd "
1847				    "(0x%08x != 0x%08x).", (intmax_t)offset,
1848				    (u_int)jhdr.jh_journal_id, (u_int)id);
1849				error = ENOENT;
1850				break;
1851			}
1852
1853			/* Found termination. */
1854			found++;
1855			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1856			    (intmax_t)offset, (u_int)id);
1857			sc->sc_active.jj_offset = offset;
1858			sc->sc_journal_offset =
1859			    offset + cp->provider->sectorsize;
1860			sc->sc_journal_id = id;
1861			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1862
1863			while ((tbp = fbp) != NULL) {
1864				fbp = tbp->bio_next;
1865				GJ_LOGREQ(3, tbp, "Adding request.");
1866				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1867				    tbp, M_WAITOK);
1868			}
1869
1870			/* Skip journal's header. */
1871			offset += cp->provider->sectorsize;
1872			continue;
1873		}
1874
1875		/* Skip record's header. */
1876		offset += cp->provider->sectorsize;
1877
1878		/*
1879		 * Add information about every record entry to the inactive
1880		 * queue.
1881		 */
1882		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1883			MD5Init(&ctx);
1884		for (i = 0; i < rhdr.jrh_nentries; i++) {
1885			ent = &rhdr.jrh_entries[i];
1886			GJ_DEBUG(3, "Insert entry: %jd %jd.",
1887			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1888			g_journal_insert(&fbp, ent->je_offset,
1889			    ent->je_offset + ent->je_length, ent->je_joffset,
1890			    NULL, M_WAITOK);
1891			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1892				u_char *buf2;
1893
1894				/*
1895				 * TODO: Should use faster function (like
1896				 *       g_journal_sync_read()).
1897				 */
1898				buf2 = g_read_data(cp, offset, ent->je_length,
1899				    NULL);
1900				if (buf2 == NULL)
1901					GJ_DEBUG(0, "Cannot read data at %jd.",
1902					    (intmax_t)offset);
1903				else {
1904					MD5Update(&ctx, buf2, ent->je_length);
1905					g_free(buf2);
1906				}
1907			}
1908			/* Skip entry's data. */
1909			offset += ent->je_length;
1910		}
1911		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1912			MD5Final(sum, &ctx);
1913			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1914				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1915				    (intmax_t)offset);
1916			}
1917		}
1918	}
1919end:
1920	gj_free(bp->bio_data, cp->provider->sectorsize);
1921	g_destroy_bio(bp);
1922
1923	/* Remove bios from the unterminated journal. */
1924	while ((tbp = fbp) != NULL) {
1925		fbp = tbp->bio_next;
1926		g_destroy_bio(tbp);
1927	}
1928
1929	if (found < 1 && joffset > 0) {
1930		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
1931		    sc->sc_name);
1932		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
1933			sc->sc_inactive.jj_queue = tbp->bio_next;
1934			g_destroy_bio(tbp);
1935		}
1936		g_journal_initialize(sc);
1937		g_journal_mark_as_dirty(sc);
1938	} else {
1939		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
1940		g_journal_copy_start(sc);
1941	}
1942}
1943
1944/*
1945 * Wait for requests.
1946 * If we have requests in the current queue, flush them 3 seconds after the
1947 * last flush. This way we don't wait forever (or until a journal switch) to
1948 * store incomplete records in the journal.
1949 */
1950static void
1951g_journal_wait(struct g_journal_softc *sc, time_t last_write)
1952{
1953	int error, timeout;
1954
1955	GJ_DEBUG(3, "%s: enter", __func__);
1956	if (sc->sc_current_count == 0) {
1957		if (g_journal_debug < 2)
1958			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
1959		else {
1960			/*
1961			 * If we have debugging turned on, show the number of
1962			 * elements in the various queues.
1963			 */
1964			for (;;) {
1965				error = msleep(sc, &sc->sc_mtx, PRIBIO,
1966				    "gj:work", hz * 3);
1967				if (error == 0) {
1968					mtx_unlock(&sc->sc_mtx);
1969					break;
1970				}
1971				GJ_DEBUG(3, "Report: current count=%d",
1972				    sc->sc_current_count);
1973				GJ_DEBUG(3, "Report: flush count=%d",
1974				    sc->sc_flush_count);
1975				GJ_DEBUG(3, "Report: flush in progress=%d",
1976				    sc->sc_flush_in_progress);
1977				GJ_DEBUG(3, "Report: copy in progress=%d",
1978				    sc->sc_copy_in_progress);
1979				GJ_DEBUG(3, "Report: delayed=%d",
1980				    sc->sc_delayed_count);
1981			}
1982		}
1983		GJ_DEBUG(3, "%s: exit 1", __func__);
1984		return;
1985	}
1986
1987	/*
1988	 * Flush records every 3 seconds, even if they are not full.
1989	 */
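	/*
	 * For example, if the last write happened 2 seconds ago, we sleep
	 * for the remaining 1 second (converted to ticks below); if 3 or
	 * more seconds have already passed, we flush immediately.
	 */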
1990	timeout = (last_write + 3 - time_second) * hz;
1991	if (timeout <= 0) {
1992		mtx_unlock(&sc->sc_mtx);
1993		g_journal_flush(sc);
1994		g_journal_flush_send(sc);
1995		GJ_DEBUG(3, "%s: exit 2", __func__);
1996		return;
1997	}
1998	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
1999	if (error == EWOULDBLOCK)
2000		g_journal_flush_send(sc);
2001	GJ_DEBUG(3, "%s: exit 3", __func__);
2002}
2003
2004/*
2005 * Worker thread.
2006 */
2007static void
2008g_journal_worker(void *arg)
2009{
2010	struct g_journal_softc *sc;
2011	struct g_geom *gp;
2012	struct g_provider *pp;
2013	struct bio *bp;
2014	time_t last_write;
2015	int type;
2016
2017	thread_lock(curthread);
2018	sched_prio(curthread, PRIBIO);
2019	thread_unlock(curthread);
2020
2021	sc = arg;
2022	type = 0;	/* silence a spurious gcc warning */
2023
2024	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
2025		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
2026		g_journal_initialize(sc);
2027	} else {
2028		g_journal_sync(sc);
2029	}
2030	/*
2031	 * Check if we can use BIO_FLUSH.
2032	 */
2033	sc->sc_bio_flush = 0;
2034	if (g_io_flush(sc->sc_jconsumer) == 0) {
2035		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
2036		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2037		    sc->sc_jconsumer->provider->name);
2038	} else {
2039		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2040		    sc->sc_jconsumer->provider->name);
2041	}
2042	if (sc->sc_jconsumer != sc->sc_dconsumer) {
2043		if (g_io_flush(sc->sc_dconsumer) == 0) {
2044			sc->sc_bio_flush |= GJ_FLUSH_DATA;
2045			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2046			    sc->sc_dconsumer->provider->name);
2047		} else {
2048			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2049			    sc->sc_dconsumer->provider->name);
2050		}
2051	}
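
	/*
	 * The sc_bio_flush bits recorded above are consulted later when
	 * deciding whether sending BIO_FLUSH is worthwhile (see the
	 * g_journal_flush_cache() call before a journal switch below).
	 */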
2052
2053	gp = sc->sc_geom;
2054	g_topology_lock();
2055	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
2056	pp->mediasize = sc->sc_mediasize;
2057	/*
2058	 * There could be a problem if the data provider and the journal
2059	 * provider had different sector sizes, but such a scenario is
2060	 * prevented at journal creation time.
2061	 */
2062	pp->sectorsize = sc->sc_sectorsize;
2063	g_error_provider(pp, 0);
2064	g_topology_unlock();
2065	last_write = time_second;
2066
2067	if (sc->sc_rootmount != NULL) {
2068		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2069		root_mount_rel(sc->sc_rootmount);
2070		sc->sc_rootmount = NULL;
2071	}
2072
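	/*
	 * Main worker loop. Back-queue requests (flush and copy
	 * completions) are served before regular read/write requests
	 * taken from the provider's queue.
	 */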
2073	for (;;) {
2074		/* Get first request from the queue. */
2075		mtx_lock(&sc->sc_mtx);
2076		bp = bioq_first(&sc->sc_back_queue);
2077		if (bp != NULL)
2078			type = (bp->bio_cflags & GJ_BIO_MASK);
2079		if (bp == NULL) {
2080			bp = bioq_first(&sc->sc_regular_queue);
2081			if (bp != NULL)
2082				type = GJ_BIO_REGULAR;
2083		}
2084		if (bp == NULL) {
2085try_switch:
2086			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2087			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2088				if (sc->sc_current_count > 0) {
2089					mtx_unlock(&sc->sc_mtx);
2090					g_journal_flush(sc);
2091					g_journal_flush_send(sc);
2092					continue;
2093				}
2094				if (sc->sc_flush_in_progress > 0)
2095					goto sleep;
2096				if (sc->sc_copy_in_progress > 0)
2097					goto sleep;
2098			}
2099			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2100				mtx_unlock(&sc->sc_mtx);
2101				g_journal_switch(sc);
2102				wakeup(&sc->sc_journal_copying);
2103				continue;
2104			}
2105			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2106				GJ_DEBUG(1, "Shutting down worker "
2107				    "thread for %s.", gp->name);
2108				sc->sc_worker = NULL;
2109				wakeup(&sc->sc_worker);
2110				mtx_unlock(&sc->sc_mtx);
2111				kproc_exit(0);
2112			}
2113sleep:
2114			g_journal_wait(sc, last_write);
2115			continue;
2116		}
2117		/*
2118		 * If we're in the middle of a journal switch, we need to
2119		 * delay all new write requests until it's done.
2120		 */
2121		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2122		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2123			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2124			goto try_switch;
2125		}
2126		if (type == GJ_BIO_REGULAR)
2127			bioq_remove(&sc->sc_regular_queue, bp);
2128		else
2129			bioq_remove(&sc->sc_back_queue, bp);
2130		mtx_unlock(&sc->sc_mtx);
2131		switch (type) {
2132		case GJ_BIO_REGULAR:
2133			/* Regular request. */
2134			switch (bp->bio_cmd) {
2135			case BIO_READ:
2136				g_journal_read(sc, bp, bp->bio_offset,
2137				    bp->bio_offset + bp->bio_length);
2138				break;
2139			case BIO_WRITE:
2140				last_write = time_second;
2141				g_journal_add_request(sc, bp);
2142				g_journal_flush_send(sc);
2143				break;
2144			default:
2145				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2146			}
2147			break;
2148		case GJ_BIO_COPY:
2149			switch (bp->bio_cmd) {
2150			case BIO_READ:
2151				if (g_journal_copy_read_done(bp))
2152					g_journal_copy_send(sc);
2153				break;
2154			case BIO_WRITE:
2155				g_journal_copy_write_done(bp);
2156				g_journal_copy_send(sc);
2157				break;
2158			default:
2159				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2160			}
2161			break;
2162		case GJ_BIO_JOURNAL:
2163			g_journal_flush_done(bp);
2164			g_journal_flush_send(sc);
2165			break;
2166		case GJ_BIO_READ:
2167		default:
2168			panic("Invalid bio (%d).", type);
2169		}
2170	}
2171}
2172
2173static void
2174g_journal_destroy_event(void *arg, int flags __unused)
2175{
2176	struct g_journal_softc *sc;
2177
2178	g_topology_assert();
2179	sc = arg;
2180	g_journal_destroy(sc);
2181}
2182
2183static void
2184g_journal_timeout(void *arg)
2185{
2186	struct g_journal_softc *sc;
2187
2188	sc = arg;
2189	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
2190	    sc->sc_geom->name);
2191	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
2192}
2193
2194static struct g_geom *
2195g_journal_create(struct g_class *mp, struct g_provider *pp,
2196    const struct g_journal_metadata *md)
2197{
2198	struct g_journal_softc *sc;
2199	struct g_geom *gp;
2200	struct g_consumer *cp;
2201	int error;
2202
2203	sc = NULL;	/* silence a spurious gcc warning */
2204
2205	g_topology_assert();
2206	/*
2207	 * There are two possibilities:
2208	 * 1. Data and both journals are on the same provider.
2209	 * 2. Data and the journal are on separate providers.
2210	 */
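	/*
	 * For example (gjournal(8) syntax; the exact invocations are
	 * illustrative):
	 *   gjournal label da0       - data and journal on one provider
	 *   gjournal label da0 da1   - data on da0, journal on da1
	 */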
2211	/* Look for journal device with the same ID. */
2212	LIST_FOREACH(gp, &mp->geom, geom) {
2213		sc = gp->softc;
2214		if (sc == NULL)
2215			continue;
2216		if (sc->sc_id == md->md_id)
2217			break;
2218	}
2219	if (gp == NULL)
2220		sc = NULL;
2221	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2222		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2223		return (NULL);
2224	}
2225	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2226		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2227		return (NULL);
2228	}
2229	if (md->md_type & GJ_TYPE_DATA) {
2230		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2231		    pp->name);
2232	}
2233	if (md->md_type & GJ_TYPE_JOURNAL) {
2234		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2235		    pp->name);
2236	}
2237
2238	if (sc == NULL) {
2239		/* Action geom. */
2240		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2241		sc->sc_id = md->md_id;
2242		sc->sc_type = 0;
2243		sc->sc_flags = 0;
2244		sc->sc_worker = NULL;
2245
2246		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2247		gp->start = g_journal_start;
2248		gp->orphan = g_journal_orphan;
2249		gp->access = g_journal_access;
2250		gp->softc = sc;
2251		gp->flags |= G_GEOM_VOLATILE_BIO;
2252		sc->sc_geom = gp;
2253
2254		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2255
2256		bioq_init(&sc->sc_back_queue);
2257		bioq_init(&sc->sc_regular_queue);
2258		bioq_init(&sc->sc_delayed_queue);
2259		sc->sc_delayed_count = 0;
2260		sc->sc_current_queue = NULL;
2261		sc->sc_current_count = 0;
2262		sc->sc_flush_queue = NULL;
2263		sc->sc_flush_count = 0;
2264		sc->sc_flush_in_progress = 0;
2265		sc->sc_copy_queue = NULL;
2266		sc->sc_copy_in_progress = 0;
2267		sc->sc_inactive.jj_queue = NULL;
2268		sc->sc_active.jj_queue = NULL;
2269
2270		sc->sc_rootmount = root_mount_hold("GJOURNAL");
2271		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
2272
2273		callout_init(&sc->sc_callout, 1);
2274		if (md->md_type != GJ_TYPE_COMPLETE) {
2275			/*
2276			 * Journal and data are on separate providers.
2277			 * At this point we have only one of them.
2278			 * We set up a timeout in case the other part does not
2279			 * appear, so we won't wait forever.
2280			 */
2281			callout_reset(&sc->sc_callout, 5 * hz,
2282			    g_journal_timeout, sc);
2283		}
2284	}
2285
2286	/* Remember type of the data provider. */
2287	if (md->md_type & GJ_TYPE_DATA)
2288		sc->sc_orig_type = md->md_type;
2289	sc->sc_type |= md->md_type;
2290	cp = NULL;
2291
2292	if (md->md_type & GJ_TYPE_DATA) {
2293		if (md->md_flags & GJ_FLAG_CLEAN)
2294			sc->sc_flags |= GJF_DEVICE_CLEAN;
2295		if (md->md_flags & GJ_FLAG_CHECKSUM)
2296			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
2297		cp = g_new_consumer(gp);
2298		error = g_attach(cp, pp);
2299		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2300		    pp->name, error));
2301		error = g_access(cp, 1, 1, 1);
2302		if (error != 0) {
2303			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
2304			    error);
2305			g_journal_destroy(sc);
2306			return (NULL);
2307		}
2308		sc->sc_dconsumer = cp;
2309		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
2310		sc->sc_sectorsize = pp->sectorsize;
2311		sc->sc_jstart = md->md_jstart;
2312		sc->sc_jend = md->md_jend;
2313		if (md->md_provider[0] != '\0')
2314			sc->sc_flags |= GJF_DEVICE_HARDCODED;
2315		sc->sc_journal_offset = md->md_joffset;
2316		sc->sc_journal_id = md->md_jid;
2317		sc->sc_journal_previous_id = md->md_jid;
2318	}
2319	if (md->md_type & GJ_TYPE_JOURNAL) {
2320		if (cp == NULL) {
2321			cp = g_new_consumer(gp);
2322			error = g_attach(cp, pp);
2323			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2324			    pp->name, error));
2325			error = g_access(cp, 1, 1, 1);
2326			if (error != 0) {
2327				GJ_DEBUG(0, "Cannot access %s (error=%d).",
2328				    pp->name, error);
2329				g_journal_destroy(sc);
2330				return (NULL);
2331			}
2332		} else {
2333			/*
2334			 * The journal is on the same provider as the data, which
2335			 * means that the data provider ends where the journal starts.
2336			 */
2337			sc->sc_mediasize = md->md_jstart;
2338		}
2339		sc->sc_jconsumer = cp;
2340	}
2341
2342	/* Start switcher kproc if needed. */
2343	if (g_journal_switcher_proc == NULL)
2344		g_journal_start_switcher(mp);
2345
2346	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
2347		/* Journal is not complete yet. */
2348		return (gp);
2349	} else {
2350		/* Journal complete, cancel timeout. */
2351		callout_drain(&sc->sc_callout);
2352	}
2353
2354	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
2355	    "g_journal %s", sc->sc_name);
2356	if (error != 0) {
2357		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
2358		    sc->sc_name);
2359		g_journal_destroy(sc);
2360		return (NULL);
2361	}
2362
2363	return (gp);
2364}
2365
2366static void
2367g_journal_destroy_consumer(void *arg, int flags __unused)
2368{
2369	struct g_consumer *cp;
2370
2371	g_topology_assert();
2372	cp = arg;
2373	g_detach(cp);
2374	g_destroy_consumer(cp);
2375}
2376
2377static int
2378g_journal_destroy(struct g_journal_softc *sc)
2379{
2380	struct g_geom *gp;
2381	struct g_provider *pp;
2382	struct g_consumer *cp;
2383
2384	g_topology_assert();
2385
2386	if (sc == NULL)
2387		return (ENXIO);
2388
2389	gp = sc->sc_geom;
2390	pp = LIST_FIRST(&gp->provider);
2391	if (pp != NULL) {
2392		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
2393			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
2394			    pp->name, pp->acr, pp->acw, pp->ace);
2395			return (EBUSY);
2396		}
2397		g_error_provider(pp, ENXIO);
2398
2399		g_journal_flush(sc);
2400		g_journal_flush_send(sc);
2401		g_journal_switch(sc);
2402	}
2403
2404	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);
2405
2406	g_topology_unlock();
2407
2408	if (sc->sc_rootmount != NULL) {
2409		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2410		root_mount_rel(sc->sc_rootmount);
2411		sc->sc_rootmount = NULL;
2412	}
2413
2414	callout_drain(&sc->sc_callout);
2415	mtx_lock(&sc->sc_mtx);
2416	wakeup(sc);
2417	while (sc->sc_worker != NULL)
2418		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
2419	mtx_unlock(&sc->sc_mtx);
2420
2421	if (pp != NULL) {
2422		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
2423		g_journal_metadata_update(sc);
2424		g_topology_lock();
2425		g_wither_provider(pp, ENXIO);
2426	} else {
2427		g_topology_lock();
2428	}
2429	mtx_destroy(&sc->sc_mtx);
2430
2431	if (sc->sc_current_count != 0) {
2432		GJ_DEBUG(0, "Warning! Number of current requests %d.",
2433		    sc->sc_current_count);
2434	}
2435
2436	gp->softc = NULL;
2437	LIST_FOREACH(cp, &gp->consumer, consumer) {
2438		if (cp->acr + cp->acw + cp->ace > 0)
2439			g_access(cp, -1, -1, -1);
2440		/*
2441		 * We keep all consumers open for writing, so if we detached
2442		 * and destroyed the consumer here, the provider would be
2443		 * offered for tasting and the journal would be started again.
2444		 * Sending an event here prevents this from happening.
2445		 */
2446		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
2447	}
2448	g_wither_geom(gp, ENXIO);
2449	free(sc, M_JOURNAL);
2450	return (0);
2451}
2452
2453static void
2454g_journal_taste_orphan(struct g_consumer *cp)
2455{
2456
2457	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2458	    cp->provider->name));
2459}
2460
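/*
 * Taste a provider: read and decode the gjournal metadata, sanity-check the
 * hardcoded provider name and size, and try to create (or complete) the
 * journal geom.
 */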
2461static struct g_geom *
2462g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2463{
2464	struct g_journal_metadata md;
2465	struct g_consumer *cp;
2466	struct g_geom *gp;
2467	int error;
2468
2469	g_topology_assert();
2470	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2471	GJ_DEBUG(2, "Tasting %s.", pp->name);
2472	if (pp->geom->class == mp)
2473		return (NULL);
2474
2475	gp = g_new_geomf(mp, "journal:taste");
2476	/* This orphan function should never be called. */
2477	gp->orphan = g_journal_taste_orphan;
2478	cp = g_new_consumer(gp);
2479	g_attach(cp, pp);
2480	error = g_journal_metadata_read(cp, &md);
2481	g_detach(cp);
2482	g_destroy_consumer(cp);
2483	g_destroy_geom(gp);
2484	if (error != 0)
2485		return (NULL);
2486	gp = NULL;
2487
2488	if (md.md_provider[0] != '\0' &&
2489	    !g_compare_names(md.md_provider, pp->name))
2490		return (NULL);
2491	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2492		return (NULL);
2493	if (g_journal_debug >= 2)
2494		journal_metadata_dump(&md);
2495
2496	gp = g_journal_create(mp, pp, &md);
2497	return (gp);
2498}
2499
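/*
 * Find a complete journal device that is not being destroyed.  Both the
 * softc name and the provider name are accepted; a "/dev/" prefix is
 * stripped first.
 */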
2500static struct g_journal_softc *
2501g_journal_find_device(struct g_class *mp, const char *name)
2502{
2503	struct g_journal_softc *sc;
2504	struct g_geom *gp;
2505	struct g_provider *pp;
2506
2507	if (strncmp(name, "/dev/", 5) == 0)
2508		name += 5;
2509	LIST_FOREACH(gp, &mp->geom, geom) {
2510		sc = gp->softc;
2511		if (sc == NULL)
2512			continue;
2513		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2514			continue;
2515		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2516			continue;
2517		pp = LIST_FIRST(&gp->provider);
2518		if (strcmp(sc->sc_name, name) == 0)
2519			return (sc);
2520		if (pp != NULL && strcmp(pp->name, name) == 0)
2521			return (sc);
2522	}
2523	return (NULL);
2524}
2525
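/*
 * Handle the "destroy"/"stop" control verbs: look up every named device
 * and tear its journal geom down.
 */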
2526static void
2527g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
2528{
2529	struct g_journal_softc *sc;
2530	const char *name;
2531	char param[16];
2532	int *nargs;
2533	int error, i;
2534
2535	g_topology_assert();
2536
2537	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
2538	if (nargs == NULL) {
2539		gctl_error(req, "No '%s' argument.", "nargs");
2540		return;
2541	}
2542	if (*nargs <= 0) {
2543		gctl_error(req, "Missing device(s).");
2544		return;
2545	}
2546
2547	for (i = 0; i < *nargs; i++) {
2548		snprintf(param, sizeof(param), "arg%d", i);
2549		name = gctl_get_asciiparam(req, param);
2550		if (name == NULL) {
2551			gctl_error(req, "No 'arg%d' argument.", i);
2552			return;
2553		}
2554		sc = g_journal_find_device(mp, name);
2555		if (sc == NULL) {
2556			gctl_error(req, "No such device: %s.", name);
2557			return;
2558		}
2559		error = g_journal_destroy(sc);
2560		if (error != 0) {
2561			gctl_error(req, "Cannot destroy device %s (error=%d).",
2562			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
2563			return;
2564		}
2565	}
2566}
2567
2568static void
2569g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
2570{
2571
2572	g_topology_assert();
2573	g_topology_unlock();
2574	g_journal_sync_requested++;
2575	wakeup(&g_journal_switcher_state);
2576	while (g_journal_sync_requested > 0)
2577		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
2578	g_topology_lock();
2579}
2580
2581static void
2582g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
2583{
2584	uint32_t *version;
2585
2586	g_topology_assert();
2587
2588	version = gctl_get_paraml(req, "version", sizeof(*version));
2589	if (version == NULL) {
2590		gctl_error(req, "No '%s' argument.", "version");
2591		return;
2592	}
2593	if (*version != G_JOURNAL_VERSION) {
2594		gctl_error(req, "Userland and kernel parts are out of sync.");
2595		return;
2596	}
2597
2598	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
2599		g_journal_ctl_destroy(req, mp);
2600		return;
2601	} else if (strcmp(verb, "sync") == 0) {
2602		g_journal_ctl_sync(req, mp);
2603		return;
2604	}
2605
2606	gctl_error(req, "Unknown verb.");
2607}
2608
2609static void
2610g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2611    struct g_consumer *cp, struct g_provider *pp)
2612{
2613	struct g_journal_softc *sc;
2614
2615	g_topology_assert();
2616
2617	sc = gp->softc;
2618	if (sc == NULL)
2619		return;
2620	if (pp != NULL) {
2621		/* Nothing here. */
2622	} else if (cp != NULL) {
2623		int first = 1;
2624
2625		sbuf_printf(sb, "%s<Role>", indent);
2626		if (cp == sc->sc_dconsumer) {
2627			sbuf_printf(sb, "Data");
2628			first = 0;
2629		}
2630		if (cp == sc->sc_jconsumer) {
2631			if (!first)
2632				sbuf_printf(sb, ",");
2633			sbuf_printf(sb, "Journal");
2634		}
2635		sbuf_printf(sb, "</Role>\n");
2636		if (cp == sc->sc_jconsumer) {
2637			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
2638			    (intmax_t)sc->sc_jstart);
2639			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
2640			    (intmax_t)sc->sc_jend);
2641		}
2642	} else {
2643		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2644	}
2645}
2646
2647static eventhandler_tag g_journal_event_shutdown = NULL;
2648static eventhandler_tag g_journal_event_lowmem = NULL;
2649
2650static void
2651g_journal_shutdown(void *arg, int howto __unused)
2652{
2653	struct g_class *mp;
2654	struct g_geom *gp, *gp2;
2655
2656	if (panicstr != NULL)
2657		return;
2658	mp = arg;
2659	DROP_GIANT();
2660	g_topology_lock();
2661	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2662		if (gp->softc == NULL)
2663			continue;
2664		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
2665		g_journal_destroy(gp->softc);
2666	}
2667	g_topology_unlock();
2668	PICKUP_GIANT();
2669}
2670
2671/*
2672 * Free cached requests from the inactive queue in case of low memory.
2673 * We free GJ_FREE_AT_ONCE elements at once.
2674 */
2675#define	GJ_FREE_AT_ONCE	4
2676static void
2677g_journal_lowmem(void *arg, int howto __unused)
2678{
2679	struct g_journal_softc *sc;
2680	struct g_class *mp;
2681	struct g_geom *gp;
2682	struct bio *bp;
2683	u_int nfree = GJ_FREE_AT_ONCE;
2684
2685	g_journal_stats_low_mem++;
2686	mp = arg;
2687	DROP_GIANT();
2688	g_topology_lock();
2689	LIST_FOREACH(gp, &mp->geom, geom) {
2690		sc = gp->softc;
2691		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
2692			continue;
2693		mtx_lock(&sc->sc_mtx);
2694		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
2695		    nfree--, bp = bp->bio_next) {
2696			/*
2697			 * It is safe to free the bio_data here, because:
2698			 * 1. If bio_data is NULL it will be read from the
2699			 *    inactive journal.
2700			 * 2. If bp is sent down, it is first removed from the
2701			 *    inactive queue, so it's impossible to free the
2702			 *    data from under an in-flight bio.
2703			 * On the other hand, freeing elements from the active
2704			 * queue is not safe.
2705			 */
2706			if (bp->bio_data != NULL) {
2707				GJ_DEBUG(2, "Freeing data from %s.",
2708				    sc->sc_name);
2709				gj_free(bp->bio_data, bp->bio_length);
2710				bp->bio_data = NULL;
2711			}
2712		}
2713		mtx_unlock(&sc->sc_mtx);
2714		if (nfree == 0)
2715			break;
2716	}
2717	g_topology_unlock();
2718	PICKUP_GIANT();
2719}
2720
2721static void g_journal_switcher(void *arg);
2722
2723static void
2724g_journal_init(struct g_class *mp)
2725{
2726
2727	/* Pick a conservative value if the provided value is unusable. */
2728	if (g_journal_cache_divisor <= 0 ||
2729	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
2730		g_journal_cache_divisor = 5;
2731	}
2732	if (g_journal_cache_limit > 0) {
2733		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
2734		g_journal_cache_low =
2735		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
2736	}
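	/*
	 * A worked example (hypothetical numbers): with vm_kmem_size of
	 * 1 GB and the fallback divisor of 5, the cache limit comes out
	 * at ~200 MB, and the low watermark is g_journal_cache_switch
	 * percent of that limit.
	 */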
2737	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
2738	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
2739	if (g_journal_event_shutdown == NULL)
2740		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
2741	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
2742	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
2743	if (g_journal_event_lowmem == NULL)
2744		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
2745}
2746
2747static void
2748g_journal_fini(struct g_class *mp)
2749{
2750
2751	if (g_journal_event_shutdown != NULL) {
2752		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
2753		    g_journal_event_shutdown);
2754	}
2755	if (g_journal_event_lowmem != NULL)
2756		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
2757	g_journal_stop_switcher();
2758}
2759
2760DECLARE_GEOM_CLASS(g_journal_class, g_journal);
2761
2762static const struct g_journal_desc *
2763g_journal_find_desc(const char *fstype)
2764{
2765	const struct g_journal_desc *desc;
2766	int i;
2767
2768	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
2769	     desc = g_journal_filesystems[++i]) {
2770		if (strcmp(desc->jd_fstype, fstype) == 0)
2771			break;
2772	}
2773	return (desc);
2774}
2775
2776static void
2777g_journal_switch_wait(struct g_journal_softc *sc)
2778{
2779	struct bintime bt;
2780
2781	mtx_assert(&sc->sc_mtx, MA_OWNED);
2782	if (g_journal_debug >= 2) {
2783		if (sc->sc_flush_in_progress > 0) {
2784			GJ_DEBUG(2, "%d requests flushing.",
2785			    sc->sc_flush_in_progress);
2786		}
2787		if (sc->sc_copy_in_progress > 0) {
2788			GJ_DEBUG(2, "%d requests copying.",
2789			    sc->sc_copy_in_progress);
2790		}
2791		if (sc->sc_flush_count > 0) {
2792			GJ_DEBUG(2, "%d requests to flush.",
2793			    sc->sc_flush_count);
2794		}
2795		if (sc->sc_delayed_count > 0) {
2796			GJ_DEBUG(2, "%d requests delayed.",
2797			    sc->sc_delayed_count);
2798		}
2799	}
2800	g_journal_stats_switches++;
2801	if (sc->sc_copy_in_progress > 0)
2802		g_journal_stats_wait_for_copy++;
2803	GJ_TIMER_START(1, &bt);
2804	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2805	sc->sc_flags |= GJF_DEVICE_SWITCH;
2806	wakeup(sc);
2807	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
2808		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
2809		    "gj:switch", 0);
2810	}
2811	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
2812}
2813
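/*
 * Perform a journal switch for every gjournal-backed, writable file system:
 * sync it, suspend writes, let the file system mark itself clean, switch the
 * journals and resume writes.  A final pass switches any devices still marked
 * GJF_DEVICE_BEFORE_SWITCH that were not reached via the mount list.
 */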
2814static void
2815g_journal_do_switch(struct g_class *classp)
2816{
2817	struct g_journal_softc *sc;
2818	const struct g_journal_desc *desc;
2819	struct g_geom *gp;
2820	struct mount *mp;
2821	struct bintime bt;
2822	char *mountpoint;
2823	int error, save;
2824
2825	DROP_GIANT();
2826	g_topology_lock();
2827	LIST_FOREACH(gp, &classp->geom, geom) {
2828		sc = gp->softc;
2829		if (sc == NULL)
2830			continue;
2831		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2832			continue;
2833		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2834			continue;
2835		mtx_lock(&sc->sc_mtx);
2836		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
2837		mtx_unlock(&sc->sc_mtx);
2838	}
2839	g_topology_unlock();
2840	PICKUP_GIANT();
2841
2842	mtx_lock(&mountlist_mtx);
2843	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2844		if (mp->mnt_gjprovider == NULL)
2845			continue;
2846		if (mp->mnt_flag & MNT_RDONLY)
2847			continue;
2848		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
2849		if (desc == NULL)
2850			continue;
2851		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
2852			continue;
2853		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
2854
2855		DROP_GIANT();
2856		g_topology_lock();
2857		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
2858		g_topology_unlock();
2859		PICKUP_GIANT();
2860
2861		if (sc == NULL) {
2862			GJ_DEBUG(0, "Cannot find journal geom for %s.",
2863			    mp->mnt_gjprovider);
2864			goto next;
2865		} else if (JEMPTY(sc)) {
2866			mtx_lock(&sc->sc_mtx);
2867			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2868			mtx_unlock(&sc->sc_mtx);
2869			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
2870			goto next;
2871		}
2872
2873		mountpoint = mp->mnt_stat.f_mntonname;
2874
2875		error = vn_start_write(NULL, &mp, V_WAIT);
2876		if (error != 0) {
2877			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
2878			    mountpoint, error);
2879			goto next;
2880		}
2881
2882		save = curthread_pflags_set(TDP_SYNCIO);
2883
2884		GJ_TIMER_START(1, &bt);
2885		vfs_msync(mp, MNT_NOWAIT);
2886		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);
2887
2888		GJ_TIMER_START(1, &bt);
2889		error = VFS_SYNC(mp, MNT_NOWAIT);
2890		if (error == 0)
2891			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
2892		else {
2893			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
2894			    mountpoint, error);
2895		}
2896
2897		curthread_pflags_restore(save);
2898
2899		vn_finished_write(mp);
2900
2901		if (error != 0)
2902			goto next;
2903
2904		/*
2905		 * Send BIO_FLUSH before freezing the file system, so that
2906		 * there is less to flush while the file system is frozen.
2907		 */
2908		GJ_TIMER_START(1, &bt);
2909		g_journal_flush_cache(sc);
2910		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);
2911
2912		GJ_TIMER_START(1, &bt);
2913		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
2914		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
2915		if (error != 0) {
2916			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
2917			    mountpoint, error);
2918			goto next;
2919		}
2920
2921		error = desc->jd_clean(mp);
2922		if (error != 0)
2923			goto next;
2924
2925		mtx_lock(&sc->sc_mtx);
2926		g_journal_switch_wait(sc);
2927		mtx_unlock(&sc->sc_mtx);
2928
2929		vfs_write_resume(mp, 0);
2930next:
2931		mtx_lock(&mountlist_mtx);
2932		vfs_unbusy(mp);
2933	}
2934	mtx_unlock(&mountlist_mtx);
2935
2936	sc = NULL;
2937	for (;;) {
2938		DROP_GIANT();
2939		g_topology_lock();
2940		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
2941			sc = gp->softc;
2942			if (sc == NULL)
2943				continue;
2944			mtx_lock(&sc->sc_mtx);
2945			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
2946			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
2947			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
2948				break;
2949			}
2950			mtx_unlock(&sc->sc_mtx);
2951			sc = NULL;
2952		}
2953		g_topology_unlock();
2954		PICKUP_GIANT();
2955		if (sc == NULL)
2956			break;
2957		mtx_assert(&sc->sc_mtx, MA_OWNED);
2958		g_journal_switch_wait(sc);
2959		mtx_unlock(&sc->sc_mtx);
2960	}
2961}
2962
2963static void
2964g_journal_start_switcher(struct g_class *mp)
2965{
2966	int error;
2967
2968	g_topology_assert();
2969	MPASS(g_journal_switcher_proc == NULL);
2970	g_journal_switcher_state = GJ_SWITCHER_WORKING;
2971	error = kproc_create(g_journal_switcher, mp, &g_journal_switcher_proc,
2972	    0, 0, "g_journal switcher");
2973	KASSERT(error == 0, ("Cannot create switcher thread."));
2974}
2975
2976static void
2977g_journal_stop_switcher(void)
2978{
2979	g_topology_assert();
2980	MPASS(g_journal_switcher_proc != NULL);
2981	g_journal_switcher_state = GJ_SWITCHER_DIE;
2982	wakeup(&g_journal_switcher_state);
2983	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
2984		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
2985	GJ_DEBUG(1, "Switcher died.");
2986	g_journal_switcher_proc = NULL;
2987}
2988
2989/*
2990 * TODO: Kill switcher thread on last geom destruction?
2991 */
2992static void
2993g_journal_switcher(void *arg)
2994{
2995	struct g_class *mp;
2996	struct bintime bt;
2997	int error;
2998
2999	mp = arg;
3000	curthread->td_pflags |= TDP_NORUNNINGBUF;
3001	for (;;) {
3002		g_journal_switcher_wokenup = 0;
3003		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
3004		    g_journal_switch_time * hz);
3005		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
3006			g_journal_switcher_state = GJ_SWITCHER_DIED;
3007			GJ_DEBUG(1, "Switcher exiting.");
3008			wakeup(&g_journal_switcher_state);
3009			kproc_exit(0);
3010		}
3011		if (error == 0 && g_journal_sync_requested == 0) {
3012			GJ_DEBUG(1, "Out of cache, force switch (used=%jd "
3013			    "limit=%jd).", (intmax_t)g_journal_cache_used,
3014			    (intmax_t)g_journal_cache_limit);
3015		}
3016		GJ_TIMER_START(1, &bt);
3017		g_journal_do_switch(mp);
3018		GJ_TIMER_STOP(1, &bt, "Entire switch time");
3019		if (g_journal_sync_requested > 0) {
3020			g_journal_sync_requested = 0;
3021			wakeup(&g_journal_sync_requested);
3022		}
3023	}
3024}
3025