/*-
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/geom/journal/g_journal.c 322566 2017-08-16 05:51:05Z mckusick $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/eventhandler.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/sbuf.h>
#ifdef GJ_MEMDEBUG
#include <sys/stack.h>
#include <sys/kdb.h>
#endif
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <geom/geom.h>

#include <geom/journal/g_journal.h>

FEATURE(geom_journal, "GEOM journaling support");

/*
 * On-disk journal format:
 *
 * JH - Journal header
 * RH - Record header
 *
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 *
 */
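
/*
 * Both the journal header and each record header are written as one full
 * sector of the journal provider (see g_journal_write_header() and
 * g_journal_flush() below); the data blocks follow immediately at the
 * journal offsets recorded in the record header entries.
 */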

CTASSERT(sizeof(struct g_journal_header) <= 512);
CTASSERT(sizeof(struct g_journal_record_header) <= 512);

static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
static struct mtx g_journal_cache_mtx;
MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);

const struct g_journal_desc *g_journal_filesystems[] = {
	&g_journal_ufs,
	NULL
};

SYSCTL_DECL(_kern_geom);

int g_journal_debug = 0;
TUNABLE_INT("kern.geom.journal.debug", &g_journal_debug);
static u_int g_journal_switch_time = 10;
static u_int g_journal_force_switch = 70;
static u_int g_journal_parallel_flushes = 16;
static u_int g_journal_parallel_copies = 16;
static u_int g_journal_accept_immediately = 64;
static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
static u_int g_journal_do_optimize = 1;

static SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0,
    "GEOM_JOURNAL stuff");
SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RW, &g_journal_debug, 0,
    "Debug level");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
    &g_journal_switch_time, 0, "Switch journals every N seconds");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
    &g_journal_force_switch, 0, "Force switch when journal is N% full");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
    &g_journal_parallel_flushes, 0,
    "Number of flush I/O requests to send in parallel");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
    &g_journal_accept_immediately, 0,
    "Number of I/O requests accepted immediately");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
    &g_journal_parallel_copies, 0,
    "Number of copy I/O requests to send in parallel");
static int
g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int entries;
	int error;

	entries = g_journal_record_entries;
	error = sysctl_handle_int(oidp, &entries, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	g_journal_record_entries = entries;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
    "Maximum number of entries in one journal record");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
    &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");

static u_long g_journal_cache_used = 0;
static u_long g_journal_cache_limit = 64 * 1024 * 1024;
TUNABLE_LONG("kern.geom.journal.cache.limit", &g_journal_cache_limit);
static u_int g_journal_cache_divisor = 2;
TUNABLE_INT("kern.geom.journal.cache.divisor", &g_journal_cache_divisor);
static u_int g_journal_cache_switch = 90;
static u_int g_journal_cache_misses = 0;
static u_int g_journal_cache_alloc_failures = 0;
static u_long g_journal_cache_low = 0;
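
/*
 * The cache limit and divisor above are also boot-time tunables
 * (TUNABLE_LONG/TUNABLE_INT), so they can be preset from loader.conf, for
 * example (the values here are only examples):
 *
 *	kern.geom.journal.cache.limit=134217728
 *	kern.geom.journal.cache.divisor=4
 */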

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_JOURNAL cache");
SYSCTL_ULONG(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
    &g_journal_cache_used, 0, "Number of allocated bytes");
static int
g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_long limit;
	int error;

	limit = g_journal_cache_limit;
	error = sysctl_handle_long(oidp, &limit, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	g_journal_cache_limit = limit;
	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_RW, NULL, 0, g_journal_cache_limit_sysctl, "LU",
    "Maximum number of allocated bytes");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &g_journal_cache_divisor, 0,
    "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
static int
g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int cswitch;
	int error;

	cswitch = g_journal_cache_switch;
	error = sysctl_handle_int(oidp, &cswitch, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (cswitch > 100)
		return (EINVAL);
	g_journal_cache_switch = cswitch;
	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
    "Force switch when we hit this percent of cache use");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
    &g_journal_cache_misses, 0, "Number of cache misses");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
    &g_journal_cache_alloc_failures, 0, "Memory allocation failures");

static u_long g_journal_stats_bytes_skipped = 0;
static u_long g_journal_stats_combined_ios = 0;
static u_long g_journal_stats_switches = 0;
static u_long g_journal_stats_wait_for_copy = 0;
static u_long g_journal_stats_journal_full = 0;
static u_long g_journal_stats_low_mem = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
    "GEOM_JOURNAL statistics");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
    &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
    &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
    &g_journal_stats_switches, 0, "Number of journal switches");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
    &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
    &g_journal_stats_journal_full, 0,
    "Number of times journal was almost full.");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
    &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");

static g_taste_t g_journal_taste;
static g_ctl_req_t g_journal_config;
static g_dumpconf_t g_journal_dumpconf;
static g_init_t g_journal_init;
static g_fini_t g_journal_fini;

struct g_class g_journal_class = {
	.name = G_JOURNAL_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_journal_taste,
	.ctlreq = g_journal_config,
	.dumpconf = g_journal_dumpconf,
	.init = g_journal_init,
	.fini = g_journal_fini
};

static int g_journal_destroy(struct g_journal_softc *sc);
static void g_journal_metadata_update(struct g_journal_softc *sc);
static void g_journal_start_switcher(struct g_class *mp);
static void g_journal_stop_switcher(void);
static void g_journal_switch_wait(struct g_journal_softc *sc);

#define	GJ_SWITCHER_WORKING	0
#define	GJ_SWITCHER_DIE		1
#define	GJ_SWITCHER_DIED	2
static struct proc *g_journal_switcher_proc = NULL;
static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
static int g_journal_switcher_wokenup = 0;
static int g_journal_sync_requested = 0;

#ifdef GJ_MEMDEBUG
struct meminfo {
	size_t		mi_size;
	struct stack	mi_stack;
};
#endif

/*
 * We use our own malloc/realloc/free functions, so we can collect statistics
 * and force a journal switch when we're running out of cache.
 */
static void *
gj_malloc(size_t size, int flags)
{
	void *p;
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	mtx_lock(&g_journal_cache_mtx);
	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
	    g_journal_cache_used + size > g_journal_cache_low) {
		GJ_DEBUG(1, "No cache, waking up the switcher.");
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
	}
	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
	    g_journal_cache_used + size > g_journal_cache_limit) {
		mtx_unlock(&g_journal_cache_mtx);
		g_journal_cache_alloc_failures++;
		return (NULL);
	}
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	flags &= ~M_NOWAIT;
#ifndef GJ_MEMDEBUG
	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
#else
	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
	p = (u_char *)mi + sizeof(*mi);
	mi->mi_size = size;
	stack_save(&mi->mi_stack);
#endif
	return (p);
}

static void
gj_free(void *p, size_t size)
{
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	KASSERT(p != NULL, ("p=NULL"));
	KASSERT(size > 0, ("size=0"));
	mtx_lock(&g_journal_cache_mtx);
	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
	g_journal_cache_used -= size;
	mtx_unlock(&g_journal_cache_mtx);
#ifdef GJ_MEMDEBUG
	mi = p = (void *)((u_char *)p - sizeof(*mi));
	if (mi->mi_size != size) {
		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
		    mi->mi_size);
		printf("GJOURNAL: Alloc backtrace:\n");
		stack_print(&mi->mi_stack);
		printf("GJOURNAL: Free backtrace:\n");
		kdb_backtrace();
	}
#endif
	free(p, M_JOURNAL);
}

static void *
gj_realloc(void *p, size_t size, size_t oldsize)
{
	void *np;

#ifndef GJ_MEMDEBUG
	mtx_lock(&g_journal_cache_mtx);
	g_journal_cache_used -= oldsize;
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	np = realloc(p, size, M_JOURNAL, M_WAITOK);
#else
	np = gj_malloc(size, M_WAITOK);
	bcopy(p, np, MIN(oldsize, size));
	gj_free(p, oldsize);
#endif
	return (np);
}
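
/*
 * All journal data buffers should be allocated and freed through the
 * wrappers above, so that g_journal_cache_used stays accurate; gj_free()
 * subtracts the caller-supplied size from the accounting, so mixing in
 * plain malloc(9)/free(9) buffers would corrupt the statistics.
 */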

static void
g_journal_check_overflow(struct g_journal_softc *sc)
{
	off_t length, used;

	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
		panic("Journal overflow "
		    "(id = %u joffset=%jd active=%jd inactive=%jd)",
		    (unsigned)sc->sc_id,
		    (intmax_t)sc->sc_journal_offset,
		    (intmax_t)sc->sc_active.jj_offset,
		    (intmax_t)sc->sc_inactive.jj_offset);
	}
	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
	} else {
		length = sc->sc_jend - sc->sc_active.jj_offset;
		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
		else {
			used = sc->sc_jend - sc->sc_active.jj_offset;
			used += sc->sc_journal_offset - sc->sc_jstart;
		}
	}
	/* Already woken up? */
	if (g_journal_switcher_wokenup)
		return;
	/*
	 * If the active journal takes more than g_journal_force_switch percent
	 * of free journal space, we force a journal switch.
	 */
	KASSERT(length > 0,
	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
	    (intmax_t)length, (intmax_t)used,
	    (intmax_t)sc->sc_active.jj_offset,
	    (intmax_t)sc->sc_inactive.jj_offset,
	    (intmax_t)sc->sc_journal_offset));
	if ((used * 100) / length > g_journal_force_switch) {
		g_journal_stats_journal_full++;
		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
		    sc->sc_name, (used * 100) / length);
		mtx_lock(&g_journal_cache_mtx);
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
		mtx_unlock(&g_journal_cache_mtx);
	}
}
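
/*
 * Worked example with the default g_journal_force_switch of 70%, in the
 * wrap-around case (active > inactive): for jstart=0, jend=100MB,
 * active=90MB, inactive=10MB and joffset=5MB we get
 * length = (100 - 90) + (10 - 0) = 20MB and
 * used = (100 - 90) + (5 - 0) = 15MB, i.e. 75%, so a switch is forced.
 */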

static void
g_journal_orphan(struct g_consumer *cp)
{
	struct g_journal_softc *sc;
	char name[256];
	int error;

	g_topology_assert();
	sc = cp->geom->softc;
	strlcpy(name, cp->provider->name, sizeof(name));
	GJ_DEBUG(0, "Lost provider %s.", name);
	if (sc == NULL)
		return;
	error = g_journal_destroy(sc);
	if (error == 0)
		GJ_DEBUG(0, "Journal %s destroyed.", name);
	else {
		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
		    "Destroy it manually after last close.", sc->sc_name,
		    error);
	}
}

static int
g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_journal_softc *sc;
	int dcr, dcw, dce;

	g_topology_assert();
	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);

	dcr = pp->acr + acr;
	dcw = pp->acw + acw;
	dce = pp->ace + ace;

	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		else
			return (ENXIO);
	}
	if (pp->acw == 0 && dcw > 0) {
		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		sc->sc_flags |= GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} */
	return (0);
}

static void
g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
{

	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
	data += sizeof(GJ_HEADER_MAGIC);
	le32enc(data, hdr->jh_journal_id);
	data += 4;
	le32enc(data, hdr->jh_journal_next_id);
}

static int
g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
{

	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
	data += sizeof(hdr->jh_magic);
	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
		return (EINVAL);
	hdr->jh_journal_id = le32dec(data);
	data += 4;
	hdr->jh_journal_next_id = le32dec(data);
	return (0);
}
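
/*
 * On-disk layout of the journal header, as implemented above: the magic
 * string followed by the journal ID and the ID of the next journal, both
 * stored as 32-bit little-endian values.
 */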

static void
g_journal_flush_cache(struct g_journal_softc *sc)
{
	struct bintime bt;
	int error;

	if (sc->sc_bio_flush == 0)
		return;
	GJ_TIMER_START(1, &bt);
	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
		error = g_io_flush(sc->sc_jconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_jconsumer->provider->name, error);
	}
	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
		/*
		 * TODO: This could be called in parallel with the
		 *       previous call.
		 */
		error = g_io_flush(sc->sc_dconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_dconsumer->provider->name, error);
	}
	GJ_TIMER_STOP(1, &bt, "Cache flush time");
}

static int
g_journal_write_header(struct g_journal_softc *sc)
{
	struct g_journal_header hdr;
	struct g_consumer *cp;
	u_char *buf;
	int error;

	cp = sc->sc_jconsumer;
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);

	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
	hdr.jh_journal_id = sc->sc_journal_id;
	hdr.jh_journal_next_id = sc->sc_journal_next_id;
	g_journal_header_encode(&hdr, buf);
	error = g_write_data(cp, sc->sc_journal_offset, buf,
	    cp->provider->sectorsize);
	/* if (error == 0) */
	sc->sc_journal_offset += cp->provider->sectorsize;

	gj_free(buf, cp->provider->sectorsize);
	return (error);
}

/*
 * Every journal record has a header and data following it.
 * The functions below are used to encode the header to little endian before
 * storing it and to decode it back to the system endianness after reading.
 */
static void
g_journal_record_header_encode(struct g_journal_record_header *hdr,
    u_char *data)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
	data += sizeof(GJ_RECORD_HEADER_MAGIC);
	le32enc(data, hdr->jrh_journal_id);
	data += 8;
	le16enc(data, hdr->jrh_nentries);
	data += 2;
	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		le64enc(data, ent->je_joffset);
		data += 8;
		le64enc(data, ent->je_offset);
		data += 8;
		le64enc(data, ent->je_length);
		data += 8;
	}
}

static int
g_journal_record_header_decode(const u_char *data,
    struct g_journal_record_header *hdr)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
	data += sizeof(hdr->jrh_magic);
	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
		return (EINVAL);
	hdr->jrh_journal_id = le32dec(data);
	data += 8;
	hdr->jrh_nentries = le16dec(data);
	data += 2;
	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		ent->je_joffset = le64dec(data);
		data += 8;
		ent->je_offset = le64dec(data);
		data += 8;
		ent->je_length = le64dec(data);
		data += 8;
	}
	return (0);
}
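
/*
 * On-disk layout of a record header, as implemented above: the magic
 * string, the 32-bit journal ID (in an 8-byte slot), the 16-bit number of
 * entries, the 8-byte checksum (the leading bytes of an MD5 digest, see
 * g_journal_flush()) and then jrh_nentries entries of three 64-bit
 * little-endian fields each: je_joffset, je_offset and je_length.
 */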

/*
 * The function reads metadata from a provider (via the given consumer),
 * decodes it to the system endianness and verifies its correctness.
 */
static int
g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = journal_metadata_decode(buf, md);
	g_free(buf);
	/* Is this a gjournal provider at all? */
	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
		return (EINVAL);
	/*
	 * Are we able to handle this version of metadata?
	 * We only maintain backward compatibility.
	 */
	if (md->md_version > G_JOURNAL_VERSION) {
		GJ_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	/* Is the checksum correct? */
	if (error != 0) {
		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}
	return (0);
}

/*
 * The two functions below are responsible for updating metadata.
 * Only metadata on the data provider is updated (we need to update
 * information about the active journal there).
 */
static void
g_journal_metadata_done(struct bio *bp)
{

	/*
	 * There is not much we can do on error except inform about it.
	 */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
		    bp->bio_error);
	} else {
		GJ_LOGREQ(2, bp, "Metadata updated.");
	}
	gj_free(bp->bio_data, bp->bio_length);
	g_destroy_bio(bp);
}

static void
g_journal_metadata_update(struct g_journal_softc *sc)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct bio *bp;
	u_char *sector;

	cp = sc->sc_dconsumer;
	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
	md.md_version = G_JOURNAL_VERSION;
	md.md_id = sc->sc_id;
	md.md_type = sc->sc_orig_type;
	md.md_jstart = sc->sc_jstart;
	md.md_jend = sc->sc_jend;
	md.md_joffset = sc->sc_inactive.jj_offset;
	md.md_jid = sc->sc_journal_previous_id;
	md.md_flags = 0;
	if (sc->sc_flags & GJF_DEVICE_CLEAN)
		md.md_flags |= GJ_FLAG_CLEAN;

	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
	else
		bzero(md.md_provider, sizeof(md.md_provider));
	md.md_provsize = cp->provider->mediasize;
	journal_metadata_encode(&md, sector);

	/*
	 * Flush the cache, so we know all data is on disk.
	 * We write information like "the journal is consistent" here, so we
	 * need to be sure it really is. Without BIO_FLUSH here, we could end
	 * up in a situation where the metadata is stored on disk, but not
	 * all of the data.
	 */
	g_journal_flush_cache(sc);

	bp = g_alloc_bio();
	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = sector;
	bp->bio_cmd = BIO_WRITE;
	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
		bp->bio_done = g_journal_metadata_done;
		g_io_request(bp, cp);
	} else {
		bp->bio_done = NULL;
		g_io_request(bp, cp);
		biowait(bp, "gjmdu");
		g_journal_metadata_done(bp);
	}

	/*
	 * Make sure the metadata reached the disk.
	 */
	g_journal_flush_cache(sc);
}

/*
 * This is where I/O requests come in from GEOM.
 */
static void
g_journal_start(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_to->geom->softc;
	GJ_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->sc_mtx);
		bioq_insert_tail(&sc->sc_regular_queue, bp);
		wakeup(sc);
		mtx_unlock(&sc->sc_mtx);
		return;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
			bp->bio_completed = strlen(bp->bio_to->name) + 1;
			g_io_deliver(bp, 0);
			return;
		}
		/* FALLTHROUGH */
	case BIO_DELETE:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}

static void
g_journal_std_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_from->geom->softc;
	mtx_lock(&sc->sc_mtx);
	bioq_insert_tail(&sc->sc_back_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_mtx);
}

static struct bio *
g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
    int flags)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_offset = start;
	bp->bio_joffset = joffset;
	bp->bio_length = end - start;
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = g_journal_std_done;
	if (data == NULL)
		bp->bio_data = NULL;
	else {
		bp->bio_data = gj_malloc(bp->bio_length, flags);
		if (bp->bio_data != NULL)
			bcopy(data, bp->bio_data, bp->bio_length);
	}
	return (bp);
}

#define	g_journal_insert_bio(head, bp, flags)				\
	g_journal_insert((head), (bp)->bio_offset,			\
		(bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
		(bp)->bio_data, flags)
/*
 * The function below does a lot more than just insert a bio into the queue.
 * It keeps the queue sorted by offset and ensures that there is no duplicated
 * data (it combines bios where ranges overlap).
 *
 * The function returns the number of bios inserted (as a bio can be split).
 */
static int
g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
    u_char *data, int flags)
{
	struct bio *nbp, *cbp, *pbp;
	off_t cstart, cend;
	u_char *tmpdata;
	int n;

	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
	    joffset);
	n = 0;
	pbp = NULL;
	GJQ_FOREACH(*head, cbp) {
		cstart = cbp->bio_offset;
		cend = cbp->bio_offset + cbp->bio_length;

		if (nstart >= cend) {
			/*
			 *  +-------------+
			 *  |             |
			 *  |   current   |  +-------------+
			 *  |     bio     |  |             |
			 *  |             |  |     new     |
			 *  +-------------+  |     bio     |
			 *                   |             |
			 *                   +-------------+
			 */
			GJ_DEBUG(3, "INSERT(%p): 1", *head);
		} else if (nend <= cstart) {
			/*
			 *                   +-------------+
			 *                   |             |
			 *  +-------------+  |   current   |
			 *  |             |  |     bio     |
			 *  |     new     |  |             |
			 *  |     bio     |  +-------------+
			 *  |             |
			 *  +-------------+
			 */
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
			    pbp);
			goto end;
		} else if (nstart <= cstart && nend >= cend) {
			/*
			 *      +-------------+      +-------------+
			 *      | current bio |      | current bio |
			 *  +---+-------------+---+  +-------------+---+
			 *  |   |             |   |  |             |   |
			 *  |   |             |   |  |             |   |
			 *  |   +-------------+   |  +-------------+   |
			 *  |       new bio       |  |     new bio     |
			 *  +---------------------+  +-----------------+
			 *
			 *      +-------------+  +-------------+
			 *      | current bio |  | current bio |
			 *  +---+-------------+  +-------------+
			 *  |   |             |  |             |
			 *  |   |             |  |             |
			 *  |   +-------------+  +-------------+
			 *  |     new bio     |  |   new bio   |
			 *  +-----------------+  +-------------+
			 */
			g_journal_stats_bytes_skipped += cbp->bio_length;
			cbp->bio_offset = nstart;
			cbp->bio_joffset = joffset;
			cbp->bio_length = cend - nstart;
			if (cbp->bio_data != NULL) {
				gj_free(cbp->bio_data, cend - cstart);
				cbp->bio_data = NULL;
			}
			if (data != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(data, cbp->bio_data,
					    cbp->bio_length);
				}
				data += cend - nstart;
			}
			joffset += cend - nstart;
			nstart = cend;
			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend >= cend) {
			/*
			 *  +-----------------+  +-------------+
			 *  |   current bio   |  | current bio |
			 *  |   +-------------+  |   +---------+---+
			 *  |   |             |  |   |         |   |
			 *  |   |             |  |   |         |   |
			 *  +---+-------------+  +---+---------+   |
			 *      |   new bio   |      |   new bio   |
			 *      +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += cend - nstart;
			nbp = g_journal_new_bio(nstart, cend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			if (data != NULL)
				data += cend - nstart;
			joffset += cend - nstart;
			nstart = cend;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend < cend) {
			/*
			 *  +---------------------+
			 *  |     current bio     |
			 *  |   +-------------+   |
			 *  |   |             |   |
			 *  |   |             |   |
			 *  +---+-------------+---+
			 *      |   new bio   |
			 *      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			if (cbp->bio_data == NULL)
				tmpdata = NULL;
			else
				tmpdata = cbp->bio_data + nend - cstart;
			nbp = g_journal_new_bio(nend, cend,
			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
			((struct bio *)cbp->bio_next)->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			n += 2;
			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
			goto end;
		} else if (nstart <= cstart && nend < cend) {
			/*
			 *  +-----------------+      +-------------+
			 *  |   current bio   |      | current bio |
			 *  +-------------+   |  +---+---------+   |
			 *  |             |   |  |   |         |   |
			 *  |             |   |  |   |         |   |
			 *  +-------------+---+  |   +---------+---+
			 *  |   new bio   |      |   new bio   |
			 *  +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			cbp->bio_offset = nend;
			cbp->bio_length = cend - nend;
			cbp->bio_joffset += nend - cstart;
			tmpdata = cbp->bio_data;
			if (tmpdata != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(tmpdata + nend - cstart,
					    cbp->bio_data, cbp->bio_length);
				}
				gj_free(tmpdata, cend - cstart);
			}
			n++;
			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
			goto end;
		}
		if (nstart == nend)
			goto end;
		pbp = cbp;
	}
	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
	if (pbp == NULL)
		*head = nbp;
	else
		pbp->bio_next = nbp;
	nbp->bio_next = NULL;
	n++;
	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
end:
	if (g_journal_debug >= 3) {
		GJQ_FOREACH(*head, cbp) {
			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
			    (intmax_t)cbp->bio_offset,
			    (intmax_t)cbp->bio_length,
			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
		}
		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
	}
	return (n);
}
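
/*
 * g_journal_insert() is normally invoked through the g_journal_insert_bio()
 * macro above; callers add the returned count to sc_current_count (see
 * g_journal_add_current()) to keep track of the queue length.
 */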

/*
 * The function combines neighbouring bios, trying to squeeze as much data as
 * possible into one bio.
 *
 * The function returns the number of bios combined (as a negative value).
 */
static int
g_journal_optimize(struct bio *head)
{
	struct bio *cbp, *pbp;
	int n;

	n = 0;
	pbp = NULL;
	GJQ_FOREACH(head, cbp) {
		/* Skip bios which have to be read first. */
		if (cbp->bio_data == NULL) {
			pbp = NULL;
			continue;
		}
		/* There is no previous bio yet. */
		if (pbp == NULL) {
			pbp = cbp;
			continue;
		}
		/* Is this a neighbouring bio? */
		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
			/* Be sure that the bio queue is sorted. */
			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
			    ("poffset=%jd plength=%jd coffset=%jd",
			    (intmax_t)pbp->bio_offset,
			    (intmax_t)pbp->bio_length,
			    (intmax_t)cbp->bio_offset));
			pbp = cbp;
			continue;
		}
		/* Be sure we don't end up with a bio that is too big. */
		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
			pbp = cbp;
			continue;
		}
		/* Ok, we can join bios. */
		GJ_LOGREQ(4, pbp, "Join: ");
		GJ_LOGREQ(4, cbp, "and: ");
		pbp->bio_data = gj_realloc(pbp->bio_data,
		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
		    cbp->bio_length);
		gj_free(cbp->bio_data, cbp->bio_length);
		pbp->bio_length += cbp->bio_length;
		pbp->bio_next = cbp->bio_next;
		g_destroy_bio(cbp);
		cbp = pbp;
		g_journal_stats_combined_ios++;
		n--;
		GJ_LOGREQ(4, pbp, "Got: ");
	}
	return (n);
}
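
/*
 * Since g_journal_optimize() returns a negative count, callers simply add
 * its result to their counters, e.g. sc->sc_copy_in_progress +=
 * g_journal_optimize(bioq) in g_journal_copy_send() below.
 */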

/*
 * TODO: Update comment.
 * These are the functions responsible for copying one portion of data from
 * the journal to the destination provider.
 * The order goes like this:
 * 1. Read the header, which contains information about the data blocks
 *    following it.
 * 2. Read the data blocks from the journal.
 * 3. Write the data blocks to the data provider.
 *
 * g_journal_copy_start()
 * g_journal_copy_done() - got a finished write request, logs potential errors.
 */

/*
 * When there is no data in the cache, this function is used to read it.
 */
static void
g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
{
	struct bio *cbp;

	/*
	 * We were short on memory, so the data was freed.
	 * In that case we need to read it back from the journal.
	 */
	cbp = g_alloc_bio();
	cbp->bio_cflags = bp->bio_cflags;
	cbp->bio_parent = bp;
	cbp->bio_offset = bp->bio_joffset;
	cbp->bio_length = bp->bio_length;
	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
	cbp->bio_cmd = BIO_READ;
	cbp->bio_done = g_journal_std_done;
	GJ_LOGREQ(4, cbp, "READ FIRST");
	g_io_request(cbp, sc->sc_jconsumer);
	g_journal_cache_misses++;
}

static void
g_journal_copy_send(struct g_journal_softc *sc)
{
	struct bio *bioq, *bp, *lbp;

	bioq = lbp = NULL;
	mtx_lock(&sc->sc_mtx);
	while (sc->sc_copy_in_progress < g_journal_parallel_copies) {
		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
		if (bp == NULL)
			break;
		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
		sc->sc_copy_in_progress++;
		GJQ_INSERT_AFTER(bioq, bp, lbp);
		lbp = bp;
	}
	mtx_unlock(&sc->sc_mtx);
	if (g_journal_do_optimize)
		sc->sc_copy_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
		bp->bio_cflags = GJ_BIO_COPY;
		if (bp->bio_data == NULL)
			g_journal_read_first(sc, bp);
		else {
			bp->bio_joffset = 0;
			GJ_LOGREQ(4, bp, "SEND");
			g_io_request(bp, sc->sc_dconsumer);
		}
	}
}

static void
g_journal_copy_start(struct g_journal_softc *sc)
{

	/*
	 * Remember in the metadata that we're starting to copy journaled data
	 * to the data provider.
	 * In case of a power failure, we will copy this data again on boot.
	 */
	if (!sc->sc_journal_copying) {
		sc->sc_journal_copying = 1;
		GJ_DEBUG(1, "Starting copy of journal.");
		g_journal_metadata_update(sc);
	}
	g_journal_copy_send(sc);
}

/*
 * A data block has been read from the journal provider.
 */
static int
g_journal_copy_read_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	pbp = bp->bio_parent;

	if (bp->bio_error != 0) {
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
		/*
		 * We will not be able to deliver the WRITE request either.
		 */
		gj_free(bp->bio_data, bp->bio_length);
		g_destroy_bio(pbp);
		g_destroy_bio(bp);
		sc->sc_copy_in_progress--;
		return (1);
	}
	pbp->bio_data = bp->bio_data;
	cp = sc->sc_dconsumer;
	g_io_request(pbp, cp);
	GJ_LOGREQ(4, bp, "READ DONE");
	g_destroy_bio(bp);
	return (0);
}

/*
 * A data block has been written to the data provider.
 */
static void
g_journal_copy_write_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	sc->sc_copy_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	GJQ_REMOVE(sc->sc_copy_queue, bp);
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);

	if (sc->sc_copy_in_progress == 0) {
		/*
		 * This was the last write request for this journal.
		 */
		GJ_DEBUG(1, "Data has been copied.");
		sc->sc_journal_copying = 0;
	}
}

static void g_journal_flush_done(struct bio *bp);

/*
 * Flush one record onto the active journal provider.
 */
static void
g_journal_flush(struct g_journal_softc *sc)
{
	struct g_journal_record_header hdr;
	struct g_journal_entry *ent;
	struct g_provider *pp;
	struct bio **bioq;
	struct bio *bp, *fbp, *pbp;
	off_t joffset, size;
	u_char *data, hash[16];
	MD5_CTX ctx;
	u_int i;

	if (sc->sc_current_count == 0)
		return;

	size = 0;
	pp = sc->sc_jprovider;
	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
	joffset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
	    sc->sc_current_count, pp->name, (intmax_t)joffset);

	/*
	 * Store the 'journal id', so we know to which journal this record
	 * belongs.
	 */
	hdr.jrh_journal_id = sc->sc_journal_id;
	/*
	 * Could be less than g_journal_record_entries if called due to a
	 * timeout.
	 */
	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));

	bioq = &sc->sc_active.jj_queue;
	pbp = sc->sc_flush_queue;

	fbp = g_alloc_bio();
	fbp->bio_parent = NULL;
	fbp->bio_cflags = GJ_BIO_JOURNAL;
	fbp->bio_offset = -1;
	fbp->bio_joffset = joffset;
	fbp->bio_length = pp->sectorsize;
	fbp->bio_cmd = BIO_WRITE;
	fbp->bio_done = g_journal_std_done;
	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
	pbp = fbp;
	fbp->bio_to = pp;
	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
	joffset += pp->sectorsize;
	sc->sc_flush_count++;
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
		MD5Init(&ctx);

	for (i = 0; i < hdr.jrh_nentries; i++) {
		bp = sc->sc_current_queue;
		KASSERT(bp != NULL, ("NULL bp"));
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSHED");
		sc->sc_current_queue = bp->bio_next;
		bp->bio_next = NULL;
		sc->sc_current_count--;

		/* Add to the header. */
		ent = &hdr.jrh_entries[i];
		ent->je_offset = bp->bio_offset;
		ent->je_joffset = joffset;
		ent->je_length = bp->bio_length;
		size += ent->je_length;

		data = bp->bio_data;
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Update(&ctx, data, ent->je_length);
		bzero(bp, sizeof(*bp));
		bp->bio_cflags = GJ_BIO_JOURNAL;
		bp->bio_offset = ent->je_offset;
		bp->bio_joffset = ent->je_joffset;
		bp->bio_length = ent->je_length;
		bp->bio_data = data;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_done = g_journal_std_done;
		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
		pbp = bp;
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSH_OUT");
		joffset += bp->bio_length;
		sc->sc_flush_count++;

		/*
		 * Add the request to the active journal queue
		 * (sc_active.jj_queue). This is our cache. After a journal
		 * switch we don't have to read the data from the inactive
		 * journal, because we keep it in memory.
		 */
		g_journal_insert(bioq, ent->je_offset,
		    ent->je_offset + ent->je_length, ent->je_joffset, data,
		    M_NOWAIT);
	}

	/*
	 * After all requests, store the valid header.
	 */
	data = gj_malloc(pp->sectorsize, M_WAITOK);
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
		MD5Final(hash, &ctx);
		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
	}
	g_journal_record_header_encode(&hdr, data);
	fbp->bio_data = data;

	sc->sc_journal_offset = joffset;

	g_journal_check_overflow(sc);
}

/*
 * Flush request finished.
 */
static void
g_journal_flush_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;

	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));

	cp = bp->bio_from;
	sc = cp->geom->softc;
	sc->sc_flush_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);
}

static void g_journal_release_delayed(struct g_journal_softc *sc);

static void
g_journal_flush_send(struct g_journal_softc *sc)
{
	struct g_consumer *cp;
	struct bio *bioq, *bp, *lbp;

	cp = sc->sc_jconsumer;
	bioq = lbp = NULL;
	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
		/* Send one flush request to the active journal. */
		bp = GJQ_FIRST(sc->sc_flush_queue);
		if (bp != NULL) {
			GJQ_REMOVE(sc->sc_flush_queue, bp);
			sc->sc_flush_count--;
			bp->bio_offset = bp->bio_joffset;
			bp->bio_joffset = 0;
			sc->sc_flush_in_progress++;
			GJQ_INSERT_AFTER(bioq, bp, lbp);
			lbp = bp;
		}
		/* Try to release delayed requests. */
		g_journal_release_delayed(sc);
		/* If there are no requests to flush, leave. */
		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
			break;
	}
	if (g_journal_do_optimize)
		sc->sc_flush_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJ_LOGREQ(3, bp, "Flush request sent");
		g_io_request(bp, cp);
	}
}

static void
g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
{
	int n;

	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
	sc->sc_current_count += n;
	n = g_journal_optimize(sc->sc_current_queue);
	sc->sc_current_count += n;
	/*
	 * For requests which are added to the current queue we deliver a
	 * response immediately.
	 */
	bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, 0);
	if (sc->sc_current_count >= g_journal_record_entries) {
		/*
		 * Let's flush one record onto the active journal provider.
		 */
		g_journal_flush(sc);
	}
}

static void
g_journal_release_delayed(struct g_journal_softc *sc)
{
	struct bio *bp;

	for (;;) {
		/* The flush queue is full, exit. */
		if (sc->sc_flush_count >= g_journal_accept_immediately)
			return;
		bp = bioq_takefirst(&sc->sc_delayed_queue);
		if (bp == NULL)
			return;
		sc->sc_delayed_count--;
		g_journal_add_current(sc, bp);
	}
}

/*
 * Add an I/O request to the current queue. If we have enough requests for one
 * journal record we flush them onto the active journal provider.
 */
static void
g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
{

	/*
	 * The flush queue is full, we need to delay the request.
	 */
	if (sc->sc_delayed_count > 0 ||
	    sc->sc_flush_count >= g_journal_accept_immediately) {
		GJ_LOGREQ(4, bp, "DELAYED");
		bioq_insert_tail(&sc->sc_delayed_queue, bp);
		sc->sc_delayed_count++;
		return;
	}

	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
	    ("DELAYED queue not empty."));
	g_journal_add_current(sc, bp);
}

static void g_journal_read_done(struct bio *bp);

/*
 * Try to find the requested data in the cache.
 */
static struct bio *
g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	GJQ_FOREACH(head, bp) {
		if (bp->bio_offset == -1)
			continue;
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend) {
			if (!sorted)
				continue;
			else {
				bp = NULL;
				break;
			}
		}
		if (bp->bio_data == NULL)
			break;
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in the cache, deliver
			 * the happy message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * Try to find the requested data in the cache.
 */
static struct bio *
g_journal_read_queue_find(struct bio_queue *head, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	TAILQ_FOREACH(bp, head, bio_queue) {
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend)
			continue;
		KASSERT(bp->bio_data != NULL,
		    ("%s: bio_data == NULL", __func__));
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in the cache, deliver
			 * the happy message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * This function is used for collecting data on read.
 * The complexity is because parts of the data can be stored in six different
 * places:
 * - in delayed requests
 * - in memory - the data not yet sent to the active journal provider
 * - in requests which are going to be sent to the active journal
 * - in the active journal
 * - in the inactive journal
 * - in the data provider
 */
static void
g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
    off_t oend)
{
	struct bio *bp, *nbp, *head;
	off_t cstart, cend;
	u_int i, sorted = 0;

	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);

	cstart = cend = -1;
	bp = NULL;
	head = NULL;
	for (i = 0; i <= 5; i++) {
		switch (i) {
		case 0:	/* Delayed requests. */
			head = NULL;
			sorted = 0;
			break;
		case 1:	/* Not-yet-sent data. */
			head = sc->sc_current_queue;
			sorted = 1;
			break;
		case 2:	/* In-flight to the active journal. */
			head = sc->sc_flush_queue;
			sorted = 0;
			break;
		case 3:	/* Active journal. */
			head = sc->sc_active.jj_queue;
			sorted = 1;
			break;
		case 4:	/* Inactive journal. */
			/*
			 * XXX: Here could be a race with g_journal_lowmem().
			 */
			head = sc->sc_inactive.jj_queue;
			sorted = 1;
			break;
		case 5:	/* In-flight to the data provider. */
			head = sc->sc_copy_queue;
			sorted = 0;
			break;
		default:
			panic("gjournal %s: i=%d", __func__, i);
		}
		if (i == 0)
			bp = g_journal_read_queue_find(
			    &sc->sc_delayed_queue.queue, pbp, ostart, oend);
		else
			bp = g_journal_read_find(head, sorted, pbp, ostart,
			    oend);
		if (bp == pbp) { /* Got the whole request. */
			GJ_DEBUG(2, "Got the whole request from %u.", i);
			return;
		} else if (bp != NULL) {
			cstart = MAX(ostart, bp->bio_offset);
			cend = MIN(oend, bp->bio_offset + bp->bio_length);
			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
			    i, (intmax_t)cstart, (intmax_t)cend);
			break;
		}
	}
	if (bp != NULL) {
		if (bp->bio_data == NULL) {
			nbp = g_duplicate_bio(pbp);
			nbp->bio_cflags = GJ_BIO_READ;
			nbp->bio_data =
			    pbp->bio_data + cstart - pbp->bio_offset;
			nbp->bio_offset =
			    bp->bio_joffset + cstart - bp->bio_offset;
			nbp->bio_length = cend - cstart;
			nbp->bio_done = g_journal_read_done;
			g_io_request(nbp, sc->sc_jconsumer);
		}
		/*
		 * If we don't have the whole request yet, call g_journal_read()
		 * recursively.
		 */
		if (ostart < cstart)
			g_journal_read(sc, pbp, ostart, cstart);
		if (oend > cend)
			g_journal_read(sc, pbp, cend, oend);
	} else {
		/*
		 * No data in memory, no data in the journal.
		 * It's time to ask the data provider.
		 */
		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
		nbp = g_duplicate_bio(pbp);
		nbp->bio_cflags = GJ_BIO_READ;
		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
		nbp->bio_offset = ostart;
		nbp->bio_length = oend - ostart;
		nbp->bio_done = g_journal_read_done;
		g_io_request(nbp, sc->sc_dconsumer);
		/* We have the whole request, return here. */
		return;
	}
}
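
/*
 * For example, if only the middle of a request is found in one of the
 * sources above, the matching part is filled in (either copied from memory
 * or read from the journal) and the two recursive calls re-enter
 * g_journal_read() for the leading and trailing gaps, each of which may be
 * served from a different source or from the data provider itself.
 */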

/*
 * Function responsible for handling finished READ requests.
 * Actually, g_std_done() could be used here; the only difference is that we
 * log errors.
 */
static void
g_journal_read_done(struct bio *bp)
{
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_READ,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));

	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	pbp->bio_completed += bp->bio_length;

	if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
	}
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed &&
	    pbp->bio_completed == pbp->bio_length) {
		/* We're done. */
		g_io_deliver(pbp, 0);
	}
}

/*
 * Deactivate the current journal and activate the next one.
 */
static void
g_journal_switch(struct g_journal_softc *sc)
{
	struct g_provider *pp;

	if (JEMPTY(sc)) {
		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
		pp = LIST_FIRST(&sc->sc_geom->provider);
		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
			sc->sc_flags |= GJF_DEVICE_CLEAN;
			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
			g_journal_metadata_update(sc);
		}
	} else {
		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);

		pp = sc->sc_jprovider;

		sc->sc_journal_previous_id = sc->sc_journal_id;

		sc->sc_journal_id = sc->sc_journal_next_id;
		sc->sc_journal_next_id = arc4random();

		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);

		g_journal_write_header(sc);

		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;

		sc->sc_active.jj_offset =
		    sc->sc_journal_offset - pp->sectorsize;
		sc->sc_active.jj_queue = NULL;

		/*
		 * The switch is done, start copying data from the (now)
		 * inactive journal to the data provider.
		 */
		g_journal_copy_start(sc);
	}
	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
	mtx_unlock(&sc->sc_mtx);
}

static void
g_journal_initialize(struct g_journal_softc *sc)
{

	sc->sc_journal_id = arc4random();
	sc->sc_journal_next_id = arc4random();
	sc->sc_journal_previous_id = sc->sc_journal_id;
	sc->sc_journal_offset = sc->sc_jstart;
	sc->sc_inactive.jj_offset = sc->sc_jstart;
	g_journal_write_header(sc);
	sc->sc_active.jj_offset = sc->sc_jstart;
}

static void
g_journal_mark_as_dirty(struct g_journal_softc *sc)
{
	const struct g_journal_desc *desc;
	int i;

	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
		desc->jd_dirty(sc->sc_dconsumer);
}

/*
 * The function reads a record header from the given journal.
 * It is very similar to g_read_data(9), but it doesn't allocate memory for
 * the bio and data on every call.
 */
static int
g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
    void *data)
{
	int error;

	bzero(bp, sizeof(*bp));
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = data;
	g_io_request(bp, cp);
	error = biowait(bp, "gjs_read");
	return (error);
}

#if 0
/*
 * The function is called when we start the journal device and detect that
 * one of the journals was not fully copied.
 * The purpose of this function is to read all record headers from the journal
 * and place them in the inactive queue, so we can start the journal
 * synchronization process and the journal provider itself.
 * The design decision was not to synchronize the whole journal here, as that
 * can take too much time. Reading only the headers and delaying the
 * synchronization process until after the journal provider is started should
 * be the best choice.
 */
#endif
1804
1805static void
1806g_journal_sync(struct g_journal_softc *sc)
1807{
1808	struct g_journal_record_header rhdr;
1809	struct g_journal_entry *ent;
1810	struct g_journal_header jhdr;
1811	struct g_consumer *cp;
1812	struct bio *bp, *fbp, *tbp;
1813	off_t joffset, offset;
1814	u_char *buf, sum[16];
1815	uint64_t id;
1816	MD5_CTX ctx;
1817	int error, found, i;
1818
1819	found = 0;
1820	fbp = NULL;
1821	cp = sc->sc_jconsumer;
1822	bp = g_alloc_bio();
1823	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
1824	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
1825
1826	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
1827
1828	/*
1829	 * Read and decode first journal header.
1830	 */
1831	error = g_journal_sync_read(cp, bp, offset, buf);
1832	if (error != 0) {
1833		GJ_DEBUG(0, "Error while reading journal header from %s.",
1834		    cp->provider->name);
1835		goto end;
1836	}
1837	error = g_journal_header_decode(buf, &jhdr);
1838	if (error != 0) {
1839		GJ_DEBUG(0, "Cannot decode journal header from %s.",
1840		    cp->provider->name);
1841		goto end;
1842	}
1843	id = sc->sc_journal_id;
1844	if (jhdr.jh_journal_id != sc->sc_journal_id) {
1845		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
1846		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
1847		goto end;
1848	}
1849	offset += cp->provider->sectorsize;
1850	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1851
1852	for (;;) {
1853		/*
1854		 * If the biggest record won't fit, look for a record header or
1855		 * journal header from the beginning.
1856		 */
1857		GJ_VALIDATE_OFFSET(offset, sc);
1858		error = g_journal_sync_read(cp, bp, offset, buf);
1859		if (error != 0) {
1860			/*
1861			 * Not good. An error while reading a header means
1862			 * that we cannot read the next headers and, in
1863			 * consequence, cannot find the termination.
1864			 */
1865			GJ_DEBUG(0,
1866			    "Error while reading record header from %s.",
1867			    cp->provider->name);
1868			break;
1869		}
1870
1871		error = g_journal_record_header_decode(buf, &rhdr);
1872		if (error != 0) {
1873			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1874			    (intmax_t)offset, error);
1875			/*
1876			 * This is not a record header.
1877			 * If we are lucky, this is the next journal header.
1878			 */
1879			error = g_journal_header_decode(buf, &jhdr);
1880			if (error != 0) {
1881				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1882				    (intmax_t)offset, error);
1883				/*
1884				 * Nope, this is not a journal header either,
1885				 * which basically means that the journal is
1886				 * not terminated properly.
1887				 */
1888				error = ENOENT;
1889				break;
1890			}
1891			/*
1892			 * Ok. This is the header of _some_ journal. Now we need
1893			 * to verify that it is the header of the _next_ journal.
1894			 */
1895			if (jhdr.jh_journal_id != id) {
1896				GJ_DEBUG(1, "Journal ID mismatch at %jd "
1897				    "(0x%08x != 0x%08x).", (intmax_t)offset,
1898				    (u_int)jhdr.jh_journal_id, (u_int)id);
1899				error = ENOENT;
1900				break;
1901			}
1902
1903			/* Found termination. */
1904			found++;
1905			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1906			    (intmax_t)offset, (u_int)id);
1907			sc->sc_active.jj_offset = offset;
1908			sc->sc_journal_offset =
1909			    offset + cp->provider->sectorsize;
1910			sc->sc_journal_id = id;
1911			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1912
1913			while ((tbp = fbp) != NULL) {
1914				fbp = tbp->bio_next;
1915				GJ_LOGREQ(3, tbp, "Adding request.");
1916				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1917				    tbp, M_WAITOK);
1918			}
1919
1920			/* Skip journal's header. */
1921			offset += cp->provider->sectorsize;
1922			continue;
1923		}
1924
1925		/* Skip record's header. */
1926		offset += cp->provider->sectorsize;
1927
1928		/*
1929		 * Add information about every record entry to the inactive
1930		 * queue.
1931		 */
1932		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1933			MD5Init(&ctx);
1934		for (i = 0; i < rhdr.jrh_nentries; i++) {
1935			ent = &rhdr.jrh_entries[i];
1936			GJ_DEBUG(3, "Insert entry: %jd %jd.",
1937			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1938			g_journal_insert(&fbp, ent->je_offset,
1939			    ent->je_offset + ent->je_length, ent->je_joffset,
1940			    NULL, M_WAITOK);
1941			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1942				u_char *buf2;
1943
1944				/*
1945				 * TODO: Should use faster function (like
1946				 *       g_journal_sync_read()).
1947				 */
1948				buf2 = g_read_data(cp, offset, ent->je_length,
1949				    NULL);
1950				if (buf2 == NULL)
1951					GJ_DEBUG(0, "Cannot read data at %jd.",
1952					    (intmax_t)offset);
1953				else {
1954					MD5Update(&ctx, buf2, ent->je_length);
1955					g_free(buf2);
1956				}
1957			}
1958			/* Skip entry's data. */
1959			offset += ent->je_length;
1960		}
1961		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1962			MD5Final(sum, &ctx);
1963			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1964				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1965				    (intmax_t)offset);
1966			}
1967		}
1968	}
1969end:
1970	gj_free(bp->bio_data, cp->provider->sectorsize);
1971	g_destroy_bio(bp);
1972
1973	/* Remove bios from unterminated journal. */
1974	while ((tbp = fbp) != NULL) {
1975		fbp = tbp->bio_next;
1976		g_destroy_bio(tbp);
1977	}
1978
1979	if (found < 1 && joffset > 0) {
1980		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
1981		    sc->sc_name);
1982		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
1983			sc->sc_inactive.jj_queue = tbp->bio_next;
1984			g_destroy_bio(tbp);
1985		}
1986		g_journal_initialize(sc);
1987		g_journal_mark_as_dirty(sc);
1988	} else {
1989		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
1990		g_journal_copy_start(sc);
1991	}
1992}
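
/*
 * Illustrative note on the termination search above: every journal header
 * carries its own ID and the ID of the journal that will follow it, so the
 * headers form a chain.  With hypothetical IDs, a journal whose header says
 * (id=0x1111, next=0x2222) is properly terminated only by a later header
 * whose id is 0x2222; hitting anything else (another ID, or data that
 * decodes as neither kind of header) means the journal was not terminated
 * and has to be reinitialized.
 */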
1993
1994/*
1995 * Wait for requests.
1996 * If we have requests in the current queue, flush them 3 seconds after the
1997 * last flush. In this way we don't wait forever (or for a journal switch)
1998 * before storing partially filled records in the journal.
1999 */
2000static void
2001g_journal_wait(struct g_journal_softc *sc, time_t last_write)
2002{
2003	int error, timeout;
2004
2005	GJ_DEBUG(3, "%s: enter", __func__);
2006	if (sc->sc_current_count == 0) {
2007		if (g_journal_debug < 2)
2008			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
2009		else {
2010			/*
2011			 * If debugging is turned on, show the number of
2012			 * elements in the various queues.
2013			 */
2014			for (;;) {
2015				error = msleep(sc, &sc->sc_mtx, PRIBIO,
2016				    "gj:work", hz * 3);
2017				if (error == 0) {
2018					mtx_unlock(&sc->sc_mtx);
2019					break;
2020				}
2021				GJ_DEBUG(3, "Report: current count=%d",
2022				    sc->sc_current_count);
2023				GJ_DEBUG(3, "Report: flush count=%d",
2024				    sc->sc_flush_count);
2025				GJ_DEBUG(3, "Report: flush in progress=%d",
2026				    sc->sc_flush_in_progress);
2027				GJ_DEBUG(3, "Report: copy in progress=%d",
2028				    sc->sc_copy_in_progress);
2029				GJ_DEBUG(3, "Report: delayed=%d",
2030				    sc->sc_delayed_count);
2031			}
2032		}
2033		GJ_DEBUG(3, "%s: exit 1", __func__);
2034		return;
2035	}
2036
2037	/*
2038	 * Flush records every 3 seconds, even if they are not full.
2039	 */
2040	timeout = (last_write + 3 - time_second) * hz;
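	/*
	 * Worked example with hypothetical times: if the last write happened
	 * at time_second == 100 and it is now 102, then
	 * timeout == (100 + 3 - 102) * hz, i.e. at most one more second of
	 * sleep; at 103 or later the value is <= 0 and we flush immediately.
	 */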
2041	if (timeout <= 0) {
2042		mtx_unlock(&sc->sc_mtx);
2043		g_journal_flush(sc);
2044		g_journal_flush_send(sc);
2045		GJ_DEBUG(3, "%s: exit 2", __func__);
2046		return;
2047	}
2048	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
2049	if (error == EWOULDBLOCK)
2050		g_journal_flush_send(sc);
2051	GJ_DEBUG(3, "%s: exit 3", __func__);
2052}
2053
2054/*
2055 * Worker thread.
2056 */
2057static void
2058g_journal_worker(void *arg)
2059{
2060	struct g_journal_softc *sc;
2061	struct g_geom *gp;
2062	struct g_provider *pp;
2063	struct bio *bp;
2064	time_t last_write;
2065	int type;
2066
2067	thread_lock(curthread);
2068	sched_prio(curthread, PRIBIO);
2069	thread_unlock(curthread);
2070
2071	sc = arg;
2072	type = 0;	/* gcc */
2073
2074	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
2075		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
2076		g_journal_initialize(sc);
2077	} else {
2078		g_journal_sync(sc);
2079	}
2080	/*
2081	 * Check if we can use BIO_FLUSH.
2082	 */
2083	sc->sc_bio_flush = 0;
2084	if (g_io_flush(sc->sc_jconsumer) == 0) {
2085		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
2086		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2087		    sc->sc_jconsumer->provider->name);
2088	} else {
2089		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2090		    sc->sc_jconsumer->provider->name);
2091	}
2092	if (sc->sc_jconsumer != sc->sc_dconsumer) {
2093		if (g_io_flush(sc->sc_dconsumer) == 0) {
2094			sc->sc_bio_flush |= GJ_FLUSH_DATA;
2095			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2096			    sc->sc_dconsumer->provider->name);
2097		} else {
2098			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2099			    sc->sc_dconsumer->provider->name);
2100		}
2101	}
2102
2103	gp = sc->sc_geom;
2104	g_topology_lock();
2105	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
2106	pp->mediasize = sc->sc_mediasize;
2107	/*
2108	 * There could be a problem when the data provider and the journal
2109	 * provider have different sector sizes, but such a scenario is
2110	 * prevented at journal creation time.
2111	 */
2112	pp->sectorsize = sc->sc_sectorsize;
2113	g_error_provider(pp, 0);
2114	g_topology_unlock();
2115	last_write = time_second;
2116
2117	if (sc->sc_rootmount != NULL) {
2118		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2119		root_mount_rel(sc->sc_rootmount);
2120		sc->sc_rootmount = NULL;
2121	}
2122
2123	for (;;) {
2124		/* Get first request from the queue. */
2125		mtx_lock(&sc->sc_mtx);
2126		bp = bioq_first(&sc->sc_back_queue);
2127		if (bp != NULL)
2128			type = (bp->bio_cflags & GJ_BIO_MASK);
2129		if (bp == NULL) {
2130			bp = bioq_first(&sc->sc_regular_queue);
2131			if (bp != NULL)
2132				type = GJ_BIO_REGULAR;
2133		}
2134		if (bp == NULL) {
2135try_switch:
2136			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2137			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2138				if (sc->sc_current_count > 0) {
2139					mtx_unlock(&sc->sc_mtx);
2140					g_journal_flush(sc);
2141					g_journal_flush_send(sc);
2142					continue;
2143				}
2144				if (sc->sc_flush_in_progress > 0)
2145					goto sleep;
2146				if (sc->sc_copy_in_progress > 0)
2147					goto sleep;
2148			}
2149			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2150				mtx_unlock(&sc->sc_mtx);
2151				g_journal_switch(sc);
2152				wakeup(&sc->sc_journal_copying);
2153				continue;
2154			}
2155			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2156				GJ_DEBUG(1, "Shutting down worker "
2157				    "thread for %s.", gp->name);
2158				sc->sc_worker = NULL;
2159				wakeup(&sc->sc_worker);
2160				mtx_unlock(&sc->sc_mtx);
2161				kproc_exit(0);
2162			}
2163sleep:
2164			g_journal_wait(sc, last_write);
2165			continue;
2166		}
2167		/*
2168		 * If we're in the switch process, we need to delay all new
2169		 * write requests until it is done.
2170		 */
2171		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2172		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2173			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2174			goto try_switch;
2175		}
2176		if (type == GJ_BIO_REGULAR)
2177			bioq_remove(&sc->sc_regular_queue, bp);
2178		else
2179			bioq_remove(&sc->sc_back_queue, bp);
2180		mtx_unlock(&sc->sc_mtx);
2181		switch (type) {
2182		case GJ_BIO_REGULAR:
2183			/* Regular request. */
2184			switch (bp->bio_cmd) {
2185			case BIO_READ:
2186				g_journal_read(sc, bp, bp->bio_offset,
2187				    bp->bio_offset + bp->bio_length);
2188				break;
2189			case BIO_WRITE:
2190				last_write = time_second;
2191				g_journal_add_request(sc, bp);
2192				g_journal_flush_send(sc);
2193				break;
2194			default:
2195				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2196			}
2197			break;
2198		case GJ_BIO_COPY:
2199			switch (bp->bio_cmd) {
2200			case BIO_READ:
2201				if (g_journal_copy_read_done(bp))
2202					g_journal_copy_send(sc);
2203				break;
2204			case BIO_WRITE:
2205				g_journal_copy_write_done(bp);
2206				g_journal_copy_send(sc);
2207				break;
2208			default:
2209				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2210			}
2211			break;
2212		case GJ_BIO_JOURNAL:
2213			g_journal_flush_done(bp);
2214			g_journal_flush_send(sc);
2215			break;
2216		case GJ_BIO_READ:
2217		default:
2218			panic("Invalid bio (%d).", type);
2219		}
2220	}
2221}
2222
2223static void
2224g_journal_destroy_event(void *arg, int flags __unused)
2225{
2226	struct g_journal_softc *sc;
2227
2228	g_topology_assert();
2229	sc = arg;
2230	g_journal_destroy(sc);
2231}
2232
2233static void
2234g_journal_timeout(void *arg)
2235{
2236	struct g_journal_softc *sc;
2237
2238	sc = arg;
2239	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
2240	    sc->sc_geom->name);
2241	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
2242}
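
/*
 * Example of when the timeout above fires: the data half of a journal was
 * tasted, but the matching journal provider never appears.  Five seconds
 * later the callout armed in g_journal_create() runs this handler, which
 * schedules the destruction through an event so that it executes with the
 * topology lock held, as g_journal_destroy() requires.
 */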
2243
2244static struct g_geom *
2245g_journal_create(struct g_class *mp, struct g_provider *pp,
2246    const struct g_journal_metadata *md)
2247{
2248	struct g_journal_softc *sc;
2249	struct g_geom *gp;
2250	struct g_consumer *cp;
2251	int error;
2252
2253	sc = NULL;	/* gcc */
2254
2255	g_topology_assert();
2256	/*
2257	 * There are two possibilities:
2258	 * 1. Data and both journals are on the same provider.
2259	 * 2. Data and journals are all on separate providers.
2260	 */
2261	/* Look for journal device with the same ID. */
2262	LIST_FOREACH(gp, &mp->geom, geom) {
2263		sc = gp->softc;
2264		if (sc == NULL)
2265			continue;
2266		if (sc->sc_id == md->md_id)
2267			break;
2268	}
2269	if (gp == NULL)
2270		sc = NULL;
2271	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2272		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2273		return (NULL);
2274	}
2275	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2276		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2277		return (NULL);
2278	}
2279	if (md->md_type & GJ_TYPE_DATA) {
2280		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2281		    pp->name);
2282	}
2283	if (md->md_type & GJ_TYPE_JOURNAL) {
2284		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2285		    pp->name);
2286	}
2287
2288	if (sc == NULL) {
2289		/* Action geom. */
2290		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2291		sc->sc_id = md->md_id;
2292		sc->sc_type = 0;
2293		sc->sc_flags = 0;
2294		sc->sc_worker = NULL;
2295
2296		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2297		gp->start = g_journal_start;
2298		gp->orphan = g_journal_orphan;
2299		gp->access = g_journal_access;
2300		gp->softc = sc;
2301		gp->flags |= G_GEOM_VOLATILE_BIO;
2302		sc->sc_geom = gp;
2303
2304		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2305
2306		bioq_init(&sc->sc_back_queue);
2307		bioq_init(&sc->sc_regular_queue);
2308		bioq_init(&sc->sc_delayed_queue);
2309		sc->sc_delayed_count = 0;
2310		sc->sc_current_queue = NULL;
2311		sc->sc_current_count = 0;
2312		sc->sc_flush_queue = NULL;
2313		sc->sc_flush_count = 0;
2314		sc->sc_flush_in_progress = 0;
2315		sc->sc_copy_queue = NULL;
2316		sc->sc_copy_in_progress = 0;
2317		sc->sc_inactive.jj_queue = NULL;
2318		sc->sc_active.jj_queue = NULL;
2319
2320		sc->sc_rootmount = root_mount_hold("GJOURNAL");
2321		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
2322
2323		callout_init(&sc->sc_callout, 1);
2324		if (md->md_type != GJ_TYPE_COMPLETE) {
2325			/*
2326			 * Journal and data are on separate providers.
2327			 * At this point we have only one of them.
2328			 * We set up a timeout in case the other part does not
2329			 * appear, so we won't wait forever.
2330			 */
2331			callout_reset(&sc->sc_callout, 5 * hz,
2332			    g_journal_timeout, sc);
2333		}
2334	}
2335
2336	/* Remember type of the data provider. */
2337	if (md->md_type & GJ_TYPE_DATA)
2338		sc->sc_orig_type = md->md_type;
2339	sc->sc_type |= md->md_type;
2340	cp = NULL;
2341
2342	if (md->md_type & GJ_TYPE_DATA) {
2343		if (md->md_flags & GJ_FLAG_CLEAN)
2344			sc->sc_flags |= GJF_DEVICE_CLEAN;
2345		if (md->md_flags & GJ_FLAG_CHECKSUM)
2346			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
2347		cp = g_new_consumer(gp);
2348		error = g_attach(cp, pp);
2349		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2350		    pp->name, error));
2351		error = g_access(cp, 1, 1, 1);
2352		if (error != 0) {
2353			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
2354			    error);
2355			g_journal_destroy(sc);
2356			return (NULL);
2357		}
2358		sc->sc_dconsumer = cp;
2359		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
2360		sc->sc_sectorsize = pp->sectorsize;
2361		sc->sc_jstart = md->md_jstart;
2362		sc->sc_jend = md->md_jend;
2363		if (md->md_provider[0] != '\0')
2364			sc->sc_flags |= GJF_DEVICE_HARDCODED;
2365		sc->sc_journal_offset = md->md_joffset;
2366		sc->sc_journal_id = md->md_jid;
2367		sc->sc_journal_previous_id = md->md_jid;
2368	}
2369	if (md->md_type & GJ_TYPE_JOURNAL) {
2370		if (cp == NULL) {
2371			cp = g_new_consumer(gp);
2372			error = g_attach(cp, pp);
2373			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2374			    pp->name, error));
2375			error = g_access(cp, 1, 1, 1);
2376			if (error != 0) {
2377				GJ_DEBUG(0, "Cannot access %s (error=%d).",
2378				    pp->name, error);
2379				g_journal_destroy(sc);
2380				return (NULL);
2381			}
2382		} else {
2383			/*
2384			 * The journal is on the same provider as the data, so
2385			 * the data provider ends where the journal starts.
2386			 */
2387			sc->sc_mediasize = md->md_jstart;
2388		}
2389		sc->sc_jconsumer = cp;
2390	}
2391
2392	/* Start switcher kproc if needed. */
2393	if (g_journal_switcher_proc == NULL)
2394		g_journal_start_switcher(mp);
2395
2396	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
2397		/* Journal is not complete yet. */
2398		return (gp);
2399	} else {
2400		/* Journal complete, cancel timeout. */
2401		callout_drain(&sc->sc_callout);
2402	}
2403
2404	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
2405	    "g_journal %s", sc->sc_name);
2406	if (error != 0) {
2407		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
2408		    sc->sc_name);
2409		g_journal_destroy(sc);
2410		return (NULL);
2411	}
2412
2413	return (gp);
2414}
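
/*
 * An illustrative walk through the function above for the two-provider case
 * (provider names are hypothetical): tasting the data half on ada0 creates
 * the geom, attaches a consumer, and arms the 5-second timeout, because
 * GJ_TYPE_COMPLETE has not been reached yet.  When the journal half on ada1
 * is tasted with the same metadata ID, the existing softc is found, the
 * journal consumer is attached, the timeout is drained, and the worker
 * thread is started, which in turn creates the <name>.journal provider.
 */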
2415
2416static void
2417g_journal_destroy_consumer(void *arg, int flags __unused)
2418{
2419	struct g_consumer *cp;
2420
2421	g_topology_assert();
2422	cp = arg;
2423	g_detach(cp);
2424	g_destroy_consumer(cp);
2425}
2426
2427static int
2428g_journal_destroy(struct g_journal_softc *sc)
2429{
2430	struct g_geom *gp;
2431	struct g_provider *pp;
2432	struct g_consumer *cp;
2433
2434	g_topology_assert();
2435
2436	if (sc == NULL)
2437		return (ENXIO);
2438
2439	gp = sc->sc_geom;
2440	pp = LIST_FIRST(&gp->provider);
2441	if (pp != NULL) {
2442		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
2443			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
2444			    pp->name, pp->acr, pp->acw, pp->ace);
2445			return (EBUSY);
2446		}
2447		g_error_provider(pp, ENXIO);
2448
2449		g_journal_flush(sc);
2450		g_journal_flush_send(sc);
2451		g_journal_switch(sc);
2452	}
2453
2454	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);
2455
2456	g_topology_unlock();
2457
2458	if (sc->sc_rootmount != NULL) {
2459		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2460		root_mount_rel(sc->sc_rootmount);
2461		sc->sc_rootmount = NULL;
2462	}
2463
2464	callout_drain(&sc->sc_callout);
2465	mtx_lock(&sc->sc_mtx);
2466	wakeup(sc);
2467	while (sc->sc_worker != NULL)
2468		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
2469	mtx_unlock(&sc->sc_mtx);
2470
2471	if (pp != NULL) {
2472		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
2473		g_journal_metadata_update(sc);
2474		g_topology_lock();
2475		g_wither_provider(pp, ENXIO);
2476	} else {
2477		g_topology_lock();
2478	}
2479	mtx_destroy(&sc->sc_mtx);
2480
2481	if (sc->sc_current_count != 0) {
2482		GJ_DEBUG(0, "Warning! Number of current requests %d.",
2483		    sc->sc_current_count);
2484	}
2485
2486	LIST_FOREACH(cp, &gp->consumer, consumer) {
2487		if (cp->acr + cp->acw + cp->ace > 0)
2488			g_access(cp, -1, -1, -1);
2489		/*
2490		 * We keep all consumers open for writing, so if we detached
2491		 * and destroyed the consumers here, their providers would be
2492		 * offered for tasting again and the journal would be restarted.
2493		 * Sending an event here prevents this from happening.
2494		 */
2495		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
2496	}
2497	gp->softc = NULL;
2498	g_wither_geom(gp, ENXIO);
2499	free(sc, M_JOURNAL);
2500	return (0);
2501}
2502
2503static void
2504g_journal_taste_orphan(struct g_consumer *cp)
2505{
2506
2507	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2508	    cp->provider->name));
2509}
2510
2511static struct g_geom *
2512g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2513{
2514	struct g_journal_metadata md;
2515	struct g_consumer *cp;
2516	struct g_geom *gp;
2517	int error;
2518
2519	g_topology_assert();
2520	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2521	GJ_DEBUG(2, "Tasting %s.", pp->name);
2522	if (pp->geom->class == mp)
2523		return (NULL);
2524
2525	gp = g_new_geomf(mp, "journal:taste");
2526	/* This orphan function should never be called. */
2527	gp->orphan = g_journal_taste_orphan;
2528	cp = g_new_consumer(gp);
2529	g_attach(cp, pp);
2530	error = g_journal_metadata_read(cp, &md);
2531	g_detach(cp);
2532	g_destroy_consumer(cp);
2533	g_destroy_geom(gp);
2534	if (error != 0)
2535		return (NULL);
2536	gp = NULL;
2537
2538	if (md.md_provider[0] != '\0' &&
2539	    !g_compare_names(md.md_provider, pp->name))
2540		return (NULL);
2541	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2542		return (NULL);
2543	if (g_journal_debug >= 2)
2544		journal_metadata_dump(&md);
2545
2546	gp = g_journal_create(mp, pp, &md);
2547	return (gp);
2548}
2549
2550static struct g_journal_softc *
2551g_journal_find_device(struct g_class *mp, const char *name)
2552{
2553	struct g_journal_softc *sc;
2554	struct g_geom *gp;
2555	struct g_provider *pp;
2556
2557	if (strncmp(name, "/dev/", 5) == 0)
2558		name += 5;
2559	LIST_FOREACH(gp, &mp->geom, geom) {
2560		sc = gp->softc;
2561		if (sc == NULL)
2562			continue;
2563		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2564			continue;
2565		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2566			continue;
2567		pp = LIST_FIRST(&gp->provider);
2568		if (strcmp(sc->sc_name, name) == 0)
2569			return (sc);
2570		if (pp != NULL && strcmp(pp->name, name) == 0)
2571			return (sc);
2572	}
2573	return (NULL);
2574}
2575
2576static void
2577g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
2578{
2579	struct g_journal_softc *sc;
2580	const char *name;
2581	char param[16];
2582	int *nargs;
2583	int error, i;
2584
2585	g_topology_assert();
2586
2587	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
2588	if (nargs == NULL) {
2589		gctl_error(req, "No '%s' argument.", "nargs");
2590		return;
2591	}
2592	if (*nargs <= 0) {
2593		gctl_error(req, "Missing device(s).");
2594		return;
2595	}
2596
2597	for (i = 0; i < *nargs; i++) {
2598		snprintf(param, sizeof(param), "arg%d", i);
2599		name = gctl_get_asciiparam(req, param);
2600		if (name == NULL) {
2601			gctl_error(req, "No 'arg%d' argument.", i);
2602			return;
2603		}
2604		sc = g_journal_find_device(mp, name);
2605		if (sc == NULL) {
2606			gctl_error(req, "No such device: %s.", name);
2607			return;
2608		}
2609		error = g_journal_destroy(sc);
2610		if (error != 0) {
2611			gctl_error(req, "Cannot destroy device %s (error=%d).",
2612			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
2613			return;
2614		}
2615	}
2616}
2617
2618static void
2619g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
2620{
2621
2622	g_topology_assert();
2623	g_topology_unlock();
2624	g_journal_sync_requested++;
2625	wakeup(&g_journal_switcher_state);
2626	while (g_journal_sync_requested > 0)
2627		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
2628	g_topology_lock();
2629}
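
/*
 * Note on the handshake above: we bump the request counter and wake the
 * switcher through g_journal_switcher_state; once the switch is complete,
 * the switcher clears g_journal_sync_requested and wakes us up (see
 * g_journal_switcher() at the bottom of this file), so this control
 * request blocks until the sync has actually happened.
 */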
2630
2631static void
2632g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
2633{
2634	uint32_t *version;
2635
2636	g_topology_assert();
2637
2638	version = gctl_get_paraml(req, "version", sizeof(*version));
2639	if (version == NULL) {
2640		gctl_error(req, "No '%s' argument.", "version");
2641		return;
2642	}
2643	if (*version != G_JOURNAL_VERSION) {
2644		gctl_error(req, "Userland and kernel parts are out of sync.");
2645		return;
2646	}
2647
2648	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
2649		g_journal_ctl_destroy(req, mp);
2650		return;
2651	} else if (strcmp(verb, "sync") == 0) {
2652		g_journal_ctl_sync(req, mp);
2653		return;
2654	}
2655
2656	gctl_error(req, "Unknown verb.");
2657}
2658
2659static void
2660g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2661    struct g_consumer *cp, struct g_provider *pp)
2662{
2663	struct g_journal_softc *sc;
2664
2665	g_topology_assert();
2666
2667	sc = gp->softc;
2668	if (sc == NULL)
2669		return;
2670	if (pp != NULL) {
2671		/* Nothing here. */
2672	} else if (cp != NULL) {
2673		int first = 1;
2674
2675		sbuf_printf(sb, "%s<Role>", indent);
2676		if (cp == sc->sc_dconsumer) {
2677			sbuf_printf(sb, "Data");
2678			first = 0;
2679		}
2680		if (cp == sc->sc_jconsumer) {
2681			if (!first)
2682				sbuf_printf(sb, ",");
2683			sbuf_printf(sb, "Journal");
2684		}
2685		sbuf_printf(sb, "</Role>\n");
2686		if (cp == sc->sc_jconsumer) {
2687			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
2688			    (intmax_t)sc->sc_jstart);
2689			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
2690			    (intmax_t)sc->sc_jend);
2691		}
2692	} else {
2693		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2694	}
2695}
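
/*
 * Example of the confxml fragment emitted above for a consumer that serves
 * as both data and journal (the offsets are made-up values):
 *
 *   <Role>Data,Journal</Role>
 *   <Jstart>1073741824</Jstart>
 *   <Jend>2147483648</Jend>
 */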
2696
2697static eventhandler_tag g_journal_event_shutdown = NULL;
2698static eventhandler_tag g_journal_event_lowmem = NULL;
2699
2700static void
2701g_journal_shutdown(void *arg, int howto __unused)
2702{
2703	struct g_class *mp;
2704	struct g_geom *gp, *gp2;
2705
2706	if (panicstr != NULL)
2707		return;
2708	mp = arg;
2709	DROP_GIANT();
2710	g_topology_lock();
2711	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2712		if (gp->softc == NULL)
2713			continue;
2714		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
2715		g_journal_destroy(gp->softc);
2716	}
2717	g_topology_unlock();
2718	PICKUP_GIANT();
2719}
2720
2721/*
2722 * Free cached requests from the inactive queue in case of low memory.
2723 * We free GJ_FREE_AT_ONCE elements at once.
2724 */
2725#define	GJ_FREE_AT_ONCE	4
2726static void
2727g_journal_lowmem(void *arg, int howto __unused)
2728{
2729	struct g_journal_softc *sc;
2730	struct g_class *mp;
2731	struct g_geom *gp;
2732	struct bio *bp;
2733	u_int nfree = GJ_FREE_AT_ONCE;
2734
2735	g_journal_stats_low_mem++;
2736	mp = arg;
2737	DROP_GIANT();
2738	g_topology_lock();
2739	LIST_FOREACH(gp, &mp->geom, geom) {
2740		sc = gp->softc;
2741		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
2742			continue;
2743		mtx_lock(&sc->sc_mtx);
2744		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
2745		    nfree--, bp = bp->bio_next) {
2746			/*
2747			 * It is safe to free the bio_data, because:
2748			 * 1. If bio_data is NULL it will be read from the
2749			 *    inactive journal.
2750			 * 2. If bp is sent down, it is first removed from the
2751			 *    inactive queue, so it's impossible to free the
2752			 *    data from under an in-flight bio.
2753			 * On the other hand, freeing elements from the active
2754			 * queue is not safe.
2755			 */
2756			if (bp->bio_data != NULL) {
2757				GJ_DEBUG(2, "Freeing data from %s.",
2758				    sc->sc_name);
2759				gj_free(bp->bio_data, bp->bio_length);
2760				bp->bio_data = NULL;
2761			}
2762		}
2763		mtx_unlock(&sc->sc_mtx);
2764		if (nfree == 0)
2765			break;
2766	}
2767	g_topology_unlock();
2768	PICKUP_GIANT();
2769}
2770
2771static void g_journal_switcher(void *arg);
2772
2773static void
2774g_journal_init(struct g_class *mp)
2775{
2776
2777	/* Pick a conservative value if the provided value makes no sense. */
2778	if (g_journal_cache_divisor <= 0 ||
2779	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
2780		g_journal_cache_divisor = 5;
2781	}
2782	if (g_journal_cache_limit > 0) {
2783		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
2784		g_journal_cache_low =
2785		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
2786	}
2787	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
2788	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
2789	if (g_journal_event_shutdown == NULL)
2790		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
2791	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
2792	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
2793	if (g_journal_event_lowmem == NULL)
2794		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
2795}
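
/*
 * Worked example of the sizing above (all numbers hypothetical): with a
 * vm_kmem_size of 1 GB and g_journal_cache_divisor of 5, the cache limit
 * becomes 200 MB; g_journal_cache_low is then a g_journal_cache_switch
 * percentage of that, e.g. a switch value of 90 gives a 180 MB low-water
 * mark.
 */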
2796
2797static void
2798g_journal_fini(struct g_class *mp)
2799{
2800
2801	if (g_journal_event_shutdown != NULL) {
2802		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
2803		    g_journal_event_shutdown);
2804	}
2805	if (g_journal_event_lowmem != NULL)
2806		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
2807	g_journal_stop_switcher();
2808}
2809
2810DECLARE_GEOM_CLASS(g_journal_class, g_journal);
2811
2812static const struct g_journal_desc *
2813g_journal_find_desc(const char *fstype)
2814{
2815	const struct g_journal_desc *desc;
2816	int i;
2817
2818	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
2819	     desc = g_journal_filesystems[++i]) {
2820		if (strcmp(desc->jd_fstype, fstype) == 0)
2821			break;
2822	}
2823	return (desc);
2824}
2825
2826static void
2827g_journal_switch_wait(struct g_journal_softc *sc)
2828{
2829	struct bintime bt;
2830
2831	mtx_assert(&sc->sc_mtx, MA_OWNED);
2832	if (g_journal_debug >= 2) {
2833		if (sc->sc_flush_in_progress > 0) {
2834			GJ_DEBUG(2, "%d requests flushing.",
2835			    sc->sc_flush_in_progress);
2836		}
2837		if (sc->sc_copy_in_progress > 0) {
2838			GJ_DEBUG(2, "%d requests copying.",
2839			    sc->sc_copy_in_progress);
2840		}
2841		if (sc->sc_flush_count > 0) {
2842			GJ_DEBUG(2, "%d requests to flush.",
2843			    sc->sc_flush_count);
2844		}
2845		if (sc->sc_delayed_count > 0) {
2846			GJ_DEBUG(2, "%d requests delayed.",
2847			    sc->sc_delayed_count);
2848		}
2849	}
2850	g_journal_stats_switches++;
2851	if (sc->sc_copy_in_progress > 0)
2852		g_journal_stats_wait_for_copy++;
2853	GJ_TIMER_START(1, &bt);
2854	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2855	sc->sc_flags |= GJF_DEVICE_SWITCH;
2856	wakeup(sc);
2857	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
2858		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
2859		    "gj:switch", 0);
2860	}
2861	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
2862}
2863
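/*
 * Walk the mount list and cleanly switch the journal of every file system
 * living on a gjournal provider.  The per-mount sequence implemented below
 * is: vn_start_write() -> vfs_msync() -> VFS_SYNC() -> BIO_FLUSH ->
 * vfs_write_suspend() -> jd_clean() -> g_journal_switch_wait() ->
 * vfs_write_resume().
 */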
2864static void
2865g_journal_do_switch(struct g_class *classp)
2866{
2867	struct g_journal_softc *sc;
2868	const struct g_journal_desc *desc;
2869	struct g_geom *gp;
2870	struct mount *mp;
2871	struct bintime bt;
2872	char *mountpoint;
2873	int error, save;
2874
2875	DROP_GIANT();
2876	g_topology_lock();
2877	LIST_FOREACH(gp, &classp->geom, geom) {
2878		sc = gp->softc;
2879		if (sc == NULL)
2880			continue;
2881		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2882			continue;
2883		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2884			continue;
2885		mtx_lock(&sc->sc_mtx);
2886		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
2887		mtx_unlock(&sc->sc_mtx);
2888	}
2889	g_topology_unlock();
2890	PICKUP_GIANT();
2891
2892	mtx_lock(&mountlist_mtx);
2893	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2894		if (mp->mnt_gjprovider == NULL)
2895			continue;
2896		if (mp->mnt_flag & MNT_RDONLY)
2897			continue;
2898		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
2899		if (desc == NULL)
2900			continue;
2901		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
2902			continue;
2903		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
2904
2905		DROP_GIANT();
2906		g_topology_lock();
2907		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
2908		g_topology_unlock();
2909		PICKUP_GIANT();
2910
2911		if (sc == NULL) {
2912			GJ_DEBUG(0, "Cannot find journal geom for %s.",
2913			    mp->mnt_gjprovider);
2914			goto next;
2915		} else if (JEMPTY(sc)) {
2916			mtx_lock(&sc->sc_mtx);
2917			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2918			mtx_unlock(&sc->sc_mtx);
2919			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
2920			goto next;
2921		}
2922
2923		mountpoint = mp->mnt_stat.f_mntonname;
2924
2925		error = vn_start_write(NULL, &mp, V_WAIT);
2926		if (error != 0) {
2927			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
2928			    mountpoint, error);
2929			goto next;
2930		}
2931
2932		save = curthread_pflags_set(TDP_SYNCIO);
2933
2934		GJ_TIMER_START(1, &bt);
2935		vfs_msync(mp, MNT_NOWAIT);
2936		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);
2937
2938		GJ_TIMER_START(1, &bt);
2939		error = VFS_SYNC(mp, MNT_NOWAIT);
2940		if (error == 0)
2941			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
2942		else {
2943			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
2944			    mountpoint, error);
2945		}
2946
2947		curthread_pflags_restore(save);
2948
2949		vn_finished_write(mp);
2950
2951		if (error != 0)
2952			goto next;
2953
2954		 * Send BIO_FLUSH before freezing the file system, so that
2955		 * there is less to flush once it is frozen.
2956		 * faster after the freeze.
2957		 */
2958		GJ_TIMER_START(1, &bt);
2959		g_journal_flush_cache(sc);
2960		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);
2961
2962		GJ_TIMER_START(1, &bt);
2963		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
2964		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
2965		if (error != 0) {
2966			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
2967			    mountpoint, error);
2968			goto next;
2969		}
2970
2971		error = desc->jd_clean(mp);
2972		if (error != 0)
2973			goto next;
2974
2975		mtx_lock(&sc->sc_mtx);
2976		g_journal_switch_wait(sc);
2977		mtx_unlock(&sc->sc_mtx);
2978
2979		vfs_write_resume(mp, 0);
2980next:
2981		mtx_lock(&mountlist_mtx);
2982		vfs_unbusy(mp);
2983	}
2984	mtx_unlock(&mountlist_mtx);
2985
2986	sc = NULL;
2987	for (;;) {
2988		DROP_GIANT();
2989		g_topology_lock();
2990		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
2991			sc = gp->softc;
2992			if (sc == NULL)
2993				continue;
2994			mtx_lock(&sc->sc_mtx);
2995			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
2996			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
2997			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
2998				break;
2999			}
3000			mtx_unlock(&sc->sc_mtx);
3001			sc = NULL;
3002		}
3003		g_topology_unlock();
3004		PICKUP_GIANT();
3005		if (sc == NULL)
3006			break;
3007		mtx_assert(&sc->sc_mtx, MA_OWNED);
3008		g_journal_switch_wait(sc);
3009		mtx_unlock(&sc->sc_mtx);
3010	}
3011}
3012
3013static void
3014g_journal_start_switcher(struct g_class *mp)
3015{
3016	int error;
3017
3018	g_topology_assert();
3019	MPASS(g_journal_switcher_proc == NULL);
3020	g_journal_switcher_state = GJ_SWITCHER_WORKING;
3021	error = kproc_create(g_journal_switcher, mp, &g_journal_switcher_proc,
3022	    0, 0, "g_journal switcher");
3023	KASSERT(error == 0, ("Cannot create switcher thread."));
3024}
3025
3026static void
3027g_journal_stop_switcher(void)
3028{
3029	g_topology_assert();
3030	MPASS(g_journal_switcher_proc != NULL);
3031	g_journal_switcher_state = GJ_SWITCHER_DIE;
3032	wakeup(&g_journal_switcher_state);
3033	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
3034		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
3035	GJ_DEBUG(1, "Switcher died.");
3036	g_journal_switcher_proc = NULL;
3037}
3038
3039/*
3040 * TODO: Kill switcher thread on last geom destruction?
3041 */
3042static void
3043g_journal_switcher(void *arg)
3044{
3045	struct g_class *mp;
3046	struct bintime bt;
3047	int error;
3048
3049	mp = arg;
3050	curthread->td_pflags |= TDP_NORUNNINGBUF;
3051	for (;;) {
3052		g_journal_switcher_wokenup = 0;
3053		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
3054		    g_journal_switch_time * hz);
3055		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
3056			g_journal_switcher_state = GJ_SWITCHER_DIED;
3057			GJ_DEBUG(1, "Switcher exiting.");
3058			wakeup(&g_journal_switcher_state);
3059			kproc_exit(0);
3060		}
3061		if (error == 0 && g_journal_sync_requested == 0) {
3062			GJ_DEBUG(1, "Out of cache, force switch (used=%jd "
3063			    "limit=%jd).", (intmax_t)g_journal_cache_used,
3064			    (intmax_t)g_journal_cache_limit);
3065		}
3066		GJ_TIMER_START(1, &bt);
3067		g_journal_do_switch(mp);
3068		GJ_TIMER_STOP(1, &bt, "Entire switch time");
3069		if (g_journal_sync_requested > 0) {
3070			g_journal_sync_requested = 0;
3071			wakeup(&g_journal_sync_requested);
3072		}
3073	}
3074}
3075