geom_dev.c revision 282953
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/geom/geom_dev.c 282953 2015-05-15 10:42:48Z trasz $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

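/*
 * Per-consumer state for the DEV geoms created below (descriptive summary
 * of the fields as used in this file).  sc_mtx protects sc_open and
 * sc_active: sc_open tracks the access counts granted via g_dev_open() and
 * released in g_dev_close(), while sc_active counts bios cloned in
 * g_dev_strategy() that have not yet completed in g_dev_done().  sc_dev is
 * the cdev created in g_dev_taste(); sc_alias is the optional physical-path
 * alias managed in g_dev_attrchanged().  Geom destruction is deferred until
 * sc_active drains (see g_dev_done() and g_dev_callback()).
 */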
struct g_dev_softc {
	struct mtx	 sc_mtx;
	struct cdev	*sc_dev;
	struct cdev	*sc_alias;
	int		 sc_open;
	int		 sc_active;
};

static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;

static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;

static struct g_class g_dev_class	= {
	.name = "DEV",
	.version = G_VERSION,
	.init = g_dev_init,
	.fini = g_dev_fini,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
	.attrchanged = g_dev_attrchanged
};

/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSDs with a marginal
 * increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");
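/*
 * Illustrative tuning example (values purely by way of example): since the
 * OID above is CTLFLAG_RW, the chunk size can be changed, or chunking
 * disabled, at runtime:
 *
 *	sysctl kern.geom.dev.delete_max_sectors=131072
 *	sysctl kern.geom.dev.delete_max_sectors=0	# disable chunking
 */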

static char *dumpdev = NULL;
static void
g_dev_init(struct g_class *mp)
{

	dumpdev = getenv("dumpdev");
}

static void
g_dev_fini(struct g_class *mp)
{

	freeenv(dumpdev);
}

static int
g_dev_setdumpdev(struct cdev *dev)
{
	struct g_kerneldump kd;
	struct g_consumer *cp;
	int error, len;

	if (dev == NULL)
		return (set_dumper(NULL, NULL));

	cp = dev->si_drv2;
	len = sizeof(kd);
	kd.offset = 0;
	kd.length = OFF_MAX;
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
	if (error == 0) {
		error = set_dumper(&kd.di, devtoname(dev));
		if (error == 0)
			dev->si_flags |= SI_DUMPDEV;
	}
	return (error);
}

static void
init_dumpdev(struct cdev *dev)
{

	if (dumpdev == NULL)
		return;
	if (strcmp(devtoname(dev), dumpdev) != 0)
		return;
	if (g_dev_setdumpdev(dev) == 0) {
		freeenv(dumpdev);
		dumpdev = NULL;
	}
}
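/*
 * The "dumpdev" kernel environment variable consumed above is typically set
 * as a loader tunable, e.g. (device name purely illustrative):
 *
 *	dumpdev="ada0p3"
 *
 * in /boot/loader.conf.  The value is compared against devtoname(), so no
 * "/dev/" prefix is expected; the matching provider becomes the dump device
 * when its DEV geom is created.
 */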

static void
g_dev_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_dev_softc *sc;
	char buf[SPECNAMELEN + 6];

	g_topology_assert();
	cp = arg;
	gp = cp->geom;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "DESTROY", buf, M_WAITOK);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	mtx_destroy(&sc->sc_mtx);
	g_free(sc);
}

void
g_dev_print(void)
{
	struct g_geom *gp;
	char const *p = "";

	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
		printf("%s%s", p, gp->name);
		p = " ";
	}
	printf("\n");
}

static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{
	struct g_dev_softc *sc;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	sc = cp->private;
	if (strcmp(attr, "GEOM::media") == 0) {
		dev = sc->sc_dev;
		snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
		devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
		devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
		dev = sc->sc_alias;
		if (dev != NULL) {
			snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
			devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf,
			    M_WAITOK);
			devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf,
			    M_WAITOK);
		}
		return;
	}

	if (strcmp(attr, "GEOM::physpath") != 0)
		return;

	if (g_access(cp, 1, 0, 0) == 0) {
		char *physpath;
		int error, physpath_len;

		physpath_len = MAXPATHLEN;
		physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
		error =
		    g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
		g_access(cp, -1, 0, 0);
		if (error == 0 && strlen(physpath) != 0) {
			struct cdev *old_alias_dev;
			struct cdev **alias_devp;

			dev = sc->sc_dev;
			old_alias_dev = sc->sc_alias;
			alias_devp = (struct cdev **)&sc->sc_alias;
			make_dev_physpath_alias(MAKEDEV_WAITOK, alias_devp,
			    dev, old_alias_dev, physpath);
		} else if (sc->sc_alias) {
			destroy_dev((struct cdev *)sc->sc_alias);
			sc->sc_alias = NULL;
		}
		g_free(physpath);
	}
}

struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
	struct g_consumer *cp;

	g_topology_assert();
	if (dev == NULL)
		return (NULL);
	if (dev->si_devsw != &g_dev_cdevsw)
		return (NULL);
	cp = dev->si_drv2;
	return (cp->provider);
}

static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, len;
	struct cdev *dev, *adev;
	char buf[SPECNAMELEN + 6], *val;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s", pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
	cp = g_new_consumer(gp);
	cp->private = sc;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev,
	    &g_dev_cdevsw, NULL, UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name);
	if (error != 0) {
		printf("%s: make_dev_p() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	dev->si_flags |= SI_UNMAPPED;
	sc->sc_dev = dev;

	/* Search for device alias name and create it if found. */
	adev = NULL;
	for (len = MIN(strlen(gp->name), sizeof(buf) - 15); len > 0; len--) {
		snprintf(buf, sizeof(buf), "kern.devalias.%s", gp->name);
		buf[14 + len] = 0;
		val = getenv(buf);
		if (val != NULL) {
			snprintf(buf, sizeof(buf), "%s%s",
			    val, gp->name + len);
			freeenv(val);
			make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
			    &adev, dev, "%s", buf);
			adev->si_flags |= SI_UNMAPPED;
			break;
		}
	}
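	/*
	 * Example (names purely illustrative): with gp->name "ada0p2" and a
	 * kernel environment entry kern.devalias.ada0="ad4", the loop above
	 * matches the "ada0" prefix and creates the alias /dev/ad4p2 for the
	 * /dev/ada0p2 node.
	 */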

	dev->si_iosize_max = MAXPHYS;
	dev->si_drv2 = cp;
	init_dumpdev(dev);
	if (adev != NULL) {
		adev->si_iosize_max = MAXPHYS;
		adev->si_drv2 = cp;
		init_dumpdev(adev);
	}

	g_dev_attrchanged(cp, "GEOM::physpath");
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK);

	return (gp);
}

static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return(ENXIO);		/* g_dev_taste() not done yet */
	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif
	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (error == 0) {
		sc = cp->private;
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_open == 0 && sc->sc_active != 0)
			wakeup(&sc->sc_active);
		sc->sc_open += r + w + e;
		mtx_unlock(&sc->sc_mtx);
	}
	return(error);
}

static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return(ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif
	sc = cp->private;
	mtx_lock(&sc->sc_mtx);
	sc->sc_open += r + w + e;
	while (sc->sc_open == 0 && sc->sc_active != 0)
		msleep(&sc->sc_active, &sc->sc_mtx, 0, "PRIBIO", 0);
	mtx_unlock(&sc->sc_mtx);
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	return (error);
}

/*
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization.  We cannot close it by holding topology
 * XXX: since that would prevent us from doing our job, and stalling events
 * XXX: will break (actually: stall) the BSD disklabel hacks.
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t offset, length, chunk;
	int i, error;

	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		if (*(u_int *)data == 0)
			error = g_dev_setdumpdev(NULL);
		else
			error = g_dev_setdumpdev(dev);
		break;
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			chunk = length;
			if (g_dev_del_max_sectors != 0 && chunk >
			    g_dev_del_max_sectors * cp->provider->sectorsize) {
				chunk = g_dev_del_max_sectors *
				    cp->provider->sectorsize;
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can be likewise.  We make this ioctl
			 * interruptible by checking for signals after each
			 * chunk.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = cp->provider->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = cp->provider->stripeoffset;
		break;
	case DIOCGPHYSPATH:
		error = g_io_getattr("GEOM::physpath", cp, &i, data);
		if (error == 0 && *(char *)data == '\0')
			error = ENOENT;
		break;
	case DIOCGATTR: {
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

		if (arg->len > sizeof(arg->value)) {
			error = EINVAL;
			break;
		}
		error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
		break;
	}
	default:
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}
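/*
 * Illustrative userland sketch (not part of this file): the DIOCGDELETE case
 * above takes an array of two off_t values, byte offset and byte length,
 * both multiples of the provider's sector size, roughly as follows:
 *
 *	#include <sys/disk.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	int fd = open("/dev/da0", O_RDWR);	// device name illustrative
 *	off_t arg[2] = { 0, 1024 * 1024 };	// offset, length in bytes
 *	if (fd < 0 || ioctl(fd, DIOCGDELETE, arg) == -1)
 *		warn("DIOCGDELETE");
 */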

static void
g_dev_done(struct bio *bp2)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	struct bio *bp;
	int destroy;

	cp = bp2->bio_from;
	sc = cp->private;
	bp = bp2->bio_parent;
	bp->bio_error = bp2->bio_error;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	if (bp2->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp2->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
	}
	g_destroy_bio(bp2);
	destroy = 0;
	mtx_lock(&sc->sc_mtx);
	if ((--sc->sc_active) == 0) {
		if (sc->sc_open == 0)
			wakeup(&sc->sc_active);
		if (sc->sc_dev == NULL)
			destroy = 1;
	}
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
	biodone(bp);
}

static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;
	struct g_dev_softc *sc;

	KASSERT(bp->bio_cmd == BIO_READ ||
	        bp->bio_cmd == BIO_WRITE ||
	        bp->bio_cmd == BIO_DELETE ||
		bp->bio_cmd == BIO_FLUSH,
		("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	sc = cp->private;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
#ifdef INVARIANTS
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	mtx_lock(&sc->sc_mtx);
	KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: be deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));

}

/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */

static void
g_dev_callback(void *arg)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int destroy;

	cp = arg;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

	mtx_lock(&sc->sc_mtx);
	sc->sc_dev = NULL;
	sc->sc_alias = NULL;
	destroy = (sc->sc_active == 0);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}

/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in in the interim will be returned immediately.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct cdev *dev;
	struct g_dev_softc *sc;

	g_topology_assert();
	sc = cp->private;
	dev = sc->sc_dev;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		set_dumper(NULL, NULL);

	/* Destroy the struct cdev *so we get no more requests */
	destroy_dev_sched_cb(dev, g_dev_callback, cp);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);