/* geom_dev.c — FreeBSD stable/10, revision 274732 */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: stable/10/sys/geom/geom_dev.c 274732 2014-11-20 01:55:12Z mav $");
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/kernel.h>
43#include <sys/conf.h>
44#include <sys/ctype.h>
45#include <sys/bio.h>
46#include <sys/bus.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/proc.h>
50#include <sys/errno.h>
51#include <sys/time.h>
52#include <sys/disk.h>
53#include <sys/fcntl.h>
54#include <sys/limits.h>
55#include <sys/sysctl.h>
56#include <geom/geom.h>
57#include <geom/geom_int.h>
58#include <machine/stdarg.h>
59
/*
 * Per-provider state for the DEV class, hung off the consumer's private
 * pointer.  sc_mtx protects the open/active counters and the cdev
 * pointers during the asynchronous destruction dance (see g_dev_done(),
 * g_dev_callback() and g_dev_destroy()).
 */
struct g_dev_softc {
	struct mtx	 sc_mtx;	/* protects the fields below */
	struct cdev	*sc_dev;	/* main devfs node; NULL after devfs destruction starts */
	struct cdev	*sc_alias;	/* optional physpath/devalias node, may be NULL */
	int		 sc_open;	/* sum of access counts granted via g_dev_open() */
	int		 sc_active;	/* bios cloned in g_dev_strategy(), not yet done */
};
67
/* cdevsw methods implemented below for the per-provider device nodes. */
static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;

/*
 * Character device switch for GEOM providers.  read(2)/write(2) are
 * serviced by physio (physread/physwrite), which funnels them into
 * g_dev_strategy().  D_TRACKCLOSE requests a d_close call for every
 * close(2) so the access counts adjusted in open/close stay balanced.
 */
static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};
84
/* GEOM class method implementations, defined below. */
static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;

/*
 * The DEV class tastes every provider and exposes it as a devfs node;
 * it sits at the top of every GEOM stack.
 */
static struct g_class g_dev_class	= {
	.name = "DEV",
	.version = G_VERSION,
	.init = g_dev_init,
	.fini = g_dev_fini,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
	.attrchanged = g_dev_attrchanged
};
100
/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSD's with a marginal
 * increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");

/*
 * Value of the "dumpdev" kernel environment variable, captured at class
 * init and consumed (then freed) by init_dumpdev() once the matching
 * device node appears.
 */
static char *dumpdev = NULL;
/*
 * Class initialization: stash the "dumpdev" environment variable so
 * init_dumpdev() can arm the crash dump target when the corresponding
 * device node is created in g_dev_taste().
 */
static void
g_dev_init(struct g_class *mp)
{

	dumpdev = getenv("dumpdev");
}
121
/*
 * Class teardown: release the saved "dumpdev" value if init_dumpdev()
 * never consumed it.
 * NOTE(review): dumpdev may already be NULL here (cleared by
 * init_dumpdev()); this assumes freeenv(NULL) is a safe no-op — confirm
 * against the kernel environment implementation.
 */
static void
g_dev_fini(struct g_class *mp)
{

	freeenv(dumpdev);
}
128
129static int
130g_dev_setdumpdev(struct cdev *dev)
131{
132	struct g_kerneldump kd;
133	struct g_consumer *cp;
134	int error, len;
135
136	if (dev == NULL)
137		return (set_dumper(NULL, NULL));
138
139	cp = dev->si_drv2;
140	len = sizeof(kd);
141	kd.offset = 0;
142	kd.length = OFF_MAX;
143	error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
144	if (error == 0) {
145		error = set_dumper(&kd.di, devtoname(dev));
146		if (error == 0)
147			dev->si_flags |= SI_DUMPDEV;
148	}
149	return (error);
150}
151
152static void
153init_dumpdev(struct cdev *dev)
154{
155
156	if (dumpdev == NULL)
157		return;
158	if (strcmp(devtoname(dev), dumpdev) != 0)
159		return;
160	if (g_dev_setdumpdev(dev) == 0) {
161		freeenv(dumpdev);
162		dumpdev = NULL;
163	}
164}
165
/*
 * Final teardown of a DEV geom, run from the GEOM event queue once the
 * devfs node is gone and no I/O remains in flight (scheduled by
 * g_dev_callback() or g_dev_done()).  Drops any access counts still
 * held, then detaches and frees the consumer, geom and softc.
 */
static void
g_dev_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_dev_softc *sc;

	g_topology_assert();
	cp = arg;
	gp = cp->geom;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
	/* Release remaining access counts before detaching the consumer. */
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	mtx_destroy(&sc->sc_mtx);
	g_free(sc);
}
186
187void
188g_dev_print(void)
189{
190	struct g_geom *gp;
191	char const *p = "";
192
193	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
194		printf("%s%s", p, gp->name);
195		p = " ";
196	}
197	printf("\n");
198}
199
/*
 * Attribute-change notification from the provider below.
 *
 * GEOM::media     - media inserted/removed: emit a devctl MEDIACHANGE
 *                   event for the main node and any alias node.
 * GEOM::physpath  - physical path changed: (re)create or destroy the
 *                   /dev alias that mirrors the physical path.  Requires
 *                   a temporary read access count to issue the getattr.
 */
static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{
	struct g_dev_softc *sc;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];	/* "cdev=" + name + NUL */

	sc = cp->private;
	if (strcmp(attr, "GEOM::media") == 0) {
		dev = sc->sc_dev;
		snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
		devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
		/* The alias node, if any, gets its own notification. */
		dev = sc->sc_alias;
		if (dev != NULL) {
			snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
			devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf,
			    M_WAITOK);
		}
		return;
	}

	if (strcmp(attr, "GEOM::physpath") != 0)
		return;

	/* Need read access on the consumer to query the attribute. */
	if (g_access(cp, 1, 0, 0) == 0) {
		char *physpath;
		int error, physpath_len;

		physpath_len = MAXPATHLEN;
		physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
		error =
		    g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
		g_access(cp, -1, 0, 0);
		if (error == 0 && strlen(physpath) != 0) {
			struct cdev *old_alias_dev;
			struct cdev **alias_devp;

			/*
			 * Replace (or create) the alias; the old alias,
			 * if different, is retired by make_dev_physpath_alias.
			 */
			dev = sc->sc_dev;
			old_alias_dev = sc->sc_alias;
			alias_devp = (struct cdev **)&sc->sc_alias;
			make_dev_physpath_alias(MAKEDEV_WAITOK, alias_devp,
			    dev, old_alias_dev, physpath);
		} else if (sc->sc_alias) {
			/* No physical path any more: drop the stale alias. */
			destroy_dev((struct cdev *)sc->sc_alias);
			sc->sc_alias = NULL;
		}
		g_free(physpath);
	}
}
249
250struct g_provider *
251g_dev_getprovider(struct cdev *dev)
252{
253	struct g_consumer *cp;
254
255	g_topology_assert();
256	if (dev == NULL)
257		return (NULL);
258	if (dev->si_devsw != &g_dev_cdevsw)
259		return (NULL);
260	cp = dev->si_drv2;
261	return (cp->provider);
262}
263
/*
 * Taste method: attach a consumer to every provider and expose it as a
 * devfs node named after the provider.  Also creates an optional alias
 * node driven by "kern.devalias.<prefix>" environment variables, arms
 * the dump device if applicable, and kicks off physpath alias creation.
 * Returns the new geom, or NULL if the devfs node could not be made.
 */
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, len;
	struct cdev *dev, *adev;
	char buf[64], *val;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s", pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
	cp = g_new_consumer(gp);
	cp->private = sc;
	/* Let bios bypass the g_up/g_down threads in both directions. */
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev,
	    &g_dev_cdevsw, NULL, UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name);
	if (error != 0) {
		/* Roll back everything built above; provider has no node. */
		printf("%s: make_dev_p() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	dev->si_flags |= SI_UNMAPPED;
	sc->sc_dev = dev;

	/*
	 * Search for device alias name and create it if found.  We probe
	 * "kern.devalias.<name>" for ever shorter prefixes of the provider
	 * name; "kern.devalias." is 14 characters, hence the 14/15 magic.
	 */
	adev = NULL;
	for (len = MIN(strlen(gp->name), sizeof(buf) - 15); len > 0; len--) {
		snprintf(buf, sizeof(buf), "kern.devalias.%s", gp->name);
		buf[14 + len] = 0;	/* truncate to the current prefix */
		val = getenv(buf);
		if (val != NULL) {
			/* Alias = env value + unmatched tail of the name. */
			snprintf(buf, sizeof(buf), "%s%s",
			    val, gp->name + len);
			freeenv(val);
			make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
			    &adev, dev, "%s", buf);
			adev->si_flags |= SI_UNMAPPED;
			break;
		}
	}

	dev->si_iosize_max = MAXPHYS;
	dev->si_drv2 = cp;
	init_dumpdev(dev);
	if (adev != NULL) {
		adev->si_iosize_max = MAXPHYS;
		adev->si_drv2 = cp;
		init_dumpdev(adev);
	}

	/* Create the physpath alias node, if the provider reports one. */
	g_dev_attrchanged(cp, "GEOM::physpath");

	return (gp);
}
330
/*
 * d_open: translate the open mode into GEOM access counts and acquire
 * them on the consumer.  On success the softc open counter is bumped;
 * a close that is draining I/O (see g_dev_close()) is woken up so it
 * can notice the device is open again and stop waiting.
 */
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return(ENXIO);		/* g_dev_taste() not done yet */
	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif
	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (error == 0) {
		sc = cp->private;
		mtx_lock(&sc->sc_mtx);
		/* Unblock a g_dev_close() sleeping in its drain loop. */
		if (sc->sc_open == 0 && sc->sc_active != 0)
			wakeup(&sc->sc_active);
		sc->sc_open += r + w + e;
		mtx_unlock(&sc->sc_mtx);
	}
	return(error);
}
373
/*
 * d_close: drop the access counts acquired in g_dev_open().  On last
 * close, wait for all in-flight bios to complete (g_dev_done() does the
 * wakeup) before releasing the access counts, so the provider never
 * sees I/O from a consumer with zero access.
 */
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return(ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif
	sc = cp->private;
	mtx_lock(&sc->sc_mtx);
	sc->sc_open += r + w + e;
	/*
	 * Drain outstanding bios on last close.
	 * NOTE(review): the wait-channel message is the literal string
	 * "PRIBIO" and the priority argument is 0 — it looks as if the
	 * priority constant was accidentally quoted.  Harmless (it only
	 * affects the wchan name shown by ps/top), but worth confirming
	 * against msleep(9).
	 */
	while (sc->sc_open == 0 && sc->sc_active != 0)
		msleep(&sc->sc_active, &sc->sc_mtx, 0, "PRIBIO", 0);
	mtx_unlock(&sc->sc_mtx);
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	return (error);
}
405
/*
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization.  We cannot close it by holding topology
 * XXX: since that would prevent us from doing our job, and stalling events
 * XXX: will break (actually: stall) the BSD disklabel hacks.
 */
/*
 * d_ioctl: service the standard disk(4) DIOC* requests by querying the
 * provider (mostly via GEOM::* attributes); anything unrecognized is
 * forwarded to the provider geom's own ioctl method, if it has one.
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t offset, length, chunk;
	int i, error;

	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);	/* buffer size available for getattr replies */
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		/* Non-zero argument arms this device as dumper, zero disarms. */
		if (*(u_int *)data == 0)
			error = g_dev_setdumpdev(NULL);
		else
			error = g_dev_setdumpdev(dev);
		break;
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		/* data = { offset, length }; both must be sector-aligned. */
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			/* Chunk per the delete_max_sectors sysctl (0 = off). */
			chunk = length;
			if (g_dev_del_max_sectors != 0 && chunk >
			    g_dev_del_max_sectors * cp->provider->sectorsize) {
				chunk = g_dev_del_max_sectors *
				    cp->provider->sectorsize;
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can be likewise.  We make this ioctl
			 * interruptible by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = cp->provider->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = cp->provider->stripeoffset;
		break;
	case DIOCGPHYSPATH:
		error = g_io_getattr("GEOM::physpath", cp, &i, data);
		if (error == 0 && *(char *)data == '\0')
			error = ENOENT;
		break;
	case DIOCGATTR: {
		/* Generic attribute query: name and reply buffer supplied by caller. */
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

		if (arg->len > sizeof(arg->value)) {
			error = EINVAL;
			break;
		}
		error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
		break;
	}
	default:
		/* Punt unknown requests to the provider's geom, if it handles ioctls. */
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}
531
/*
 * Completion handler for bios cloned in g_dev_strategy().  Propagates
 * status to the original bio, drops the active count, and — when this
 * was the last outstanding bio — either wakes a draining g_dev_close()
 * or, if the devfs node is already gone, schedules final destruction.
 */
static void
g_dev_done(struct bio *bp2)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	struct bio *bp;
	int destroy;

	cp = bp2->bio_from;
	sc = cp->private;
	bp = bp2->bio_parent;
	/* Copy completion status back to the caller's bio. */
	bp->bio_error = bp2->bio_error;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	if (bp2->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp2->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
	}
	g_destroy_bio(bp2);
	destroy = 0;
	mtx_lock(&sc->sc_mtx);
	if ((--sc->sc_active) == 0) {
		/* Last bio in flight: unblock close, or finish teardown. */
		if (sc->sc_open == 0)
			wakeup(&sc->sc_active);
		if (sc->sc_dev == NULL)
			destroy = 1;
	}
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
	biodone(bp);
}
568
/*
 * d_strategy: clone the incoming bio onto our consumer and dispatch it
 * down the GEOM stack.  The softc active count tracks outstanding
 * clones so close/destroy can wait for them (see g_dev_done()).
 */
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;
	struct g_dev_softc *sc;

	KASSERT(bp->bio_cmd == BIO_READ ||
	        bp->bio_cmd == BIO_WRITE ||
	        bp->bio_cmd == BIO_DELETE ||
		bp->bio_cmd == BIO_FLUSH,
		("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	sc = cp->private;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
#ifdef INVARIANTS
	/* Reject requests that are not sector-aligned (debug kernels only). */
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	mtx_lock(&sc->sc_mtx);
	KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: be deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		/* Out of bios: back off briefly and retry the clone. */
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));

}
621
/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */

static void
g_dev_callback(void *arg)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int destroy;

	cp = arg;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

	mtx_lock(&sc->sc_mtx);
	/* Clearing sc_dev tells g_dev_done() to finish teardown for us. */
	sc->sc_dev = NULL;
	sc->sc_alias = NULL;
	destroy = (sc->sc_active == 0);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}
650
/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in in the interim will be returned immediately.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct cdev *dev;
	struct g_dev_softc *sc;

	g_topology_assert();
	sc = cp->private;
	dev = sc->sc_dev;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		set_dumper(NULL, NULL);

	/* Destroy the struct cdev *so we get no more requests */
	destroy_dev_sched_cb(dev, g_dev_callback, cp);
}
679
680DECLARE_GEOM_CLASS(g_dev_class, g_dev);
681