vdev_geom.c revision 297957
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * Virtual device vector for GEOM.
 */

static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
	.attrchanged = vdev_geom_attrchanged,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

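/*
 * The knobs below disable BIO_FLUSH and BIO_DELETE pass-through.  Each is
 * a CTLFLAG_RW sysctl paired with a loader tunable of the same name, so
 * e.g. "sysctl vfs.zfs.vdev.bio_delete_disable=1" takes effect at runtime
 * and the same setting in loader.conf(5) takes effect at boot.
 */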
SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");

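/*
 * Query the provider's reported rotation rate (GEOM::rotation_rate) and
 * record it in the vdev, falling back to VDEV_RATE_UNKNOWN on failure.
 */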
static void
vdev_geom_set_rotation_rate(vdev_t *vd, struct g_consumer *cp)
{
	int error;
	uint16_t rate;

	error = g_getattr("GEOM::rotation_rate", cp, &rate);
	if (error == 0)
		vd->vdev_rotation_rate = rate;
	else
		vd->vdev_rotation_rate = VDEV_RATE_UNKNOWN;
}

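/*
 * GEOM attribute-change callback.  Updates the vdev's cached rotation
 * rate or physical path when the underlying provider reports a change.
 */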
static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
	vdev_t *vd;
	spa_t *spa;
	char *physpath;
	int error, physpath_len;

	vd = cp->private;
	if (vd == NULL)
		return;

	if (strcmp(attr, "GEOM::rotation_rate") == 0) {
		vdev_geom_set_rotation_rate(vd, cp);
		return;
	}

	if (strcmp(attr, "GEOM::physpath") != 0)
		return;

	if (g_access(cp, 1, 0, 0) != 0)
		return;

	/*
	 * Record/Update physical path information for this device.
	 */
	spa = vd->vdev_spa;
	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	g_access(cp, -1, 0, 0);
	if (error == 0) {
		char *old_physpath;

		old_physpath = vd->vdev_physpath;
		vd->vdev_physpath = spa_strdup(physpath);
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		if (old_physpath != NULL) {
			int held_lock;

			held_lock = spa_config_held(spa, SCL_STATE, RW_WRITER);
			if (held_lock == 0) {
				g_topology_unlock();
				spa_config_enter(spa, SCL_STATE, FTAG,
				    RW_WRITER);
			}

			spa_strfree(old_physpath);

			if (held_lock == 0) {
				spa_config_exit(spa, SCL_STATE, FTAG);
				g_topology_lock();
			}
		}
	}
	g_free(physpath);
}

static void
vdev_geom_orphan(struct g_consumer *cp)
{
	vdev_t *vd;

	g_topology_assert();

	vd = cp->private;
	if (vd == NULL) {
		/* Vdev close in progress.  Ignore the event. */
		return;
	}

	/*
	 * Orphan callbacks occur from the GEOM event thread.
	 * Concurrent with this call, new I/O requests may be
	 * working their way through GEOM about to find out
	 * (only once executed by the g_down thread) that we've
	 * been orphaned from our disk provider.  These I/Os
	 * must be retired before we can detach our consumer.
	 * This is most easily achieved by acquiring the
	 * SPA ZIO configuration lock as a writer, but doing
	 * so with the GEOM topology lock held would cause
	 * a lock order reversal.  Instead, rely on the SPA's
	 * async removal support to invoke a close on this
	 * vdev once it is safe to do so.
	 */
	vd->vdev_remove_wanted = B_TRUE;
	spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}

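/*
 * Attach a consumer for the given provider to the shared "zfs::vdev"
 * geom, creating the geom and/or consumer as needed, and cross-link the
 * consumer with the vdev.  Caller must hold the GEOM topology lock.
 */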
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);
	/* Do we already have a geom? If not, create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		gp->attrchanged = vdev_geom_attrchanged;
		cp = g_new_consumer(gp);
		if (g_attach(cp, pp) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		if (g_access(cp, 1, 0, 1) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			if (g_attach(cp, pp) != 0) {
				g_destroy_consumer(cp);
				return (NULL);
			}
			if (g_access(cp, 1, 0, 1) != 0) {
				g_detach(cp);
				g_destroy_consumer(cp);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			if (g_access(cp, 1, 0, 1) != 0)
				return (NULL);
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}

	/*
	 * BUG: cp may already belong to a vdev.  This could happen if:
	 * 1) That vdev is a shared spare, or
	 * 2) We are trying to reopen a missing vdev and we are scanning by
	 *    guid.  In that case, we'll ultimately fail to open this consumer,
	 *    but not until after setting the private field.
	 * The solution is to:
	 * 1) Don't set the private field until after the open succeeds, and
	 * 2) Set it to a linked list of vdevs, not just a single vdev
	 */
	cp->private = vd;
	vd->vdev_tsd = cp;

	/* Fetch initial physical path information for this device. */
	vdev_geom_attrchanged(cp, "GEOM::physpath");

	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	return (cp);
}

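/*
 * Tear down the vdev's consumer, dropping our access counts and
 * destroying the consumer (and the geom, once no consumers remain).
 * Caller must hold the GEOM topology lock.
 */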
static void
vdev_geom_close_locked(vdev_t *vd)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	cp = vd->vdev_tsd;
	if (cp == NULL)
		return;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	KASSERT(vd->vdev_tsd == cp, ("%s: vdev_tsd is not cp", __func__));
	vd->vdev_tsd = NULL;
	vd->vdev_delayed_close = B_FALSE;
	cp->private = NULL;

	gp = cp->geom;
	g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		if (cp->provider != NULL) {
			ZFS_LOG(1, "Destroyed consumer to %s.",
			    cp->provider->name);
			g_detach(cp);
		}
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}

static void
nvlist_get_guids(nvlist_t *list, uint64_t *pguid, uint64_t *vguid)
{

	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, vguid);
	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_POOL_GUID, pguid);
}

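/*
 * Issue a synchronous BIO_READ/BIO_WRITE to the consumer, splitting the
 * transfer into MAXPHYS-sized, sector-aligned chunks and waiting for
 * each with biowait().
 */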
static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
{
	struct bio *bp;
	u_char *p;
	off_t off, maxio;
	int error;

	ASSERT((offset % cp->provider->sectorsize) == 0);
	ASSERT((size % cp->provider->sectorsize) == 0);

	bp = g_alloc_bio();
	off = offset;
	offset += size;
	p = data;
	maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
	error = 0;

	for (; off < offset; off += maxio, p += maxio, size -= maxio) {
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = cmd;
		bp->bio_done = NULL;
		bp->bio_offset = off;
		bp->bio_length = MIN(size, maxio);
		bp->bio_data = p;
		g_io_request(bp, cp);
		error = biowait(bp, "vdev_geom_io");
		if (error != 0)
			break;
	}

	g_destroy_bio(bp);
	return (error);
}

static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

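/*
 * Read the vdev labels from the provider (topology lock must not be
 * held) and unpack the first one that yields a plausible config nvlist:
 * a valid pool state and, except for spares and L2ARC devices, a
 * non-zero txg.  Returns ENOENT if no usable label is found.
 */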
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
{
	struct g_provider *pp;
	vdev_label_t *label;
	char *buf;
	size_t buflen;
	uint64_t psize;
	off_t offset, size;
	uint64_t state, txg;
	int l;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading config from %s...", pp->name);

	psize = pp->mediasize;
	psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

	size = sizeof(*label) + pp->sectorsize -
	    ((sizeof(*label) - 1) % pp->sectorsize) - 1;

	label = kmem_alloc(size, KM_SLEEP);
	buflen = sizeof(label->vl_vdev_phys.vp_nvlist);

	*config = NULL;
	for (l = 0; l < VDEV_LABELS; l++) {

		offset = vdev_label_offset(psize, l, 0);
		if ((offset % pp->sectorsize) != 0)
			continue;

		if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
			continue;
		buf = label->vl_vdev_phys.vp_nvlist;

		if (nvlist_unpack(buf, buflen, config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	kmem_free(label, size);
	return (*config == NULL ? ENOENT : 0);
}

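/* Grow the configs array, if necessary, to hold the vdev with this id. */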
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
	nvlist_t **new_configs;
	uint64_t i;

	if (id < *count)
		return;
	new_configs = kmem_zalloc((id + 1) * sizeof(nvlist_t *),
	    KM_SLEEP);
	for (i = 0; i < *count; i++)
		new_configs[i] = (*configs)[i];
	if (*configs != NULL)
		kmem_free(*configs, *count * sizeof(void *));
	*configs = new_configs;
	*count = id + 1;
}

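/*
 * Keep the best (highest-txg) config seen for each top-level vdev id of
 * the named pool.  Consumes cfg, freeing it when it is ignored or stale.
 */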
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
{
	nvlist_t *vdev_tree;
	uint64_t pool_guid;
	uint64_t vdev_guid;
	uint64_t id, txg, known_txg;
	char *pname;

	if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
	    strcmp(pname, name) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
		goto ignore;

	if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
		goto ignore;

	VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

	if (*known_pool_guid != 0) {
		if (pool_guid != *known_pool_guid)
			goto ignore;
	} else
		*known_pool_guid = pool_guid;

	resize_configs(configs, count, id);

	if ((*configs)[id] != NULL) {
		VERIFY(nvlist_lookup_uint64((*configs)[id],
		    ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
		if (txg <= known_txg)
			goto ignore;
		nvlist_free((*configs)[id]);
	}

	(*configs)[id] = cfg;
	return;

ignore:
	nvlist_free(cfg);
}

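/*
 * Temporarily attach the tasting consumer to a provider for read access,
 * rejecting providers that are withering, too small, or whose sector
 * size ZFS cannot support.
 */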
static int
vdev_geom_attach_taster(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	if (pp->flags & G_PF_WITHER)
		return (EINVAL);
	g_attach(cp, pp);
	error = g_access(cp, 1, 0, 0);
	if (error == 0) {
		if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize))
			error = EINVAL;
		else if (pp->mediasize < SPA_MINDEVSIZE)
			error = EINVAL;
		if (error != 0)
			g_access(cp, -1, 0, 0);
	}
	if (error != 0)
		g_detach(cp);
	return (error);
}

static void
vdev_geom_detach_taster(struct g_consumer *cp)
{
	g_access(cp, -1, 0, 0);
	g_detach(cp);
}

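/*
 * Taste every eligible GEOM provider in the system and collect the most
 * recent label config for each top-level vdev of the named pool.
 */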
int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	nvlist_t *vdev_cfg;
	uint64_t pool_guid;
	int error;

	DROP_GIANT();
	g_topology_lock();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should never be called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	*configs = NULL;
	*count = 0;
	pool_guid = 0;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				error = vdev_geom_read_config(zcp, &vdev_cfg);
				g_topology_lock();
				vdev_geom_detach_taster(zcp);
				if (error)
					continue;
				ZFS_LOG(1, "successfully read vdev config");

				process_vdev_config(configs, count,
				    vdev_cfg, name, &pool_guid);
			}
		}
	}

	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
	g_topology_unlock();
	PICKUP_GIANT();

	return (*count > 0 ? 0 : ENOENT);
}

static void
vdev_geom_read_guids(struct g_consumer *cp, uint64_t *pguid, uint64_t *vguid)
{
	nvlist_t *config;

	g_topology_assert_not();

	*pguid = 0;
	*vguid = 0;
	if (vdev_geom_read_config(cp, &config) == 0) {
		nvlist_get_guids(config, pguid, vguid);
		nvlist_free(config);
	}
}

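/*
 * Walk all GEOM providers, tasting each for a label whose pool and vdev
 * guids match this vdev, and attach to the first match found.
 */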
static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *cp, *zcp;
	uint64_t pguid, vguid;

	g_topology_assert();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should never be called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	cp = NULL;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				vdev_geom_read_guids(zcp, &pguid, &vguid);
				g_topology_lock();
				vdev_geom_detach_taster(zcp);
				/*
				 * Check that the label's vdev guid matches the
				 * desired guid.  If the label has a pool guid,
				 * check that it matches too. (Inactive spares
				 * and L2ARCs do not have any pool guid in the
				 * label.)
				 */
				if ((pguid != 0 &&
				     pguid != spa_guid(vd->vdev_spa)) ||
				    vguid != vd->vdev_guid)
					continue;
				cp = vdev_geom_attach(pp, vd);
				if (cp == NULL) {
					printf("ZFS WARNING: Unable to "
					    "attach to %s.\n", pp->name);
					continue;
				}
				break;
			}
			if (cp != NULL)
				break;
		}
		if (cp != NULL)
			break;
	}
	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
	return (cp);
}

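/*
 * Open the vdev by searching for matching guids, updating vdev_path to
 * point at the provider actually found.
 */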
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	g_topology_assert();

	ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guids(vd);
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid, vd->vdev_path);
	} else {
		ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}

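/*
 * Open the vdev by its recorded /dev path, optionally verifying that the
 * label's guids match before accepting the provider.
 */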
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	uint64_t pguid, vguid;

	g_topology_assert();

	cp = NULL;
	pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		cp = vdev_geom_attach(pp, vd);
		if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
		    pp->sectorsize <= VDEV_PAD_SIZE) {
			g_topology_unlock();
			vdev_geom_read_guids(cp, &pguid, &vguid);
			g_topology_lock();
			/*
			 * Check that the label's vdev guid matches the
			 * desired guid.  If the label has a pool guid,
			 * check that it matches too. (Inactive spares
			 * and L2ARCs do not have any pool guid in the
			 * label.)
			 */
			if ((pguid != 0 &&
			    pguid != spa_guid(vd->vdev_spa)) ||
			    vguid != vd->vdev_guid) {
				vdev_geom_close_locked(vd);
				cp = NULL;
				ZFS_LOG(1, "guid mismatch for provider %s: "
				    "%ju:%ju != %ju:%ju.", vd->vdev_path,
				    (uintmax_t)spa_guid(vd->vdev_spa),
				    (uintmax_t)vd->vdev_guid,
				    (uintmax_t)pguid, (uintmax_t)vguid);
			} else {
				ZFS_LOG(1, "guid match for provider %s.",
				    vd->vdev_path);
			}
		}
	}

	return (cp);
}

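/*
 * Open entry point for the GEOM vdev: locate and attach the backing
 * provider (by path and/or guids), acquire write access if the pool is
 * writable, and report the device's size and ashift values.
 */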
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	vd->vdev_tsd = NULL;

	DROP_GIANT();
	g_topology_lock();
	error = 0;

	if (vd->vdev_spa->spa_splitting_newspa ||
	    (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
	     vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)) {
		/*
		 * We are dealing with a vdev that hasn't been previously
		 * opened (since boot), and we are not loading an
		 * existing pool configuration.  This looks like a
		 * vdev add operation to a new or existing pool.
		 * Assume the user knows what he or she is doing and find
		 * the GEOM provider by its name, ignoring GUID mismatches.
		 *
		 * XXPOLICY: It would be safer to only allow a device
		 *           that is unlabeled or labeled but missing
		 *           GUID information to be opened in this fashion,
		 *           unless we are doing a split, in which case we
		 *           should allow any guid.
		 */
		cp = vdev_geom_open_by_path(vd, 0);
	} else {
		/*
		 * Try using the recorded path for this device, but only
		 * accept it if its label data contains the expected GUIDs.
		 */
		cp = vdev_geom_open_by_path(vd, 1);
		if (cp == NULL) {
			/*
			 * The device at vd->vdev_path doesn't have the
			 * expected GUIDs. The disks might have merely
			 * moved around so try all other GEOM providers
			 * to find one with the right GUIDs.
			 */
			cp = vdev_geom_open_by_guids(vd);
		}
	}

	if (cp == NULL) {
		ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
		error = ENOENT;
	} else if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
	    !ISP2(cp->provider->sectorsize)) {
		ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
		    vd->vdev_path);

		vdev_geom_close_locked(vd);
		error = EINVAL;
		cp = NULL;
	} else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
		int i;

		for (i = 0; i < 5; i++) {
			error = g_access(cp, 0, 1, 0);
			if (error == 0)
				break;
			g_topology_unlock();
			tsleep(vd, 0, "vdev", hz / 2);
			g_topology_lock();
		}
		if (error != 0) {
			printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
			    vd->vdev_path, error);
			vdev_geom_close_locked(vd);
			cp = NULL;
		}
	}

	g_topology_unlock();
	PICKUP_GIANT();
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}
	pp = cp->provider;

	/*
	 * Determine the actual size of the device.
	 */
	*max_psize = *psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size and preferred
	 * transfer size.
	 */
	*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
	*physical_ashift = 0;
	if (pp->stripesize > (1 << *logical_ashift) && ISP2(pp->stripesize) &&
	    pp->stripesize <= (1 << SPA_MAXASHIFT) && pp->stripeoffset == 0)
		*physical_ashift = highbit(pp->stripesize) - 1;

	/*
	 * Clear the nowritecache settings, so that on a vdev_reopen()
	 * we will try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/*
	 * Determine the device's rotation rate.
	 */
	vdev_geom_set_rotation_rate(vd, cp);

	return (0);
}

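/* Close entry point: take the topology lock and detach the consumer. */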
static void
vdev_geom_close(vdev_t *vd)
{

	DROP_GIANT();
	g_topology_lock();
	vdev_geom_close_locked(vd);
	g_topology_unlock();
	PICKUP_GIANT();
}

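/*
 * Completion callback run when a bio finishes.  Translates the bio error
 * into the zio, latching ENOTSUP for flush/TRIM so those requests are
 * not retried, and scheduling device removal on a fatal ENXIO.
 */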
static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_t *vd;
	zio_t *zio;

	zio = bp->bio_caller1;
	vd = zio->io_vd;
	zio->io_error = bp->bio_error;
	if (zio->io_error == 0 && bp->bio_resid != 0)
		zio->io_error = SET_ERROR(EIO);

	switch (zio->io_error) {
	case ENOTSUP:
		/*
		 * If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
		 * that future attempts will never succeed. In this case
		 * we set a persistent flag so that we don't bother with
		 * requests in the future.
		 */
		switch (bp->bio_cmd) {
		case BIO_FLUSH:
			vd->vdev_nowritecache = B_TRUE;
			break;
		case BIO_DELETE:
			vd->vdev_notrim = B_TRUE;
			break;
		}
		break;
	case ENXIO:
		if (!vd->vdev_remove_wanted) {
			/*
			 * If the provider's error is set we assume it is
			 * being removed.
			 */
			if (bp->bio_to->error != 0) {
				vd->vdev_remove_wanted = B_TRUE;
				spa_async_request(zio->io_spa,
				    SPA_ASYNC_REMOVE);
			} else if (!vd->vdev_delayed_close) {
				vd->vdev_delayed_close = B_TRUE;
			}
		}
		break;
	}
	g_destroy_bio(bp);
	zio_delay_interrupt(zio);
}

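/*
 * I/O entry point: translate the zio into a bio.  Cache flushes and
 * TRIMs may be short-circuited by the tunables above or by the vdev's
 * latched capability flags; reads and writes always go to GEOM.
 */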
static void
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	struct g_consumer *cp;
	struct bio *bp;

	vd = zio->io_vd;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		} else {
			switch (zio->io_cmd) {
			case DKIOCFLUSHWRITECACHE:
				if (zfs_nocacheflush || vdev_geom_bio_flush_disable)
					break;
				if (vd->vdev_nowritecache) {
					zio->io_error = SET_ERROR(ENOTSUP);
					break;
				}
				goto sendreq;
			default:
				zio->io_error = SET_ERROR(ENOTSUP);
			}
		}

		zio_execute(zio);
		return;
	case ZIO_TYPE_FREE:
		if (vd->vdev_notrim) {
			zio->io_error = SET_ERROR(ENOTSUP);
		} else if (!vdev_geom_bio_delete_disable) {
			goto sendreq;
		}
		zio_execute(zio);
		return;
	}
sendreq:
	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_FREE ||
	    zio->io_type == ZIO_TYPE_IOCTL);

	cp = vd->vdev_tsd;
	if (cp == NULL) {
		zio->io_error = SET_ERROR(ENXIO);
		zio_interrupt(zio);
		return;
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		zio->io_target_timestamp = zio_handle_io_delay(zio);
		bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
		bp->bio_data = zio->io_data;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_FREE:
		bp->bio_cmd = BIO_DELETE;
		bp->bio_data = NULL;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_IOCTL:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_flags |= BIO_ORDERED;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	}
	bp->bio_done = vdev_geom_io_intr;

	g_io_request(bp, cp);
}

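/* Nothing to do here; completion is handled in vdev_geom_io_intr(). */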
static void
vdev_geom_io_done(zio_t *zio)
{
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}

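/*
 * Operations vector exposing this GEOM-backed implementation to ZFS;
 * the NULL entry corresponds to the optional state-change callback,
 * which this vdev type does not implement.
 */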
vdev_ops_t vdev_geom_ops = {
	vdev_geom_open,
	vdev_geom_close,
	vdev_default_asize,
	vdev_geom_io_start,
	vdev_geom_io_done,
	NULL,
	vdev_geom_hold,
	vdev_geom_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};