author     asomers <asomers@FreeBSD.org>  2015-12-10 21:46:21 +0000
committer  asomers <asomers@FreeBSD.org>  2015-12-10 21:46:21 +0000
commit     0e501d366b02ad6c7977ef3afb34f2721dabdd81 (patch)
tree       68f76fb5574bdf485b2c21882a05c0a9ac218371
parent     82f92a09527f65d380f98575fb1a3181b20eddae (diff)
During vdev_geom_open, require that the vdev guids match the device's label
except during split, add, or create operations.  This fixes a bug where
the wrong disk could be returned, and higher layers of ZFS would
immediately eject it again.

sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c:
	o When opening by GUID, require both the pool and vdev GUIDs to
	  match.  While it is highly unlikely for two vdevs to have the
	  same vdev GUIDs, the ZFS storage pool allocator only guarantees
	  that they are unique within a pool.
	o Modify the open behavior to:
	  - If we are opening a vdev that hasn't previously been opened,
	    open by path without checking GUIDs.
	  - Otherwise, open by path and verify the GUIDs.
	  - If that fails, search all geom providers for a device with
	    matching GUIDs.
	  - If that fails, return ENOENT.

Submitted by:	gibbs, asomers
Reviewed by:	smh
MFC after:	4 weeks
Sponsored by:	Spectra Logic Corp
Differential Revision:	https://reviews.freebsd.org/D4486
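
The open policy above is essentially a small decision tree.  The C sketch
below restates it in simplified userland form; vdev_info, open_by_path(),
and find_by_guids() are illustrative stand-ins, not the kernel's actual
helpers, and the real implementation is in the vdev_geom_open() hunk of
the diff that follows.

/*
 * Minimal sketch of the open policy described above.  All names here
 * are made up for illustration; the helper prototypes are declared but
 * deliberately left undefined, since this is a sketch, not the driver.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct vdev_info {
	const char	*path;			/* recorded device path */
	uint64_t	pool_guid;		/* expected pool GUID */
	uint64_t	vdev_guid;		/* expected vdev GUID */
	bool		previously_opened;	/* opened since boot? */
	bool		pool_loading;		/* loading an existing config? */
	bool		splitting;		/* pool split in progress? */
};

/* Hypothetical stand-ins for vdev_geom_open_by_path() and
 * vdev_geom_open_by_guids(); assume each returns a handle or NULL. */
void	*open_by_path(const char *path, bool check_guids);
void	*find_by_guids(uint64_t pool_guid, uint64_t vdev_guid);

int
open_vdev(struct vdev_info *vd, void **devp)
{
	void *dev;

	if (vd->splitting ||
	    (!vd->previously_opened && !vd->pool_loading)) {
		/* Create/add/split: trust the path, skip GUID checks. */
		dev = open_by_path(vd->path, false);
	} else {
		/* Normal open: the label must carry both expected GUIDs. */
		dev = open_by_path(vd->path, true);
		if (dev == NULL) {
			/* The disk may have moved; search all providers. */
			dev = find_by_guids(vd->pool_guid, vd->vdev_guid);
		}
	}
	if (dev == NULL)
		return (ENOENT);	/* nothing matched */
	*devp = dev;
	return (0);
}
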
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c | 94
1 file changed, 55 insertions(+), 39 deletions(-)
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c
index 848340c..7e7dc44 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c
@@ -206,14 +206,12 @@ vdev_geom_detach(void *arg, int flag __unused)
}
}
-static uint64_t
-nvlist_get_guid(nvlist_t *list)
+static void
+nvlist_get_guids(nvlist_t *list, uint64_t *pguid, uint64_t *vguid)
{
- uint64_t value;
- value = 0;
- nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, &value);
- return (value);
+ nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, vguid);
+ nvlist_lookup_uint64(list, ZPOOL_CONFIG_POOL_GUID, pguid);
}
static int
@@ -268,7 +266,7 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
size_t buflen;
uint64_t psize;
off_t offset, size;
- uint64_t guid, state, txg;
+ uint64_t state, txg;
int error, l, len;
g_topology_assert_not();
@@ -282,7 +280,6 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
size = sizeof(*label) + pp->sectorsize -
((sizeof(*label) - 1) % pp->sectorsize) - 1;
- guid = 0;
label = kmem_alloc(size, KM_SLEEP);
buflen = sizeof(label->vl_vdev_phys.vp_nvlist);
@@ -477,30 +474,29 @@ vdev_geom_read_pool_label(const char *name,
return (*count > 0 ? 0 : ENOENT);
}
-static uint64_t
-vdev_geom_read_guid(struct g_consumer *cp)
+static void
+vdev_geom_read_guids(struct g_consumer *cp, uint64_t *pguid, uint64_t *vguid)
{
nvlist_t *config;
- uint64_t guid;
g_topology_assert_not();
- guid = 0;
+ *pguid = 0;
+ *vguid = 0;
if (vdev_geom_read_config(cp, &config) == 0) {
- guid = nvlist_get_guid(config);
+ nvlist_get_guids(config, pguid, vguid);
nvlist_free(config);
}
- return (guid);
}
static struct g_consumer *
-vdev_geom_attach_by_guid(uint64_t guid)
+vdev_geom_attach_by_guids(uint64_t pool_guid, uint64_t vdev_guid)
{
struct g_class *mp;
struct g_geom *gp, *zgp;
struct g_provider *pp;
struct g_consumer *cp, *zcp;
- uint64_t pguid;
+ uint64_t pguid, vguid;
g_topology_assert();
@@ -520,15 +516,15 @@ vdev_geom_attach_by_guid(uint64_t guid)
if (vdev_geom_attach_taster(zcp, pp) != 0)
continue;
g_topology_unlock();
- pguid = vdev_geom_read_guid(zcp);
+ vdev_geom_read_guids(zcp, &pguid, &vguid);
g_topology_lock();
vdev_geom_detach_taster(zcp);
- if (pguid != guid)
+ if (pguid != pool_guid || vguid != vdev_guid)
continue;
cp = vdev_geom_attach(pp);
if (cp == NULL) {
- printf("ZFS WARNING: Unable to attach to %s.\n",
- pp->name);
+ ZFS_LOG(1, "ZFS WARNING: Unable to "
+ "attach to %s.\n", pp->name);
continue;
}
break;
@@ -546,7 +542,7 @@ end:
}
static struct g_consumer *
-vdev_geom_open_by_guid(vdev_t *vd)
+vdev_geom_open_by_guids(vdev_t *vd)
{
struct g_consumer *cp;
char *buf;
@@ -555,7 +551,7 @@ vdev_geom_open_by_guid(vdev_t *vd)
g_topology_assert();
ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
- cp = vdev_geom_attach_by_guid(vd->vdev_guid);
+ cp = vdev_geom_attach_by_guids(spa_guid(vd->vdev_spa), vd->vdev_guid);
if (cp != NULL) {
len = strlen(cp->provider->name) + strlen("/dev/") + 1;
buf = kmem_alloc(len, KM_SLEEP);
@@ -564,10 +560,12 @@ vdev_geom_open_by_guid(vdev_t *vd)
spa_strfree(vd->vdev_path);
vd->vdev_path = buf;
- ZFS_LOG(1, "Attach by guid [%ju] succeeded, provider %s.",
+ ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
+ (uintmax_t)spa_guid(vd->vdev_spa),
(uintmax_t)vd->vdev_guid, vd->vdev_path);
} else {
- ZFS_LOG(1, "Search by guid [%ju] failed.",
+ ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
+ (uintmax_t)spa_guid(vd->vdev_spa),
(uintmax_t)vd->vdev_guid);
}
@@ -579,7 +577,7 @@ vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
struct g_provider *pp;
struct g_consumer *cp;
- uint64_t guid;
+ uint64_t pguid, vguid;
g_topology_assert();
@@ -591,14 +589,17 @@ vdev_geom_open_by_path(vdev_t *vd, int check_guid)
if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
pp->sectorsize <= VDEV_PAD_SIZE) {
g_topology_unlock();
- guid = vdev_geom_read_guid(cp);
+ vdev_geom_read_guids(cp, &pguid, &vguid);
g_topology_lock();
- if (guid != vd->vdev_guid) {
+ if (pguid != spa_guid(vd->vdev_spa) ||
+ vguid != vd->vdev_guid) {
vdev_geom_detach(cp, 0);
cp = NULL;
ZFS_LOG(1, "guid mismatch for provider %s: "
- "%ju != %ju.", vd->vdev_path,
- (uintmax_t)vd->vdev_guid, (uintmax_t)guid);
+ "%ju:%ju != %ju:%ju.", vd->vdev_path,
+ (uintmax_t)spa_guid(vd->vdev_spa),
+ (uintmax_t)vd->vdev_guid,
+ (uintmax_t)pguid, (uintmax_t)vguid);
} else {
ZFS_LOG(1, "guid match for provider %s.",
vd->vdev_path);
@@ -632,23 +633,38 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
g_topology_lock();
error = 0;
- /*
- * If we're creating or splitting a pool, just find the GEOM provider
- * by its name and ignore GUID mismatches.
- */
- if (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
- vd->vdev_spa->spa_splitting_newspa == B_TRUE)
+ if (vd->vdev_spa->spa_splitting_newspa ||
+ (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
+ vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)) {
+ /*
+ * We are dealing with a vdev that hasn't been previously
+ * opened (since boot), and we are not loading an
+ * existing pool configuration. This looks like a
+ * vdev add operation to a new or existing pool.
+ * Assume the user knows what he/she is doing and find
+ * GEOM provider by its name, ignoring GUID mismatches.
+ *
+ * XXPOLICY: It would be safer to only allow a device
+ * that is unlabeled or labeled but missing
+ * GUID information to be opened in this fashion,
+ * unless we are doing a split, in which case we
+ * should allow any guid.
+ */
cp = vdev_geom_open_by_path(vd, 0);
- else {
+ } else {
+ /*
+ * Try using the recorded path for this device, but only
+ * accept it if its label data contains the expected GUIDs.
+ */
cp = vdev_geom_open_by_path(vd, 1);
if (cp == NULL) {
/*
* The device at vd->vdev_path doesn't have the
- * expected guid. The disks might have merely
+ * expected GUIDs. The disks might have merely
* moved around so try all other GEOM providers
- * to find one with the right guid.
+ * to find one with the right GUIDs.
*/
- cp = vdev_geom_open_by_guid(vd);
+ cp = vdev_geom_open_by_guids(vd);
}
}
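
For the first bullet of the commit message, the key point is that a vdev
GUID is only guaranteed unique within its pool, so the provider search has
to compare the (pool GUID, vdev GUID) pair.  The self-contained sketch
below uses made-up provider data (a plain struct stands in for GEOM
tasting and vdev_geom_read_guids()) to show a single vdev GUID matching
two providers while the pair selects the right one:

/*
 * Sketch of the two-GUID provider match.  The provider array and its
 * embedded GUID fields are hypothetical stand-ins for labels read via
 * vdev_geom_read_guids(); nothing here is part of the real GEOM API.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct provider {
	const char	*name;
	uint64_t	pool_guid;	/* from ZPOOL_CONFIG_POOL_GUID */
	uint64_t	vdev_guid;	/* from ZPOOL_CONFIG_GUID */
};

static const struct provider *
find_by_guids(const struct provider *prov, size_t n,
    uint64_t pool_guid, uint64_t vdev_guid)
{
	size_t i;

	for (i = 0; i < n; i++) {
		/* Require both GUIDs: vdev GUIDs can collide across pools. */
		if (prov[i].pool_guid == pool_guid &&
		    prov[i].vdev_guid == vdev_guid)
			return (&prov[i]);
	}
	return (NULL);
}

int
main(void)
{
	/* Two pools whose vdevs happen to share a vdev GUID. */
	const struct provider provs[] = {
		{ "da0", 0x1111, 0xaaaa },
		{ "da1", 0x2222, 0xaaaa },	/* same vdev GUID, other pool */
	};
	const struct provider *pp;

	pp = find_by_guids(provs, 2, 0x2222, 0xaaaa);
	printf("matched provider: %s\n", pp != NULL ? pp->name : "none");
	return (0);
}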