-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c        |   2
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c  | 104
2 files changed, 66 insertions, 40 deletions
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c
index ee0b52f..69b84b7 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c
@@ -5951,6 +5951,8 @@ spa_async_remove(spa_t *spa, vdev_t *vd)
 		vd->vdev_stat.vs_checksum_errors = 0;
 
 		vdev_state_dirty(vd->vdev_top);
+		/* Tell userspace that the vdev is gone. */
+		zfs_post_remove(spa, vd);
 	}
 
 	for (int c = 0; c < vd->vdev_children; c++)
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c
index 4cc0472..763ea1a 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c
@@ -116,7 +116,6 @@ vdev_geom_orphan(struct g_consumer *cp)
 	 * async removal support to invoke a close on this
 	 * vdev once it is safe to do so.
 	 */
-	zfs_post_remove(vd->vdev_spa, vd);
 	vd->vdev_remove_wanted = B_TRUE;
 	spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
 }
@@ -208,14 +207,12 @@ vdev_geom_detach(void *arg, int flag __unused)
 	}
 }
 
-static uint64_t
-nvlist_get_guid(nvlist_t *list)
+static void
+nvlist_get_guids(nvlist_t *list, uint64_t *pguid, uint64_t *vguid)
 {
-	uint64_t value;
 
-	value = 0;
-	nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, &value);
-	return (value);
+	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, vguid);
+	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_POOL_GUID, pguid);
 }
 
 static int
@@ -270,7 +267,7 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
 	size_t buflen;
 	uint64_t psize;
 	off_t offset, size;
-	uint64_t guid, state, txg;
+	uint64_t state, txg;
 	int error, l, len;
 
 	g_topology_assert_not();
@@ -284,7 +281,6 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
 	size = sizeof(*label) + pp->sectorsize -
 	    ((sizeof(*label) - 1) % pp->sectorsize) - 1;
 
-	guid = 0;
 	label = kmem_alloc(size, KM_SLEEP);
 	buflen = sizeof(label->vl_vdev_phys.vp_nvlist);
 
@@ -479,30 +475,29 @@ vdev_geom_read_pool_label(const char *name,
 	return (*count > 0 ? 0 : ENOENT);
 }
 
-static uint64_t
-vdev_geom_read_guid(struct g_consumer *cp)
+static void
+vdev_geom_read_guids(struct g_consumer *cp, uint64_t *pguid, uint64_t *vguid)
 {
 	nvlist_t *config;
-	uint64_t guid;
 
 	g_topology_assert_not();
 
-	guid = 0;
+	*pguid = 0;
+	*vguid = 0;
 	if (vdev_geom_read_config(cp, &config) == 0) {
-		guid = nvlist_get_guid(config);
+		nvlist_get_guids(config, pguid, vguid);
 		nvlist_free(config);
 	}
-	return (guid);
 }
 
 static struct g_consumer *
-vdev_geom_attach_by_guid(uint64_t guid)
+vdev_geom_attach_by_guids(uint64_t pool_guid, uint64_t vdev_guid)
 {
 	struct g_class *mp;
 	struct g_geom *gp, *zgp;
 	struct g_provider *pp;
 	struct g_consumer *cp, *zcp;
-	uint64_t pguid;
+	uint64_t pguid, vguid;
 
 	g_topology_assert();
 
@@ -522,15 +517,24 @@ vdev_geom_attach_by_guid(uint64_t guid)
 				if (vdev_geom_attach_taster(zcp, pp) != 0)
 					continue;
 				g_topology_unlock();
-				pguid = vdev_geom_read_guid(zcp);
+				vdev_geom_read_guids(zcp, &pguid, &vguid);
 				g_topology_lock();
 				vdev_geom_detach_taster(zcp);
-				if (pguid != guid)
+				/*
+				 * Check that the label's vdev guid matches the
+				 * desired guid.  If the label has a pool guid,
+				 * check that it matches too.  (Inactive spares
+				 * and L2ARCs do not have any pool guid in the
+				 * label.)
+				 */
+				if ((pguid != 0 &&
+				    pguid != pool_guid) ||
+				    vguid != vdev_guid)
 					continue;
 				cp = vdev_geom_attach(pp);
 				if (cp == NULL) {
-					printf("ZFS WARNING: Unable to attach to %s.\n",
-					    pp->name);
+					printf("ZFS WARNING: Unable to "
+					    "attach to %s.\n", pp->name);
 					continue;
 				}
 				break;
@@ -548,7 +552,7 @@ end:
 }
 
 static struct g_consumer *
-vdev_geom_open_by_guid(vdev_t *vd)
+vdev_geom_open_by_guids(vdev_t *vd)
 {
 	struct g_consumer *cp;
 	char *buf;
@@ -557,7 +561,7 @@
 	g_topology_assert();
 
 	ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
-	cp = vdev_geom_attach_by_guid(vd->vdev_guid);
+	cp = vdev_geom_attach_by_guids(spa_guid(vd->vdev_spa), vd->vdev_guid);
 	if (cp != NULL) {
 		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
 		buf = kmem_alloc(len, KM_SLEEP);
@@ -566,10 +570,12 @@
 		spa_strfree(vd->vdev_path);
 		vd->vdev_path = buf;
 
-		ZFS_LOG(1, "Attach by guid [%ju] succeeded, provider %s.",
+		ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
+		    (uintmax_t)spa_guid(vd->vdev_spa),
 		    (uintmax_t)vd->vdev_guid, vd->vdev_path);
 	} else {
-		ZFS_LOG(1, "Search by guid [%ju] failed.",
+		ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
+		    (uintmax_t)spa_guid(vd->vdev_spa),
 		    (uintmax_t)vd->vdev_guid);
 	}
 
@@ -581,7 +587,7 @@
 {
 	struct g_provider *pp;
 	struct g_consumer *cp;
-	uint64_t guid;
+	uint64_t pguid, vguid;
 
 	g_topology_assert();
 
@@ -593,14 +599,17 @@
 	if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
 	    pp->sectorsize <= VDEV_PAD_SIZE) {
 		g_topology_unlock();
-		guid = vdev_geom_read_guid(cp);
+		vdev_geom_read_guids(cp, &pguid, &vguid);
 		g_topology_lock();
-		if (guid != vd->vdev_guid) {
+		if (pguid != spa_guid(vd->vdev_spa) ||
+		    vguid != vd->vdev_guid) {
 			vdev_geom_detach(cp, 0);
 			cp = NULL;
 			ZFS_LOG(1, "guid mismatch for provider %s: "
-			    "%ju != %ju.", vd->vdev_path,
-			    (uintmax_t)vd->vdev_guid, (uintmax_t)guid);
+			    "%ju:%ju != %ju:%ju.", vd->vdev_path,
+			    (uintmax_t)spa_guid(vd->vdev_spa),
+			    (uintmax_t)vd->vdev_guid,
+			    (uintmax_t)pguid, (uintmax_t)vguid);
 		} else {
 			ZFS_LOG(1, "guid match for provider %s.",
 			    vd->vdev_path);
@@ -634,23 +643,38 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
 	g_topology_lock();
 	error = 0;
 
-	/*
-	 * If we're creating or splitting a pool, just find the GEOM provider
-	 * by its name and ignore GUID mismatches.
-	 */
-	if (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
-	    vd->vdev_spa->spa_splitting_newspa == B_TRUE)
+	if (vd->vdev_spa->spa_splitting_newspa ||
+	    (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
+	     vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)) {
+		/*
+		 * We are dealing with a vdev that hasn't been previously
+		 * opened (since boot), and we are not loading an
+		 * existing pool configuration.  This looks like a
+		 * vdev add operation to a new or existing pool.
+		 * Assume the user knows what he/she is doing and find
+		 * GEOM provider by its name, ignoring GUID mismatches.
+		 *
+		 * XXPOLICY: It would be safer to only allow a device
+		 *           that is unlabeled or labeled but missing
+		 *           GUID information to be opened in this fashion,
+		 *           unless we are doing a split, in which case we
+		 *           should allow any guid.
+		 */
 		cp = vdev_geom_open_by_path(vd, 0);
-	else {
+	} else {
+		/*
+		 * Try using the recorded path for this device, but only
+		 * accept it if its label data contains the expected GUIDs.
+		 */
 		cp = vdev_geom_open_by_path(vd, 1);
 		if (cp == NULL) {
 			/*
 			 * The device at vd->vdev_path doesn't have the
-			 * expected guid. The disks might have merely
+			 * expected GUIDs. The disks might have merely
 			 * moved around so try all other GEOM providers
-			 * to find one with the right guid.
+			 * to find one with the right GUIDs.
 			 */
-			cp = vdev_geom_open_by_guid(vd);
+			cp = vdev_geom_open_by_guids(vd);
 		}
 	}
 