Diffstat (limited to 'contrib/opensolaris/lib/libzfs')
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs.h            |  441
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c |  606
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_config.c     |  374
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c    | 3753
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_graph.c      |  646
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_impl.h       |  171
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_import.c     |  994
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_mount.c      |  986
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_pool.c       | 2055
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_status.c     |  289
-rw-r--r-- contrib/opensolaris/lib/libzfs/common/libzfs_util.c       |  827
11 files changed, 11142 insertions(+), 0 deletions(-)
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs.h b/contrib/opensolaris/lib/libzfs/common/libzfs.h
new file mode 100644
index 0000000..8e9a848
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs.h
@@ -0,0 +1,441 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _LIBZFS_H
+#define _LIBZFS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <libnvpair.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/varargs.h>
+#include <sys/fs/zfs.h>
+#include <sys/zfs_ioctl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Miscellaneous ZFS constants
+ */
+#define ZFS_MAXNAMELEN MAXNAMELEN
+#define ZPOOL_MAXNAMELEN MAXNAMELEN
+#define ZFS_MAXPROPLEN MAXPATHLEN
+
+/*
+ * libzfs errors
+ */
+enum {
+ EZFS_NOMEM = 2000, /* out of memory */
+ EZFS_BADPROP, /* invalid property value */
+ EZFS_PROPREADONLY, /* cannot set readonly property */
+ EZFS_PROPTYPE, /* property does not apply to dataset type */
+ EZFS_PROPNONINHERIT, /* property is not inheritable */
+ EZFS_PROPSPACE, /* bad quota or reservation */
+ EZFS_BADTYPE, /* dataset is not of appropriate type */
+ EZFS_BUSY, /* pool or dataset is busy */
+ EZFS_EXISTS, /* pool or dataset already exists */
+ EZFS_NOENT, /* no such pool or dataset */
+ EZFS_BADSTREAM, /* bad backup stream */
+ EZFS_DSREADONLY, /* dataset is readonly */
+ EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */
+ EZFS_VOLHASDATA, /* volume already contains data */
+ EZFS_INVALIDNAME, /* invalid dataset name */
+ EZFS_BADRESTORE, /* unable to restore to destination */
+ EZFS_BADBACKUP, /* backup failed */
+ EZFS_BADTARGET, /* bad attach/detach/replace target */
+ EZFS_NODEVICE, /* no such device in pool */
+ EZFS_BADDEV, /* invalid device to add */
+ EZFS_NOREPLICAS, /* no valid replicas */
+ EZFS_RESILVERING, /* currently resilvering */
+ EZFS_BADVERSION, /* unsupported version */
+ EZFS_POOLUNAVAIL, /* pool is currently unavailable */
+ EZFS_DEVOVERFLOW, /* too many devices in one vdev */
+ EZFS_BADPATH, /* must be an absolute path */
+ EZFS_CROSSTARGET, /* rename or clone across pool or dataset */
+ EZFS_ZONED, /* used improperly in local zone */
+ EZFS_MOUNTFAILED, /* failed to mount dataset */
+ EZFS_UMOUNTFAILED, /* failed to unmount dataset */
+ EZFS_UNSHARENFSFAILED, /* unshare(1M) failed */
+ EZFS_SHARENFSFAILED, /* share(1M) failed */
+ EZFS_DEVLINKS, /* failed to create zvol links */
+ EZFS_PERM, /* permission denied */
+ EZFS_NOSPC, /* out of space */
+ EZFS_IO, /* I/O error */
+ EZFS_INTR, /* signal received */
+ EZFS_ISSPARE, /* device is a hot spare */
+ EZFS_INVALCONFIG, /* invalid vdev configuration */
+ EZFS_RECURSIVE, /* recursive dependency */
+ EZFS_NOHISTORY, /* no history object */
+ EZFS_UNSHAREISCSIFAILED, /* iscsitgtd failed request to unshare */
+ EZFS_SHAREISCSIFAILED, /* iscsitgtd failed request to share */
+ EZFS_POOLPROPS, /* couldn't retrieve pool props */
+ EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */
+ EZFS_POOL_INVALARG, /* invalid argument for this pool operation */
+ EZFS_UNKNOWN
+};
+
+/*
+ * Basic handle types
+ */
+typedef struct zfs_handle zfs_handle_t;
+typedef struct zpool_handle zpool_handle_t;
+typedef struct libzfs_handle libzfs_handle_t;
+
+/*
+ * Library initialization
+ */
+extern libzfs_handle_t *libzfs_init(void);
+extern void libzfs_fini(libzfs_handle_t *);
+
+extern libzfs_handle_t *zpool_get_handle(zpool_handle_t *);
+extern libzfs_handle_t *zfs_get_handle(zfs_handle_t *);
+
+extern void libzfs_print_on_error(libzfs_handle_t *, boolean_t);
+
+extern int libzfs_errno(libzfs_handle_t *);
+extern const char *libzfs_error_action(libzfs_handle_t *);
+extern const char *libzfs_error_description(libzfs_handle_t *);
+
+/*
+ * Basic handle functions
+ */
+extern zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
+extern zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
+extern void zpool_close(zpool_handle_t *);
+extern const char *zpool_get_name(zpool_handle_t *);
+extern uint64_t zpool_get_guid(zpool_handle_t *);
+extern uint64_t zpool_get_space_used(zpool_handle_t *);
+extern uint64_t zpool_get_space_total(zpool_handle_t *);
+extern int zpool_get_root(zpool_handle_t *, char *, size_t);
+extern int zpool_get_state(zpool_handle_t *);
+extern uint64_t zpool_get_version(zpool_handle_t *);
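+
+/*
+ * A minimal open/query/close sequence (sketch; assumes 'hdl' was
+ * obtained from libzfs_init() and 'tank' is an illustrative pool name):
+ *
+ *	zpool_handle_t *zhp;
+ *
+ *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
+ *		return (-1);
+ *	(void) printf("%s\n", zpool_get_name(zhp));
+ *	zpool_close(zhp);
+ */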
+
+/*
+ * Iterate over all active pools in the system.
+ */
+typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
+extern int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
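+
+/*
+ * For example, a callback that prints each active pool (sketch; the
+ * callback takes ownership of the handle it is passed):
+ *
+ *	static int
+ *	print_pool(zpool_handle_t *zhp, void *data)
+ *	{
+ *		(void) printf("%s\n", zpool_get_name(zhp));
+ *		zpool_close(zhp);
+ *		return (0);
+ *	}
+ *
+ *	(void) zpool_iter(hdl, print_pool, NULL);
+ */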
+
+/*
+ * Functions to create and destroy pools
+ */
+extern int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
+ const char *);
+extern int zpool_destroy(zpool_handle_t *);
+extern int zpool_add(zpool_handle_t *, nvlist_t *);
+
+/*
+ * Functions to manipulate pool and vdev state
+ */
+extern int zpool_scrub(zpool_handle_t *, pool_scrub_type_t);
+
+extern int zpool_vdev_online(zpool_handle_t *, const char *);
+extern int zpool_vdev_offline(zpool_handle_t *, const char *, int);
+extern int zpool_vdev_attach(zpool_handle_t *, const char *, const char *,
+ nvlist_t *, int);
+extern int zpool_vdev_detach(zpool_handle_t *, const char *);
+extern int zpool_vdev_remove(zpool_handle_t *, const char *);
+extern int zpool_clear(zpool_handle_t *, const char *);
+extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *);
+
+/*
+ * Functions to manage pool properties
+ */
+extern int zpool_set_prop(zpool_handle_t *, const char *, const char *);
+extern int zpool_get_prop(zpool_handle_t *, zfs_prop_t, char *,
+ size_t proplen, zfs_source_t *);
+extern const char *zpool_prop_to_name(zpool_prop_t);
+extern const char *zpool_prop_values(zpool_prop_t);
+
+/*
+ * Pool health statistics.
+ */
+typedef enum {
+ /*
+ * The following correspond to faults as defined in the fault.fs.zfs.*
+ * event namespace. Each is associated with a corresponding message ID.
+ */
+ ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */
+ ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */
+ ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */
+ ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */
+ ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */
+ ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */
+ ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */
+ ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */
+ ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
+ ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */
+
+ /*
+ * The following are not faults per se, but still an error possibly
+ * requiring administrative attention. There is no corresponding
+ * message ID.
+ */
+ ZPOOL_STATUS_VERSION_OLDER, /* older on-disk version */
+ ZPOOL_STATUS_RESILVERING, /* device being resilvered */
+ ZPOOL_STATUS_OFFLINE_DEV, /* device taken offline */
+
+ /*
+ * Finally, the following indicates a healthy pool.
+ */
+ ZPOOL_STATUS_OK
+} zpool_status_t;
+
+extern zpool_status_t zpool_get_status(zpool_handle_t *, char **);
+extern zpool_status_t zpool_import_status(nvlist_t *, char **);
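+
+/*
+ * Sketch of checking pool health (assumes 'zhp' is an open pool handle;
+ * the message ID is only set for the fault cases above):
+ *
+ *	char *msgid;
+ *
+ *	if (zpool_get_status(zhp, &msgid) != ZPOOL_STATUS_OK &&
+ *	    msgid != NULL)
+ *		(void) printf("see: http://www.sun.com/msg/%s\n", msgid);
+ */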
+
+/*
+ * Statistics and configuration functions.
+ */
+extern nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
+extern int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
+extern int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
+
+/*
+ * Import and export functions
+ */
+extern int zpool_export(zpool_handle_t *);
+extern int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
+ const char *);
+
+/*
+ * Search for pools to import
+ */
+extern nvlist_t *zpool_find_import(libzfs_handle_t *, int, char **);
+
+/*
+ * Miscellaneous pool functions
+ */
+extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *);
+extern int zpool_upgrade(zpool_handle_t *);
+extern int zpool_get_history(zpool_handle_t *, nvlist_t **);
+extern void zpool_log_history(libzfs_handle_t *, int, char **, const char *,
+ boolean_t, boolean_t);
+extern void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
+ size_t len);
+
+/*
+ * Basic handle manipulations. These functions do not create or destroy the
+ * underlying datasets, only the references to them.
+ */
+extern zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
+extern void zfs_close(zfs_handle_t *);
+extern zfs_type_t zfs_get_type(const zfs_handle_t *);
+extern const char *zfs_get_name(const zfs_handle_t *);
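+
+/*
+ * For example (sketch; 'tank/home' is an illustrative dataset name):
+ *
+ *	zfs_handle_t *zhp;
+ *
+ *	if ((zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM)) == NULL)
+ *		return (-1);
+ *	(void) printf("%s\n", zfs_get_name(zhp));
+ *	zfs_close(zhp);
+ */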
+
+/*
+ * Property management functions. Some functions are shared with the kernel,
+ * and are found in sys/fs/zfs.h.
+ */
+extern const char *zfs_prop_to_name(zfs_prop_t);
+extern int zfs_prop_set(zfs_handle_t *, const char *, const char *);
+extern int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
+ zfs_source_t *, char *, size_t, boolean_t);
+extern int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
+ zfs_source_t *, char *, size_t);
+extern uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
+extern const char *zfs_prop_get_string(zfs_handle_t *, zfs_prop_t);
+extern int zfs_prop_inherit(zfs_handle_t *, const char *);
+extern const char *zfs_prop_values(zfs_prop_t);
+extern int zfs_prop_valid_for_type(zfs_prop_t, int);
+extern const char *zfs_prop_default_string(zfs_prop_t prop);
+extern uint64_t zfs_prop_default_numeric(zfs_prop_t);
+extern int zfs_prop_is_string(zfs_prop_t prop);
+extern const char *zfs_prop_column_name(zfs_prop_t);
+extern boolean_t zfs_prop_align_right(zfs_prop_t);
+extern void nicebool(int value, char *buf, size_t buflen);
+
+typedef struct zfs_proplist {
+ zfs_prop_t pl_prop;
+ char *pl_user_prop;
+ struct zfs_proplist *pl_next;
+ boolean_t pl_all;
+ size_t pl_width;
+ boolean_t pl_fixed;
+} zfs_proplist_t;
+
+typedef zfs_proplist_t zpool_proplist_t;
+
+extern int zfs_get_proplist(libzfs_handle_t *, char *, zfs_proplist_t **);
+extern int zpool_get_proplist(libzfs_handle_t *, char *, zpool_proplist_t **);
+extern int zfs_expand_proplist(zfs_handle_t *, zfs_proplist_t **);
+extern int zpool_expand_proplist(zpool_handle_t *, zpool_proplist_t **);
+extern void zfs_free_proplist(zfs_proplist_t *);
+extern nvlist_t *zfs_get_user_props(zfs_handle_t *);
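+
+/*
+ * Proplist sketch (assuming the string form is the comma-separated
+ * property list accepted by 'zfs get', e.g. "name,used,mountpoint"):
+ *
+ *	zfs_proplist_t *pl = NULL;
+ *
+ *	if (zfs_get_proplist(hdl, props, &pl) != 0)
+ *		return (-1);
+ *	if (zfs_expand_proplist(zhp, &pl) != 0)
+ *		return (-1);
+ *	...
+ *	zfs_free_proplist(pl);
+ */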
+
+#define ZFS_MOUNTPOINT_NONE "none"
+#define ZFS_MOUNTPOINT_LEGACY "legacy"
+
+/*
+ * Functions for printing properties from zfs/zpool
+ */
+typedef struct libzfs_get_cbdata {
+ int cb_sources;
+ int cb_columns[4];
+ int cb_colwidths[5];
+ boolean_t cb_scripted;
+ boolean_t cb_literal;
+ boolean_t cb_first;
+ zfs_proplist_t *cb_proplist;
+} libzfs_get_cbdata_t;
+
+void libzfs_print_one_property(const char *, libzfs_get_cbdata_t *,
+ const char *, const char *, zfs_source_t, const char *);
+
+#define GET_COL_NAME 1
+#define GET_COL_PROPERTY 2
+#define GET_COL_VALUE 3
+#define GET_COL_SOURCE 4
+
+/*
+ * Iterator functions.
+ */
+typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
+extern int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f, void *);
+extern int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_snapshots(zfs_handle_t *, zfs_iter_f, void *);
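+
+/*
+ * Traversal sketch, as used with zfs_iter_root() (the callback must
+ * close each handle it is passed):
+ *
+ *	static int
+ *	visit(zfs_handle_t *zhp, void *data)
+ *	{
+ *		int ret;
+ *
+ *		(void) printf("%s\n", zfs_get_name(zhp));
+ *		ret = zfs_iter_filesystems(zhp, visit, data);
+ *		zfs_close(zhp);
+ *		return (ret);
+ *	}
+ */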
+
+/*
+ * Functions to create and destroy datasets.
+ */
+extern int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
+ nvlist_t *);
+extern int zfs_destroy(zfs_handle_t *);
+extern int zfs_destroy_snaps(zfs_handle_t *, char *);
+extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
+extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t);
+extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, int);
+extern int zfs_rename(zfs_handle_t *, const char *);
+extern int zfs_send(zfs_handle_t *, const char *, int);
+extern int zfs_receive(libzfs_handle_t *, const char *, int, int, int,
+ boolean_t, int);
+extern int zfs_promote(zfs_handle_t *);
+
+/*
+ * Miscellaneous functions.
+ */
+extern const char *zfs_type_to_name(zfs_type_t);
+extern void zfs_refresh_properties(zfs_handle_t *);
+extern int zfs_name_valid(const char *, zfs_type_t);
+extern int zfs_disable(zfs_handle_t *);
+extern int zfs_enable(zfs_handle_t *);
+extern zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, char *, zfs_type_t);
+
+/*
+ * Mount support functions.
+ */
+extern boolean_t is_mounted(libzfs_handle_t *, const char *special, char **);
+extern boolean_t zfs_is_mounted(zfs_handle_t *, char **);
+extern int zfs_mount(zfs_handle_t *, const char *, int);
+extern int zfs_unmount(zfs_handle_t *, const char *, int);
+extern int zfs_unmountall(zfs_handle_t *, int);
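+
+/*
+ * For example, to mount a dataset only if it is not already mounted
+ * (sketch):
+ *
+ *	if (!zfs_is_mounted(zhp, NULL) &&
+ *	    zfs_mount(zhp, NULL, 0) != 0)
+ *		return (-1);
+ */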
+
+/*
+ * Share support functions.
+ */
+extern boolean_t zfs_is_shared(zfs_handle_t *);
+extern int zfs_share(zfs_handle_t *);
+extern int zfs_unshare(zfs_handle_t *);
+
+/*
+ * Protocol-specific share support functions.
+ */
+extern boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **);
+extern int zfs_share_nfs(zfs_handle_t *);
+extern int zfs_unshare_nfs(zfs_handle_t *, const char *);
+extern int zfs_unshareall_nfs(zfs_handle_t *);
+extern boolean_t zfs_is_shared_iscsi(zfs_handle_t *);
+extern int zfs_share_iscsi(zfs_handle_t *);
+extern int zfs_unshare_iscsi(zfs_handle_t *);
+
+/*
+ * FreeBSD-specific jail support function.
+ */
+extern int zfs_jail(zfs_handle_t *, int, int);
+
+/*
+ * When dealing with nvlists, verify() is extremely useful
+ */
+#ifndef verify
+#ifdef NDEBUG
+#define verify(EX) ((void)(EX))
+#else
+#define verify(EX) assert(EX)
+#endif
+#endif
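+
+/*
+ * For example, when a pool config nvlist is known to contain the pool
+ * txg, the lookup can simply be wrapped in verify():
+ *
+ *	uint64_t txg;
+ *
+ *	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
+ *	    &txg) == 0);
+ */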
+
+/*
+ * Utility function to convert a number to a human-readable form.
+ */
+extern void zfs_nicenum(uint64_t, char *, size_t);
+extern int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
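+
+/*
+ * Round-trip sketch: zfs_nicenum() formats 1610612736 as something like
+ * "1.50G", and zfs_nicestrtonum() maps the string back to the number:
+ *
+ *	char buf[32];
+ *	uint64_t val;
+ *
+ *	zfs_nicenum(1610612736ULL, buf, sizeof (buf));
+ *	(void) zfs_nicestrtonum(hdl, buf, &val);
+ */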
+
+/*
+ * Pool destroy special. Remove the device information without destroying
+ * the underlying dataset.
+ */
+extern int zfs_remove_link(zfs_handle_t *);
+
+/*
+ * Given a device or file, determine if it is part of a pool.
+ */
+extern int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
+ boolean_t *);
+
+/*
+ * ftyp special. Read the label from a given device.
+ */
+extern int zpool_read_label(int, nvlist_t **);
+
+/*
+ * Create and remove zvol /dev links.
+ */
+extern int zpool_create_zvol_links(zpool_handle_t *);
+extern int zpool_remove_zvol_links(zpool_handle_t *);
+
+/*
+ * Enable and disable datasets within a pool by mounting/unmounting and
+ * sharing/unsharing them.
+ */
+extern int zpool_enable_datasets(zpool_handle_t *, const char *, int);
+extern int zpool_disable_datasets(zpool_handle_t *, boolean_t);
+
+#ifdef __FreeBSD__
+extern int zmount(const char *, const char *, int, char *, char *, int, char *,
+ int);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LIBZFS_H */
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c b/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c
new file mode 100644
index 0000000..d68c420
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c
@@ -0,0 +1,606 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <libintl.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <zone.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+/*
+ * Structure to keep track of dataset state. Before changing the 'sharenfs' or
+ * 'mountpoint' property, we record whether the filesystem was previously
+ * mounted/shared. This prior state dictates whether we remount/reshare the
+ * dataset after the property has been changed.
+ *
+ * The interface consists of the following sequence of functions:
+ *
+ * changelist_gather()
+ * changelist_prefix()
+ * < change property >
+ * changelist_postfix()
+ * changelist_free()
+ *
+ * Other interfaces:
+ *
+ * changelist_remove() - remove a node from a gathered list
+ * changelist_rename() - renames all datasets appropriately when doing a rename
+ * changelist_unshare() - unshares all the nodes in a given changelist
+ * changelist_haszonedchild() - check if there is any child exported to
+ * a local zone
+ */
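+/*
+ * A property change is therefore typically bracketed as follows (sketch;
+ * error handling omitted):
+ *
+ *	prop_changelist_t *clp;
+ *
+ *	if ((clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0)) == NULL)
+ *		return (-1);
+ *	(void) changelist_prefix(clp);
+ *	ret = zfs_prop_set(zhp, "mountpoint", "/new/path");
+ *	(void) changelist_postfix(clp);
+ *	changelist_free(clp);
+ */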
+typedef struct prop_changenode {
+ zfs_handle_t *cn_handle;
+ int cn_shared;
+ int cn_mounted;
+ int cn_zoned;
+ uu_list_node_t cn_listnode;
+} prop_changenode_t;
+
+struct prop_changelist {
+ zfs_prop_t cl_prop;
+ zfs_prop_t cl_realprop;
+ uu_list_pool_t *cl_pool;
+ uu_list_t *cl_list;
+ boolean_t cl_waslegacy;
+ boolean_t cl_allchildren;
+ boolean_t cl_alldependents;
+ int cl_flags;
+ boolean_t cl_haszonedchild;
+ boolean_t cl_sorted;
+};
+
+/*
+ * If the property is 'mountpoint', go through and unmount filesystems as
+ * necessary. We don't do the same for 'sharenfs', because we can just re-share
+ * with different options without interrupting service.
+ */
+int
+changelist_prefix(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ int ret = 0;
+
+ if (clp->cl_prop != ZFS_PROP_MOUNTPOINT)
+ return (0);
+
+ for (cn = uu_list_first(clp->cl_list); cn != NULL;
+ cn = uu_list_next(clp->cl_list, cn)) {
+ /*
+ * If we are in the global zone, but this dataset is exported
+ * to a local zone, do nothing.
+ */
+ if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
+ continue;
+
+ if (ZFS_IS_VOLUME(cn->cn_handle)) {
+ switch (clp->cl_realprop) {
+ case ZFS_PROP_NAME:
+ /*
+ * If this was a rename, unshare the zvol, and
+ * remove the /dev/zvol links.
+ */
+ (void) zfs_unshare_iscsi(cn->cn_handle);
+
+ if (zvol_remove_link(cn->cn_handle->zfs_hdl,
+ cn->cn_handle->zfs_name) != 0)
+ ret = -1;
+ break;
+
+ case ZFS_PROP_VOLSIZE:
+ /*
+ * If this was a change to the volume size, we
+ * need to unshare and reshare the volume.
+ */
+ (void) zfs_unshare_iscsi(cn->cn_handle);
+ break;
+ }
+ } else if (zfs_unmount(cn->cn_handle, NULL, clp->cl_flags) != 0)
+ ret = -1;
+ }
+
+ return (ret);
+}
+
+/*
+ * If the property is 'mountpoint' or 'sharenfs', go through and remount and/or
+ * reshare the filesystems as necessary. In changelist_gather() we recorded
+ * whether the filesystem was previously shared or mounted. The action we take
+ * depends on the previous state, and whether the value was previously 'legacy'.
+ * For non-legacy properties, we only remount/reshare the filesystem if it was
+ * previously mounted/shared. Otherwise, we always remount/reshare the
+ * filesystem.
+ */
+int
+changelist_postfix(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ char shareopts[ZFS_MAXPROPLEN];
+ int ret = 0;
+
+ /*
+ * If we're changing the mountpoint, attempt to destroy the underlying
+ * mountpoint. All other datasets will have inherited from this dataset
+ * (in which case their mountpoints exist in the filesystem in the new
+ * location), or have explicit mountpoints set (in which case they won't
+ * be in the changelist).
+ */
+ if ((cn = uu_list_last(clp->cl_list)) == NULL)
+ return (0);
+
+ if (clp->cl_prop == ZFS_PROP_MOUNTPOINT)
+ remove_mountpoint(cn->cn_handle);
+
+ /*
+ * We walk the datasets in reverse, because we want to mount any parent
+ * datasets before mounting the children.
+ */
+ for (cn = uu_list_last(clp->cl_list); cn != NULL;
+ cn = uu_list_prev(clp->cl_list, cn)) {
+ /*
+ * If we are in the global zone, but this dataset is exported
+ * to a local zone, do nothing.
+ */
+ if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
+ continue;
+
+ zfs_refresh_properties(cn->cn_handle);
+
+ if (ZFS_IS_VOLUME(cn->cn_handle)) {
+ /*
+ * If we're doing a rename, recreate the /dev/zvol
+ * links.
+ */
+ if (clp->cl_realprop == ZFS_PROP_NAME &&
+ zvol_create_link(cn->cn_handle->zfs_hdl,
+ cn->cn_handle->zfs_name) != 0) {
+ ret = -1;
+ } else if (cn->cn_shared ||
+ clp->cl_prop == ZFS_PROP_SHAREISCSI) {
+ if (zfs_prop_get(cn->cn_handle,
+ ZFS_PROP_SHAREISCSI, shareopts,
+ sizeof (shareopts), NULL, NULL, 0,
+ B_FALSE) == 0 &&
+ strcmp(shareopts, "off") == 0) {
+ ret = zfs_unshare_iscsi(cn->cn_handle);
+ } else {
+ ret = zfs_share_iscsi(cn->cn_handle);
+ }
+ }
+
+ continue;
+ }
+
+ if ((clp->cl_waslegacy || cn->cn_mounted) &&
+ !zfs_is_mounted(cn->cn_handle, NULL) &&
+ zfs_mount(cn->cn_handle, NULL, 0) != 0)
+ ret = -1;
+
+ /*
+ * We always re-share even if the filesystem is currently
+ * shared, so that we can adopt any new options.
+ */
+ if (cn->cn_shared ||
+ (clp->cl_prop == ZFS_PROP_SHARENFS && clp->cl_waslegacy)) {
+ if (zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARENFS,
+ shareopts, sizeof (shareopts), NULL, NULL, 0,
+ B_FALSE) == 0 && strcmp(shareopts, "off") == 0) {
+ ret = zfs_unshare_nfs(cn->cn_handle, NULL);
+ } else {
+ ret = zfs_share_nfs(cn->cn_handle);
+ }
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * Is this "dataset" a child of "parent"?
+ */
+static boolean_t
+isa_child_of(const char *dataset, const char *parent)
+{
+ int len;
+
+ len = strlen(parent);
+
+ if (strncmp(dataset, parent, len) == 0 &&
+ (dataset[len] == '@' || dataset[len] == '/' ||
+ dataset[len] == '\0'))
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+/*
+ * If we rename a filesystem, child filesystem handles are no longer valid
+ * since we identify each dataset by its name in the ZFS namespace. As a
+ * result, we have to go through and fix up all the names appropriately. We
+ * could do this automatically if libzfs kept track of all open handles, but
+ * this is a lot less work.
+ */
+void
+changelist_rename(prop_changelist_t *clp, const char *src, const char *dst)
+{
+ prop_changenode_t *cn;
+ char newname[ZFS_MAXNAMELEN];
+
+ for (cn = uu_list_first(clp->cl_list); cn != NULL;
+ cn = uu_list_next(clp->cl_list, cn)) {
+ /*
+ * Do not rename a clone that's not in the source hierarchy.
+ */
+ if (!isa_child_of(cn->cn_handle->zfs_name, src))
+ continue;
+
+ /*
+ * Destroy the previous mountpoint if needed.
+ */
+ remove_mountpoint(cn->cn_handle);
+
+ (void) strlcpy(newname, dst, sizeof (newname));
+ (void) strcat(newname, cn->cn_handle->zfs_name + strlen(src));
+
+ (void) strlcpy(cn->cn_handle->zfs_name, newname,
+ sizeof (cn->cn_handle->zfs_name));
+ }
+}
+
+/*
+ * Given a gathered changelist for the 'sharenfs' property, unshare all the
+ * datasets in the list.
+ */
+int
+changelist_unshare(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ int ret = 0;
+
+ if (clp->cl_prop != ZFS_PROP_SHARENFS)
+ return (0);
+
+ for (cn = uu_list_first(clp->cl_list); cn != NULL;
+ cn = uu_list_next(clp->cl_list, cn)) {
+ if (zfs_unshare_nfs(cn->cn_handle, NULL) != 0)
+ ret = -1;
+ }
+
+ return (ret);
+}
+
+/*
+ * Check if there is any child exported to a local zone in a given changelist.
+ * This information has already been recorded while gathering the changelist
+ * via changelist_gather().
+ */
+int
+changelist_haszonedchild(prop_changelist_t *clp)
+{
+ return (clp->cl_haszonedchild);
+}
+
+/*
+ * Remove a node from a gathered list.
+ */
+void
+changelist_remove(zfs_handle_t *zhp, prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+
+ for (cn = uu_list_first(clp->cl_list); cn != NULL;
+ cn = uu_list_next(clp->cl_list, cn)) {
+
+ if (strcmp(cn->cn_handle->zfs_name, zhp->zfs_name) == 0) {
+ uu_list_remove(clp->cl_list, cn);
+ zfs_close(cn->cn_handle);
+ free(cn);
+ return;
+ }
+ }
+}
+
+/*
+ * Release any memory associated with a changelist.
+ */
+void
+changelist_free(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ uu_list_walk_t *walk;
+
+ if (clp->cl_list) {
+ verify((walk = uu_list_walk_start(clp->cl_list,
+ UU_WALK_ROBUST)) != NULL);
+
+ while ((cn = uu_list_walk_next(walk)) != NULL) {
+
+ uu_list_remove(clp->cl_list, cn);
+
+ zfs_close(cn->cn_handle);
+ free(cn);
+ }
+
+ uu_list_walk_end(walk);
+
+ uu_list_destroy(clp->cl_list);
+ }
+ if (clp->cl_pool)
+ uu_list_pool_destroy(clp->cl_pool);
+
+ free(clp);
+}
+
+static int
+change_one(zfs_handle_t *zhp, void *data)
+{
+ prop_changelist_t *clp = data;
+ char property[ZFS_MAXPROPLEN];
+ char where[64];
+ prop_changenode_t *cn;
+ zfs_source_t sourcetype;
+
+ /*
+ * We only want to unmount/unshare those filesystems that may inherit
+ * from the target filesystem. If we find any filesystem with a
+ * locally set mountpoint, we ignore any children since changing the
+ * property will not affect them. If this is a rename, we iterate
+ * over all children regardless, since we need them unmounted in
+ * order to do the rename. Also, if this is a volume and we're doing
+ * a rename, then always add it to the changelist.
+ */
+
+ if (!(ZFS_IS_VOLUME(zhp) && clp->cl_realprop == ZFS_PROP_NAME) &&
+ zfs_prop_get(zhp, clp->cl_prop, property,
+ sizeof (property), &sourcetype, where, sizeof (where),
+ B_FALSE) != 0) {
+ zfs_close(zhp);
+ return (0);
+ }
+
+ if (clp->cl_alldependents || clp->cl_allchildren ||
+ sourcetype == ZFS_SRC_DEFAULT || sourcetype == ZFS_SRC_INHERITED) {
+ if ((cn = zfs_alloc(zfs_get_handle(zhp),
+ sizeof (prop_changenode_t))) == NULL) {
+ zfs_close(zhp);
+ return (-1);
+ }
+
+ cn->cn_handle = zhp;
+ cn->cn_mounted = zfs_is_mounted(zhp, NULL);
+ cn->cn_shared = zfs_is_shared(zhp);
+ cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+
+ /* Indicate if any child is exported to a local zone. */
+ if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
+ clp->cl_haszonedchild = B_TRUE;
+
+ uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
+
+ if (clp->cl_sorted) {
+ uu_list_index_t idx;
+
+ (void) uu_list_find(clp->cl_list, cn, NULL,
+ &idx);
+ uu_list_insert(clp->cl_list, cn, idx);
+ } else {
+ ASSERT(!clp->cl_alldependents);
+ verify(uu_list_insert_before(clp->cl_list,
+ uu_list_first(clp->cl_list), cn) == 0);
+ }
+
+ if (!clp->cl_alldependents)
+ return (zfs_iter_children(zhp, change_one, data));
+ } else {
+ zfs_close(zhp);
+ }
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+compare_mountpoints(const void *a, const void *b, void *unused)
+{
+ const prop_changenode_t *ca = a;
+ const prop_changenode_t *cb = b;
+
+ char mounta[MAXPATHLEN];
+ char mountb[MAXPATHLEN];
+
+ boolean_t hasmounta, hasmountb;
+
+ /*
+ * When unsharing or unmounting filesystems, we need to do it in
+ * mountpoint order. This allows the user to have a mountpoint
+ * hierarchy that is different from the dataset hierarchy, and still
+ * allow it to be changed. However, if either dataset doesn't have a
+ * mountpoint (because it is a volume or a snapshot), we place it at the
+ * end of the list, because it doesn't affect our change at all.
+ */
+ hasmounta = (zfs_prop_get(ca->cn_handle, ZFS_PROP_MOUNTPOINT, mounta,
+ sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
+ hasmountb = (zfs_prop_get(cb->cn_handle, ZFS_PROP_MOUNTPOINT, mountb,
+ sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
+
+ if (!hasmounta && hasmountb)
+ return (-1);
+ else if (hasmounta && !hasmountb)
+ return (1);
+ else if (!hasmounta && !hasmountb)
+ return (0);
+ else
+ return (strcmp(mountb, mounta));
+}
+
+/*
+ * Given a ZFS handle and a property, construct a complete list of datasets
+ * that need to be modified as part of this process. For anything but the
+ * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
+ * Otherwise, we iterate over all children and look for any datasets that
+ * inherit the property. For each such dataset, we add it to the list and
+ * mark whether it was shared beforehand.
+ */
+prop_changelist_t *
+changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
+{
+ prop_changelist_t *clp;
+ prop_changenode_t *cn;
+ zfs_handle_t *temp;
+ char property[ZFS_MAXPROPLEN];
+ uu_compare_fn_t *compare = NULL;
+
+ if ((clp = zfs_alloc(zhp->zfs_hdl, sizeof (prop_changelist_t))) == NULL)
+ return (NULL);
+
+ /*
+ * For mountpoint-related tasks, we want to sort everything by
+ * mountpoint, so that we mount and unmount them in the appropriate
+ * order, regardless of their position in the hierarchy.
+ */
+ if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
+ prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS) {
+ compare = compare_mountpoints;
+ clp->cl_sorted = B_TRUE;
+ }
+
+ clp->cl_pool = uu_list_pool_create("changelist_pool",
+ sizeof (prop_changenode_t),
+ offsetof(prop_changenode_t, cn_listnode),
+ compare, 0);
+ if (clp->cl_pool == NULL) {
+ assert(uu_error() == UU_ERROR_NO_MEMORY);
+ (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
+ changelist_free(clp);
+ return (NULL);
+ }
+
+ clp->cl_list = uu_list_create(clp->cl_pool, NULL,
+ clp->cl_sorted ? UU_LIST_SORTED : 0);
+ clp->cl_flags = flags;
+
+ if (clp->cl_list == NULL) {
+ assert(uu_error() == UU_ERROR_NO_MEMORY);
+ (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
+ changelist_free(clp);
+ return (NULL);
+ }
+
+ /*
+ * If this is a rename or the 'zoned' property, we pretend we're
+ * changing the mountpoint and flag it so we can catch all children in
+ * change_one().
+ *
+ * Flag cl_alldependents to catch all children plus the dependents
+ * (clones) that are not in the hierarchy.
+ */
+ if (prop == ZFS_PROP_NAME) {
+ clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+ clp->cl_alldependents = B_TRUE;
+ } else if (prop == ZFS_PROP_ZONED) {
+ clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+ clp->cl_allchildren = B_TRUE;
+ } else if (prop == ZFS_PROP_CANMOUNT) {
+ clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+ } else if (prop == ZFS_PROP_VOLSIZE) {
+ clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+ } else {
+ clp->cl_prop = prop;
+ }
+ clp->cl_realprop = prop;
+
+ if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
+ clp->cl_prop != ZFS_PROP_SHARENFS &&
+ clp->cl_prop != ZFS_PROP_SHAREISCSI)
+ return (clp);
+
+ if (clp->cl_alldependents) {
+ if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
+ changelist_free(clp);
+ return (NULL);
+ }
+ } else if (zfs_iter_children(zhp, change_one, clp) != 0) {
+ changelist_free(clp);
+ return (NULL);
+ }
+
+ /*
+ * We have to re-open ourselves because we auto-close all the handles
+ * and can't tell the difference.
+ */
+ if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
+ ZFS_TYPE_ANY)) == NULL) {
+ changelist_free(clp);
+ return (NULL);
+ }
+
+ /*
+ * Always add ourselves to the list. We add ourselves to the end so that
+ * we're the last to be unmounted.
+ */
+ if ((cn = zfs_alloc(zhp->zfs_hdl,
+ sizeof (prop_changenode_t))) == NULL) {
+ zfs_close(temp);
+ changelist_free(clp);
+ return (NULL);
+ }
+
+ cn->cn_handle = temp;
+ cn->cn_mounted = zfs_is_mounted(temp, NULL);
+ cn->cn_shared = zfs_is_shared(temp);
+ cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+
+ uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
+ if (clp->cl_sorted) {
+ uu_list_index_t idx;
+ (void) uu_list_find(clp->cl_list, cn, NULL, &idx);
+ uu_list_insert(clp->cl_list, cn, idx);
+ } else {
+ verify(uu_list_insert_after(clp->cl_list,
+ uu_list_last(clp->cl_list), cn) == 0);
+ }
+
+ /*
+ * If the property was previously 'legacy' or 'none', record this fact,
+ * as the behavior of changelist_postfix() will be different.
+ */
+ if (zfs_prop_get(zhp, prop, property, sizeof (property),
+ NULL, NULL, 0, B_FALSE) == 0 &&
+ (strcmp(property, "legacy") == 0 || strcmp(property, "none") == 0 ||
+ strcmp(property, "off") == 0))
+ clp->cl_waslegacy = B_TRUE;
+
+ return (clp);
+}
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_config.c b/contrib/opensolaris/lib/libzfs/common/libzfs_config.c
new file mode 100644
index 0000000..45e2920
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_config.c
@@ -0,0 +1,374 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * The pool configuration repository is stored in /etc/zfs/zpool.cache as a
+ * single packed nvlist. While it would be nice to just read in this
+ * file from userland, this wouldn't work from a local zone. So we have to have
+ * a zpool ioctl to return the complete configuration for all pools. In the
+ * global zone, this will be identical to reading the file and unpacking it in
+ * userland.
+ */
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <libintl.h>
+#include <libuutil.h>
+
+#include "libzfs_impl.h"
+
+typedef struct config_node {
+ char *cn_name;
+ nvlist_t *cn_config;
+ uu_avl_node_t cn_avl;
+} config_node_t;
+
+/* ARGSUSED */
+static int
+config_node_compare(const void *a, const void *b, void *unused)
+{
+ int ret;
+
+ const config_node_t *ca = (config_node_t *)a;
+ const config_node_t *cb = (config_node_t *)b;
+
+ ret = strcmp(ca->cn_name, cb->cn_name);
+
+ if (ret < 0)
+ return (-1);
+ else if (ret > 0)
+ return (1);
+ else
+ return (0);
+}
+
+void
+namespace_clear(libzfs_handle_t *hdl)
+{
+ if (hdl->libzfs_ns_avl) {
+ uu_avl_walk_t *walk;
+ config_node_t *cn;
+
+ if ((walk = uu_avl_walk_start(hdl->libzfs_ns_avl,
+ UU_WALK_ROBUST)) == NULL)
+ return;
+
+ while ((cn = uu_avl_walk_next(walk)) != NULL) {
+ uu_avl_remove(hdl->libzfs_ns_avl, cn);
+ nvlist_free(cn->cn_config);
+ free(cn->cn_name);
+ free(cn);
+ }
+
+ uu_avl_walk_end(walk);
+
+ uu_avl_destroy(hdl->libzfs_ns_avl);
+ hdl->libzfs_ns_avl = NULL;
+ }
+
+ if (hdl->libzfs_ns_avlpool) {
+ uu_avl_pool_destroy(hdl->libzfs_ns_avlpool);
+ hdl->libzfs_ns_avlpool = NULL;
+ }
+}
+
+/*
+ * Loads the pool namespace, or re-loads it if the cache has changed.
+ */
+static int
+namespace_reload(libzfs_handle_t *hdl)
+{
+ nvlist_t *config;
+ config_node_t *cn;
+ nvpair_t *elem;
+ zfs_cmd_t zc = { 0 };
+ uu_avl_walk_t *walk;
+
+ if (hdl->libzfs_ns_gen == 0) {
+ /*
+ * This is the first time we've accessed the configuration
+ * cache. Initialize the AVL tree and then fall through to the
+ * common code.
+ */
+ if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool",
+ sizeof (config_node_t),
+ offsetof(config_node_t, cn_avl),
+ config_node_compare, UU_DEFAULT)) == NULL)
+ return (no_memory(hdl));
+
+ if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool,
+ NULL, UU_DEFAULT)) == NULL)
+ return (no_memory(hdl));
+ }
+
+ if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+ return (-1);
+
+ for (;;) {
+ zc.zc_cookie = hdl->libzfs_ns_gen;
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
+ switch (errno) {
+ case EEXIST:
+ /*
+ * The namespace hasn't changed.
+ */
+ zcmd_free_nvlists(&zc);
+ return (0);
+
+ case ENOMEM:
+ if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ break;
+
+ default:
+ zcmd_free_nvlists(&zc);
+ return (zfs_standard_error(hdl, errno,
+ dgettext(TEXT_DOMAIN, "failed to read "
+ "pool configuration")));
+ }
+ } else {
+ hdl->libzfs_ns_gen = zc.zc_cookie;
+ break;
+ }
+ }
+
+ if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ /*
+ * Clear out any existing configuration information.
+ */
+ if ((walk = uu_avl_walk_start(hdl->libzfs_ns_avl,
+ UU_WALK_ROBUST)) == NULL) {
+ nvlist_free(config);
+ return (no_memory(hdl));
+ }
+
+ while ((cn = uu_avl_walk_next(walk)) != NULL) {
+ uu_avl_remove(hdl->libzfs_ns_avl, cn);
+ nvlist_free(cn->cn_config);
+ free(cn->cn_name);
+ free(cn);
+ }
+
+ uu_avl_walk_end(walk);
+
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
+ nvlist_t *child;
+ uu_avl_index_t where;
+
+ if ((cn = zfs_alloc(hdl, sizeof (config_node_t))) == NULL) {
+ nvlist_free(config);
+ return (-1);
+ }
+
+ if ((cn->cn_name = zfs_strdup(hdl,
+ nvpair_name(elem))) == NULL) {
+ free(cn);
+ nvlist_free(config);
+ return (-1);
+ }
+
+ verify(nvpair_value_nvlist(elem, &child) == 0);
+ if (nvlist_dup(child, &cn->cn_config, 0) != 0) {
+ free(cn->cn_name);
+ free(cn);
+ nvlist_free(config);
+ return (no_memory(hdl));
+ }
+ verify(uu_avl_find(hdl->libzfs_ns_avl, cn, NULL, &where)
+ == NULL);
+
+ uu_avl_insert(hdl->libzfs_ns_avl, cn, where);
+ }
+
+ nvlist_free(config);
+ return (0);
+}
+
+/*
+ * Retrieve the configuration for the given pool. The configuration is an
+ * nvlist
+ * describing the vdevs, as well as the statistics associated with each one.
+ */
+nvlist_t *
+zpool_get_config(zpool_handle_t *zhp, nvlist_t **oldconfig)
+{
+ if (oldconfig)
+ *oldconfig = zhp->zpool_old_config;
+ return (zhp->zpool_config);
+}
+
+/*
+ * Refresh the vdev statistics associated with the given pool. This is used in
+ * iostat to show configuration changes and determine the delta from the last
+ * time the function was called. This function can fail, in case the pool has
+ * been destroyed.
+ */
+int
+zpool_refresh_stats(zpool_handle_t *zhp, boolean_t *missing)
+{
+ zfs_cmd_t zc = { 0 };
+ int error;
+ nvlist_t *config;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ *missing = B_FALSE;
+ (void) strcpy(zc.zc_name, zhp->zpool_name);
+
+ if (zhp->zpool_config_size == 0)
+ zhp->zpool_config_size = 1 << 16;
+
+ if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size) != 0)
+ return (-1);
+
+ for (;;) {
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_STATS,
+ &zc) == 0) {
+ /*
+ * The real error is returned in the zc_cookie field.
+ */
+ error = zc.zc_cookie;
+ break;
+ }
+
+ if (errno == ENOMEM) {
+ if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ } else {
+ zcmd_free_nvlists(&zc);
+ if (errno == ENOENT || errno == EINVAL)
+ *missing = B_TRUE;
+ zhp->zpool_state = POOL_STATE_UNAVAIL;
+ return (0);
+ }
+ }
+
+ if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ zhp->zpool_config_size = zc.zc_nvlist_dst_size;
+
+ if (zhp->zpool_config != NULL) {
+ uint64_t oldtxg, newtxg;
+
+ verify(nvlist_lookup_uint64(zhp->zpool_config,
+ ZPOOL_CONFIG_POOL_TXG, &oldtxg) == 0);
+ verify(nvlist_lookup_uint64(config,
+ ZPOOL_CONFIG_POOL_TXG, &newtxg) == 0);
+
+ if (zhp->zpool_old_config != NULL)
+ nvlist_free(zhp->zpool_old_config);
+
+ if (oldtxg != newtxg) {
+ nvlist_free(zhp->zpool_config);
+ zhp->zpool_old_config = NULL;
+ } else {
+ zhp->zpool_old_config = zhp->zpool_config;
+ }
+ }
+
+ zhp->zpool_config = config;
+ if (error)
+ zhp->zpool_state = POOL_STATE_UNAVAIL;
+ else
+ zhp->zpool_state = POOL_STATE_ACTIVE;
+
+ return (0);
+}
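+
+/*
+ * A polling caller (an iostat-style loop, say) might use this as
+ * follows (sketch):
+ *
+ *	boolean_t missing;
+ *
+ *	if (zpool_refresh_stats(zhp, &missing) != 0)
+ *		return (-1);
+ *	if (missing)
+ *		return (0);
+ *
+ * where 'missing' being set means the pool has been destroyed out from
+ * under us.
+ */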
+
+/*
+ * Iterate over all pools in the system.
+ */
+int
+zpool_iter(libzfs_handle_t *hdl, zpool_iter_f func, void *data)
+{
+ config_node_t *cn;
+ zpool_handle_t *zhp;
+ int ret;
+
+ if (namespace_reload(hdl) != 0)
+ return (-1);
+
+ for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
+ cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
+
+ if (zpool_open_silent(hdl, cn->cn_name, &zhp) != 0)
+ return (-1);
+
+ if (zhp == NULL)
+ continue;
+
+ if ((ret = func(zhp, data)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * Iterate over root datasets, calling the given function for each. The zfs
+ * handle passed each time must be explicitly closed by the callback.
+ */
+int
+zfs_iter_root(libzfs_handle_t *hdl, zfs_iter_f func, void *data)
+{
+ config_node_t *cn;
+ zfs_handle_t *zhp;
+ int ret;
+
+ if (namespace_reload(hdl) != 0)
+ return (-1);
+
+ for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
+ cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
+
+ if ((zhp = make_dataset_handle(hdl, cn->cn_name)) == NULL)
+ continue;
+
+ if ((ret = func(zhp, data)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
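+
+/*
+ * A minimal zfs_iter_root() callback honoring the close requirement
+ * above (sketch):
+ *
+ *	static int
+ *	print_root(zfs_handle_t *zhp, void *data)
+ *	{
+ *		(void) printf("%s\n", zfs_get_name(zhp));
+ *		zfs_close(zhp);
+ *		return (0);
+ *	}
+ */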
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c b/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c
new file mode 100644
index 0000000..5bc6c7b
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c
@@ -0,0 +1,3753 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libintl.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <zone.h>
+#include <fcntl.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+#include <sys/mount.h>
+
+#include <sys/spa.h>
+#include <sys/zio.h>
+#include <sys/zap.h>
+#include <libzfs.h>
+
+#include "zfs_namecheck.h"
+#include "zfs_prop.h"
+#include "libzfs_impl.h"
+
+/*
+ * Given a single type (not a mask of types), return the type in a human
+ * readable form.
+ */
+const char *
+zfs_type_to_name(zfs_type_t type)
+{
+ switch (type) {
+ case ZFS_TYPE_FILESYSTEM:
+ return (dgettext(TEXT_DOMAIN, "filesystem"));
+ case ZFS_TYPE_SNAPSHOT:
+ return (dgettext(TEXT_DOMAIN, "snapshot"));
+ case ZFS_TYPE_VOLUME:
+ return (dgettext(TEXT_DOMAIN, "volume"));
+ }
+
+ return (NULL);
+}
+
+/*
+ * Given a path and mask of ZFS types, return a string describing this dataset.
+ * This is used when we fail to open a dataset and we cannot get an exact type.
+ * We guess what the type would have been based on the path and the mask of
+ * acceptable types.
+ */
+static const char *
+path_to_str(const char *path, int types)
+{
+ /*
+ * When given a single type, always report the exact type.
+ */
+ if (types == ZFS_TYPE_SNAPSHOT)
+ return (dgettext(TEXT_DOMAIN, "snapshot"));
+ if (types == ZFS_TYPE_FILESYSTEM)
+ return (dgettext(TEXT_DOMAIN, "filesystem"));
+ if (types == ZFS_TYPE_VOLUME)
+ return (dgettext(TEXT_DOMAIN, "volume"));
+
+ /*
+ * The user is requesting more than one type of dataset. If this is the
+ * case, consult the path itself. If we're looking for a snapshot, and
+ * a '@' is found, then report it as "snapshot". Otherwise, remove the
+ * snapshot attribute and try again.
+ */
+ if (types & ZFS_TYPE_SNAPSHOT) {
+ if (strchr(path, '@') != NULL)
+ return (dgettext(TEXT_DOMAIN, "snapshot"));
+ return (path_to_str(path, types & ~ZFS_TYPE_SNAPSHOT));
+ }
+
+ /*
+ * The user has requested either filesystems or volumes.
+ * We have no way of knowing a priori what type this would be, so always
+ * report it as "filesystem" or "volume", our two primitive types.
+ */
+ if (types & ZFS_TYPE_FILESYSTEM)
+ return (dgettext(TEXT_DOMAIN, "filesystem"));
+
+ assert(types & ZFS_TYPE_VOLUME);
+ return (dgettext(TEXT_DOMAIN, "volume"));
+}
+
+/*
+ * Validate a ZFS path. This is used even before trying to open the dataset, to
+ * provide a more meaningful error message, recording on the handle exactly
+ * why the name was not valid.
+ */
+static int
+zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type)
+{
+ namecheck_err_t why;
+ char what;
+
+ if (dataset_namecheck(path, &why, &what) != 0) {
+ if (hdl != NULL) {
+ switch (why) {
+ case NAME_ERR_TOOLONG:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "name is too long"));
+ break;
+
+ case NAME_ERR_LEADING_SLASH:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "leading slash in name"));
+ break;
+
+ case NAME_ERR_EMPTY_COMPONENT:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "empty component in name"));
+ break;
+
+ case NAME_ERR_TRAILING_SLASH:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "trailing slash in name"));
+ break;
+
+ case NAME_ERR_INVALCHAR:
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "invalid character "
+ "'%c' in name"), what);
+ break;
+
+ case NAME_ERR_MULTIPLE_AT:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "multiple '@' delimiters in name"));
+ break;
+
+ case NAME_ERR_NOLETTER:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "pool doesn't begin with a letter"));
+ break;
+
+ case NAME_ERR_RESERVED:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "name is reserved"));
+ break;
+
+ case NAME_ERR_DISKLIKE:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "reserved disk name"));
+ break;
+ }
+ }
+
+ return (0);
+ }
+
+ if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
+ if (hdl != NULL)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "snapshot delimiter '@' in filesystem name"));
+ return (0);
+ }
+
+ if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
+ if (hdl != NULL)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "missing '@' delimiter in snapshot name"));
+ return (0);
+ }
+
+ return (-1);
+}
+
+int
+zfs_name_valid(const char *name, zfs_type_t type)
+{
+ return (zfs_validate_name(NULL, name, type));
+}
+
+/*
+ * This function takes the raw DSL properties, and filters out the user-defined
+ * properties into a separate nvlist.
+ */
+static int
+process_user_props(zfs_handle_t *zhp)
+{
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ nvpair_t *elem;
+ nvlist_t *propval;
+
+ nvlist_free(zhp->zfs_user_props);
+
+ if (nvlist_alloc(&zhp->zfs_user_props, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(hdl));
+
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(zhp->zfs_props, elem)) != NULL) {
+ if (!zfs_prop_user(nvpair_name(elem)))
+ continue;
+
+ verify(nvpair_value_nvlist(elem, &propval) == 0);
+ if (nvlist_add_nvlist(zhp->zfs_user_props,
+ nvpair_name(elem), propval) != 0)
+ return (no_memory(hdl));
+ }
+
+ return (0);
+}
+
+/*
+ * Utility function to gather stats (objset and zpl) for the given object.
+ */
+static int
+get_stats(zfs_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+ return (-1);
+
+ while (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
+ if (errno == ENOMEM) {
+ if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ } else {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ }
+
+ zhp->zfs_dmustats = zc.zc_objset_stats; /* structure assignment */
+
+ (void) strlcpy(zhp->zfs_root, zc.zc_value, sizeof (zhp->zfs_root));
+
+ if (zhp->zfs_props) {
+ nvlist_free(zhp->zfs_props);
+ zhp->zfs_props = NULL;
+ }
+
+ if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zfs_props) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ if (process_user_props(zhp) != 0)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * Refresh the properties currently stored in the handle.
+ */
+void
+zfs_refresh_properties(zfs_handle_t *zhp)
+{
+ (void) get_stats(zhp);
+}
+
+/*
+ * Makes a handle from the given dataset name. Used by zfs_open() and
+ * zfs_iter_* to create child handles on the fly.
+ */
+zfs_handle_t *
+make_dataset_handle(libzfs_handle_t *hdl, const char *path)
+{
+ zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
+
+ if (zhp == NULL)
+ return (NULL);
+
+ zhp->zfs_hdl = hdl;
+
+top:
+ (void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
+
+ if (get_stats(zhp) != 0) {
+ free(zhp);
+ return (NULL);
+ }
+
+ if (zhp->zfs_dmustats.dds_inconsistent) {
+ zfs_cmd_t zc = { 0 };
+
+ /*
+ * If it is dds_inconsistent, then we've caught it in
+ * the middle of a 'zfs receive' or 'zfs destroy', and
+ * it is inconsistent from the ZPL's point of view, so
+ * can't be mounted. However, it could also be that we
+ * have crashed in the middle of one of those
+ * operations, in which case we need to get rid of the
+ * inconsistent state. We do that by either rolling
+ * back to the previous snapshot (which will fail if
+ * there is none), or destroying the filesystem. Note
+ * that if we are still in the middle of an active
+ * 'receive' or 'destroy', then the rollback and destroy
+ * will fail with EBUSY and we will drive on as usual.
+ */
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) {
+ (void) zvol_remove_link(hdl, zhp->zfs_name);
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ } else {
+ zc.zc_objset_type = DMU_OST_ZFS;
+ }
+
+ /* If we can successfully roll it back, reget the stats */
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_ROLLBACK, &zc) == 0)
+ goto top;
+ /*
+ * If we can successfully destroy it, pretend that it
+ * never existed.
+ */
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc) == 0) {
+ free(zhp);
+ errno = ENOENT;
+ return (NULL);
+ }
+ }
+
+ /*
+ * We've managed to open the dataset and gather statistics. Determine
+ * the high-level type.
+ */
+ if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
+ zhp->zfs_head_type = ZFS_TYPE_VOLUME;
+ else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
+ zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
+ else
+ abort();
+
+ if (zhp->zfs_dmustats.dds_is_snapshot)
+ zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
+ else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
+ zhp->zfs_type = ZFS_TYPE_VOLUME;
+ else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
+ zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
+ else
+ abort(); /* we should never see any other types */
+
+ return (zhp);
+}
+
+/*
+ * Opens the given snapshot, filesystem, or volume. The 'types'
+ * argument is a mask of acceptable types. The function will print an
+ * appropriate error message and return NULL if it can't be opened.
+ */
+zfs_handle_t *
+zfs_open(libzfs_handle_t *hdl, const char *path, int types)
+{
+ zfs_handle_t *zhp;
+ char errbuf[1024];
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
+
+ /*
+ * Validate the name before we even try to open it.
+ */
+ if (!zfs_validate_name(hdl, path, ZFS_TYPE_ANY)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid dataset name"));
+ (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
+ return (NULL);
+ }
+
+ /*
+ * Try to get stats for the dataset, which will tell us if it exists.
+ */
+ errno = 0;
+ if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
+ (void) zfs_standard_error(hdl, errno, errbuf);
+ return (NULL);
+ }
+
+ if (!(types & zhp->zfs_type)) {
+ (void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
+ zfs_close(zhp);
+ return (NULL);
+ }
+
+ return (zhp);
+}
+
+/*
+ * Release a ZFS handle. Nothing to do but free the associated memory.
+ */
+void
+zfs_close(zfs_handle_t *zhp)
+{
+ if (zhp->zfs_mntopts)
+ free(zhp->zfs_mntopts);
+ nvlist_free(zhp->zfs_props);
+ nvlist_free(zhp->zfs_user_props);
+ free(zhp);
+}
+
+/*
+ * Given a numeric suffix, convert the value into a number of bits that the
+ * resulting value must be shifted.
+ */
+static int
+str2shift(libzfs_handle_t *hdl, const char *buf)
+{
+ const char *ends = "BKMGTPEZ";
+ int i;
+
+ if (buf[0] == '\0')
+ return (0);
+ for (i = 0; i < strlen(ends); i++) {
+ if (toupper(buf[0]) == ends[i])
+ break;
+ }
+ if (i == strlen(ends)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid numeric suffix '%s'"), buf);
+ return (-1);
+ }
+
+ /*
+ * We want to allow trailing 'b' characters for 'GB' or 'Mb'. But don't
+ * allow 'BB' - that's just weird.
+ */
+ if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0' &&
+ toupper(buf[0]) != 'B'))
+ return (10*i);
+
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid numeric suffix '%s'"), buf);
+ return (-1);
+}
+
+/*
+ * Convert a string of the form '100G' into a real number. Used when setting
+ * properties or creating a volume. On failure, an extended error message
+ * is recorded in the handle for the caller to use.
+ */
+static int
+nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
+{
+ char *end;
+ int shift;
+
+ *num = 0;
+
+ /* Check to see if this looks like a number. */
+ if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "bad numeric value '%s'"), value);
+ return (-1);
+ }
+
+ /* Rely on strtoll() to process the numeric portion. */
+ errno = 0;
+ *num = strtoll(value, &end, 10);
+
+ /*
+ * Check for ERANGE, which indicates that the value is too large to fit
+ * in a 64-bit value.
+ */
+ if (errno == ERANGE) {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "numeric value is too large"));
+ return (-1);
+ }
+
+ /*
+ * If we have a decimal value, then do the computation with floating
+ * point arithmetic. Otherwise, use standard arithmetic.
+ */
+ if (*end == '.') {
+ double fval = strtod(value, &end);
+
+ if ((shift = str2shift(hdl, end)) == -1)
+ return (-1);
+
+ fval *= pow(2, shift);
+
+ if (fval > UINT64_MAX) {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "numeric value is too large"));
+ return (-1);
+ }
+
+ *num = (uint64_t)fval;
+ } else {
+ if ((shift = str2shift(hdl, end)) == -1)
+ return (-1);
+
+ /* Check for overflow */
+ if (shift >= 64 || (*num << shift) >> shift != *num) {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "numeric value is too large"));
+ return (-1);
+ }
+
+ *num <<= shift;
+ }
+
+ return (0);
+}
+
+int
+zfs_nicestrtonum(libzfs_handle_t *hdl, const char *str, uint64_t *val)
+{
+ return (nicestrtonum(hdl, str, val));
+}
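+
+/*
+ * Worked example: for "1.5G", strtoll() stops at the '.', the value is
+ * re-parsed with strtod() as 1.5, str2shift() maps 'G' (index 3 in
+ * "BKMGTPEZ") to a shift of 30, and the result is 1.5 * 2^30 =
+ * 1610612736. A plain "100G" takes the integer path: 100 << 30.
+ */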
+
+/*
+ * The prop_parse_*() functions are designed to allow flexibility in callers
+ * when setting properties. At the DSL layer, all properties are either 64-bit
+ * numbers or strings. We want the user to be able to ignore this fact and
+ * specify properties as native values (boolean, for example) or as strings (to
+ * simplify command line utilities). This also handles converting index types
+ * (compression, checksum, etc) from strings to their on-disk index.
+ */
+
+static int
+prop_parse_boolean(libzfs_handle_t *hdl, nvpair_t *elem, uint64_t *val)
+{
+ uint64_t ret;
+
+ switch (nvpair_type(elem)) {
+ case DATA_TYPE_STRING:
+ {
+ char *value;
+ verify(nvpair_value_string(elem, &value) == 0);
+
+ if (strcmp(value, "on") == 0) {
+ ret = 1;
+ } else if (strcmp(value, "off") == 0) {
+ ret = 0;
+ } else {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' must be 'on' or 'off'"),
+ nvpair_name(elem));
+ return (-1);
+ }
+ break;
+ }
+
+ case DATA_TYPE_UINT64:
+ {
+ verify(nvpair_value_uint64(elem, &ret) == 0);
+ if (ret > 1) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a boolean value"),
+ nvpair_name(elem));
+ return (-1);
+ }
+ break;
+ }
+
+ case DATA_TYPE_BOOLEAN_VALUE:
+ {
+ boolean_t value;
+ verify(nvpair_value_boolean_value(elem, &value) == 0);
+ ret = value;
+ break;
+ }
+
+ default:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a boolean value"),
+ nvpair_name(elem));
+ return (-1);
+ }
+
+ *val = ret;
+ return (0);
+}
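+
+/*
+ * For example (illustrative only), a boolean property may arrive as the
+ * string "on", as the uint64 value 1, or as a native boolean_t; all of
+ * these normalize to the numeric value 1.
+ */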
+
+static int
+prop_parse_number(libzfs_handle_t *hdl, nvpair_t *elem, zfs_prop_t prop,
+ uint64_t *val)
+{
+ uint64_t ret;
+ boolean_t isnone = B_FALSE;
+
+ switch (nvpair_type(elem)) {
+ case DATA_TYPE_STRING:
+ {
+ char *value;
+ (void) nvpair_value_string(elem, &value);
+ if (strcmp(value, "none") == 0) {
+ isnone = B_TRUE;
+ ret = 0;
+ } else if (nicestrtonum(hdl, value, &ret) != 0) {
+ return (-1);
+ }
+ break;
+ }
+
+ case DATA_TYPE_UINT64:
+ (void) nvpair_value_uint64(elem, &ret);
+ break;
+
+ default:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a number"),
+ nvpair_name(elem));
+ return (-1);
+ }
+
+ /*
+ * Quota special: force 'none' and don't allow 0.
+ */
+ if (ret == 0 && !isnone && prop == ZFS_PROP_QUOTA) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "use 'none' to disable quota"));
+ return (-1);
+ }
+
+ *val = ret;
+ return (0);
+}
+
+static int
+prop_parse_index(libzfs_handle_t *hdl, nvpair_t *elem, zfs_prop_t prop,
+ uint64_t *val)
+{
+ char *propname = nvpair_name(elem);
+ char *value;
+
+ if (nvpair_type(elem) != DATA_TYPE_STRING) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a string"), propname);
+ return (-1);
+ }
+
+ (void) nvpair_value_string(elem, &value);
+
+ if (zfs_prop_string_to_index(prop, value, val) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be one of '%s'"), propname,
+ zfs_prop_values(prop));
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Check that the bootfs name carries the same pool name as the pool it is
+ * being set on. Assumes 'bootfs' is a valid dataset name.
+ */
+static boolean_t
+bootfs_poolname_valid(char *pool, char *bootfs)
+{
+ char ch, *pname;
+
+ /* get the pool name from the bootfs name */
+ pname = bootfs;
+ while (*bootfs && !isspace(*bootfs) && *bootfs != '/')
+ bootfs++;
+
+ ch = *bootfs;
+ *bootfs = 0;
+
+ if (strcmp(pool, pname) == 0) {
+ *bootfs = ch;
+ return (B_TRUE);
+ }
+
+ *bootfs = ch;
+ return (B_FALSE);
+}
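+
+/*
+ * For example (with hypothetical pool names), bootfs_poolname_valid("tank",
+ * "tank/boot/root") returns B_TRUE, while bootfs_poolname_valid("tank",
+ * "dozer/boot") returns B_FALSE.
+ */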
+
+/*
+ * Given an nvlist of properties to set, validates that they are correct, and
+ * parses any numeric properties (index, boolean, etc) if they are specified as
+ * strings.
+ */
+nvlist_t *
+zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
+ nvlist_t *nvl, uint64_t zoned, zfs_handle_t *zhp, const char *errbuf)
+{
+ nvpair_t *elem;
+ const char *propname;
+ zfs_prop_t prop;
+ uint64_t intval;
+ char *strval;
+ nvlist_t *ret;
+ int isuser;
+
+ if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
+ (void) no_memory(hdl);
+ return (NULL);
+ }
+
+ if (type == ZFS_TYPE_SNAPSHOT) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "snapshot properties cannot be modified"));
+ (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
+ goto error;
+ }
+
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
+ propname = nvpair_name(elem);
+
+ /*
+ * Make sure this property is valid and applies to this type.
+ */
+ if ((prop = zfs_name_to_prop_common(propname, type))
+ == ZFS_PROP_INVAL) {
+ isuser = zfs_prop_user(propname);
+ if (!isuser || (type & ZFS_TYPE_POOL)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid property '%s'"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ } else {
+ /*
+ * If this is a user property, make sure it's a
+ * string, and that it's less than
+ * ZAP_MAXNAMELEN.
+ */
+ if (nvpair_type(elem) != DATA_TYPE_STRING) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a string"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP,
+ errbuf);
+ goto error;
+ }
+
+ if (strlen(nvpair_name(elem)) >=
+ ZAP_MAXNAMELEN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property name '%s' is too long"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP,
+ errbuf);
+ goto error;
+ }
+ }
+
+ (void) nvpair_value_string(elem, &strval);
+ if (nvlist_add_string(ret, propname, strval) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+ continue;
+ }
+
+ /*
+ * Normalize the name, to get rid of shorthand abbreviations.
+ */
+ propname = zfs_prop_to_name(prop);
+
+ if (!zfs_prop_valid_for_type(prop, type)) {
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "'%s' does not "
+ "apply to datasets of this type"), propname);
+ (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
+ goto error;
+ }
+
+ if (zfs_prop_readonly(prop) &&
+ (prop != ZFS_PROP_VOLBLOCKSIZE || zhp != NULL)) {
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "'%s' is readonly"),
+ propname);
+ (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
+ goto error;
+ }
+
+ /*
+ * Convert any properties to the internal DSL value types.
+ */
+ strval = NULL;
+ switch (zfs_prop_get_type(prop)) {
+ case prop_type_boolean:
+ if (prop_parse_boolean(hdl, elem, &intval) != 0) {
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+ break;
+
+ case prop_type_string:
+ if (nvpair_type(elem) != DATA_TYPE_STRING) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a string"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+ (void) nvpair_value_string(elem, &strval);
+ if (strlen(strval) >= ZFS_MAXPROPLEN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' is too long"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+ break;
+
+ case prop_type_number:
+ if (prop_parse_number(hdl, elem, prop, &intval) != 0) {
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+ break;
+
+ case prop_type_index:
+ if (prop_parse_index(hdl, elem, prop, &intval) != 0) {
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+ break;
+
+ default:
+ abort();
+ }
+
+ /*
+ * Add the result to our return set of properties.
+ */
+ if (strval) {
+ if (nvlist_add_string(ret, propname, strval) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+ } else if (nvlist_add_uint64(ret, propname, intval) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+
+ /*
+ * Perform some additional checks for specific properties.
+ */
+ switch (prop) {
+ case ZFS_PROP_RECORDSIZE:
+ case ZFS_PROP_VOLBLOCKSIZE:
+ /* must be power of two within SPA_{MIN,MAX}BLOCKSIZE */
+ if (intval < SPA_MINBLOCKSIZE ||
+ intval > SPA_MAXBLOCKSIZE || !ISP2(intval)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be power of 2 from %u "
+ "to %uk"), propname,
+ (uint_t)SPA_MINBLOCKSIZE,
+ (uint_t)SPA_MAXBLOCKSIZE >> 10);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+ break;
+
+ case ZFS_PROP_SHAREISCSI:
+ if (strcmp(strval, "off") != 0 &&
+ strcmp(strval, "on") != 0 &&
+ strcmp(strval, "type=disk") != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be 'on', 'off', or 'type=disk'"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ break;
+
+ case ZFS_PROP_MOUNTPOINT:
+ if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
+ strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
+ break;
+
+ if (strval[0] != '/') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be an absolute path, "
+ "'none', or 'legacy'"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+ /*FALLTHRU*/
+
+ case ZFS_PROP_SHARENFS:
+ /*
+ * For the mountpoint and sharenfs properties, check if
+ * it can be set in a global/non-global zone based on
+ * the zoned property value:
+ *
+ * global zone non-global zone
+ * --------------------------------------------------
+ * zoned=on mountpoint (no) mountpoint (yes)
+ * sharenfs (no) sharenfs (no)
+ *
+ * zoned=off mountpoint (yes) N/A
+ * sharenfs (yes)
+ */
+ if (zoned) {
+ if (getzoneid() == GLOBAL_ZONEID) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' cannot be set on "
+ "dataset in a non-global zone"),
+ propname);
+ (void) zfs_error(hdl, EZFS_ZONED,
+ errbuf);
+ goto error;
+ } else if (prop == ZFS_PROP_SHARENFS) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' cannot be set in "
+ "a non-global zone"), propname);
+ (void) zfs_error(hdl, EZFS_ZONED,
+ errbuf);
+ goto error;
+ }
+ } else if (getzoneid() != GLOBAL_ZONEID) {
+ /*
+ * If the zoned property is 'off', this must be
+ * the global zone. If not, something is wrong.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' cannot be set while dataset "
+ "'zoned' property is set"), propname);
+ (void) zfs_error(hdl, EZFS_ZONED, errbuf);
+ goto error;
+ }
+
+ break;
+
+ case ZFS_PROP_BOOTFS:
+ /*
+ * The bootfs property value has to be a dataset name,
+ * and the dataset has to be in the same pool that the
+ * property is being set on.
+ */
+ if (strval[0] != '\0' && (!zfs_name_valid(strval,
+ ZFS_TYPE_FILESYSTEM) || !bootfs_poolname_valid(
+ pool_name, strval))) {
+
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
+ "is an invalid name"), strval);
+ (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
+ goto error;
+ }
+ break;
+ }
+
+ /*
+ * For changes to existing volumes, we have some additional
+ * checks to enforce.
+ */
+ if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
+ uint64_t volsize = zfs_prop_get_int(zhp,
+ ZFS_PROP_VOLSIZE);
+ uint64_t blocksize = zfs_prop_get_int(zhp,
+ ZFS_PROP_VOLBLOCKSIZE);
+ char buf[64];
+
+ switch (prop) {
+ case ZFS_PROP_RESERVATION:
+ if (intval > volsize) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' is greater than current "
+ "volume size"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP,
+ errbuf);
+ goto error;
+ }
+ break;
+
+ case ZFS_PROP_VOLSIZE:
+ if (intval % blocksize != 0) {
+ zfs_nicenum(blocksize, buf,
+ sizeof (buf));
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a multiple of "
+ "volume block size (%s)"),
+ propname, buf);
+ (void) zfs_error(hdl, EZFS_BADPROP,
+ errbuf);
+ goto error;
+ }
+
+ if (intval == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' cannot be zero"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP,
+ errbuf);
+ goto error;
+ }
+ break;
+ }
+ }
+ }
+
+ /*
+ * If this is an existing volume, and someone is setting the volsize,
+ * make sure that it matches the reservation, or add it if necessary.
+ */
+ if (zhp != NULL && type == ZFS_TYPE_VOLUME &&
+ nvlist_lookup_uint64(ret, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
+ &intval) == 0) {
+ uint64_t old_volsize = zfs_prop_get_int(zhp,
+ ZFS_PROP_VOLSIZE);
+ uint64_t old_reservation = zfs_prop_get_int(zhp,
+ ZFS_PROP_RESERVATION);
+ uint64_t new_reservation;
+
+ if (old_volsize == old_reservation &&
+ nvlist_lookup_uint64(ret,
+ zfs_prop_to_name(ZFS_PROP_RESERVATION),
+ &new_reservation) != 0) {
+ if (nvlist_add_uint64(ret,
+ zfs_prop_to_name(ZFS_PROP_RESERVATION),
+ intval) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+ }
+ }
+
+ return (ret);
+
+error:
+ nvlist_free(ret);
+ return (NULL);
+}
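+
+/*
+ * Usage sketch (illustrative only): callers build an nvlist of raw,
+ * user-supplied values and let zfs_validate_properties() normalize it,
+ * exactly as zfs_prop_set() does below.
+ *
+ *	nvlist_t *nvl, *real;
+ *
+ *	verify(nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0);
+ *	verify(nvlist_add_string(nvl, "compression", "on") == 0);
+ *	real = zfs_validate_properties(hdl, ZFS_TYPE_FILESYSTEM, NULL,
+ *	    nvl, 0, NULL, "cannot set properties");
+ *	nvlist_free(nvl);
+ *	if (real == NULL)
+ *		return (-1);
+ */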
+
+/*
+ * Given a property name and value, set the property for the given dataset.
+ */
+int
+zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret = -1;
+ prop_changelist_t *cl = NULL;
+ char errbuf[1024];
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ nvlist_t *nvl = NULL, *realprops;
+ zfs_prop_t prop;
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
+ zhp->zfs_name);
+
+ if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
+ nvlist_add_string(nvl, propname, propval) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+
+ if ((realprops = zfs_validate_properties(hdl, zhp->zfs_type, NULL, nvl,
+ zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, errbuf)) == NULL)
+ goto error;
+ nvlist_free(nvl);
+ nvl = realprops;
+
+ prop = zfs_name_to_prop(propname);
+
+ /* We don't support these properties on FreeBSD. */
+ switch (prop) {
+ case ZFS_PROP_SHAREISCSI:
+ case ZFS_PROP_DEVICES:
+ case ZFS_PROP_ACLMODE:
+ case ZFS_PROP_ACLINHERIT:
+ case ZFS_PROP_ISCSIOPTIONS:
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+     "property '%s' not supported on FreeBSD"), propname);
+ ret = zfs_error(hdl, EZFS_PERM, errbuf);
+ goto error;
+ }
+
+ if ((cl = changelist_gather(zhp, prop, 0)) == NULL)
+ goto error;
+
+ if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "child dataset with inherited mountpoint is used "
+ "in a non-global zone"));
+ ret = zfs_error(hdl, EZFS_ZONED, errbuf);
+ goto error;
+ }
+
+ if ((ret = changelist_prefix(cl)) != 0)
+ goto error;
+
+ /*
+ * Execute the corresponding ioctl() to set this property.
+ */
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (zcmd_write_src_nvlist(hdl, &zc, nvl, NULL) != 0)
+ goto error;
+
+ ret = ioctl(hdl->libzfs_fd, ZFS_IOC_SET_PROP, &zc);
+
+ if (ret != 0) {
+ switch (errno) {
+
+ case ENOSPC:
+ /*
+ * For quotas and reservations, ENOSPC indicates
+ * something different; setting a quota or reservation
+ * doesn't use any disk space.
+ */
+ switch (prop) {
+ case ZFS_PROP_QUOTA:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "size is less than current used or "
+ "reserved space"));
+ (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
+ break;
+
+ case ZFS_PROP_RESERVATION:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "size is greater than available space"));
+ (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
+ break;
+
+ default:
+ (void) zfs_standard_error(hdl, errno, errbuf);
+ break;
+ }
+ break;
+
+ case EBUSY:
+ if (prop == ZFS_PROP_VOLBLOCKSIZE)
+ (void) zfs_error(hdl, EZFS_VOLHASDATA, errbuf);
+ else
+ (void) zfs_standard_error(hdl, EBUSY, errbuf);
+ break;
+
+ case EROFS:
+ (void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
+ break;
+
+ case ENOTSUP:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "pool must be upgraded to allow gzip compression"));
+ (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
+ break;
+
+ case EOVERFLOW:
+ /*
+ * This platform can't address a volume this big.
+ */
+#ifdef _ILP32
+ if (prop == ZFS_PROP_VOLSIZE) {
+ (void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
+ break;
+ }
+#endif
+ /* FALLTHROUGH */
+ default:
+ (void) zfs_standard_error(hdl, errno, errbuf);
+ }
+ } else {
+ /*
+ * Refresh the statistics so the new property value
+ * is reflected.
+ */
+ if ((ret = changelist_postfix(cl)) == 0)
+ (void) get_stats(zhp);
+ }
+
+error:
+ nvlist_free(nvl);
+ zcmd_free_nvlists(&zc);
+ if (cl)
+ changelist_free(cl);
+ return (ret);
+}
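+
+/*
+ * Example (illustrative only): setting a quota from a string value. On
+ * failure an extended error message has already been recorded in the
+ * handle by the code above.
+ *
+ *	if (zfs_prop_set(zhp, "quota", "10G") != 0)
+ *		return (-1);
+ */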
+
+/*
+ * Given a property, inherit the value from the parent dataset.
+ */
+int
+zfs_prop_inherit(zfs_handle_t *zhp, const char *propname)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret;
+ prop_changelist_t *cl;
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ char errbuf[1024];
+ zfs_prop_t prop;
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot inherit %s for '%s'"), propname, zhp->zfs_name);
+
+ if ((prop = zfs_name_to_prop(propname)) == ZFS_PROP_INVAL) {
+ /*
+ * For user properties, the amount of work we have to do is very
+ * small, so just do it here.
+ */
+ if (!zfs_prop_user(propname)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid property"));
+ return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+ }
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
+
+ if (ioctl(zhp->zfs_hdl->libzfs_fd,
+ ZFS_IOC_SET_PROP, &zc) != 0)
+ return (zfs_standard_error(hdl, errno, errbuf));
+
+ return (0);
+ }
+
+ /*
+ * Verify that this property is inheritable.
+ */
+ if (zfs_prop_readonly(prop))
+ return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));
+
+ if (!zfs_prop_inheritable(prop))
+ return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));
+
+ /*
+ * Check to see if the value applies to this type
+ */
+ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+ return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));
+
+ /*
+ * Normalize the name, to get rid of shorthand abbreviations.
+ */
+ propname = zfs_prop_to_name(prop);
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
+
+ if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
+ zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dataset is used in a non-global zone"));
+ return (zfs_error(hdl, EZFS_ZONED, errbuf));
+ }
+
+ /*
+ * Determine datasets which will be affected by this change, if any.
+ */
+ if ((cl = changelist_gather(zhp, prop, 0)) == NULL)
+ return (-1);
+
+ if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "child dataset with inherited mountpoint is used "
+ "in a non-global zone"));
+ ret = zfs_error(hdl, EZFS_ZONED, errbuf);
+ goto error;
+ }
+
+ if ((ret = changelist_prefix(cl)) != 0)
+ goto error;
+
+ if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd,
+ ZFS_IOC_SET_PROP, &zc)) != 0) {
+ ret = zfs_standard_error(hdl, errno, errbuf);
+ goto error;
+ } else {
+
+ if ((ret = changelist_postfix(cl)) != 0)
+ goto error;
+
+ /*
+ * Refresh the statistics so the new property is reflected.
+ */
+ (void) get_stats(zhp);
+ }
+
+error:
+ changelist_free(cl);
+ return (ret);
+}
+
+void
+nicebool(int value, char *buf, size_t buflen)
+{
+ if (value)
+ (void) strlcpy(buf, "on", buflen);
+ else
+ (void) strlcpy(buf, "off", buflen);
+}
+
+/*
+ * True DSL properties are stored in an nvlist. The following two functions
+ * extract them appropriately.
+ */
+static uint64_t
+getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
+{
+ nvlist_t *nv;
+ uint64_t value;
+
+ *source = NULL;
+ if (nvlist_lookup_nvlist(zhp->zfs_props,
+ zfs_prop_to_name(prop), &nv) == 0) {
+ verify(nvlist_lookup_uint64(nv, ZFS_PROP_VALUE, &value) == 0);
+ (void) nvlist_lookup_string(nv, ZFS_PROP_SOURCE, source);
+ } else {
+ value = zfs_prop_default_numeric(prop);
+ *source = "";
+ }
+
+ return (value);
+}
+
+static char *
+getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
+{
+ nvlist_t *nv;
+ char *value;
+
+ *source = NULL;
+ if (nvlist_lookup_nvlist(zhp->zfs_props,
+ zfs_prop_to_name(prop), &nv) == 0) {
+ verify(nvlist_lookup_string(nv, ZFS_PROP_VALUE, &value) == 0);
+ (void) nvlist_lookup_string(nv, ZFS_PROP_SOURCE, source);
+ } else {
+ if ((value = (char *)zfs_prop_default_string(prop)) == NULL)
+ value = "";
+ *source = "";
+ }
+
+ return (value);
+}
+
+/*
+ * Internal function for getting a numeric property. Both zfs_prop_get() and
+ * zfs_prop_get_int() are built using this interface.
+ *
+ * Certain properties can be overridden using 'mount -o'. In this case, scan
+ * the contents of the /etc/mnttab entry, searching for the appropriate options.
+ * If they differ from the on-disk values, report the current values and mark
+ * the source "temporary".
+ */
+static int
+get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zfs_source_t *src,
+ char **source, uint64_t *val)
+{
+ struct mnttab mnt;
+ char *mntopt_on = NULL;
+ char *mntopt_off = NULL;
+
+ *source = NULL;
+
+ switch (prop) {
+ case ZFS_PROP_ATIME:
+ mntopt_on = MNTOPT_ATIME;
+ mntopt_off = MNTOPT_NOATIME;
+ break;
+
+ case ZFS_PROP_DEVICES:
+ mntopt_on = MNTOPT_DEVICES;
+ mntopt_off = MNTOPT_NODEVICES;
+ break;
+
+ case ZFS_PROP_EXEC:
+ mntopt_on = MNTOPT_EXEC;
+ mntopt_off = MNTOPT_NOEXEC;
+ break;
+
+ case ZFS_PROP_READONLY:
+ mntopt_on = MNTOPT_RO;
+ mntopt_off = MNTOPT_RW;
+ break;
+
+ case ZFS_PROP_SETUID:
+ mntopt_on = MNTOPT_SETUID;
+ mntopt_off = MNTOPT_NOSETUID;
+ break;
+
+ case ZFS_PROP_XATTR:
+ mntopt_on = MNTOPT_XATTR;
+ mntopt_off = MNTOPT_NOXATTR;
+ break;
+ }
+
+ /*
+ * Because looking up the mount options is potentially expensive
+ * (iterating over all of /etc/mnttab), we defer its calculation until
+ * we're looking up a property which requires its presence.
+ */
+ if (!zhp->zfs_mntcheck &&
+ (mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
+ struct mnttab entry, search = { 0 };
+ FILE *mnttab = zhp->zfs_hdl->libzfs_mnttab;
+
+ search.mnt_special = (char *)zhp->zfs_name;
+ search.mnt_fstype = MNTTYPE_ZFS;
+ rewind(mnttab);
+
+ if (getmntany(mnttab, &entry, &search) == 0) {
+ zhp->zfs_mntopts = zfs_strdup(zhp->zfs_hdl,
+ entry.mnt_mntopts);
+ if (zhp->zfs_mntopts == NULL)
+ return (-1);
+ }
+
+ zhp->zfs_mntcheck = B_TRUE;
+ }
+
+ if (zhp->zfs_mntopts == NULL)
+ mnt.mnt_mntopts = "";
+ else
+ mnt.mnt_mntopts = zhp->zfs_mntopts;
+
+ switch (prop) {
+ case ZFS_PROP_ATIME:
+ case ZFS_PROP_DEVICES:
+ case ZFS_PROP_EXEC:
+ case ZFS_PROP_READONLY:
+ case ZFS_PROP_SETUID:
+ case ZFS_PROP_XATTR:
+ *val = getprop_uint64(zhp, prop, source);
+
+ if (hasmntopt(&mnt, mntopt_on) && !*val) {
+ *val = B_TRUE;
+ if (src)
+ *src = ZFS_SRC_TEMPORARY;
+ } else if (hasmntopt(&mnt, mntopt_off) && *val) {
+ *val = B_FALSE;
+ if (src)
+ *src = ZFS_SRC_TEMPORARY;
+ }
+ break;
+
+ case ZFS_PROP_RECORDSIZE:
+ case ZFS_PROP_COMPRESSION:
+ case ZFS_PROP_ZONED:
+ case ZFS_PROP_CREATION:
+ case ZFS_PROP_COMPRESSRATIO:
+ case ZFS_PROP_REFERENCED:
+ case ZFS_PROP_USED:
+ case ZFS_PROP_CREATETXG:
+ case ZFS_PROP_AVAILABLE:
+ case ZFS_PROP_VOLSIZE:
+ case ZFS_PROP_VOLBLOCKSIZE:
+ *val = getprop_uint64(zhp, prop, source);
+ break;
+
+ case ZFS_PROP_CANMOUNT:
+ *val = getprop_uint64(zhp, prop, source);
+ if (*val == 0)
+ *source = zhp->zfs_name;
+ else
+ *source = ""; /* default */
+ break;
+
+ case ZFS_PROP_QUOTA:
+ case ZFS_PROP_RESERVATION:
+ *val = getprop_uint64(zhp, prop, source);
+ if (*val == 0)
+ *source = ""; /* default */
+ else
+ *source = zhp->zfs_name;
+ break;
+
+ case ZFS_PROP_MOUNTED:
+ *val = (zhp->zfs_mntopts != NULL);
+ break;
+
+ case ZFS_PROP_NUMCLONES:
+ *val = zhp->zfs_dmustats.dds_num_clones;
+ break;
+
+ default:
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "cannot get non-numeric property"));
+ return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
+ dgettext(TEXT_DOMAIN, "internal error")));
+ }
+
+ return (0);
+}
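+
+/*
+ * For example, a filesystem with atime=on that is currently mounted with
+ * "mount -o noatime" is reported by the code above as atime=off, with a
+ * source of ZFS_SRC_TEMPORARY.
+ */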
+
+/*
+ * Calculate the source type, given the raw source string.
+ */
+static void
+get_source(zfs_handle_t *zhp, zfs_source_t *srctype, char *source,
+ char *statbuf, size_t statlen)
+{
+ if (statbuf == NULL || *srctype == ZFS_SRC_TEMPORARY)
+ return;
+
+ if (source == NULL) {
+ *srctype = ZFS_SRC_NONE;
+ } else if (source[0] == '\0') {
+ *srctype = ZFS_SRC_DEFAULT;
+ } else {
+ if (strcmp(source, zhp->zfs_name) == 0) {
+ *srctype = ZFS_SRC_LOCAL;
+ } else {
+ (void) strlcpy(statbuf, source, statlen);
+ *srctype = ZFS_SRC_INHERITED;
+ }
+ }
+}
+
+/*
+ * Retrieve a property from the given object. If 'literal' is specified, then
+ * numbers are left as exact values. Otherwise, numbers are converted to a
+ * human-readable form.
+ *
+ * Returns 0 on success, or -1 on error.
+ */
+int
+zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
+ zfs_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
+{
+ char *source = NULL;
+ uint64_t val;
+ char *str;
+ const char *root;
+ const char *strval;
+
+ /*
+ * Check to see if this property applies to our object
+ */
+ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+ return (-1);
+
+ if (src)
+ *src = ZFS_SRC_NONE;
+
+ switch (prop) {
+ case ZFS_PROP_ATIME:
+ case ZFS_PROP_READONLY:
+ case ZFS_PROP_SETUID:
+ case ZFS_PROP_ZONED:
+ case ZFS_PROP_DEVICES:
+ case ZFS_PROP_EXEC:
+ case ZFS_PROP_CANMOUNT:
+ case ZFS_PROP_XATTR:
+ /*
+ * Basic boolean values are built on top of
+ * get_numeric_property().
+ */
+ if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
+ return (-1);
+ nicebool(val, propbuf, proplen);
+
+ break;
+
+ case ZFS_PROP_AVAILABLE:
+ case ZFS_PROP_RECORDSIZE:
+ case ZFS_PROP_CREATETXG:
+ case ZFS_PROP_REFERENCED:
+ case ZFS_PROP_USED:
+ case ZFS_PROP_VOLSIZE:
+ case ZFS_PROP_VOLBLOCKSIZE:
+ case ZFS_PROP_NUMCLONES:
+ /*
+ * Basic numeric values are built on top of
+ * get_numeric_property().
+ */
+ if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
+ return (-1);
+ if (literal)
+ (void) snprintf(propbuf, proplen, "%llu",
+ (u_longlong_t)val);
+ else
+ zfs_nicenum(val, propbuf, proplen);
+ break;
+
+ case ZFS_PROP_COMPRESSION:
+ case ZFS_PROP_CHECKSUM:
+ case ZFS_PROP_SNAPDIR:
+#ifndef ZFS_NO_ACL
+ case ZFS_PROP_ACLMODE:
+ case ZFS_PROP_ACLINHERIT:
+#endif
+ case ZFS_PROP_COPIES:
+ val = getprop_uint64(zhp, prop, &source);
+ verify(zfs_prop_index_to_string(prop, val, &strval) == 0);
+ (void) strlcpy(propbuf, strval, proplen);
+ break;
+#ifdef ZFS_NO_ACL
+ case ZFS_PROP_ACLMODE:
+ case ZFS_PROP_ACLINHERIT:
+ (void) strlcpy(propbuf, "<unsupported>", proplen);
+ break;
+#endif /* ZFS_NO_ACL */
+
+ case ZFS_PROP_CREATION:
+ /*
+ * 'creation' is a time_t stored in the statistics. We convert
+ * this into a string unless 'literal' is specified.
+ */
+ {
+ struct tm t;
+ time_t time;
+
+ val = getprop_uint64(zhp, prop, &source);
+ time = (time_t)val;
+
+ if (literal ||
+ localtime_r(&time, &t) == NULL ||
+ strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
+ &t) == 0)
+ (void) snprintf(propbuf, proplen, "%llu",
+     (u_longlong_t)val);
+ }
+ break;
+
+ case ZFS_PROP_MOUNTPOINT:
+ /*
+ * Getting the precise mountpoint can be tricky.
+ *
+ * - for 'none' or 'legacy', return those values.
+ * - for default mountpoints, construct it as /zfs/<dataset>
+ * - for inherited mountpoints, we want to take everything
+ * after our ancestor and append it to the inherited value.
+ *
+ * If the pool has an alternate root, we want to prepend that
+ * root to any values we return.
+ */
+ root = zhp->zfs_root;
+ str = getprop_string(zhp, prop, &source);
+
+ if (str[0] == '\0') {
+ (void) snprintf(propbuf, proplen, "%s/zfs/%s",
+ root, zhp->zfs_name);
+ } else if (str[0] == '/') {
+ const char *relpath = zhp->zfs_name + strlen(source);
+
+ if (relpath[0] == '/')
+ relpath++;
+ if (str[1] == '\0')
+ str++;
+
+ if (relpath[0] == '\0')
+ (void) snprintf(propbuf, proplen, "%s%s",
+ root, str);
+ else
+ (void) snprintf(propbuf, proplen, "%s%s%s%s",
+ root, str, relpath[0] == '@' ? "" : "/",
+ relpath);
+ } else {
+ /* 'legacy' or 'none' */
+ (void) strlcpy(propbuf, str, proplen);
+ }
+
+ break;
+
+ case ZFS_PROP_SHARENFS:
+ case ZFS_PROP_SHAREISCSI:
+ case ZFS_PROP_ISCSIOPTIONS:
+ (void) strlcpy(propbuf, getprop_string(zhp, prop, &source),
+ proplen);
+ break;
+
+ case ZFS_PROP_ORIGIN:
+ (void) strlcpy(propbuf, getprop_string(zhp, prop, &source),
+ proplen);
+ /*
+ * If there is no parent at all, return failure to indicate that
+ * it doesn't apply to this dataset.
+ */
+ if (propbuf[0] == '\0')
+ return (-1);
+ break;
+
+ case ZFS_PROP_QUOTA:
+ case ZFS_PROP_RESERVATION:
+ if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
+ return (-1);
+
+ /*
+ * If quota or reservation is 0, we translate this into 'none'
+ * (unless literal is set), and indicate that it's the default
+ * value. Otherwise, we print the number nicely and indicate
+ * that its set locally.
+ */
+ if (val == 0) {
+ if (literal)
+ (void) strlcpy(propbuf, "0", proplen);
+ else
+ (void) strlcpy(propbuf, "none", proplen);
+ } else {
+ if (literal)
+ (void) snprintf(propbuf, proplen, "%llu",
+ (u_longlong_t)val);
+ else
+ zfs_nicenum(val, propbuf, proplen);
+ }
+ break;
+
+ case ZFS_PROP_COMPRESSRATIO:
+ if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
+ return (-1);
+ (void) snprintf(propbuf, proplen, "%lld.%02lldx", (longlong_t)
+ val / 100, (longlong_t)val % 100);
+ break;
+
+ case ZFS_PROP_TYPE:
+ switch (zhp->zfs_type) {
+ case ZFS_TYPE_FILESYSTEM:
+ str = "filesystem";
+ break;
+ case ZFS_TYPE_VOLUME:
+ str = "volume";
+ break;
+ case ZFS_TYPE_SNAPSHOT:
+ str = "snapshot";
+ break;
+ default:
+ abort();
+ }
+ (void) snprintf(propbuf, proplen, "%s", str);
+ break;
+
+ case ZFS_PROP_MOUNTED:
+ /*
+ * The 'mounted' property is a pseudo-property that describes
+ * whether the filesystem is currently mounted. Even though
+ * it's a boolean value, the typical values of "on" and "off"
+ * don't make sense, so we translate to "yes" and "no".
+ */
+ if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
+ src, &source, &val) != 0)
+ return (-1);
+ if (val)
+ (void) strlcpy(propbuf, "yes", proplen);
+ else
+ (void) strlcpy(propbuf, "no", proplen);
+ break;
+
+ case ZFS_PROP_NAME:
+ /*
+ * The 'name' property is a pseudo-property derived from the
+ * dataset name. It is presented as a real property to simplify
+ * consumers.
+ */
+ (void) strlcpy(propbuf, zhp->zfs_name, proplen);
+ break;
+
+ default:
+ abort();
+ }
+
+ get_source(zhp, src, source, statbuf, statlen);
+
+ return (0);
+}
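+
+/*
+ * Usage sketch (illustrative only): fetch a human-readable mountpoint.
+ *
+ *	char buf[ZFS_MAXPROPLEN];
+ *
+ *	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, sizeof (buf),
+ *	    NULL, NULL, 0, B_FALSE) == 0)
+ *		(void) printf("mountpoint: %s\n", buf);
+ */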
+
+/*
+ * Utility function to get the given numeric property. Does no validation that
+ * the given property is the appropriate type; should only be used with
+ * hard-coded property types.
+ */
+uint64_t
+zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
+{
+ char *source;
+ zfs_source_t sourcetype = ZFS_SRC_NONE;
+ uint64_t val;
+
+ (void) get_numeric_property(zhp, prop, &sourcetype, &source, &val);
+
+ return (val);
+}
+
+/*
+ * Similar to zfs_prop_get(), but returns the value as an integer.
+ */
+int
+zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
+ zfs_source_t *src, char *statbuf, size_t statlen)
+{
+ char *source;
+
+ /*
+ * Check to see if this property applies to our object
+ */
+ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+ return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
+ dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
+ zfs_prop_to_name(prop)));
+
+ if (src)
+ *src = ZFS_SRC_NONE;
+
+ if (get_numeric_property(zhp, prop, src, &source, value) != 0)
+ return (-1);
+
+ get_source(zhp, src, source, statbuf, statlen);
+
+ return (0);
+}
+
+/*
+ * Returns the name of the given zfs handle.
+ */
+const char *
+zfs_get_name(const zfs_handle_t *zhp)
+{
+ return (zhp->zfs_name);
+}
+
+/*
+ * Returns the type of the given zfs handle.
+ */
+zfs_type_t
+zfs_get_type(const zfs_handle_t *zhp)
+{
+ return (zhp->zfs_type);
+}
+
+/*
+ * Iterate over all child filesystems
+ */
+int
+zfs_iter_filesystems(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+ zfs_cmd_t zc = { 0 };
+ zfs_handle_t *nzhp;
+ int ret;
+
+ for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0;
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name))) {
+ /*
+ * Ignore private dataset names.
+ */
+ if (dataset_name_hidden(zc.zc_name))
+ continue;
+
+ /*
+ * Silently ignore errors, as the only plausible explanation is
+ * that the pool has since been removed.
+ */
+ if ((nzhp = make_dataset_handle(zhp->zfs_hdl,
+ zc.zc_name)) == NULL)
+ continue;
+
+ if ((ret = func(nzhp, data)) != 0)
+ return (ret);
+ }
+
+ /*
+ * An errno value of ESRCH indicates normal completion. If ENOENT is
+ * returned, then the underlying dataset has been removed since we
+ * obtained the handle.
+ */
+ if (errno != ESRCH && errno != ENOENT)
+ return (zfs_standard_error(zhp->zfs_hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot iterate filesystems")));
+
+ return (0);
+}
+
+/*
+ * Iterate over all snapshots
+ */
+int
+zfs_iter_snapshots(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+ zfs_cmd_t zc = { 0 };
+ zfs_handle_t *nzhp;
+ int ret;
+
+ for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT,
+ &zc) == 0;
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name))) {
+
+ if ((nzhp = make_dataset_handle(zhp->zfs_hdl,
+ zc.zc_name)) == NULL)
+ continue;
+
+ if ((ret = func(nzhp, data)) != 0)
+ return (ret);
+ }
+
+ /*
+ * An errno value of ESRCH indicates normal completion. If ENOENT is
+ * returned, then the underlying dataset has been removed since we
+ * obtained the handle. Silently ignore this case, and return success.
+ */
+ if (errno != ESRCH && errno != ENOENT)
+ return (zfs_standard_error(zhp->zfs_hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot iterate filesystems")));
+
+ return (0);
+}
+
+/*
+ * Iterate over all children, snapshots and filesystems
+ */
+int
+zfs_iter_children(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+ int ret;
+
+ if ((ret = zfs_iter_filesystems(zhp, func, data)) != 0)
+ return (ret);
+
+ return (zfs_iter_snapshots(zhp, func, data));
+}
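+
+/*
+ * Usage sketch (illustrative only): the iterators above hand each child
+ * handle to 'func', which is expected to close it when done, as the
+ * callbacks elsewhere in this file do.
+ *
+ *	static int
+ *	print_cb(zfs_handle_t *nzhp, void *data)
+ *	{
+ *		(void) printf("%s\n", zfs_get_name(nzhp));
+ *		zfs_close(nzhp);
+ *		return (0);
+ *	}
+ *
+ *	(void) zfs_iter_children(zhp, print_cb, NULL);
+ */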
+
+/*
+ * Given a complete name, store just the portion that refers to the parent in
+ * 'buf'. Returns -1 if there is no parent (i.e. this is a pool).
+ */
+static int
+parent_name(const char *path, char *buf, size_t buflen)
+{
+ char *loc;
+
+ if ((loc = strrchr(path, '/')) == NULL)
+ return (-1);
+
+ if ((size_t)(loc - path) >= buflen)
+ return (-1);
+ (void) strncpy(buf, path, loc - path);
+ buf[loc - path] = '\0';
+
+ return (0);
+}
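+
+/*
+ * For example (hypothetical names), parent_name("tank/home/user", buf,
+ * buflen) stores "tank/home" in 'buf', while parent_name("tank", ...)
+ * returns -1 because a pool has no parent.
+ */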
+
+/*
+ * Checks to make sure that the given path has a parent, and that it exists. We
+ * also fetch the 'zoned' property, which is used to validate property settings
+ * when creating new datasets.
+ */
+static int
+check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned)
+{
+ zfs_cmd_t zc = { 0 };
+ char parent[ZFS_MAXNAMELEN];
+ char *slash;
+ zfs_handle_t *zhp;
+ char errbuf[1024];
+
+ (void) snprintf(errbuf, sizeof (errbuf), "cannot create '%s'",
+ path);
+
+ /* get parent, and check to see if this is just a pool */
+ if (parent_name(path, parent, sizeof (parent)) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "missing dataset name"));
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+ }
+
+ /* check to see if the pool exists */
+ if ((slash = strchr(parent, '/')) == NULL)
+ slash = parent + strlen(parent);
+ (void) strncpy(zc.zc_name, parent, slash - parent);
+ zc.zc_name[slash - parent] = '\0';
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
+ errno == ENOENT) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no such pool '%s'"), zc.zc_name);
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+ }
+
+ /* check to see if the parent dataset exists */
+ if ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
+ switch (errno) {
+ case ENOENT:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "parent does not exist"));
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+
+ default:
+ return (zfs_standard_error(hdl, errno, errbuf));
+ }
+ }
+
+ *zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+ /* we are in a non-global zone, but parent is in the global zone */
+ if (getzoneid() != GLOBAL_ZONEID && !(*zoned)) {
+ (void) zfs_standard_error(hdl, EPERM, errbuf);
+ zfs_close(zhp);
+ return (-1);
+ }
+
+ /* make sure parent is a filesystem */
+ if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "parent is not a filesystem"));
+ (void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
+ zfs_close(zhp);
+ return (-1);
+ }
+
+ zfs_close(zhp);
+ return (0);
+}
+
+/*
+ * Create a new filesystem or volume.
+ */
+int
+zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
+ nvlist_t *props)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret;
+ uint64_t size = 0;
+ uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
+ char errbuf[1024];
+ uint64_t zoned;
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot create '%s'"), path);
+
+ /* validate the path, taking care to note the extended error message */
+ if (!zfs_validate_name(hdl, path, type))
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+ /* validate parents exist */
+ if (check_parents(hdl, path, &zoned) != 0)
+ return (-1);
+
+ /*
+ * The failure modes when creating a dataset of a different type over
+ * one that already exists is a little strange. In particular, if you
+ * try to create a dataset on top of an existing dataset, the ioctl()
+ * will return ENOENT, not EEXIST. To prevent this from happening, we
+ * first try to see if the dataset exists.
+ */
+ (void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name));
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dataset already exists"));
+ return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+ }
+
+ if (type == ZFS_TYPE_VOLUME)
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
+ if (props && (props = zfs_validate_properties(hdl, type, NULL, props,
+ zoned, NULL, errbuf)) == NULL)
+ return (-1);
+
+ if (type == ZFS_TYPE_VOLUME) {
+ /*
+ * If we are creating a volume, the size and block size must
+ * satisfy a few restraints. First, the blocksize must be a
+ * valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the
+ * volsize must be a multiple of the block size, and cannot be
+ * zero.
+ */
+ if (props == NULL || nvlist_lookup_uint64(props,
+ zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
+ nvlist_free(props);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "missing volume size"));
+ return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+ }
+
+ if ((ret = nvlist_lookup_uint64(props,
+ zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
+ &blocksize)) != 0) {
+ if (ret == ENOENT) {
+ blocksize = zfs_prop_default_numeric(
+ ZFS_PROP_VOLBLOCKSIZE);
+ } else {
+ nvlist_free(props);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "missing volume block size"));
+ return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+ }
+ }
+
+ if (size == 0) {
+ nvlist_free(props);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "volume size cannot be zero"));
+ return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+ }
+
+ if (size % blocksize != 0) {
+ nvlist_free(props);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "volume size must be a multiple of volume block "
+ "size"));
+ return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+ }
+ }
+
+ if (props &&
+     zcmd_write_src_nvlist(hdl, &zc, props, NULL) != 0) {
+ nvlist_free(props);
+ return (-1);
+ }
+ nvlist_free(props);
+
+ /* create the dataset */
+ ret = ioctl(hdl->libzfs_fd, ZFS_IOC_CREATE, &zc);
+
+ if (ret == 0 && type == ZFS_TYPE_VOLUME) {
+ ret = zvol_create_link(hdl, path);
+ if (ret) {
+ (void) zfs_standard_error(hdl, errno,
+ dgettext(TEXT_DOMAIN,
+ "Volume successfully created, but device links "
+ "were not created"));
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ /* check for failure */
+ if (ret != 0) {
+ char parent[ZFS_MAXNAMELEN];
+ (void) parent_name(path, parent, sizeof (parent));
+
+ switch (errno) {
+ case ENOENT:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no such parent '%s'"), parent);
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+
+ case EINVAL:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "parent '%s' is not a filesystem"), parent);
+ return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+
+ case EDOM:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "volume block size must be power of 2 from "
+ "%u to %uk"),
+ (uint_t)SPA_MINBLOCKSIZE,
+ (uint_t)SPA_MAXBLOCKSIZE >> 10);
+
+ return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+
+#ifdef _ILP32
+ case EOVERFLOW:
+ /*
+ * This platform can't address a volume this big.
+ */
+ if (type == ZFS_TYPE_VOLUME)
+ return (zfs_error(hdl, EZFS_VOLTOOBIG,
+ errbuf));
+#endif
+ /* FALLTHROUGH */
+ default:
+ return (zfs_standard_error(hdl, errno, errbuf));
+ }
+ }
+
+ return (0);
+}
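+
+/*
+ * Usage sketch (illustrative only): create a 10GB volume. The string value
+ * is converted by zfs_validate_properties() above; the caller remains
+ * responsible for freeing its own nvlist.
+ *
+ *	nvlist_t *props;
+ *	int err;
+ *
+ *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
+ *	verify(nvlist_add_string(props,
+ *	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), "10G") == 0);
+ *	err = zfs_create(hdl, "tank/vol", ZFS_TYPE_VOLUME, props);
+ *	nvlist_free(props);
+ */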
+
+/*
+ * Destroys the given dataset. The caller must make sure that the filesystem
+ * isn't mounted, and that there are no active dependents.
+ */
+int
+zfs_destroy(zfs_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (ZFS_IS_VOLUME(zhp)) {
+ /*
+ * Unconditionally unshare this zvol ignoring failure as it
+ * indicates only that the volume wasn't shared initially.
+ */
+ (void) zfs_unshare_iscsi(zhp);
+
+ if (zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name) != 0)
+ return (-1);
+
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ } else {
+ zc.zc_objset_type = DMU_OST_ZFS;
+ }
+
+ if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc) != 0) {
+ return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
+ zhp->zfs_name));
+ }
+
+ remove_mountpoint(zhp);
+
+ return (0);
+}
+
+struct destroydata {
+ char *snapname;
+ boolean_t gotone;
+ boolean_t closezhp;
+};
+
+static int
+zfs_remove_link_cb(zfs_handle_t *zhp, void *arg)
+{
+ struct destroydata *dd = arg;
+ zfs_handle_t *szhp;
+ char name[ZFS_MAXNAMELEN];
+ boolean_t closezhp = dd->closezhp;
+ int rv;
+
+ (void) strlcpy(name, zhp->zfs_name, sizeof (name));
+ (void) strlcat(name, "@", sizeof (name));
+ (void) strlcat(name, dd->snapname, sizeof (name));
+
+ szhp = make_dataset_handle(zhp->zfs_hdl, name);
+ if (szhp) {
+ dd->gotone = B_TRUE;
+ zfs_close(szhp);
+ }
+
+ if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
+ (void) zvol_remove_link(zhp->zfs_hdl, name);
+ /*
+ * NB: this is simply a best-effort. We don't want to
+ * return an error, because then we wouldn't visit all
+ * the volumes.
+ */
+ }
+
+ dd->closezhp = B_TRUE;
+ rv = zfs_iter_filesystems(zhp, zfs_remove_link_cb, arg);
+ if (closezhp)
+ zfs_close(zhp);
+ return (rv);
+}
+
+/*
+ * Destroys all snapshots with the given name in zhp and its descendants.
+ */
+int
+zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret;
+ struct destroydata dd = { 0 };
+
+ dd.snapname = snapname;
+ (void) zfs_remove_link_cb(zhp, &dd);
+
+ if (!dd.gotone) {
+ return (zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
+ dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
+ zhp->zfs_name, snapname));
+ }
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
+
+ ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DESTROY_SNAPS, &zc);
+ if (ret != 0) {
+ char errbuf[1024];
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s@%s'"), zc.zc_name, snapname);
+
+ switch (errno) {
+ case EEXIST:
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "snapshot is cloned"));
+ return (zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf));
+
+ default:
+ return (zfs_standard_error(zhp->zfs_hdl, errno,
+ errbuf));
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Clones the given dataset. The target must be of the same type as the source.
+ */
+int
+zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
+{
+ zfs_cmd_t zc = { 0 };
+ char parent[ZFS_MAXNAMELEN];
+ int ret;
+ char errbuf[1024];
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ zfs_type_t type;
+ uint64_t zoned;
+
+ assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot create '%s'"), target);
+
+ /* validate the target name */
+ if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM))
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+ /* validate parents exist */
+ if (check_parents(hdl, target, &zoned) != 0)
+ return (-1);
+
+ (void) parent_name(target, parent, sizeof (parent));
+
+ /* do the clone */
+ if (ZFS_IS_VOLUME(zhp)) {
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ type = ZFS_TYPE_VOLUME;
+ } else {
+ zc.zc_objset_type = DMU_OST_ZFS;
+ type = ZFS_TYPE_FILESYSTEM;
+ }
+
+ if (props) {
+ if ((props = zfs_validate_properties(hdl, type, NULL, props,
+ zoned, zhp, errbuf)) == NULL)
+ return (-1);
+
+ if (zcmd_write_src_nvlist(hdl, &zc, props, NULL) != 0) {
+ nvlist_free(props);
+ return (-1);
+ }
+
+ nvlist_free(props);
+ }
+
+ (void) strlcpy(zc.zc_name, target, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, zhp->zfs_name, sizeof (zc.zc_value));
+ ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_CREATE, &zc);
+
+ zcmd_free_nvlists(&zc);
+
+ if (ret != 0) {
+ switch (errno) {
+
+ case ENOENT:
+ /*
+ * The parent doesn't exist. We should have caught this
+ * above, but there may be a race condition that has since
+ * destroyed the parent.
+ *
+ * At this point, we don't know whether it's the source
+ * that doesn't exist anymore, or whether the target
+ * dataset doesn't exist.
+ */
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "no such parent '%s'"), parent);
+ return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
+
+ case EXDEV:
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "source and target pools differ"));
+ return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
+ errbuf));
+
+ default:
+ return (zfs_standard_error(zhp->zfs_hdl, errno,
+ errbuf));
+ }
+ } else if (ZFS_IS_VOLUME(zhp)) {
+ ret = zvol_create_link(zhp->zfs_hdl, target);
+ }
+
+ return (ret);
+}
+
+typedef struct promote_data {
+ char cb_mountpoint[MAXPATHLEN];
+ const char *cb_target;
+ const char *cb_errbuf;
+ uint64_t cb_pivot_txg;
+} promote_data_t;
+
+static int
+promote_snap_cb(zfs_handle_t *zhp, void *data)
+{
+ promote_data_t *pd = data;
+ zfs_handle_t *szhp;
+ char snapname[MAXPATHLEN];
+ int rv = 0;
+
+ /* We don't care about snapshots after the pivot point */
+ if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > pd->cb_pivot_txg) {
+ zfs_close(zhp);
+ return (0);
+ }
+
+ /* Remove the device link if it's a zvol. */
+ if (ZFS_IS_VOLUME(zhp))
+ (void) zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name);
+
+ /* Check for conflicting names */
+ (void) strlcpy(snapname, pd->cb_target, sizeof (snapname));
+ (void) strlcat(snapname, strchr(zhp->zfs_name, '@'), sizeof (snapname));
+ szhp = make_dataset_handle(zhp->zfs_hdl, snapname);
+ if (szhp != NULL) {
+ zfs_close(szhp);
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "snapshot name '%s' from origin \n"
+ "conflicts with '%s' from target"),
+ zhp->zfs_name, snapname);
+ rv = zfs_error(zhp->zfs_hdl, EZFS_EXISTS, pd->cb_errbuf);
+ }
+ zfs_close(zhp);
+ return (rv);
+}
+
+static int
+promote_snap_done_cb(zfs_handle_t *zhp, void *data)
+{
+ promote_data_t *pd = data;
+
+ /* We don't care about snapshots after the pivot point */
+ if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) <= pd->cb_pivot_txg) {
+ /* Create the device link if it's a zvol. */
+ if (ZFS_IS_VOLUME(zhp))
+ (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
+ }
+
+ zfs_close(zhp);
+ return (0);
+}
+
+/*
+ * Promotes the given clone fs to be the clone parent.
+ */
+int
+zfs_promote(zfs_handle_t *zhp)
+{
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ zfs_cmd_t zc = { 0 };
+ char parent[MAXPATHLEN];
+ char *cp;
+ int ret;
+ zfs_handle_t *pzhp;
+ promote_data_t pd;
+ char errbuf[1024];
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot promote '%s'"), zhp->zfs_name);
+
+ if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "snapshots can not be promoted"));
+ return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+ }
+
+ (void) strlcpy(parent, zhp->zfs_dmustats.dds_clone_of, sizeof (parent));
+ if (parent[0] == '\0') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "not a cloned filesystem"));
+ return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+ }
+ cp = strchr(parent, '@');
+ *cp = '\0';
+
+ /* Walk the snapshots we will be moving */
+ pzhp = zfs_open(hdl, zhp->zfs_dmustats.dds_clone_of, ZFS_TYPE_SNAPSHOT);
+ if (pzhp == NULL)
+ return (-1);
+ pd.cb_pivot_txg = zfs_prop_get_int(pzhp, ZFS_PROP_CREATETXG);
+ zfs_close(pzhp);
+ pd.cb_target = zhp->zfs_name;
+ pd.cb_errbuf = errbuf;
+ pzhp = zfs_open(hdl, parent, ZFS_TYPE_ANY);
+ if (pzhp == NULL)
+ return (-1);
+ (void) zfs_prop_get(pzhp, ZFS_PROP_MOUNTPOINT, pd.cb_mountpoint,
+ sizeof (pd.cb_mountpoint), NULL, NULL, 0, B_FALSE);
+ ret = zfs_iter_snapshots(pzhp, promote_snap_cb, &pd);
+ if (ret != 0) {
+ zfs_close(pzhp);
+ return (-1);
+ }
+
+ /* issue the ioctl */
+ (void) strlcpy(zc.zc_value, zhp->zfs_dmustats.dds_clone_of,
+ sizeof (zc.zc_value));
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ ret = ioctl(hdl->libzfs_fd, ZFS_IOC_PROMOTE, &zc);
+
+ if (ret != 0) {
+ int save_errno = errno;
+
+ (void) zfs_iter_snapshots(pzhp, promote_snap_done_cb, &pd);
+ zfs_close(pzhp);
+
+ switch (save_errno) {
+ case EEXIST:
+ /*
+ * There is a conflicting snapshot name. We
+ * should have caught this above, but they could
+ * have renamed something in the mean time.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "conflicting snapshot name from parent '%s'"),
+ parent);
+ return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+
+ default:
+ return (zfs_standard_error(hdl, save_errno, errbuf));
+ }
+ } else {
+ (void) zfs_iter_snapshots(zhp, promote_snap_done_cb, &pd);
+ }
+
+ zfs_close(pzhp);
+ return (ret);
+}
+
+static int
+zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
+{
+ char *snapname = arg;
+ int ret;
+
+ if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
+ char name[MAXPATHLEN];
+
+ (void) strlcpy(name, zhp->zfs_name, sizeof (name));
+ (void) strlcat(name, "@", sizeof (name));
+ (void) strlcat(name, snapname, sizeof (name));
+ (void) zvol_create_link(zhp->zfs_hdl, name);
+ /*
+ * NB: this is simply a best-effort. We don't want to
+ * return an error, because then we wouldn't visit all
+ * the volumes.
+ */
+ }
+
+ ret = zfs_iter_filesystems(zhp, zfs_create_link_cb, snapname);
+
+ zfs_close(zhp);
+
+ return (ret);
+}
+
+/*
+ * Takes a snapshot of the given dataset.
+ */
+int
+zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive)
+{
+ const char *delim;
+ char *parent;
+ zfs_handle_t *zhp;
+ zfs_cmd_t zc = { 0 };
+ int ret;
+ char errbuf[1024];
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot snapshot '%s'"), path);
+
+ /* validate the target name */
+ if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT))
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+ /* make sure the parent exists and is of the appropriate type */
+ delim = strchr(path, '@');
+ if ((parent = zfs_alloc(hdl, delim - path + 1)) == NULL)
+ return (-1);
+ (void) strncpy(parent, path, delim - path);
+ parent[delim - path] = '\0';
+
+ if ((zhp = zfs_open(hdl, parent, ZFS_TYPE_FILESYSTEM |
+ ZFS_TYPE_VOLUME)) == NULL) {
+ free(parent);
+ return (-1);
+ }
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, delim+1, sizeof (zc.zc_value));
+ zc.zc_cookie = recursive;
+ ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SNAPSHOT, &zc);
+
+ /*
+ * if it was recursive, the one that actually failed will be in
+ * zc.zc_name.
+ */
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value);
+ if (ret == 0 && recursive) {
+ (void) zfs_iter_filesystems(zhp,
+ zfs_create_link_cb, (char *)delim+1);
+ }
+ if (ret == 0 && zhp->zfs_type == ZFS_TYPE_VOLUME) {
+ ret = zvol_create_link(zhp->zfs_hdl, path);
+ if (ret != 0) {
+ (void) ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DESTROY,
+ &zc);
+ }
+ }
+
+ if (ret != 0)
+ (void) zfs_standard_error(hdl, errno, errbuf);
+
+ free(parent);
+ zfs_close(zhp);
+
+ return (ret);
+}
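+
+/*
+ * Example (illustrative only): snapshot a filesystem and all of its
+ * descendants in one call.
+ *
+ *	if (zfs_snapshot(hdl, "tank/home@backup", B_TRUE) != 0)
+ *		return (-1);
+ */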
+
+/*
+ * Dumps a backup of the given snapshot (incremental from fromsnap if it's not
+ * NULL) to the file descriptor specified by outfd.
+ */
+int
+zfs_send(zfs_handle_t *zhp, const char *fromsnap, int outfd)
+{
+ zfs_cmd_t zc = { 0 };
+ char errbuf[1024];
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+ assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ if (fromsnap)
+ (void) strlcpy(zc.zc_value, fromsnap, sizeof (zc.zc_value));
+ zc.zc_cookie = outfd;
+
+ if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SENDBACKUP, &zc) != 0) {
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot send '%s'"), zhp->zfs_name);
+
+ switch (errno) {
+
+ case EXDEV:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "not an earlier snapshot from the same fs"));
+ return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
+
+ case EDQUOT:
+ case EFBIG:
+ case EIO:
+ case ENOLINK:
+ case ENOSPC:
+ case ENXIO:
+ case EPIPE:
+ case ERANGE:
+ case EFAULT:
+ case EROFS:
+ zfs_error_aux(hdl, strerror(errno));
+ return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
+
+ default:
+ return (zfs_standard_error(hdl, errno, errbuf));
+ }
+ }
+
+ return (0);
+}
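+
+/*
+ * Usage sketch (illustrative only): send a full stream to stdout. Judging
+ * from the EXDEV message above, 'fromsnap' names an earlier snapshot of the
+ * same filesystem when an incremental stream is wanted.
+ *
+ *	zfs_handle_t *snap;
+ *
+ *	if ((snap = zfs_open(hdl, "tank/fs@today",
+ *	    ZFS_TYPE_SNAPSHOT)) == NULL)
+ *		return (-1);
+ *	if (zfs_send(snap, NULL, STDOUT_FILENO) != 0)
+ *		return (-1);
+ *	zfs_close(snap);
+ */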
+
+/*
+ * Create ancestors of 'target', but not target itself, and not
+ * ancestors whose names are shorter than prefixlen. Fail if the
+ * prefixlen-long ancestor does not exist.
+ */
+static int
+create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
+{
+ zfs_handle_t *h;
+ char *cp;
+
+ /* make sure prefix exists */
+ cp = strchr(target + prefixlen, '/');
+ *cp = '\0';
+ h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+ *cp = '/';
+ if (h == NULL)
+ return (-1);
+ zfs_close(h);
+
+ /*
+ * Attempt to create, mount, and share any ancestor filesystems,
+ * up to the prefixlen-long one.
+ */
+ for (cp = target + prefixlen + 1;
+ (cp = strchr(cp, '/')) != NULL; *cp = '/', cp++) {
+ const char *opname;
+
+ *cp = '\0';
+
+ h = make_dataset_handle(hdl, target);
+ if (h) {
+ /* it already exists, nothing to do here */
+ zfs_close(h);
+ continue;
+ }
+
+ opname = dgettext(TEXT_DOMAIN, "create");
+ if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
+ NULL) != 0)
+ goto ancestorerr;
+
+ opname = dgettext(TEXT_DOMAIN, "open");
+ h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+ if (h == NULL)
+ goto ancestorerr;
+
+ opname = dgettext(TEXT_DOMAIN, "mount");
+ if (zfs_mount(h, NULL, 0) != 0)
+ goto ancestorerr;
+
+ opname = dgettext(TEXT_DOMAIN, "share");
+ if (zfs_share(h) != 0)
+ goto ancestorerr;
+
+ zfs_close(h);
+
+ continue;
+ancestorerr:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "failed to %s ancestor '%s'"), opname, target);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Restores a backup of tosnap from the file descriptor specified by infd.
+ */
+int
+zfs_receive(libzfs_handle_t *hdl, const char *tosnap, int isprefix,
+ int verbose, int dryrun, boolean_t force, int infd)
+{
+ zfs_cmd_t zc = { 0 };
+ time_t begin_time;
+ int ioctl_err, err, bytes, size, choplen;
+ char *cp;
+ dmu_replay_record_t drr;
+ struct drr_begin *drrb = &zc.zc_begin_record;
+ char errbuf[1024];
+ prop_changelist_t *clp;
+ char chopprefix[ZFS_MAXNAMELEN];
+
+ begin_time = time(NULL);
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot receive"));
+
+ /* read in the BEGIN record */
+ cp = (char *)&drr;
+ bytes = 0;
+ do {
+ size = read(infd, cp, sizeof (drr) - bytes);
+ cp += size;
+ bytes += size;
+ } while (size > 0);
+
+ if (size < 0 || bytes != sizeof (drr)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "stream (failed to read first record)"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+
+ zc.zc_begin_record = drr.drr_u.drr_begin;
+
+ if (drrb->drr_magic != DMU_BACKUP_MAGIC &&
+ drrb->drr_magic != BSWAP_64(DMU_BACKUP_MAGIC)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "stream (bad magic number)"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+
+ if (drrb->drr_version != DMU_BACKUP_VERSION &&
+ drrb->drr_version != BSWAP_64(DMU_BACKUP_VERSION)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only version "
+ "0x%llx is supported (stream is version 0x%llx)"),
+ DMU_BACKUP_VERSION, drrb->drr_version);
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+
+ if (strchr(drr.drr_u.drr_begin.drr_toname, '@') == NULL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "stream (bad snapshot name)"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+ /*
+ * Determine how much of the snapshot name stored in the stream
+ * we are going to tack on to the name they specified on the
+ * command line, and how much we are going to chop off.
+ *
+ * If they specified a snapshot, chop the entire name stored in
+ * the stream.
+ */
+ (void) strcpy(chopprefix, drr.drr_u.drr_begin.drr_toname);
+ if (isprefix) {
+ /*
+ * They specified a fs with -d, we want to tack on
+ * everything but the pool name stored in the stream
+ */
+ if (strchr(tosnap, '@')) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "argument - snapshot not allowed with -d"));
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+ }
+ cp = strchr(chopprefix, '/');
+ if (cp == NULL)
+ cp = strchr(chopprefix, '@');
+ *cp = '\0';
+ } else if (strchr(tosnap, '@') == NULL) {
+ /*
+ * If they specified a filesystem without -d, we want to
+ * tack on everything after the fs specified in the
+ * first name from the stream.
+ */
+ cp = strchr(chopprefix, '@');
+ *cp = '\0';
+ }
+ choplen = strlen(chopprefix);
+
+ /*
+ * Determine name of destination snapshot, store in zc_value.
+ */
+ (void) strlcpy(zc.zc_value, tosnap, sizeof (zc.zc_value));
+ (void) strlcat(zc.zc_value, drr.drr_u.drr_begin.drr_toname+choplen,
+     sizeof (zc.zc_value));
+ if (!zfs_validate_name(hdl, zc.zc_value, ZFS_TYPE_SNAPSHOT))
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+ (void) strcpy(zc.zc_name, zc.zc_value);
+ if (drrb->drr_fromguid) {
+ /* incremental backup stream */
+ zfs_handle_t *h;
+
+ /* do the recvbackup ioctl to the containing fs */
+ *strchr(zc.zc_name, '@') = '\0';
+
+ /* make sure destination fs exists */
+ h = zfs_open(hdl, zc.zc_name,
+ ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+ if (h == NULL)
+ return (-1);
+ if (!dryrun) {
+ /*
+ * We need to unmount all the dependents of the dataset
+ * and the dataset itself. If it's a volume
+ * then remove device link.
+ */
+ if (h->zfs_type == ZFS_TYPE_FILESYSTEM) {
+ clp = changelist_gather(h, ZFS_PROP_NAME, 0);
+ if (clp == NULL)
+ return (-1);
+ if (changelist_prefix(clp) != 0) {
+ changelist_free(clp);
+ return (-1);
+ }
+ } else {
+ (void) zvol_remove_link(hdl, h->zfs_name);
+ }
+ }
+ zfs_close(h);
+ } else {
+ /* full backup stream */
+
+ /* Make sure destination fs does not exist */
+ *strchr(zc.zc_name, '@') = '\0';
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination '%s' exists"), zc.zc_name);
+ return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+ }
+
+ if (strchr(zc.zc_name, '/') == NULL) {
+ /*
+ * they're trying to do a recv into a
+			 * nonexistent topmost filesystem.
+ */
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "destination '%s' does not exist"), zc.zc_name);
+ return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+ }
+
+ /* Do the recvbackup ioctl to the fs's parent. */
+ *strrchr(zc.zc_name, '/') = '\0';
+
+ if (isprefix && (err = create_parents(hdl,
+ zc.zc_value, strlen(tosnap))) != 0) {
+ return (zfs_error(hdl, EZFS_BADRESTORE, errbuf));
+ }
+
+ }
+
+ zc.zc_cookie = infd;
+ zc.zc_guid = force;
+ if (verbose) {
+ (void) printf("%s %s stream of %s into %s\n",
+ dryrun ? "would receive" : "receiving",
+ drrb->drr_fromguid ? "incremental" : "full",
+ drr.drr_u.drr_begin.drr_toname,
+ zc.zc_value);
+ (void) fflush(stdout);
+ }
+ if (dryrun)
+ return (0);
+ err = ioctl_err = ioctl(hdl->libzfs_fd, ZFS_IOC_RECVBACKUP, &zc);
+ if (ioctl_err != 0) {
+ switch (errno) {
+ case ENODEV:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "most recent snapshot does not match incremental "
+ "source"));
+ (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
+ break;
+ case ETXTBSY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination has been modified since most recent "
+ "snapshot"));
+ (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
+ break;
+ case EEXIST:
+ if (drrb->drr_fromguid == 0) {
+ /* it's the containing fs that exists */
+ cp = strchr(zc.zc_value, '@');
+ *cp = '\0';
+ }
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination already exists"));
+ (void) zfs_error_fmt(hdl, EZFS_EXISTS,
+ dgettext(TEXT_DOMAIN, "cannot restore to %s"),
+ zc.zc_value);
+ break;
+ case EINVAL:
+ (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+ break;
+ case ECKSUM:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid stream (checksum mismatch)"));
+ (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+ break;
+ default:
+ (void) zfs_standard_error(hdl, errno, errbuf);
+ }
+ }
+
+ /*
+ * Mount or recreate the /dev links for the target filesystem
+ * (if created, or if we tore them down to do an incremental
+ * restore), and the /dev links for the new snapshot (if
+ * created). Also mount any children of the target filesystem
+ * if we did an incremental receive.
+ */
+ cp = strchr(zc.zc_value, '@');
+ if (cp && (ioctl_err == 0 || drrb->drr_fromguid)) {
+ zfs_handle_t *h;
+
+ *cp = '\0';
+ h = zfs_open(hdl, zc.zc_value,
+ ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+ *cp = '@';
+ if (h) {
+ if (h->zfs_type == ZFS_TYPE_VOLUME) {
+ err = zvol_create_link(hdl, h->zfs_name);
+ if (err == 0 && ioctl_err == 0)
+ err = zvol_create_link(hdl,
+ zc.zc_value);
+ } else {
+ if (drrb->drr_fromguid) {
+ err = changelist_postfix(clp);
+ changelist_free(clp);
+ } else {
+ err = zfs_mount(h, NULL, 0);
+ }
+ }
+ zfs_close(h);
+ }
+ }
+
+ if (err || ioctl_err)
+ return (-1);
+
+ if (verbose) {
+ char buf1[64];
+ char buf2[64];
+ uint64_t bytes = zc.zc_cookie;
+ time_t delta = time(NULL) - begin_time;
+ if (delta == 0)
+ delta = 1;
+ zfs_nicenum(bytes, buf1, sizeof (buf1));
+		zfs_nicenum(bytes/delta, buf2, sizeof (buf2));
+
+ (void) printf("received %sb stream in %lu seconds (%sb/sec)\n",
+ buf1, delta, buf2);
+ }
+
+ return (0);
+}
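+
+/*
+ * Editorial sketch, not part of the original source: a minimal caller of
+ * zfs_receive() above, assuming a full backup stream on stdin and a
+ * hypothetical destination "tank/fs@snap". Fenced with #if 0, following
+ * this file's convention for inactive code, so it is never compiled.
+ */
+#if 0
+static int
+example_receive(libzfs_handle_t *hdl)
+{
+	/* no -d prefix handling, verbose, not a dry run, no forced rollback */
+	return (zfs_receive(hdl, "tank/fs@snap", 0, 1, 0, B_FALSE,
+	    STDIN_FILENO));
+}
+#endif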
+
+/*
+ * Destroy any more recent snapshots. We invoke this callback on any dependents
+ * of the snapshot first. If the 'cb_dependent' member is non-zero, then this
+ * is a dependent and we should just destroy it without checking the transaction
+ * group.
+ */
+typedef struct rollback_data {
+ const char *cb_target; /* the snapshot */
+ uint64_t cb_create; /* creation time reference */
+ prop_changelist_t *cb_clp; /* changelist pointer */
+ int cb_error;
+ boolean_t cb_dependent;
+} rollback_data_t;
+
+static int
+rollback_destroy(zfs_handle_t *zhp, void *data)
+{
+ rollback_data_t *cbp = data;
+
+ if (!cbp->cb_dependent) {
+ if (strcmp(zhp->zfs_name, cbp->cb_target) != 0 &&
+ zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT &&
+ zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) >
+ cbp->cb_create) {
+
+ cbp->cb_dependent = B_TRUE;
+ if (zfs_iter_dependents(zhp, B_FALSE, rollback_destroy,
+ cbp) != 0)
+ cbp->cb_error = 1;
+ cbp->cb_dependent = B_FALSE;
+
+ if (zfs_destroy(zhp) != 0)
+ cbp->cb_error = 1;
+ else
+ changelist_remove(zhp, cbp->cb_clp);
+ }
+ } else {
+ if (zfs_destroy(zhp) != 0)
+ cbp->cb_error = 1;
+ else
+ changelist_remove(zhp, cbp->cb_clp);
+ }
+
+ zfs_close(zhp);
+ return (0);
+}
+
+/*
+ * Rollback the dataset to its latest snapshot.
+ */
+static int
+do_rollback(zfs_handle_t *zhp)
+{
+ int ret;
+ zfs_cmd_t zc = { 0 };
+
+ assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
+ zhp->zfs_type == ZFS_TYPE_VOLUME);
+
+ if (zhp->zfs_type == ZFS_TYPE_VOLUME &&
+ zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name) != 0)
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (ZFS_IS_VOLUME(zhp))
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
+ /*
+ * We rely on the consumer to verify that there are no newer snapshots
+ * for the given dataset. Given these constraints, we can simply pass
+ * the name on to the ioctl() call. There is still an unlikely race
+ * condition where the user has taken a snapshot since we verified that
+ * this was the most recent.
+ */
+ if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_ROLLBACK,
+ &zc)) != 0) {
+ (void) zfs_standard_error_fmt(zhp->zfs_hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
+ zhp->zfs_name);
+ } else if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
+ ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
+ }
+
+ return (ret);
+}
+
+/*
+ * Given a dataset, rollback to a specific snapshot, discarding any
+ * data changes since then and making it the active dataset.
+ *
+ * Any snapshots more recent than the target are destroyed, along with
+ * their dependents.
+ */
+int
+zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, int flag)
+{
+ int ret;
+ rollback_data_t cb = { 0 };
+ prop_changelist_t *clp;
+
+ /*
+	 * Unmount all dependents of the dataset and the dataset itself.
+	 * The list we need to gather is the same as for doing a rename.
+ */
+ clp = changelist_gather(zhp, ZFS_PROP_NAME, flag ? MS_FORCE: 0);
+ if (clp == NULL)
+ return (-1);
+
+ if ((ret = changelist_prefix(clp)) != 0)
+ goto out;
+
+ /*
+	 * Destroy all snapshots more recent than the target, along with
+	 * their dependents.
+ */
+ cb.cb_target = snap->zfs_name;
+ cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
+ cb.cb_clp = clp;
+ (void) zfs_iter_children(zhp, rollback_destroy, &cb);
+
+ if ((ret = cb.cb_error) != 0) {
+ (void) changelist_postfix(clp);
+ goto out;
+ }
+
+ /*
+ * Now that we have verified that the snapshot is the latest,
+	 * roll back to the given snapshot.
+ */
+ ret = do_rollback(zhp);
+
+ if (ret != 0) {
+ (void) changelist_postfix(clp);
+ goto out;
+ }
+
+ /*
+ * We only want to re-mount the filesystem if it was mounted in the
+ * first place.
+ */
+ ret = changelist_postfix(clp);
+
+out:
+ changelist_free(clp);
+ return (ret);
+}
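+
+/*
+ * Editorial sketch (hypothetical names, not part of the original source):
+ * how a consumer might drive zfs_rollback() above, opening the dataset and
+ * its most recent snapshot first. Fenced with #if 0 so it never builds.
+ */
+#if 0
+static int
+example_rollback(libzfs_handle_t *hdl)
+{
+	zfs_handle_t *fs, *snap;
+	int ret = -1;
+
+	fs = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM);
+	snap = zfs_open(hdl, "tank/fs@monday", ZFS_TYPE_SNAPSHOT);
+	if (fs != NULL && snap != NULL)
+		ret = zfs_rollback(fs, snap, 0);	/* 0: no forced unmount */
+	if (snap != NULL)
+		zfs_close(snap);
+	if (fs != NULL)
+		zfs_close(fs);
+	return (ret);
+}
+#endif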
+
+/*
+ * Iterate over all dependents for a given dataset. This includes both
+ * hierarchical dependents (children) and data dependents (snapshots and
+ * clones). The bulk of the processing occurs in get_dependents() in
+ * libzfs_graph.c.
+ */
+int
+zfs_iter_dependents(zfs_handle_t *zhp, boolean_t allowrecursion,
+ zfs_iter_f func, void *data)
+{
+ char **dependents;
+ size_t count;
+ int i;
+ zfs_handle_t *child;
+ int ret = 0;
+
+ if (get_dependents(zhp->zfs_hdl, allowrecursion, zhp->zfs_name,
+ &dependents, &count) != 0)
+ return (-1);
+
+ for (i = 0; i < count; i++) {
+ if ((child = make_dataset_handle(zhp->zfs_hdl,
+ dependents[i])) == NULL)
+ continue;
+
+ if ((ret = func(child, data)) != 0)
+ break;
+ }
+
+ for (i = 0; i < count; i++)
+ free(dependents[i]);
+ free(dependents);
+
+ return (ret);
+}
+
+/*
+ * Renames the given dataset.
+ */
+int
+zfs_rename(zfs_handle_t *zhp, const char *target)
+{
+ int ret;
+ zfs_cmd_t zc = { 0 };
+ char *delim;
+ prop_changelist_t *cl;
+ char parent[ZFS_MAXNAMELEN];
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ char errbuf[1024];
+
+ /* if we have the same exact name, just return success */
+ if (strcmp(zhp->zfs_name, target) == 0)
+ return (0);
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot rename to '%s'"), target);
+
+ /*
+ * Make sure the target name is valid
+ */
+ if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
+ if ((strchr(target, '@') == NULL) ||
+ *target == '@') {
+ /*
+ * Snapshot target name is abbreviated,
+ * reconstruct full dataset name
+ */
+ (void) strlcpy(parent, zhp->zfs_name,
+ sizeof (parent));
+ delim = strchr(parent, '@');
+ if (strchr(target, '@') == NULL)
+ *(++delim) = '\0';
+ else
+ *delim = '\0';
+ (void) strlcat(parent, target, sizeof (parent));
+ target = parent;
+ } else {
+ /*
+ * Make sure we're renaming within the same dataset.
+ */
+ delim = strchr(target, '@');
+ if (strncmp(zhp->zfs_name, target, delim - target)
+ != 0 || zhp->zfs_name[delim - target] != '@') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "snapshots must be part of same "
+ "dataset"));
+ return (zfs_error(hdl, EZFS_CROSSTARGET,
+ errbuf));
+ }
+ }
+ if (!zfs_validate_name(hdl, target, zhp->zfs_type))
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+	} else {
+		uint64_t unused;
+
+		if (!zfs_validate_name(hdl, target, zhp->zfs_type))
+			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+		/* validate parents */
+ if (check_parents(hdl, target, &unused) != 0)
+ return (-1);
+
+ (void) parent_name(target, parent, sizeof (parent));
+
+ /* make sure we're in the same pool */
+ verify((delim = strchr(target, '/')) != NULL);
+ if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
+ zhp->zfs_name[delim - target] != '/') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "datasets must be within same pool"));
+ return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
+ }
+
+ /* new name cannot be a child of the current dataset name */
+ if (strncmp(parent, zhp->zfs_name,
+ strlen(zhp->zfs_name)) == 0) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "new dataset name cannot be a descendant of "
+ "current dataset name"));
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+ }
+ }
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);
+
+ if (getzoneid() == GLOBAL_ZONEID &&
+ zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dataset is used in a non-global zone"));
+ return (zfs_error(hdl, EZFS_ZONED, errbuf));
+ }
+
+ if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0)) == NULL)
+ return (-1);
+
+ if (changelist_haszonedchild(cl)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "child dataset with inherited mountpoint is used "
+ "in a non-global zone"));
+ (void) zfs_error(hdl, EZFS_ZONED, errbuf);
+ goto error;
+ }
+
+ if ((ret = changelist_prefix(cl)) != 0)
+ goto error;
+
+ if (ZFS_IS_VOLUME(zhp))
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
+
+ if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_RENAME, &zc)) != 0) {
+ (void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
+
+ /*
+ * On failure, we still want to remount any filesystems that
+ * were previously mounted, so we don't alter the system state.
+ */
+ (void) changelist_postfix(cl);
+ } else {
+ changelist_rename(cl, zfs_get_name(zhp), target);
+
+ ret = changelist_postfix(cl);
+ }
+
+error:
+ changelist_free(cl);
+ return (ret);
+}
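+
+/*
+ * Editorial sketch (hypothetical names): renaming a dataset through
+ * zfs_rename() above. The target must remain within the same pool, and
+ * must not be a descendant of the current name. Fenced with #if 0.
+ */
+#if 0
+static int
+example_rename(libzfs_handle_t *hdl)
+{
+	zfs_handle_t *zhp = zfs_open(hdl, "tank/old", ZFS_TYPE_FILESYSTEM);
+	int ret = -1;
+
+	if (zhp != NULL) {
+		ret = zfs_rename(zhp, "tank/new");
+		zfs_close(zhp);
+	}
+	return (ret);
+}
+#endif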
+
+/*
+ * Given a zvol dataset, issue the ioctl to create the appropriate minor node,
+ * poke devfsadm to create the /dev link, and then wait for the link to appear.
+ */
+int
+zvol_create_link(libzfs_handle_t *hdl, const char *dataset)
+{
+ zfs_cmd_t zc = { 0 };
+#if 0
+ di_devlink_handle_t dhdl;
+#endif
+
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+
+ /*
+ * Issue the appropriate ioctl.
+ */
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_CREATE_MINOR, &zc) != 0) {
+ switch (errno) {
+ case EEXIST:
+ /*
+ * Silently ignore the case where the link already
+ * exists. This allows 'zfs volinit' to be run multiple
+ * times without errors.
+ */
+ return (0);
+
+ default:
+ return (zfs_standard_error_fmt(hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot create device links "
+ "for '%s'"), dataset));
+ }
+ }
+
+#if 0
+ /*
+ * Call devfsadm and wait for the links to magically appear.
+ */
+ if ((dhdl = di_devlink_init(ZFS_DRIVER, DI_MAKE_LINK)) == NULL) {
+ zfs_error_aux(hdl, strerror(errno));
+ (void) zfs_error_fmt(hdl, EZFS_DEVLINKS,
+ dgettext(TEXT_DOMAIN, "cannot create device links "
+ "for '%s'"), dataset);
+ (void) ioctl(hdl->libzfs_fd, ZFS_IOC_REMOVE_MINOR, &zc);
+ return (-1);
+ } else {
+ (void) di_devlink_fini(&dhdl);
+ }
+#endif
+
+ return (0);
+}
+
+/*
+ * Remove a minor node for the given zvol and the associated /dev links.
+ */
+int
+zvol_remove_link(libzfs_handle_t *hdl, const char *dataset)
+{
+ zfs_cmd_t zc = { 0 };
+
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_REMOVE_MINOR, &zc) != 0) {
+ switch (errno) {
+ case ENXIO:
+ /*
+ * Silently ignore the case where the link no longer
+ * exists, so that 'zfs volfini' can be run multiple
+ * times without errors.
+ */
+ return (0);
+
+ default:
+ return (zfs_standard_error_fmt(hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot remove device "
+ "links for '%s'"), dataset));
+ }
+ }
+
+ return (0);
+}
+
+nvlist_t *
+zfs_get_user_props(zfs_handle_t *zhp)
+{
+ return (zhp->zfs_user_props);
+}
+
+/*
+ * Given a comma-separated list of properties, construct a property list
+ * containing both user-defined and native properties. This function will
+ * return a NULL list if 'all' is specified, which can later be expanded on a
+ * per-dataset basis by zfs_expand_proplist().
+ */
+int
+zfs_get_proplist_common(libzfs_handle_t *hdl, char *fields,
+ zfs_proplist_t **listp, zfs_type_t type)
+{
+ size_t len;
+ char *s, *p;
+ char c;
+ zfs_prop_t prop;
+ zfs_proplist_t *entry;
+ zfs_proplist_t **last;
+
+ *listp = NULL;
+ last = listp;
+
+ /*
+ * If 'all' is specified, return a NULL list.
+ */
+ if (strcmp(fields, "all") == 0)
+ return (0);
+
+ /*
+ * If no fields were specified, return an error.
+ */
+ if (fields[0] == '\0') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no properties specified"));
+ return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
+ "bad property list")));
+ }
+
+ /*
+ * It would be nice to use getsubopt() here, but the inclusion of column
+ * aliases makes this more effort than it's worth.
+ */
+ s = fields;
+ while (*s != '\0') {
+ if ((p = strchr(s, ',')) == NULL) {
+ len = strlen(s);
+ p = s + len;
+ } else {
+ len = p - s;
+ }
+
+ /*
+ * Check for empty options.
+ */
+ if (len == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "empty property name"));
+ return (zfs_error(hdl, EZFS_BADPROP,
+ dgettext(TEXT_DOMAIN, "bad property list")));
+ }
+
+ /*
+ * Check all regular property names.
+ */
+ c = s[len];
+ s[len] = '\0';
+ prop = zfs_name_to_prop_common(s, type);
+
+ if (prop != ZFS_PROP_INVAL &&
+ !zfs_prop_valid_for_type(prop, type))
+ prop = ZFS_PROP_INVAL;
+
+ /*
+ * When no property table entry can be found, return failure if
+ * this is a pool property or if this isn't a user-defined
+		 * dataset property.
+ */
+ if (prop == ZFS_PROP_INVAL &&
+ (type & ZFS_TYPE_POOL || !zfs_prop_user(s))) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid property '%s'"), s);
+ return (zfs_error(hdl, EZFS_BADPROP,
+ dgettext(TEXT_DOMAIN, "bad property list")));
+ }
+
+ if ((entry = zfs_alloc(hdl, sizeof (zfs_proplist_t))) == NULL)
+ return (-1);
+
+ entry->pl_prop = prop;
+ if (prop == ZFS_PROP_INVAL) {
+ if ((entry->pl_user_prop =
+ zfs_strdup(hdl, s)) == NULL) {
+ free(entry);
+ return (-1);
+ }
+ entry->pl_width = strlen(s);
+ } else {
+ entry->pl_width = zfs_prop_width(prop,
+ &entry->pl_fixed);
+ }
+
+ *last = entry;
+ last = &entry->pl_next;
+
+ s = p;
+ if (c == ',')
+ s++;
+ }
+
+ return (0);
+}
+
+int
+zfs_get_proplist(libzfs_handle_t *hdl, char *fields, zfs_proplist_t **listp)
+{
+ return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_ANY));
+}
+
+void
+zfs_free_proplist(zfs_proplist_t *pl)
+{
+ zfs_proplist_t *next;
+
+ while (pl != NULL) {
+ next = pl->pl_next;
+ free(pl->pl_user_prop);
+ free(pl);
+ pl = next;
+ }
+}
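+
+/*
+ * Editorial sketch: building and freeing a property list with
+ * zfs_get_proplist() above. The parser writes NUL bytes into 'fields'
+ * while splitting it, so the buffer must be writable; a string literal
+ * would not be safe here. Fenced with #if 0.
+ */
+#if 0
+static int
+example_proplist(libzfs_handle_t *hdl)
+{
+	zfs_proplist_t *pl = NULL;
+	char fields[] = "name,used,mountpoint";	/* writable copy */
+
+	if (zfs_get_proplist(hdl, fields, &pl) != 0)
+		return (-1);
+	zfs_free_proplist(pl);
+	return (0);
+}
+#endif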
+
+typedef struct expand_data {
+ zfs_proplist_t **last;
+ libzfs_handle_t *hdl;
+} expand_data_t;
+
+static zfs_prop_t
+zfs_expand_proplist_cb(zfs_prop_t prop, void *cb)
+{
+ zfs_proplist_t *entry;
+ expand_data_t *edp = cb;
+
+ if ((entry = zfs_alloc(edp->hdl, sizeof (zfs_proplist_t))) == NULL)
+ return (ZFS_PROP_INVAL);
+
+ entry->pl_prop = prop;
+ entry->pl_width = zfs_prop_width(prop, &entry->pl_fixed);
+ entry->pl_all = B_TRUE;
+
+ *(edp->last) = entry;
+ edp->last = &entry->pl_next;
+
+ return (ZFS_PROP_CONT);
+}
+
+int
+zfs_expand_proplist_common(libzfs_handle_t *hdl, zfs_proplist_t **plp,
+ zfs_type_t type)
+{
+ zfs_proplist_t *entry;
+ zfs_proplist_t **last;
+ expand_data_t exp;
+
+ if (*plp == NULL) {
+ /*
+ * If this is the very first time we've been called for an 'all'
+ * specification, expand the list to include all native
+ * properties.
+ */
+ last = plp;
+
+ exp.last = last;
+ exp.hdl = hdl;
+
+ if (zfs_prop_iter_common(zfs_expand_proplist_cb, &exp, type,
+ B_FALSE) == ZFS_PROP_INVAL)
+ return (-1);
+
+ /*
+ * Add 'name' to the beginning of the list, which is handled
+ * specially.
+ */
+ if ((entry = zfs_alloc(hdl,
+ sizeof (zfs_proplist_t))) == NULL)
+ return (-1);
+
+ entry->pl_prop = ZFS_PROP_NAME;
+ entry->pl_width = zfs_prop_width(ZFS_PROP_NAME,
+ &entry->pl_fixed);
+ entry->pl_all = B_TRUE;
+ entry->pl_next = *plp;
+ *plp = entry;
+ }
+ return (0);
+}
+
+/*
+ * This function is used by 'zfs list' to determine the exact set of columns to
+ * display, and their maximum widths. This does two main things:
+ *
+ * - If this is a list of all properties, then expand the list to include
+ * all native properties, and set a flag so that for each dataset we look
+ * for new unique user properties and add them to the list.
+ *
+ * - For non-fixed-width properties, keep track of the maximum width seen
+ * so that we can size the column appropriately.
+ */
+int
+zfs_expand_proplist(zfs_handle_t *zhp, zfs_proplist_t **plp)
+{
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ zfs_proplist_t *entry;
+ zfs_proplist_t **last, **start;
+ nvlist_t *userprops, *propval;
+ nvpair_t *elem;
+ char *strval;
+ char buf[ZFS_MAXPROPLEN];
+
+ if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_ANY) != 0)
+ return (-1);
+
+ userprops = zfs_get_user_props(zhp);
+
+ entry = *plp;
+ if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
+ /*
+ * Go through and add any user properties as necessary. We
+ * start by incrementing our list pointer to the first
+ * non-native property.
+ */
+ start = plp;
+ while (*start != NULL) {
+ if ((*start)->pl_prop == ZFS_PROP_INVAL)
+ break;
+ start = &(*start)->pl_next;
+ }
+
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
+ /*
+ * See if we've already found this property in our list.
+ */
+ for (last = start; *last != NULL;
+ last = &(*last)->pl_next) {
+ if (strcmp((*last)->pl_user_prop,
+ nvpair_name(elem)) == 0)
+ break;
+ }
+
+ if (*last == NULL) {
+ if ((entry = zfs_alloc(hdl,
+ sizeof (zfs_proplist_t))) == NULL ||
+ ((entry->pl_user_prop = zfs_strdup(hdl,
+ nvpair_name(elem)))) == NULL) {
+ free(entry);
+ return (-1);
+ }
+
+ entry->pl_prop = ZFS_PROP_INVAL;
+ entry->pl_width = strlen(nvpair_name(elem));
+ entry->pl_all = B_TRUE;
+ *last = entry;
+ }
+ }
+ }
+
+ /*
+ * Now go through and check the width of any non-fixed columns
+ */
+ for (entry = *plp; entry != NULL; entry = entry->pl_next) {
+ if (entry->pl_fixed)
+ continue;
+
+ if (entry->pl_prop != ZFS_PROP_INVAL) {
+ if (zfs_prop_get(zhp, entry->pl_prop,
+ buf, sizeof (buf), NULL, NULL, 0, B_FALSE) == 0) {
+ if (strlen(buf) > entry->pl_width)
+ entry->pl_width = strlen(buf);
+ }
+ } else if (nvlist_lookup_nvlist(userprops,
+ entry->pl_user_prop, &propval) == 0) {
+ verify(nvlist_lookup_string(propval,
+ ZFS_PROP_VALUE, &strval) == 0);
+ if (strlen(strval) > entry->pl_width)
+ entry->pl_width = strlen(strval);
+ }
+ }
+
+ return (0);
+}
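+
+/*
+ * Editorial sketch: expanding an 'all' specification (a NULL list) for a
+ * single dataset with zfs_expand_proplist() above, then releasing it.
+ * Fenced with #if 0.
+ */
+#if 0
+static int
+example_expand(zfs_handle_t *zhp)
+{
+	zfs_proplist_t *pl = NULL;	/* NULL means "all" */
+
+	if (zfs_expand_proplist(zhp, &pl) != 0)
+		return (-1);
+	zfs_free_proplist(pl);
+	return (0);
+}
+#endif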
+
+/*
+ * Attach/detach the given filesystem to/from the given jail.
+ */
+int
+zfs_jail(zfs_handle_t *zhp, int jailid, int attach)
+{
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ zfs_cmd_t zc = { 0 };
+ char errbuf[1024];
+ int cmd, ret;
+
+ if (attach) {
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot jail '%s'"), zhp->zfs_name);
+	} else {
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    dgettext(TEXT_DOMAIN, "cannot unjail '%s'"), zhp->zfs_name);
+ }
+
+ switch (zhp->zfs_type) {
+ case ZFS_TYPE_VOLUME:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "volumes cannot be jailed"));
+ return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+ case ZFS_TYPE_SNAPSHOT:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "snapshots cannot be jailed"));
+ return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+ }
+ assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ zc.zc_objset_type = DMU_OST_ZFS;
+ zc.zc_jailid = jailid;
+
+ cmd = attach ? ZFS_IOC_JAIL : ZFS_IOC_UNJAIL;
+ if ((ret = ioctl(hdl->libzfs_fd, cmd, &zc)) != 0)
+ zfs_standard_error(hdl, errno, errbuf);
+
+ return (ret);
+}
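+
+/*
+ * Editorial sketch (hypothetical dataset and jail ID): attaching a
+ * filesystem to a jail via zfs_jail() above. Fenced with #if 0.
+ */
+#if 0
+static int
+example_jail(libzfs_handle_t *hdl)
+{
+	zfs_handle_t *zhp = zfs_open(hdl, "tank/jailfs", ZFS_TYPE_FILESYSTEM);
+	int ret = -1;
+
+	if (zhp != NULL) {
+		ret = zfs_jail(zhp, 1, 1);	/* jail ID 1, attach */
+		zfs_close(zhp);
+	}
+	return (ret);
+}
+#endif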
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c b/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c
new file mode 100644
index 0000000..c283016
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c
@@ -0,0 +1,646 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Iterate over all children of the current object. This includes the normal
+ * dataset hierarchy, but also arbitrary hierarchies due to clones. We want to
+ * walk all datasets in the pool, and construct a directed graph of the form:
+ *
+ * home
+ * |
+ * +----+----+
+ * | |
+ * v v ws
+ * bar baz |
+ * | |
+ * v v
+ * @yesterday ----> foo
+ *
+ * In order to construct this graph, we have to walk every dataset in the pool,
+ * because the clone parent is stored as a property of the child, not the
+ * parent. The parent only keeps track of the number of clones.
+ *
+ * In the normal case (without clones) this would be rather expensive. To avoid
+ * unnecessary computation, we first try a walk of the subtree hierarchy
+ * starting from the initial node. At each dataset, we construct a node in the
+ * graph and an edge leading from its parent. If we don't see any snapshots
+ * with a non-zero clone count, then we are finished.
+ *
+ * If we do find a cloned snapshot, then we finish the walk of the current
+ * subtree, but indicate that we need to do a complete walk. We then perform a
+ * global walk of all datasets, avoiding the subtree we already processed.
+ *
+ * At the end of this, we'll end up with a directed graph of all relevant (and
+ * possibly some irrelevant) datasets in the system. We need to both find our
+ * limiting subgraph and determine a safe ordering in which to destroy the
+ * datasets. We do a topological ordering of our graph starting at our target
+ * dataset, and then walk the results in reverse.
+ *
+ * It's possible for the graph to have cycles if, for example, the user renames
+ * a clone to be the parent of its origin snapshot. The user can request to
+ * generate an error in this case, or ignore the cycle and continue.
+ *
+ * When removing datasets, we want to destroy the snapshots in chronological
+ * order (because this is the most efficient method). In order to accomplish
+ * this, we store the creation transaction group with each vertex and keep each
+ * vertex's edges sorted according to this value. The topological sort will
+ * automatically walk the snapshots in the correct order.
+ */
+
+#include <assert.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+#include "zfs_namecheck.h"
+
+#define MIN_EDGECOUNT 4
+
+/*
+ * Vertex structure. Indexed by dataset name, this structure maintains a list
+ * of edges to other vertices.
+ */
+struct zfs_edge;
+typedef struct zfs_vertex {
+ char zv_dataset[ZFS_MAXNAMELEN];
+ struct zfs_vertex *zv_next;
+ int zv_visited;
+ uint64_t zv_txg;
+ struct zfs_edge **zv_edges;
+ int zv_edgecount;
+ int zv_edgealloc;
+} zfs_vertex_t;
+
+enum {
+ VISIT_SEEN = 1,
+ VISIT_SORT_PRE,
+ VISIT_SORT_POST
+};
+
+/*
+ * Edge structure. Simply maintains a pointer to the destination vertex. There
+ * is no need to store the source vertex, since we only use edges in the context
+ * of the source vertex.
+ */
+typedef struct zfs_edge {
+ zfs_vertex_t *ze_dest;
+ struct zfs_edge *ze_next;
+} zfs_edge_t;
+
+#define ZFS_GRAPH_SIZE 1027 /* this could be dynamic some day */
+
+/*
+ * Graph structure. Vertices are maintained in a hash indexed by dataset name.
+ */
+typedef struct zfs_graph {
+ zfs_vertex_t **zg_hash;
+ size_t zg_size;
+ size_t zg_nvertex;
+} zfs_graph_t;
+
+/*
+ * Allocate a new edge pointing to the target vertex.
+ */
+static zfs_edge_t *
+zfs_edge_create(libzfs_handle_t *hdl, zfs_vertex_t *dest)
+{
+ zfs_edge_t *zep = zfs_alloc(hdl, sizeof (zfs_edge_t));
+
+ if (zep == NULL)
+ return (NULL);
+
+ zep->ze_dest = dest;
+
+ return (zep);
+}
+
+/*
+ * Destroy an edge.
+ */
+static void
+zfs_edge_destroy(zfs_edge_t *zep)
+{
+ free(zep);
+}
+
+/*
+ * Allocate a new vertex with the given name.
+ */
+static zfs_vertex_t *
+zfs_vertex_create(libzfs_handle_t *hdl, const char *dataset)
+{
+ zfs_vertex_t *zvp = zfs_alloc(hdl, sizeof (zfs_vertex_t));
+
+ if (zvp == NULL)
+ return (NULL);
+
+ assert(strlen(dataset) < ZFS_MAXNAMELEN);
+
+ (void) strlcpy(zvp->zv_dataset, dataset, sizeof (zvp->zv_dataset));
+
+ if ((zvp->zv_edges = zfs_alloc(hdl,
+ MIN_EDGECOUNT * sizeof (void *))) == NULL) {
+ free(zvp);
+ return (NULL);
+ }
+
+ zvp->zv_edgealloc = MIN_EDGECOUNT;
+
+ return (zvp);
+}
+
+/*
+ * Destroy a vertex. Frees up any associated edges.
+ */
+static void
+zfs_vertex_destroy(zfs_vertex_t *zvp)
+{
+ int i;
+
+ for (i = 0; i < zvp->zv_edgecount; i++)
+ zfs_edge_destroy(zvp->zv_edges[i]);
+
+ free(zvp->zv_edges);
+ free(zvp);
+}
+
+/*
+ * Given a vertex, add an edge to the destination vertex.
+ */
+static int
+zfs_vertex_add_edge(libzfs_handle_t *hdl, zfs_vertex_t *zvp,
+ zfs_vertex_t *dest)
+{
+ zfs_edge_t *zep = zfs_edge_create(hdl, dest);
+
+ if (zep == NULL)
+ return (-1);
+
+ if (zvp->zv_edgecount == zvp->zv_edgealloc) {
+ void *ptr;
+
+ if ((ptr = zfs_realloc(hdl, zvp->zv_edges,
+ zvp->zv_edgealloc * sizeof (void *),
+ zvp->zv_edgealloc * 2 * sizeof (void *))) == NULL)
+ return (-1);
+
+ zvp->zv_edges = ptr;
+ zvp->zv_edgealloc *= 2;
+ }
+
+ zvp->zv_edges[zvp->zv_edgecount++] = zep;
+
+ return (0);
+}
+
+static int
+zfs_edge_compare(const void *a, const void *b)
+{
+ const zfs_edge_t *ea = *((zfs_edge_t **)a);
+ const zfs_edge_t *eb = *((zfs_edge_t **)b);
+
+ if (ea->ze_dest->zv_txg < eb->ze_dest->zv_txg)
+ return (-1);
+ if (ea->ze_dest->zv_txg > eb->ze_dest->zv_txg)
+ return (1);
+ return (0);
+}
+
+/*
+ * Sort the given vertex edges according to the creation txg of each vertex.
+ */
+static void
+zfs_vertex_sort_edges(zfs_vertex_t *zvp)
+{
+ if (zvp->zv_edgecount == 0)
+ return;
+
+ qsort(zvp->zv_edges, zvp->zv_edgecount, sizeof (void *),
+ zfs_edge_compare);
+}
+
+/*
+ * Construct a new graph object. We allow the size to be specified as a
+ * parameter so in the future we can size the hash according to the number of
+ * datasets in the pool.
+ */
+static zfs_graph_t *
+zfs_graph_create(libzfs_handle_t *hdl, size_t size)
+{
+ zfs_graph_t *zgp = zfs_alloc(hdl, sizeof (zfs_graph_t));
+
+ if (zgp == NULL)
+ return (NULL);
+
+ zgp->zg_size = size;
+ if ((zgp->zg_hash = zfs_alloc(hdl,
+ size * sizeof (zfs_vertex_t *))) == NULL) {
+ free(zgp);
+ return (NULL);
+ }
+
+ return (zgp);
+}
+
+/*
+ * Destroy a graph object. We have to iterate over all the hash chains,
+ * destroying each vertex in the process.
+ */
+static void
+zfs_graph_destroy(zfs_graph_t *zgp)
+{
+ int i;
+ zfs_vertex_t *current, *next;
+
+ for (i = 0; i < zgp->zg_size; i++) {
+ current = zgp->zg_hash[i];
+ while (current != NULL) {
+ next = current->zv_next;
+ zfs_vertex_destroy(current);
+ current = next;
+ }
+ }
+
+ free(zgp->zg_hash);
+ free(zgp);
+}
+
+/*
+ * Graph hash function. Classic Bernstein k=33 hash function, taken from
+ * usr/src/cmd/sgs/tools/common/strhash.c
+ */
+static size_t
+zfs_graph_hash(zfs_graph_t *zgp, const char *str)
+{
+ size_t hash = 5381;
+ int c;
+
+ while ((c = *str++) != 0)
+ hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
+
+ return (hash % zgp->zg_size);
+}
+
+/*
+ * Given a dataset name, finds the associated vertex, creating it if necessary.
+ */
+static zfs_vertex_t *
+zfs_graph_lookup(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset,
+ uint64_t txg)
+{
+ size_t idx = zfs_graph_hash(zgp, dataset);
+ zfs_vertex_t *zvp;
+
+ for (zvp = zgp->zg_hash[idx]; zvp != NULL; zvp = zvp->zv_next) {
+ if (strcmp(zvp->zv_dataset, dataset) == 0) {
+ if (zvp->zv_txg == 0)
+ zvp->zv_txg = txg;
+ return (zvp);
+ }
+ }
+
+ if ((zvp = zfs_vertex_create(hdl, dataset)) == NULL)
+ return (NULL);
+
+ zvp->zv_next = zgp->zg_hash[idx];
+ zvp->zv_txg = txg;
+ zgp->zg_hash[idx] = zvp;
+ zgp->zg_nvertex++;
+
+ return (zvp);
+}
+
+/*
+ * Given two dataset names, create an edge between them. For the source vertex,
+ * mark 'zv_visited' to indicate that we have seen this vertex, and not simply
+ * created it as a destination of another edge. If 'dest' is NULL, then this
+ * is an individual vertex (i.e. the starting vertex), so don't add an edge.
+ */
+static int
+zfs_graph_add(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *source,
+ const char *dest, uint64_t txg)
+{
+ zfs_vertex_t *svp, *dvp;
+
+ if ((svp = zfs_graph_lookup(hdl, zgp, source, 0)) == NULL)
+ return (-1);
+ svp->zv_visited = VISIT_SEEN;
+ if (dest != NULL) {
+ dvp = zfs_graph_lookup(hdl, zgp, dest, txg);
+ if (dvp == NULL)
+ return (-1);
+ if (zfs_vertex_add_edge(hdl, svp, dvp) != 0)
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Iterate over all children of the given dataset, adding any vertices as
+ * necessary. Returns 0 if no cloned snapshots were seen, -1 if there was an
+ * error, or 1 otherwise. This is a simple recursive algorithm - the ZFS
+ * namespace typically is very flat. We manually invoke the necessary ioctl()
+ * calls to avoid the overhead and additional semantics of zfs_open().
+ */
+static int
+iterate_children(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret = 0, err;
+ zfs_vertex_t *zvp;
+
+ /*
+ * Look up the source vertex, and avoid it if we've seen it before.
+ */
+ zvp = zfs_graph_lookup(hdl, zgp, dataset, 0);
+ if (zvp == NULL)
+ return (-1);
+ if (zvp->zv_visited == VISIT_SEEN)
+ return (0);
+
+ /*
+ * We check the clone parent here instead of within the loop, so that if
+ * the root dataset has been promoted from a clone, we find its parent
+ * appropriately.
+ */
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0 &&
+ zc.zc_objset_stats.dds_clone_of[0] != '\0') {
+ if (zfs_graph_add(hdl, zgp, zc.zc_objset_stats.dds_clone_of,
+ zc.zc_name, zc.zc_objset_stats.dds_creation_txg) != 0)
+ return (-1);
+ }
+
+ for ((void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+ ioctl(hdl->libzfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0;
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name))) {
+
+ /*
+ * Ignore private dataset names.
+ */
+ if (dataset_name_hidden(zc.zc_name))
+ continue;
+
+ /*
+ * Get statistics for this dataset, to determine the type of the
+ * dataset and clone statistics. If this fails, the dataset has
+ * since been removed, and we're pretty much screwed anyway.
+ */
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0)
+ continue;
+
+ /*
+ * Add an edge between the parent and the child.
+ */
+ if (zfs_graph_add(hdl, zgp, dataset, zc.zc_name,
+ zc.zc_objset_stats.dds_creation_txg) != 0)
+ return (-1);
+
+ /*
+ * Iterate over all children
+ */
+ err = iterate_children(hdl, zgp, zc.zc_name);
+ if (err == -1)
+ return (-1);
+ else if (err == 1)
+ ret = 1;
+
+ /*
+ * Indicate if we found a dataset with a non-zero clone count.
+ */
+ if (zc.zc_objset_stats.dds_num_clones != 0)
+ ret = 1;
+ }
+
+ /*
+ * Now iterate over all snapshots.
+ */
+ bzero(&zc, sizeof (zc));
+
+ for ((void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+ ioctl(hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, &zc) == 0;
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name))) {
+
+ /*
+ * Get statistics for this dataset, to determine the type of the
+ * dataset and clone statistics. If this fails, the dataset has
+ * since been removed, and we're pretty much screwed anyway.
+ */
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0)
+ continue;
+
+ /*
+ * Add an edge between the parent and the child.
+ */
+ if (zfs_graph_add(hdl, zgp, dataset, zc.zc_name,
+ zc.zc_objset_stats.dds_creation_txg) != 0)
+ return (-1);
+
+ /*
+ * Indicate if we found a dataset with a non-zero clone count.
+ */
+ if (zc.zc_objset_stats.dds_num_clones != 0)
+ ret = 1;
+ }
+
+ zvp->zv_visited = VISIT_SEEN;
+
+ return (ret);
+}
+
+/*
+ * Construct a complete graph of all necessary vertices. First, we iterate over
+ * only our object's children. If we don't find any cloned snapshots, then we
+ * simply return that. Otherwise, we have to start at the pool root and iterate
+ * over all datasets.
+ */
+static zfs_graph_t *
+construct_graph(libzfs_handle_t *hdl, const char *dataset)
+{
+ zfs_graph_t *zgp = zfs_graph_create(hdl, ZFS_GRAPH_SIZE);
+ zfs_cmd_t zc = { 0 };
+ int ret = 0;
+
+ if (zgp == NULL)
+ return (zgp);
+
+ /*
+ * We need to explicitly check whether this dataset has clones or not,
+ * since iterate_children() only checks the children.
+ */
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+ (void) ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc);
+
+ if (zc.zc_objset_stats.dds_num_clones != 0 ||
+ (ret = iterate_children(hdl, zgp, dataset)) != 0) {
+ /*
+ * Determine pool name and try again.
+ */
+ char *pool, *slash;
+
+ if ((slash = strchr(dataset, '/')) != NULL ||
+ (slash = strchr(dataset, '@')) != NULL) {
+ pool = zfs_alloc(hdl, slash - dataset + 1);
+ if (pool == NULL) {
+ zfs_graph_destroy(zgp);
+ return (NULL);
+ }
+ (void) strncpy(pool, dataset, slash - dataset);
+ pool[slash - dataset] = '\0';
+
+ if (iterate_children(hdl, zgp, pool) == -1 ||
+ zfs_graph_add(hdl, zgp, pool, NULL, 0) != 0) {
+ free(pool);
+ zfs_graph_destroy(zgp);
+ return (NULL);
+ }
+
+ free(pool);
+ }
+ }
+
+ if (ret == -1 || zfs_graph_add(hdl, zgp, dataset, NULL, 0) != 0) {
+ zfs_graph_destroy(zgp);
+ return (NULL);
+ }
+
+ return (zgp);
+}
+
+/*
+ * Given a graph, do a recursive topological sort into the given array. This is
+ * really just a depth-first search, so that the deepest nodes appear first.
+ * We hijack the 'zv_visited' marker to avoid visiting the same vertex twice.
+ */
+static int
+topo_sort(libzfs_handle_t *hdl, boolean_t allowrecursion, char **result,
+ size_t *idx, zfs_vertex_t *zgv)
+{
+ int i;
+
+ if (zgv->zv_visited == VISIT_SORT_PRE && !allowrecursion) {
+ /*
+ * If we've already seen this vertex as part of our depth-first
+ * search, then we have a cyclic dependency, and we must return
+ * an error.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "recursive dependency at '%s'"),
+ zgv->zv_dataset);
+ return (zfs_error(hdl, EZFS_RECURSIVE,
+ dgettext(TEXT_DOMAIN,
+ "cannot determine dependent datasets")));
+ } else if (zgv->zv_visited >= VISIT_SORT_PRE) {
+ /*
+ * If we've already processed this as part of the topological
+ * sort, then don't bother doing so again.
+ */
+ return (0);
+ }
+
+ zgv->zv_visited = VISIT_SORT_PRE;
+
+ /* avoid doing a search if we don't have to */
+ zfs_vertex_sort_edges(zgv);
+ for (i = 0; i < zgv->zv_edgecount; i++) {
+ if (topo_sort(hdl, allowrecursion, result, idx,
+ zgv->zv_edges[i]->ze_dest) != 0)
+ return (-1);
+ }
+
+ /* we may have visited this in the course of the above */
+ if (zgv->zv_visited == VISIT_SORT_POST)
+ return (0);
+
+ if ((result[*idx] = zfs_alloc(hdl,
+ strlen(zgv->zv_dataset) + 1)) == NULL)
+ return (-1);
+
+ (void) strcpy(result[*idx], zgv->zv_dataset);
+ *idx += 1;
+ zgv->zv_visited = VISIT_SORT_POST;
+ return (0);
+}
+
+/*
+ * The only public interface for this file. Do the dirty work of constructing a
+ * child list for the given object. Construct the graph, do the topological
+ * sort, and then return the array of strings to the caller.
+ *
+ * The 'allowrecursion' parameter controls behavior when cycles are found. If
+ * it is set, the cycle is ignored and the results returned as if the cycle
+ * did not exist. If it is not set, then the routine will generate an error if
+ * a cycle is found.
+ */
+int
+get_dependents(libzfs_handle_t *hdl, boolean_t allowrecursion,
+ const char *dataset, char ***result, size_t *count)
+{
+ zfs_graph_t *zgp;
+ zfs_vertex_t *zvp;
+
+ if ((zgp = construct_graph(hdl, dataset)) == NULL)
+ return (-1);
+
+ if ((*result = zfs_alloc(hdl,
+ zgp->zg_nvertex * sizeof (char *))) == NULL) {
+ zfs_graph_destroy(zgp);
+ return (-1);
+ }
+
+ if ((zvp = zfs_graph_lookup(hdl, zgp, dataset, 0)) == NULL) {
+ free(*result);
+ zfs_graph_destroy(zgp);
+ return (-1);
+ }
+
+ *count = 0;
+ if (topo_sort(hdl, allowrecursion, *result, count, zvp) != 0) {
+ free(*result);
+ zfs_graph_destroy(zgp);
+ return (-1);
+ }
+
+ /*
+ * Get rid of the last entry, which is our starting vertex and not
+ * strictly a dependent.
+ */
+ assert(*count > 0);
+ free((*result)[*count - 1]);
+ (*count)--;
+
+ zfs_graph_destroy(zgp);
+
+ return (0);
+}
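+
+/*
+ * Editorial sketch (hypothetical dataset name): consuming get_dependents()
+ * above. The caller owns both the array and each string in it, as in
+ * zfs_iter_dependents(). Fenced with #if 0.
+ */
+#if 0
+static void
+example_dependents(libzfs_handle_t *hdl)
+{
+	char **deps;
+	size_t count, i;
+
+	if (get_dependents(hdl, B_FALSE, "tank/fs", &deps, &count) != 0)
+		return;
+	for (i = 0; i < count; i++) {
+		(void) printf("%s\n", deps[i]);
+		free(deps[i]);
+	}
+	free(deps);
+}
+#endif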
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h b/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h
new file mode 100644
index 0000000..9581331
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h
@@ -0,0 +1,171 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _LIBFS_IMPL_H
+#define _LIBFS_IMPL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/dmu.h>
+#include <sys/fs/zfs.h>
+#include <sys/zfs_ioctl.h>
+#include <sys/zfs_acl.h>
+#include <sys/nvpair.h>
+
+#include <libuutil.h>
+#include <libzfs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct libzfs_handle {
+ int libzfs_error;
+ int libzfs_fd;
+ FILE *libzfs_mnttab;
+ FILE *libzfs_sharetab;
+ uu_avl_pool_t *libzfs_ns_avlpool;
+ uu_avl_t *libzfs_ns_avl;
+ uint64_t libzfs_ns_gen;
+ int libzfs_desc_active;
+ char libzfs_action[1024];
+ char libzfs_desc[1024];
+ int libzfs_printerr;
+};
+
+struct zfs_handle {
+ libzfs_handle_t *zfs_hdl;
+ char zfs_name[ZFS_MAXNAMELEN];
+ zfs_type_t zfs_type; /* type including snapshot */
+ zfs_type_t zfs_head_type; /* type excluding snapshot */
+ dmu_objset_stats_t zfs_dmustats;
+ nvlist_t *zfs_props;
+ nvlist_t *zfs_user_props;
+ boolean_t zfs_mntcheck;
+ char *zfs_mntopts;
+ char zfs_root[MAXPATHLEN];
+};
+
+/*
+ * This is different from checking zfs_type, because it will also catch
+ * snapshots of volumes.
+ */
+#define ZFS_IS_VOLUME(zhp) ((zhp)->zfs_head_type == ZFS_TYPE_VOLUME)
+
+struct zpool_handle {
+ libzfs_handle_t *zpool_hdl;
+ char zpool_name[ZPOOL_MAXNAMELEN];
+ int zpool_state;
+ size_t zpool_config_size;
+ nvlist_t *zpool_config;
+ nvlist_t *zpool_old_config;
+ nvlist_t *zpool_props;
+};
+
+int zfs_error(libzfs_handle_t *, int, const char *);
+int zfs_error_fmt(libzfs_handle_t *, int, const char *, ...);
+void zfs_error_aux(libzfs_handle_t *, const char *, ...);
+void *zfs_alloc(libzfs_handle_t *, size_t);
+void *zfs_realloc(libzfs_handle_t *, void *, size_t, size_t);
+char *zfs_strdup(libzfs_handle_t *, const char *);
+int no_memory(libzfs_handle_t *);
+
+int zfs_standard_error(libzfs_handle_t *, int, const char *);
+int zfs_standard_error_fmt(libzfs_handle_t *, int, const char *, ...);
+int zpool_standard_error(libzfs_handle_t *, int, const char *);
+int zpool_standard_error_fmt(libzfs_handle_t *, int, const char *, ...);
+
+int get_dependents(libzfs_handle_t *, boolean_t, const char *, char ***,
+ size_t *);
+
+int zfs_expand_proplist_common(libzfs_handle_t *, zfs_proplist_t **,
+ zfs_type_t);
+int zfs_get_proplist_common(libzfs_handle_t *, char *, zfs_proplist_t **,
+ zfs_type_t);
+zfs_prop_t zfs_prop_iter_common(zfs_prop_f, void *, zfs_type_t, boolean_t);
+zfs_prop_t zfs_name_to_prop_common(const char *, zfs_type_t);
+
+nvlist_t *zfs_validate_properties(libzfs_handle_t *, zfs_type_t, char *,
+ nvlist_t *, uint64_t, zfs_handle_t *zhp, const char *errbuf);
+
+typedef struct prop_changelist prop_changelist_t;
+
+int zcmd_alloc_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, size_t);
+int zcmd_write_src_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *, size_t *);
+int zcmd_expand_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *);
+int zcmd_read_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t **);
+void zcmd_free_nvlists(zfs_cmd_t *);
+
+int changelist_prefix(prop_changelist_t *);
+int changelist_postfix(prop_changelist_t *);
+void changelist_rename(prop_changelist_t *, const char *, const char *);
+void changelist_remove(zfs_handle_t *, prop_changelist_t *);
+void changelist_free(prop_changelist_t *);
+prop_changelist_t *changelist_gather(zfs_handle_t *, zfs_prop_t, int);
+int changelist_unshare(prop_changelist_t *);
+int changelist_haszonedchild(prop_changelist_t *);
+
+void remove_mountpoint(zfs_handle_t *);
+
+zfs_handle_t *make_dataset_handle(libzfs_handle_t *, const char *);
+
+int zpool_open_silent(libzfs_handle_t *, const char *, zpool_handle_t **);
+
+int zvol_create_link(libzfs_handle_t *, const char *);
+int zvol_remove_link(libzfs_handle_t *, const char *);
+int zpool_iter_zvol(zpool_handle_t *, int (*)(const char *, void *), void *);
+
+void namespace_clear(libzfs_handle_t *);
+
+#ifdef __FreeBSD__
+/*
+ * This is the FreeBSD version of ioctl(). Solaris' ioctl() updates
+ * zc_nvlist_dst_size even if an error is returned; on FreeBSD it is not
+ * updated when an error is returned. We therefore turn a successful return
+ * whose zc_nvlist_dst_size has outgrown the supplied buffer into ENOMEM,
+ * matching the Solaris behavior the common code expects.
+ */
+static __inline int
+zcmd_ioctl(int fd, unsigned long cmd, zfs_cmd_t *zc)
+{
+ size_t oldsize;
+ int ret;
+
+ oldsize = zc->zc_nvlist_dst_size;
+ ret = ioctl(fd, cmd, zc);
+ if (ret == 0 && oldsize < zc->zc_nvlist_dst_size) {
+ ret = -1;
+ errno = ENOMEM;
+ }
+
+ return (ret);
+}
+#define ioctl(fd, cmd, zc) zcmd_ioctl((fd), (cmd), (zc))
+#endif
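+
+/*
+ * Editorial sketch of the retry pattern this wrapper supports (assumed
+ * caller, not part of the original header): on ENOMEM the destination
+ * nvlist buffer is grown with zcmd_expand_dst_nvlist() and the ioctl is
+ * reissued. Fenced with #if 0 so it never builds.
+ */
+#if 0
+static int
+example_retry(libzfs_handle_t *hdl, zfs_cmd_t *zc)
+{
+	while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, zc) != 0) {
+		if (errno != ENOMEM || zcmd_expand_dst_nvlist(hdl, zc) != 0)
+			return (-1);
+	}
+	return (0);
+}
+#endif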
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LIBFS_IMPL_H */
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_import.c b/contrib/opensolaris/lib/libzfs/common/libzfs_import.c
new file mode 100644
index 0000000..cab969b
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_import.c
@@ -0,0 +1,994 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Pool import support functions.
+ *
+ * To import a pool, we rely on reading the configuration information from the
+ * ZFS label of each device. If we successfully read the label, then we
+ * organize the configuration information in the following hierarchy:
+ *
+ * pool guid -> toplevel vdev guid -> label txg
+ *
+ * Duplicate entries matching this same tuple will be discarded. Once we have
+ * examined every device, we pick the best label txg config for each toplevel
+ * vdev. We then arrange these toplevel vdevs into a complete pool config, and
+ * update any paths that have changed. Finally, we attempt to import the pool
+ * using our derived config, and record the results.
+ */
+
+#include <devid.h>
+#include <dirent.h>
+#include <errno.h>
+#include <libintl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <libgeom.h>
+
+#include <sys/vdev_impl.h>
+
+#include "libzfs.h"
+#include "libzfs_impl.h"
+
+/*
+ * Intermediate structures used to gather configuration information.
+ */
+typedef struct config_entry {
+ uint64_t ce_txg;
+ nvlist_t *ce_config;
+ struct config_entry *ce_next;
+} config_entry_t;
+
+typedef struct vdev_entry {
+ uint64_t ve_guid;
+ config_entry_t *ve_configs;
+ struct vdev_entry *ve_next;
+} vdev_entry_t;
+
+typedef struct pool_entry {
+ uint64_t pe_guid;
+ vdev_entry_t *pe_vdevs;
+ struct pool_entry *pe_next;
+} pool_entry_t;
+
+typedef struct name_entry {
+ char *ne_name;
+ uint64_t ne_guid;
+ struct name_entry *ne_next;
+} name_entry_t;
+
+typedef struct pool_list {
+ pool_entry_t *pools;
+ name_entry_t *names;
+} pool_list_t;
+
+static char *
+get_devid(const char *path)
+{
+ int fd;
+ ddi_devid_t devid;
+ char *minor, *ret;
+
+ if ((fd = open(path, O_RDONLY)) < 0)
+ return (NULL);
+
+ minor = NULL;
+ ret = NULL;
+ if (devid_get(fd, &devid) == 0) {
+ if (devid_get_minor_name(fd, &minor) == 0)
+ ret = devid_str_encode(devid, minor);
+ if (minor != NULL)
+ devid_str_free(minor);
+ devid_free(devid);
+ }
+ (void) close(fd);
+
+ return (ret);
+}
+
+/*
+ * Go through and fix up any path and/or devid information for the given vdev
+ * configuration.
+ */
+static int
+fix_paths(nvlist_t *nv, name_entry_t *names)
+{
+ nvlist_t **child;
+ uint_t c, children;
+ uint64_t guid;
+ name_entry_t *ne, *best;
+ char *path, *devid;
+ int matched;
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0) {
+ for (c = 0; c < children; c++)
+ if (fix_paths(child[c], names) != 0)
+ return (-1);
+ return (0);
+ }
+
+ /*
+ * This is a leaf (file or disk) vdev. In either case, go through
+ * the name list and see if we find a matching guid. If so, replace
+ * the path and see if we can calculate a new devid.
+ *
+ * There may be multiple names associated with a particular guid, in
+ * which case we have overlapping slices or multiple paths to the same
+ * disk. If this is the case, then we want to pick the path that is
+ * the most similar to the original, where "most similar" is the number
+ * of matching characters starting from the end of the path. This will
+ * preserve slice numbers even if the disks have been reorganized, and
+ * will also catch preferred disk names if multiple paths exist.
+ */
+ verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
+ if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
+ path = NULL;
+
+ matched = 0;
+ best = NULL;
+ for (ne = names; ne != NULL; ne = ne->ne_next) {
+ if (ne->ne_guid == guid) {
+ const char *src, *dst;
+ int count;
+
+ if (path == NULL) {
+ best = ne;
+ break;
+ }
+
+ src = ne->ne_name + strlen(ne->ne_name) - 1;
+ dst = path + strlen(path) - 1;
+ for (count = 0; src >= ne->ne_name && dst >= path;
+ src--, dst--, count++)
+ if (*src != *dst)
+ break;
+
+ /*
+ * At this point, 'count' is the number of characters
+ * matched from the end.
+ */
+ if (count > matched || best == NULL) {
+ best = ne;
+ matched = count;
+ }
+ }
+ }
+
+ if (best == NULL)
+ return (0);
+
+ if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
+ return (-1);
+
+ if ((devid = get_devid(best->ne_name)) == NULL) {
+ (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
+ } else {
+ if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
+ return (-1);
+ devid_str_free(devid);
+ }
+
+ return (0);
+}
+
+/*
+ * Add the given configuration to the list of known devices.
+ */
+static int
+add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
+ nvlist_t *config)
+{
+ uint64_t pool_guid, vdev_guid, top_guid, txg, state;
+ pool_entry_t *pe;
+ vdev_entry_t *ve;
+ config_entry_t *ce;
+ name_entry_t *ne;
+
+ /*
+ * If this is a hot spare not currently in use, add it to the list of
+ * names to translate, but don't do anything else.
+ */
+ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+ &state) == 0 && state == POOL_STATE_SPARE &&
+ nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
+ if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
+ return (-1);
+
+ if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
+ free(ne);
+ return (-1);
+ }
+ ne->ne_guid = vdev_guid;
+ ne->ne_next = pl->names;
+ pl->names = ne;
+ return (0);
+ }
+
+ /*
+ * If we have a valid config but cannot read any of these fields, then
+ * it means we have a half-initialized label. In vdev_label_init()
+ * we write a label with txg == 0 so that we can identify the device
+ * in case the user refers to the same disk later on. If we fail to
+ * create the pool, we'll be left with a label in this state
+ * which should not be considered part of a valid pool.
+ */
+ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+ &pool_guid) != 0 ||
+ nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
+ &vdev_guid) != 0 ||
+ nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
+ &top_guid) != 0 ||
+ nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
+ &txg) != 0 || txg == 0) {
+ nvlist_free(config);
+ return (0);
+ }
+
+ /*
+ * First, see if we know about this pool. If not, then add it to the
+ * list of known pools.
+ */
+ for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
+ if (pe->pe_guid == pool_guid)
+ break;
+ }
+
+ if (pe == NULL) {
+ if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
+ nvlist_free(config);
+ return (-1);
+ }
+ pe->pe_guid = pool_guid;
+ pe->pe_next = pl->pools;
+ pl->pools = pe;
+ }
+
+ /*
+	 * Second, see if we know about this toplevel vdev. Add it if it's
+	 * missing.
+ */
+ for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
+ if (ve->ve_guid == top_guid)
+ break;
+ }
+
+ if (ve == NULL) {
+ if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
+ nvlist_free(config);
+ return (-1);
+ }
+ ve->ve_guid = top_guid;
+ ve->ve_next = pe->pe_vdevs;
+ pe->pe_vdevs = ve;
+ }
+
+ /*
+ * Third, see if we have a config with a matching transaction group. If
+ * so, then we do nothing. Otherwise, add it to the list of known
+ * configs.
+ */
+ for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
+ if (ce->ce_txg == txg)
+ break;
+ }
+
+ if (ce == NULL) {
+ if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
+ nvlist_free(config);
+ return (-1);
+ }
+ ce->ce_txg = txg;
+ ce->ce_config = config;
+ ce->ce_next = ve->ve_configs;
+ ve->ve_configs = ce;
+ } else {
+ nvlist_free(config);
+ }
+
+ /*
+ * At this point we've successfully added our config to the list of
+ * known configs. The last thing to do is add the vdev guid -> path
+ * mappings so that we can fix up the configuration as necessary before
+ * doing the import.
+ */
+ if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
+ return (-1);
+
+ if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
+ free(ne);
+ return (-1);
+ }
+
+ ne->ne_guid = vdev_guid;
+ ne->ne_next = pl->names;
+ pl->names = ne;
+
+ return (0);
+}
+
+/*
+ * Determines whether the named pool is active and matches the given GUID;
+ * the answer is returned through 'isactive'.
+ */
+static int
+pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
+ boolean_t *isactive)
+{
+ zpool_handle_t *zhp;
+ uint64_t theguid;
+
+ if (zpool_open_silent(hdl, name, &zhp) != 0)
+ return (-1);
+
+ if (zhp == NULL) {
+ *isactive = B_FALSE;
+ return (0);
+ }
+
+ verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
+ &theguid) == 0);
+
+ zpool_close(zhp);
+
+ *isactive = (theguid == guid);
+ return (0);
+}
+
+/*
+ * Convert our list of pools into the definitive set of configurations. We
+ * start by picking the best config for each toplevel vdev. Once that's done,
+ * we assemble the toplevel vdevs into a full config for the pool. We make a
+ * pass to fix up any incorrect paths, and then add it to the main list to
+ * return to the user.
+ */
+static nvlist_t *
+get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
+{
+ pool_entry_t *pe;
+ vdev_entry_t *ve;
+ config_entry_t *ce;
+ nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
+ nvlist_t **spares;
+ uint_t i, nspares;
+ boolean_t config_seen;
+ uint64_t best_txg;
+ char *name;
+ zfs_cmd_t zc = { 0 };
+ uint64_t version, guid;
+ size_t len;
+ int err;
+ uint_t children = 0;
+ nvlist_t **child = NULL;
+ uint_t c;
+ boolean_t isactive;
+
+ if (nvlist_alloc(&ret, 0, 0) != 0)
+ goto nomem;
+
+ for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
+ uint64_t id;
+
+ if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
+ goto nomem;
+ config_seen = B_FALSE;
+
+ /*
+ * Iterate over all toplevel vdevs. Grab the pool configuration
+ * from the first one we find, and then go through the rest and
+ * add them as necessary to the 'vdevs' member of the config.
+ */
+ for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
+
+ /*
+ * Determine the best configuration for this vdev by
+ * selecting the config with the latest transaction
+ * group.
+ */
+ best_txg = 0;
+ for (ce = ve->ve_configs; ce != NULL;
+ ce = ce->ce_next) {
+
+ if (ce->ce_txg > best_txg) {
+ tmp = ce->ce_config;
+ best_txg = ce->ce_txg;
+ }
+ }
+
+ if (!config_seen) {
+ /*
+ * Copy the relevant pieces of data to the pool
+ * configuration:
+ *
+ * version
+ * pool guid
+ * name
+ * pool state
+ */
+ uint64_t state;
+
+ verify(nvlist_lookup_uint64(tmp,
+ ZPOOL_CONFIG_VERSION, &version) == 0);
+ if (nvlist_add_uint64(config,
+ ZPOOL_CONFIG_VERSION, version) != 0)
+ goto nomem;
+ verify(nvlist_lookup_uint64(tmp,
+ ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
+ if (nvlist_add_uint64(config,
+ ZPOOL_CONFIG_POOL_GUID, guid) != 0)
+ goto nomem;
+ verify(nvlist_lookup_string(tmp,
+ ZPOOL_CONFIG_POOL_NAME, &name) == 0);
+ if (nvlist_add_string(config,
+ ZPOOL_CONFIG_POOL_NAME, name) != 0)
+ goto nomem;
+ verify(nvlist_lookup_uint64(tmp,
+ ZPOOL_CONFIG_POOL_STATE, &state) == 0);
+ if (nvlist_add_uint64(config,
+ ZPOOL_CONFIG_POOL_STATE, state) != 0)
+ goto nomem;
+
+ config_seen = B_TRUE;
+ }
+
+ /*
+ * Add this top-level vdev to the child array.
+ */
+ verify(nvlist_lookup_nvlist(tmp,
+ ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
+ verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
+ &id) == 0);
+ if (id >= children) {
+ nvlist_t **newchild;
+
+ newchild = zfs_alloc(hdl, (id + 1) *
+ sizeof (nvlist_t *));
+ if (newchild == NULL)
+ goto nomem;
+
+ for (c = 0; c < children; c++)
+ newchild[c] = child[c];
+
+ free(child);
+ child = newchild;
+ children = id + 1;
+ }
+ if (nvlist_dup(nvtop, &child[id], 0) != 0)
+ goto nomem;
+
+ }
+
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+ &guid) == 0);
+
+ /*
+		 * Look for any missing top-level vdevs. If any are found,
+		 * create a faked-up 'missing' vdev as a placeholder. We cannot
+ * simply compress the child array, because the kernel performs
+ * certain checks to make sure the vdev IDs match their location
+ * in the configuration.
+ */
+ for (c = 0; c < children; c++)
+ if (child[c] == NULL) {
+ nvlist_t *missing;
+ if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
+ 0) != 0)
+ goto nomem;
+ if (nvlist_add_string(missing,
+ ZPOOL_CONFIG_TYPE,
+ VDEV_TYPE_MISSING) != 0 ||
+ nvlist_add_uint64(missing,
+ ZPOOL_CONFIG_ID, c) != 0 ||
+ nvlist_add_uint64(missing,
+ ZPOOL_CONFIG_GUID, 0ULL) != 0) {
+ nvlist_free(missing);
+ goto nomem;
+ }
+ child[c] = missing;
+ }
+
+ /*
+ * Put all of this pool's top-level vdevs into a root vdev.
+ */
+ if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
+ goto nomem;
+ if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
+ VDEV_TYPE_ROOT) != 0 ||
+ nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
+ nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
+ nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ child, children) != 0) {
+ nvlist_free(nvroot);
+ goto nomem;
+ }
+
+ for (c = 0; c < children; c++)
+ nvlist_free(child[c]);
+ free(child);
+ children = 0;
+ child = NULL;
+
+ /*
+ * Go through and fix up any paths and/or devids based on our
+ * known list of vdev GUID -> path mappings.
+ */
+ if (fix_paths(nvroot, pl->names) != 0) {
+ nvlist_free(nvroot);
+ goto nomem;
+ }
+
+ /*
+ * Add the root vdev to this pool's configuration.
+ */
+ if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+ nvroot) != 0) {
+ nvlist_free(nvroot);
+ goto nomem;
+ }
+ nvlist_free(nvroot);
+
+ /*
+ * Determine if this pool is currently active, in which case we
+ * can't actually import it.
+ */
+ verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+ &name) == 0);
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+ &guid) == 0);
+
+ if (pool_active(hdl, name, guid, &isactive) != 0)
+ goto error;
+
+ if (isactive) {
+ nvlist_free(config);
+ config = NULL;
+ continue;
+ }
+
+ /*
+		 * Do a trial import (which does not actually import the
+		 * pool) in order to pick up the current vdev state.
+ */
+ if (zcmd_write_src_nvlist(hdl, &zc, config, &len) != 0)
+ goto error;
+
+ nvlist_free(config);
+ config = NULL;
+
+ if (zcmd_alloc_dst_nvlist(hdl, &zc, len * 2) != 0) {
+ zcmd_free_nvlists(&zc);
+ goto error;
+ }
+
+ while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
+ &zc)) != 0 && errno == ENOMEM) {
+ if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ goto error;
+ }
+ }
+
+ if (err) {
+ (void) zpool_standard_error(hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot discover pools"));
+ zcmd_free_nvlists(&zc);
+ goto error;
+ }
+
+ if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
+ zcmd_free_nvlists(&zc);
+ goto error;
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ /*
+ * Go through and update the paths for spares, now that we have
+ * them.
+ */
+ verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+ &spares, &nspares) == 0) {
+ for (i = 0; i < nspares; i++) {
+ if (fix_paths(spares[i], pl->names) != 0)
+ goto nomem;
+ }
+ }
+
+ /*
+ * Add this pool to the list of configs.
+ */
+ verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+ &name) == 0);
+ if (nvlist_add_nvlist(ret, name, config) != 0)
+ goto nomem;
+
+ nvlist_free(config);
+ config = NULL;
+ }
+
+ return (ret);
+
+nomem:
+ (void) no_memory(hdl);
+error:
+ nvlist_free(config);
+ nvlist_free(ret);
+ for (c = 0; c < children; c++)
+ nvlist_free(child[c]);
+ free(child);
+
+ return (NULL);
+}
+
+/*
+ * Return the offset of the given label.
+ */
+static uint64_t
+label_offset(size_t size, int l)
+{
+ return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
+ 0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
+}
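+
+/*
+ * A worked example of the layout computed above (a sketch; it assumes the
+ * usual on-disk constants, VDEV_LABELS == 4 and a 256KB vdev_label_t):
+ *
+ *	label 0: offset 0
+ *	label 1: offset 256KB
+ *	label 2: offset size - 512KB
+ *	label 3: offset size - 256KB
+ *
+ * Keeping two labels at the front of the device and two at the back leaves
+ * the configuration recoverable even if either end of the device is damaged
+ * or overwritten.
+ */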
+
+/*
+ * Given a file descriptor, read the label information and return an nvlist
+ * describing the configuration, if there is one.
+ */
+int
+zpool_read_label(int fd, nvlist_t **config)
+{
+ struct stat64 statbuf;
+ int l;
+ vdev_label_t *label;
+ uint64_t state, txg;
+
+ *config = NULL;
+
+ if (fstat64(fd, &statbuf) == -1)
+ return (0);
+
+ if ((label = malloc(sizeof (vdev_label_t))) == NULL)
+ return (-1);
+
+ for (l = 0; l < VDEV_LABELS; l++) {
+ if (pread(fd, label, sizeof (vdev_label_t),
+ label_offset(statbuf.st_size, l)) != sizeof (vdev_label_t))
+ continue;
+
+ if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
+ sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
+ continue;
+
+ if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
+ &state) != 0 || state > POOL_STATE_SPARE) {
+ nvlist_free(*config);
+ continue;
+ }
+
+ if (state != POOL_STATE_SPARE &&
+ (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
+ &txg) != 0 || txg == 0)) {
+ nvlist_free(*config);
+ continue;
+ }
+
+ free(label);
+ return (0);
+ }
+
+ free(label);
+ *config = NULL;
+ return (0);
+}
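+
+/*
+ * Example use of zpool_read_label() (a minimal sketch, not part of the
+ * library; the device path is hypothetical and error handling is elided):
+ *
+ *	nvlist_t *config;
+ *	int fd;
+ *
+ *	if ((fd = open64("/dev/da0", O_RDONLY)) >= 0) {
+ *		if (zpool_read_label(fd, &config) == 0 && config != NULL) {
+ *			... inspect ZPOOL_CONFIG_POOL_NAME, etc ...
+ *			nvlist_free(config);
+ *		}
+ *		(void) close(fd);
+ *	}
+ *
+ * A return value of 0 with '*config' set to NULL simply means no valid
+ * label was found; -1 is returned only on allocation failure.
+ */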
+
+/*
+ * Find all pools stored on disk, including partial pools which are not
+ * available to import. On FreeBSD the directory arguments are ignored;
+ * instead, every GEOM provider in the system is probed for a label.
+ */
+nvlist_t *
+zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
+{
+ int i;
+ DIR *dirp;
+ char path[MAXPATHLEN];
+ nvlist_t *ret = NULL, *config;
+ int fd;
+ pool_list_t pools = { 0 };
+ pool_entry_t *pe, *penext;
+ vdev_entry_t *ve, *venext;
+ config_entry_t *ce, *cenext;
+ name_entry_t *ne, *nenext;
+ struct gmesh mesh;
+ struct gclass *mp;
+ struct ggeom *gp;
+ struct gprovider *pp;
+
+ /*
+ * Go through and read the label configuration information from every
+ * possible device, organizing the information according to pool GUID
+ * and toplevel GUID.
+ */
+
+ fd = geom_gettree(&mesh);
+ assert(fd == 0);
+
+ LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
+ LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
+ LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
+
+ (void) snprintf(path, sizeof (path), "%s%s",
+ _PATH_DEV, pp->lg_name);
+
+ if ((fd = open64(path, O_RDONLY)) < 0)
+ continue;
+
+				if (zpool_read_label(fd, &config) != 0) {
+					(void) close(fd);
+					(void) no_memory(hdl);
+					goto error;
+				}
+
+ (void) close(fd);
+
+ if (config == NULL)
+ continue;
+
+ if (add_config(hdl, &pools, path, config) != 0)
+ goto error;
+ }
+ }
+ }
+
+ geom_deletetree(&mesh);
+
+ ret = get_configs(hdl, &pools);
+
+error:
+ for (pe = pools.pools; pe != NULL; pe = penext) {
+ penext = pe->pe_next;
+ for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
+ venext = ve->ve_next;
+ for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
+ cenext = ce->ce_next;
+ if (ce->ce_config)
+ nvlist_free(ce->ce_config);
+ free(ce);
+ }
+ free(ve);
+ }
+ free(pe);
+ }
+
+ for (ne = pools.names; ne != NULL; ne = nenext) {
+ nenext = ne->ne_next;
+ if (ne->ne_name)
+ free(ne->ne_name);
+ free(ne);
+ }
+
+ return (ret);
+}
+
+boolean_t
+find_guid(nvlist_t *nv, uint64_t guid)
+{
+ uint64_t tmp;
+ nvlist_t **child;
+ uint_t c, children;
+
+ verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
+ if (tmp == guid)
+ return (B_TRUE);
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0) {
+ for (c = 0; c < children; c++)
+ if (find_guid(child[c], guid))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+typedef struct spare_cbdata {
+ uint64_t cb_guid;
+ zpool_handle_t *cb_zhp;
+} spare_cbdata_t;
+
+static int
+find_spare(zpool_handle_t *zhp, void *data)
+{
+ spare_cbdata_t *cbp = data;
+ nvlist_t **spares;
+ uint_t i, nspares;
+ uint64_t guid;
+ nvlist_t *nvroot;
+
+ verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+ &spares, &nspares) == 0) {
+ for (i = 0; i < nspares; i++) {
+ verify(nvlist_lookup_uint64(spares[i],
+ ZPOOL_CONFIG_GUID, &guid) == 0);
+ if (guid == cbp->cb_guid) {
+ cbp->cb_zhp = zhp;
+ return (1);
+ }
+ }
+ }
+
+ zpool_close(zhp);
+ return (0);
+}
+
+/*
+ * Determines if the pool is in use. If so, it returns true and the state of
+ * the pool as well as the name of the pool. Both strings are allocated and
+ * must be freed by the caller.
+ */
+int
+zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
+ boolean_t *inuse)
+{
+ nvlist_t *config;
+ char *name;
+ boolean_t ret;
+ uint64_t guid, vdev_guid;
+ zpool_handle_t *zhp;
+ nvlist_t *pool_config;
+ uint64_t stateval, isspare;
+ spare_cbdata_t cb = { 0 };
+ boolean_t isactive;
+
+ *inuse = B_FALSE;
+
+ if (zpool_read_label(fd, &config) != 0) {
+ (void) no_memory(hdl);
+ return (-1);
+ }
+
+ if (config == NULL)
+ return (0);
+
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+ &stateval) == 0);
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
+ &vdev_guid) == 0);
+
+ if (stateval != POOL_STATE_SPARE) {
+ verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+ &name) == 0);
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+ &guid) == 0);
+ }
+
+ switch (stateval) {
+ case POOL_STATE_EXPORTED:
+ ret = B_TRUE;
+ break;
+
+ case POOL_STATE_ACTIVE:
+ /*
+ * For an active pool, we have to determine if it's really part
+ * of a currently active pool (in which case the pool will exist
+ * and the guid will be the same), or whether it's part of an
+ * active pool that was disconnected without being explicitly
+ * exported.
+ */
+ if (pool_active(hdl, name, guid, &isactive) != 0) {
+ nvlist_free(config);
+ return (-1);
+ }
+
+ if (isactive) {
+ /*
+ * Because the device may have been removed while
+ * offlined, we only report it as active if the vdev is
+ * still present in the config. Otherwise, pretend like
+ * it's not in use.
+ */
+ if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
+ (pool_config = zpool_get_config(zhp, NULL))
+ != NULL) {
+ nvlist_t *nvroot;
+
+ verify(nvlist_lookup_nvlist(pool_config,
+ ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+ ret = find_guid(nvroot, vdev_guid);
+ } else {
+ ret = B_FALSE;
+ }
+
+ /*
+ * If this is an active spare within another pool, we
+			 * If this is an active spare within another pool, we
+			 * treat it like an unused hot spare. This allows the
+			 * user to create a pool with a hot spare that is
+			 * currently in use within another pool. Since we
+			 * return B_TRUE, libdiskmgt will continue to prevent
+			 * generic consumers from using the device.
+ if (ret && nvlist_lookup_uint64(config,
+ ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
+ stateval = POOL_STATE_SPARE;
+
+ if (zhp != NULL)
+ zpool_close(zhp);
+ } else {
+ stateval = POOL_STATE_POTENTIALLY_ACTIVE;
+ ret = B_TRUE;
+ }
+ break;
+
+ case POOL_STATE_SPARE:
+ /*
+ * For a hot spare, it can be either definitively in use, or
+ * potentially active. To determine if it's in use, we iterate
+ * over all pools in the system and search for one with a spare
+ * with a matching guid.
+ *
+ * Due to the shared nature of spares, we don't actually report
+ * the potentially active case as in use. This means the user
+ * can freely create pools on the hot spares of exported pools,
+ * but to do otherwise makes the resulting code complicated, and
+ * we end up having to deal with this case anyway.
+ */
+ cb.cb_zhp = NULL;
+ cb.cb_guid = vdev_guid;
+ if (zpool_iter(hdl, find_spare, &cb) == 1) {
+ name = (char *)zpool_get_name(cb.cb_zhp);
+			ret = B_TRUE;
+ } else {
+			ret = B_FALSE;
+ }
+ break;
+
+ default:
+ ret = B_FALSE;
+ }
+
+ if (ret) {
+ if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
+ nvlist_free(config);
+ return (-1);
+ }
+ *state = (pool_state_t)stateval;
+ }
+
+ if (cb.cb_zhp)
+ zpool_close(cb.cb_zhp);
+
+ nvlist_free(config);
+ *inuse = ret;
+ return (0);
+}
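+
+/*
+ * Typical use of zpool_in_use() (a sketch; obtaining 'hdl' and 'fd' is
+ * elided). The name string is returned only when the device is in use:
+ *
+ *	pool_state_t state;
+ *	char *name;
+ *	boolean_t inuse;
+ *
+ *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
+ *		... warn that the device is part of pool 'name' ...
+ *		free(name);
+ *	}
+ */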
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c b/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c
new file mode 100644
index 0000000..b4bc945
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c
@@ -0,0 +1,986 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Routines to manage ZFS mounts. We separate all the nasty routines that have
+ * to deal with the OS. The following functions are the main entry points --
+ * they are used by mount and unmount and when changing a filesystem's
+ * mountpoint.
+ *
+ * zfs_is_mounted()
+ * zfs_mount()
+ * zfs_unmount()
+ * zfs_unmountall()
+ *
+ * This file also contains the functions used to manage sharing filesystems via
+ * NFS and iSCSI:
+ *
+ * zfs_is_shared()
+ * zfs_share()
+ * zfs_unshare()
+ *
+ * zfs_is_shared_nfs()
+ * zfs_share_nfs()
+ * zfs_unshare_nfs()
+ * zfs_unshareall_nfs()
+ * zfs_is_shared_iscsi()
+ * zfs_share_iscsi()
+ * zfs_unshare_iscsi()
+ *
+ * The following functions are available for pool consumers, and will
+ * mount/unmount and share/unshare all datasets within the pool:
+ *
+ * zpool_enable_datasets()
+ * zpool_disable_datasets()
+ */
+
+#include <dirent.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <libgen.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <zone.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+static int (*iscsitgt_zfs_share)(const char *);
+static int (*iscsitgt_zfs_unshare)(const char *);
+static int (*iscsitgt_zfs_is_shared)(const char *);
+
+#pragma init(zfs_iscsi_init)
+static void
+zfs_iscsi_init(void)
+{
+ void *libiscsitgt;
+
+ if ((libiscsitgt = dlopen("/lib/libiscsitgt.so.1",
+ RTLD_LAZY | RTLD_GLOBAL)) == NULL ||
+ (iscsitgt_zfs_share = (int (*)(const char *))dlsym(libiscsitgt,
+ "iscsitgt_zfs_share")) == NULL ||
+ (iscsitgt_zfs_unshare = (int (*)(const char *))dlsym(libiscsitgt,
+ "iscsitgt_zfs_unshare")) == NULL ||
+ (iscsitgt_zfs_is_shared = (int (*)(const char *))dlsym(libiscsitgt,
+ "iscsitgt_zfs_is_shared")) == NULL) {
+ iscsitgt_zfs_share = NULL;
+ iscsitgt_zfs_unshare = NULL;
+ iscsitgt_zfs_is_shared = NULL;
+ }
+}
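+
+/*
+ * A note on the initializer above: the iSCSI target library is loaded with
+ * dlopen() rather than linked against directly, presumably so that libzfs
+ * carries no hard dependency on the target software. If the library or any
+ * of the three symbols is missing, all of the hooks are reset to NULL;
+ * zfs_is_shared_iscsi() then reports B_FALSE, and the share/unshare entry
+ * points fail gracefully (on FreeBSD they are compiled out entirely below).
+ */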
+
+/*
+ * Search the sharetab for the given mountpoint, returning true if it is found.
+ */
+static boolean_t
+is_shared(libzfs_handle_t *hdl, const char *mountpoint)
+{
+ char buf[MAXPATHLEN], *tab;
+
+ if (hdl->libzfs_sharetab == NULL)
+		return (B_FALSE);
+
+ (void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
+
+ while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
+
+ /* the mountpoint is the first entry on each line */
+ if ((tab = strchr(buf, '\t')) != NULL) {
+ *tab = '\0';
+ if (strcmp(buf, mountpoint) == 0)
+ return (B_TRUE);
+ }
+ }
+
+ return (B_FALSE);
+}
+
+#if 0
+/*
+ * Returns true if the specified directory is empty. If we can't open the
+ * directory at all, return true so that the mount can fail with a more
+ * informative error message.
+ */
+static boolean_t
+dir_is_empty(const char *dirname)
+{
+ DIR *dirp;
+ struct dirent64 *dp;
+
+ if ((dirp = opendir(dirname)) == NULL)
+ return (B_TRUE);
+
+ while ((dp = readdir64(dirp)) != NULL) {
+
+ if (strcmp(dp->d_name, ".") == 0 ||
+ strcmp(dp->d_name, "..") == 0)
+ continue;
+
+ (void) closedir(dirp);
+ return (B_FALSE);
+ }
+
+ (void) closedir(dirp);
+ return (B_TRUE);
+}
+#endif
+
+/*
+ * Checks to see if the mount is active. If the filesystem is mounted, we fill
+ * in 'where' with the current mountpoint, and return B_TRUE. Otherwise, we
+ * return B_FALSE.
+ */
+boolean_t
+is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
+{
+ struct mnttab search = { 0 }, entry;
+
+ /*
+ * Search for the entry in /etc/mnttab. We don't bother getting the
+ * mountpoint, as we can just search for the special device. This will
+ * also let us find mounts when the mountpoint is 'legacy'.
+ */
+ search.mnt_special = (char *)special;
+ search.mnt_fstype = MNTTYPE_ZFS;
+
+ rewind(zfs_hdl->libzfs_mnttab);
+ if (getmntany(zfs_hdl->libzfs_mnttab, &entry, &search) != 0)
+ return (B_FALSE);
+
+ if (where != NULL)
+ *where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
+
+ return (B_TRUE);
+}
+
+boolean_t
+zfs_is_mounted(zfs_handle_t *zhp, char **where)
+{
+ return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
+}
+
+/*
+ * Returns true if the given dataset is mountable, false otherwise. Returns the
+ * mountpoint in 'buf'.
+ */
+static boolean_t
+zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
+ zfs_source_t *source)
+{
+ char sourceloc[ZFS_MAXNAMELEN];
+ zfs_source_t sourcetype;
+
+ if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
+ return (B_FALSE);
+
+ verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
+ &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
+
+ if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
+ strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
+ return (B_FALSE);
+
+ if (!zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT))
+ return (B_FALSE);
+
+ if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
+ getzoneid() == GLOBAL_ZONEID)
+ return (B_FALSE);
+
+ if (source)
+ *source = sourcetype;
+
+ return (B_TRUE);
+}
+
+/*
+ * Mount the given filesystem.
+ */
+int
+zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
+{
+ struct stat buf;
+ char mountpoint[ZFS_MAXPROPLEN];
+ char mntopts[MNT_LINE_MAX];
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+ if (options == NULL)
+ mntopts[0] = '\0';
+ else
+ (void) strlcpy(mntopts, options, sizeof (mntopts));
+
+ if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
+ return (0);
+
+ /* Create the directory if it doesn't already exist */
+ if (lstat(mountpoint, &buf) != 0) {
+ if (mkdirp(mountpoint, 0755) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "failed to create mountpoint"));
+ return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
+ dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
+ mountpoint));
+ }
+ }
+
+#if 0 /* FreeBSD: overlay mounts are not checked. */
+ /*
+ * Determine if the mountpoint is empty. If so, refuse to perform the
+ * mount. We don't perform this check if MS_OVERLAY is specified, which
+ * would defeat the point. We also avoid this check if 'remount' is
+ * specified.
+ */
+ if ((flags & MS_OVERLAY) == 0 &&
+ strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
+ !dir_is_empty(mountpoint)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "directory is not empty"));
+ return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
+ dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
+ }
+#endif
+
+ /* perform the mount */
+ if (zmount(zfs_get_name(zhp), mountpoint, flags,
+ MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
+ /*
+ * Generic errors are nasty, but there are just way too many
+ * from mount(), and they're well-understood. We pick a few
+ * common ones to improve upon.
+ */
+ if (errno == EBUSY)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "mountpoint or dataset is busy"));
+ else
+ zfs_error_aux(hdl, strerror(errno));
+
+ return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
+ dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
+ zhp->zfs_name));
+ }
+
+ return (0);
+}
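+
+/*
+ * Example (a sketch; 'zhp' is an open zfs_handle_t): mount a filesystem
+ * with default options, then unmount it again at its current mountpoint:
+ *
+ *	if (zfs_mount(zhp, NULL, 0) == 0)
+ *		(void) zfs_unmount(zhp, NULL, 0);
+ *
+ * A NULL 'options' string means no extra mount options, and a NULL
+ * mountpoint passed to zfs_unmount() means "look it up in /etc/mnttab".
+ */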
+
+/*
+ * Unmount a single filesystem.
+ */
+static int
+unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
+{
+ if (unmount(mountpoint, flags) != 0) {
+ zfs_error_aux(hdl, strerror(errno));
+ return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
+ dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
+ mountpoint));
+ }
+
+ return (0);
+}
+
+/*
+ * Unmount the given filesystem.
+ */
+int
+zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
+{
+ struct mnttab search = { 0 }, entry;
+
+	/* check to see if we need to unmount the filesystem */
+ search.mnt_special = zhp->zfs_name;
+ search.mnt_fstype = MNTTYPE_ZFS;
+ rewind(zhp->zfs_hdl->libzfs_mnttab);
+ if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
+ getmntany(zhp->zfs_hdl->libzfs_mnttab, &entry, &search) == 0)) {
+
+ if (mountpoint == NULL)
+ mountpoint = entry.mnt_mountp;
+
+ /*
+ * Unshare and unmount the filesystem
+ */
+ if (zfs_unshare_nfs(zhp, mountpoint) != 0 ||
+ unmount_one(zhp->zfs_hdl, mountpoint, flags) != 0)
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Unmount this filesystem and any children inheriting the mountpoint property.
+ * To do this, just act like we're changing the mountpoint property, but don't
+ * remount the filesystems afterwards.
+ */
+int
+zfs_unmountall(zfs_handle_t *zhp, int flags)
+{
+ prop_changelist_t *clp;
+ int ret;
+
+ clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, flags);
+ if (clp == NULL)
+ return (-1);
+
+ ret = changelist_prefix(clp);
+ changelist_free(clp);
+
+ return (ret);
+}
+
+boolean_t
+zfs_is_shared(zfs_handle_t *zhp)
+{
+ if (ZFS_IS_VOLUME(zhp))
+ return (zfs_is_shared_iscsi(zhp));
+
+ return (zfs_is_shared_nfs(zhp, NULL));
+}
+
+int
+zfs_share(zfs_handle_t *zhp)
+{
+ if (ZFS_IS_VOLUME(zhp))
+ return (zfs_share_iscsi(zhp));
+
+ return (zfs_share_nfs(zhp));
+}
+
+int
+zfs_unshare(zfs_handle_t *zhp)
+{
+ if (ZFS_IS_VOLUME(zhp))
+ return (zfs_unshare_iscsi(zhp));
+
+ return (zfs_unshare_nfs(zhp, NULL));
+}
+
+/*
+ * Check to see if the filesystem is currently shared.
+ */
+boolean_t
+zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
+{
+ char *mountpoint;
+
+ if (!zfs_is_mounted(zhp, &mountpoint))
+ return (B_FALSE);
+
+ if (is_shared(zhp->zfs_hdl, mountpoint)) {
+ if (where != NULL)
+ *where = mountpoint;
+ else
+ free(mountpoint);
+ return (B_TRUE);
+ } else {
+ free(mountpoint);
+ return (B_FALSE);
+ }
+}
+
+/*
+ * Share the given filesystem according to the options in 'sharenfs'. We rely
+ * on share(1M) to do the dirty work for us.
+ */
+int
+zfs_share_nfs(zfs_handle_t *zhp)
+{
+ char mountpoint[ZFS_MAXPROPLEN];
+ char shareopts[ZFS_MAXPROPLEN];
+ char buf[MAXPATHLEN];
+ FILE *fp;
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+ if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
+ return (0);
+
+ /*
+ * Return success if there are no share options.
+ */
+ if (zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts, sizeof (shareopts),
+ NULL, NULL, 0, B_FALSE) != 0 ||
+ strcmp(shareopts, "off") == 0)
+ return (0);
+
+ /*
+ * If the 'zoned' property is set, then zfs_is_mountable() will have
+ * already bailed out if we are in the global zone. But local
+ * zones cannot be NFS servers, so we ignore it for local zones as well.
+ */
+ if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
+ return (0);
+
+#ifdef __FreeBSD__
+ {
+ int error;
+
+ if (strcmp(shareopts, "on") == 0)
+ error = fsshare(ZFS_EXPORTS_PATH, mountpoint, "");
+ else
+ error = fsshare(ZFS_EXPORTS_PATH, mountpoint, shareopts);
+ if (error != 0) {
+ zfs_error_aux(hdl, "%s", strerror(error));
+ (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
+ dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+ zfs_get_name(zhp));
+ return (-1);
+ }
+ }
+#else
+ /*
+ * Invoke the share(1M) command. We always do this, even if it's
+ * currently shared, as the options may have changed.
+ */
+ if (strcmp(shareopts, "on") == 0)
+ (void) snprintf(buf, sizeof (buf), "/usr/sbin/share "
+ "-F nfs \"%s\" 2>&1", mountpoint);
+ else
+ (void) snprintf(buf, sizeof (buf), "/usr/sbin/share "
+ "-F nfs -o \"%s\" \"%s\" 2>&1", shareopts,
+ mountpoint);
+
+ if ((fp = popen(buf, "r")) == NULL)
+ return (zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
+ dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+ zfs_get_name(zhp)));
+
+ /*
+ * share(1M) should only produce output if there is some kind
+ * of error. All output begins with "share_nfs: ", so we trim
+ * this off to get to the real error.
+ */
+ if (fgets(buf, sizeof (buf), fp) != NULL) {
+ char *colon = strchr(buf, ':');
+
+		while (strlen(buf) > 0 && buf[strlen(buf) - 1] == '\n')
+ buf[strlen(buf) - 1] = '\0';
+
+ if (colon != NULL)
+ zfs_error_aux(hdl, colon + 2);
+
+ (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
+ dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+ zfs_get_name(zhp));
+
+ verify(pclose(fp) != 0);
+ return (-1);
+ }
+
+ verify(pclose(fp) == 0);
+#endif
+
+ return (0);
+}
+
+/*
+ * Unshare a filesystem by mountpoint.
+ */
+static int
+unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint)
+{
+ char buf[MAXPATHLEN];
+ FILE *fp;
+
+#ifdef __FreeBSD__
+ {
+ int error;
+
+ error = fsunshare(ZFS_EXPORTS_PATH, mountpoint);
+ if (error != 0) {
+ zfs_error_aux(hdl, "%s", strerror(error));
+ return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
+ dgettext(TEXT_DOMAIN,
+ "cannot unshare '%s'"), name));
+ }
+ }
+#else
+ (void) snprintf(buf, sizeof (buf),
+ "/usr/sbin/unshare \"%s\" 2>&1",
+ mountpoint);
+
+ if ((fp = popen(buf, "r")) == NULL)
+ return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
+ dgettext(TEXT_DOMAIN,
+ "cannot unshare '%s'"), name));
+
+ /*
+ * unshare(1M) should only produce output if there is
+ * some kind of error. All output begins with "unshare
+ * nfs: ", so we trim this off to get to the real error.
+ */
+ if (fgets(buf, sizeof (buf), fp) != NULL) {
+ char *colon = strchr(buf, ':');
+
+		while (strlen(buf) > 0 && buf[strlen(buf) - 1] == '\n')
+ buf[strlen(buf) - 1] = '\0';
+
+ if (colon != NULL)
+ zfs_error_aux(hdl, colon + 2);
+
+ verify(pclose(fp) != 0);
+
+ return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
+ dgettext(TEXT_DOMAIN,
+ "cannot unshare '%s'"), name));
+ }
+
+ verify(pclose(fp) == 0);
+#endif
+
+ return (0);
+}
+
+/*
+ * Unshare the given filesystem.
+ */
+int
+zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
+{
+ struct mnttab search = { 0 }, entry;
+
+	/* check to see if we need to unshare the filesystem */
+ search.mnt_special = (char *)zfs_get_name(zhp);
+ search.mnt_fstype = MNTTYPE_ZFS;
+ rewind(zhp->zfs_hdl->libzfs_mnttab);
+ if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
+ getmntany(zhp->zfs_hdl->libzfs_mnttab, &entry, &search) == 0)) {
+
+ if (mountpoint == NULL)
+ mountpoint = entry.mnt_mountp;
+
+ if (is_shared(zhp->zfs_hdl, mountpoint) &&
+ unshare_one(zhp->zfs_hdl, zhp->zfs_name, mountpoint) != 0)
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Same as zfs_unmountall(), but for NFS unshares.
+ */
+int
+zfs_unshareall_nfs(zfs_handle_t *zhp)
+{
+ prop_changelist_t *clp;
+ int ret;
+
+ clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0);
+ if (clp == NULL)
+ return (-1);
+
+ ret = changelist_unshare(clp);
+ changelist_free(clp);
+
+ return (ret);
+}
+
+/*
+ * Remove the mountpoint associated with the current dataset, if necessary.
+ * We only remove the underlying directory if:
+ *
+ * - The mountpoint is not 'none' or 'legacy'
+ * - The mountpoint is non-empty
+ * - The mountpoint is the default or inherited
+ * - The 'zoned' property is set, or we're in a local zone
+ *
+ * Any other directories we leave alone.
+ */
+void
+remove_mountpoint(zfs_handle_t *zhp)
+{
+ char mountpoint[ZFS_MAXPROPLEN];
+ zfs_source_t source;
+
+ if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
+ &source))
+ return;
+
+ if (source == ZFS_SRC_DEFAULT ||
+ source == ZFS_SRC_INHERITED) {
+ /*
+ * Try to remove the directory, silently ignoring any errors.
+ * The filesystem may have since been removed or moved around,
+ * and this error isn't really useful to the administrator in
+ * any way.
+ */
+ (void) rmdir(mountpoint);
+ }
+}
+
+boolean_t
+zfs_is_shared_iscsi(zfs_handle_t *zhp)
+{
+ return (iscsitgt_zfs_is_shared != NULL &&
+ iscsitgt_zfs_is_shared(zhp->zfs_name) != 0);
+}
+
+int
+zfs_share_iscsi(zfs_handle_t *zhp)
+{
+ char shareopts[ZFS_MAXPROPLEN];
+ const char *dataset = zhp->zfs_name;
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+ /*
+ * Return success if there are no share options.
+ */
+ if (zfs_prop_get(zhp, ZFS_PROP_SHAREISCSI, shareopts,
+ sizeof (shareopts), NULL, NULL, 0, B_FALSE) != 0 ||
+ strcmp(shareopts, "off") == 0)
+ return (0);
+
+/* We don't support iSCSI on FreeBSD yet. */
+#ifdef TODO
+ if (iscsitgt_zfs_share == NULL || iscsitgt_zfs_share(dataset) != 0)
+ return (zfs_error_fmt(hdl, EZFS_SHAREISCSIFAILED,
+ dgettext(TEXT_DOMAIN, "cannot share '%s'"), dataset));
+#endif
+
+ return (0);
+}
+
+int
+zfs_unshare_iscsi(zfs_handle_t *zhp)
+{
+ const char *dataset = zfs_get_name(zhp);
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+/* We don't support iSCSI on FreeBSD yet. */
+#ifdef TODO
+ /*
+ * Return if the volume is not shared
+ */
+ if (!zfs_is_shared_iscsi(zhp))
+ return (0);
+
+ /*
+	 * If this fails with ENODEV it indicates that the zvol wasn't shared,
+	 * so we should return success in that case.
+ */
+ if (iscsitgt_zfs_unshare == NULL ||
+ (iscsitgt_zfs_unshare(dataset) != 0 && errno != ENODEV))
+ return (zfs_error_fmt(hdl, EZFS_UNSHAREISCSIFAILED,
+ dgettext(TEXT_DOMAIN, "cannot unshare '%s'"), dataset));
+#endif
+
+ return (0);
+}
+
+typedef struct mount_cbdata {
+ zfs_handle_t **cb_datasets;
+ int cb_used;
+ int cb_alloc;
+} mount_cbdata_t;
+
+static int
+mount_cb(zfs_handle_t *zhp, void *data)
+{
+ mount_cbdata_t *cbp = data;
+
+ if (!(zfs_get_type(zhp) & (ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME))) {
+ zfs_close(zhp);
+ return (0);
+ }
+
+ if (cbp->cb_alloc == cbp->cb_used) {
+ void *ptr;
+
+ if ((ptr = zfs_realloc(zhp->zfs_hdl,
+ cbp->cb_datasets, cbp->cb_alloc * sizeof (void *),
+ cbp->cb_alloc * 2 * sizeof (void *))) == NULL)
+ return (-1);
+ cbp->cb_datasets = ptr;
+
+ cbp->cb_alloc *= 2;
+ }
+
+ cbp->cb_datasets[cbp->cb_used++] = zhp;
+
+ return (zfs_iter_children(zhp, mount_cb, cbp));
+}
+
+static int
+dataset_cmp(const void *a, const void *b)
+{
+ zfs_handle_t **za = (zfs_handle_t **)a;
+ zfs_handle_t **zb = (zfs_handle_t **)b;
+ char mounta[MAXPATHLEN];
+ char mountb[MAXPATHLEN];
+ boolean_t gota, gotb;
+
+ if ((gota = (zfs_get_type(*za) == ZFS_TYPE_FILESYSTEM)) != 0)
+ verify(zfs_prop_get(*za, ZFS_PROP_MOUNTPOINT, mounta,
+ sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
+ if ((gotb = (zfs_get_type(*zb) == ZFS_TYPE_FILESYSTEM)) != 0)
+ verify(zfs_prop_get(*zb, ZFS_PROP_MOUNTPOINT, mountb,
+ sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
+
+ if (gota && gotb)
+ return (strcmp(mounta, mountb));
+
+ if (gota)
+ return (-1);
+ if (gotb)
+ return (1);
+
+	return (strcmp(zfs_get_name(*za), zfs_get_name(*zb)));
+}
+
+/*
+ * Mount and share all datasets within the given pool. This assumes that no
+ * datasets within the pool are currently mounted. Because users can create
+ * complicated nested hierarchies of mountpoints, we first gather all the
+ * datasets and mountpoints within the pool, and sort them by mountpoint. Once
+ * we have the list of all filesystems, we iterate over them in order and mount
+ * and/or share each one.
+ */
+#pragma weak zpool_mount_datasets = zpool_enable_datasets
+int
+zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
+{
+ mount_cbdata_t cb = { 0 };
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ zfs_handle_t *zfsp;
+ int i, ret = -1;
+
+ /*
+ * Gather all datasets within the pool.
+ */
+ if ((cb.cb_datasets = zfs_alloc(hdl, 4 * sizeof (void *))) == NULL)
+ return (-1);
+ cb.cb_alloc = 4;
+
+ if ((zfsp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_ANY)) == NULL)
+ goto out;
+
+ cb.cb_datasets[0] = zfsp;
+ cb.cb_used = 1;
+
+ if (zfs_iter_children(zfsp, mount_cb, &cb) != 0)
+ goto out;
+
+ /*
+ * Sort the datasets by mountpoint.
+ */
+ qsort(cb.cb_datasets, cb.cb_used, sizeof (void *), dataset_cmp);
+
+ /*
+ * And mount all the datasets.
+ */
+ ret = 0;
+ for (i = 0; i < cb.cb_used; i++) {
+ if (zfs_mount(cb.cb_datasets[i], mntopts, flags) != 0 ||
+ zfs_share(cb.cb_datasets[i]) != 0)
+ ret = -1;
+ }
+
+out:
+ for (i = 0; i < cb.cb_used; i++)
+ zfs_close(cb.cb_datasets[i]);
+ free(cb.cb_datasets);
+
+ return (ret);
+}
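+
+/*
+ * Example (a sketch; 'zhp' is an open zpool_handle_t, e.g. freshly
+ * imported):
+ *
+ *	if (zpool_enable_datasets(zhp, NULL, 0) != 0)
+ *		... at least one dataset failed to mount or share ...
+ *
+ * Note that a failure does not abort the loop above; every dataset is
+ * still attempted, and -1 merely reports that at least one of them failed.
+ */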
+
+static int
+zvol_cb(const char *dataset, void *data)
+{
+ libzfs_handle_t *hdl = data;
+ zfs_handle_t *zhp;
+
+ /*
+	 * Ignore snapshots and ignore failures from non-existent datasets.
+ */
+ if (strchr(dataset, '@') != NULL ||
+ (zhp = zfs_open(hdl, dataset, ZFS_TYPE_VOLUME)) == NULL)
+ return (0);
+
+ (void) zfs_unshare_iscsi(zhp);
+
+ zfs_close(zhp);
+
+ return (0);
+}
+
+static int
+mountpoint_compare(const void *a, const void *b)
+{
+ const char *mounta = *((char **)a);
+ const char *mountb = *((char **)b);
+
+ return (strcmp(mountb, mounta));
+}
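+
+/*
+ * Note that the comparison above is intentionally reversed (b against a):
+ * sorting the mountpoints in descending order guarantees that a child such
+ * as "/tank/home/user" sorts before its parent "/tank/home", so the unmount
+ * pass below tears the hierarchy down from the leaves up.
+ */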
+
+/*
+ * Unshare and unmount all datasets within the given pool. We don't want to
+ * rely on traversing the DSL to discover the filesystems within the pool,
+ * because this may be expensive (if not all of them are mounted), and can fail
+ * arbitrarily (on I/O error, for example). Instead, we walk /etc/mnttab and
+ * gather all the filesystems that are currently mounted.
+ */
+#pragma weak zpool_unmount_datasets = zpool_disable_datasets
+int
+zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
+{
+ int used, alloc;
+ struct statfs *sfs;
+ size_t namelen;
+ char **mountpoints = NULL;
+ zfs_handle_t **datasets = NULL;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ int i, j, n;
+ int ret = -1;
+ int flags = (force ? MS_FORCE : 0);
+
+ /*
+ * First unshare all zvols.
+ */
+ if (zpool_iter_zvol(zhp, zvol_cb, hdl) != 0)
+ return (-1);
+
+ namelen = strlen(zhp->zpool_name);
+
+ used = alloc = 0;
+ if ((n = getmntinfo(&sfs, MNT_WAIT)) == 0) {
+ fprintf(stderr, "getmntinfo(): %s\n", strerror(errno));
+ return (-1);
+ }
+ for (j = 0; j < n; j++) {
+ /*
+ * Ignore non-ZFS entries.
+ */
+ if (strcmp(sfs[j].f_fstypename, MNTTYPE_ZFS) != 0)
+ continue;
+
+ /*
+ * Ignore filesystems not within this pool.
+ */
+ if (strncmp(sfs[j].f_mntfromname, zhp->zpool_name, namelen) != 0 ||
+ (sfs[j].f_mntfromname[namelen] != '/' &&
+ sfs[j].f_mntfromname[namelen] != '\0'))
+ continue;
+
+ /*
+ * At this point we've found a filesystem within our pool. Add
+ * it to our growing list.
+ */
+ if (used == alloc) {
+ if (alloc == 0) {
+ if ((mountpoints = zfs_alloc(hdl,
+ 8 * sizeof (void *))) == NULL)
+ goto out;
+
+ if ((datasets = zfs_alloc(hdl,
+ 8 * sizeof (void *))) == NULL)
+ goto out;
+
+ alloc = 8;
+ } else {
+ void *ptr;
+
+ if ((ptr = zfs_realloc(hdl, mountpoints,
+ alloc * sizeof (void *),
+ alloc * 2 * sizeof (void *))) == NULL)
+ goto out;
+ mountpoints = ptr;
+
+ if ((ptr = zfs_realloc(hdl, datasets,
+ alloc * sizeof (void *),
+ alloc * 2 * sizeof (void *))) == NULL)
+ goto out;
+ datasets = ptr;
+
+ alloc *= 2;
+ }
+ }
+
+ if ((mountpoints[used] = zfs_strdup(hdl,
+ sfs[j].f_mntonname)) == NULL)
+ goto out;
+
+ /*
+ * This is allowed to fail, in case there is some I/O error. It
+ * is only used to determine if we need to remove the underlying
+ * mountpoint, so failure is not fatal.
+ */
+ datasets[used] = make_dataset_handle(hdl, sfs[j].f_mntfromname);
+
+ used++;
+ }
+
+ /*
+ * At this point, we have the entire list of filesystems, so sort it by
+ * mountpoint.
+ */
+ qsort(mountpoints, used, sizeof (char *), mountpoint_compare);
+
+ /*
+ * Walk through and first unshare everything.
+ */
+ for (i = 0; i < used; i++) {
+ if (is_shared(hdl, mountpoints[i]) &&
+ unshare_one(hdl, mountpoints[i], mountpoints[i]) != 0)
+ goto out;
+ }
+
+ /*
+ * Now unmount everything, removing the underlying directories as
+ * appropriate.
+ */
+ for (i = 0; i < used; i++) {
+ if (unmount_one(hdl, mountpoints[i], flags) != 0)
+ goto out;
+ }
+
+ for (i = 0; i < used; i++) {
+ if (datasets[i])
+ remove_mountpoint(datasets[i]);
+ }
+
+ ret = 0;
+out:
+ for (i = 0; i < used; i++) {
+ if (datasets[i])
+ zfs_close(datasets[i]);
+ free(mountpoints[i]);
+ }
+ free(datasets);
+ free(mountpoints);
+
+ return (ret);
+}
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c b/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
new file mode 100644
index 0000000..8580837
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
@@ -0,0 +1,2055 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <devid.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/zfs_ioctl.h>
+#include <sys/zio.h>
+#include <strings.h>
+#include <umem.h>
+
+#include "zfs_namecheck.h"
+#include "zfs_prop.h"
+#include "libzfs_impl.h"
+
+/*
+ * Validate the given pool name, optionally setting an extended error message
+ * on 'hdl' if the name is invalid.
+ */
+static boolean_t
+zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
+{
+ namecheck_err_t why;
+ char what;
+ int ret;
+
+ ret = pool_namecheck(pool, &why, &what);
+
+ /*
+ * The rules for reserved pool names were extended at a later point.
+ * But we need to support users with existing pools that may now be
+ * invalid. So we only check for this expanded set of names during a
+ * create (or import), and only in userland.
+ */
+ if (ret == 0 && !isopen &&
+ (strncmp(pool, "mirror", 6) == 0 ||
+ strncmp(pool, "raidz", 5) == 0 ||
+ strncmp(pool, "spare", 5) == 0)) {
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "name is reserved"));
+ return (B_FALSE);
+ }
+
+ if (ret != 0) {
+ if (hdl != NULL) {
+ switch (why) {
+ case NAME_ERR_TOOLONG:
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "name is too long"));
+ break;
+
+ case NAME_ERR_INVALCHAR:
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "invalid character "
+ "'%c' in pool name"), what);
+ break;
+
+ case NAME_ERR_NOLETTER:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "name must begin with a letter"));
+ break;
+
+ case NAME_ERR_RESERVED:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "name is reserved"));
+ break;
+
+ case NAME_ERR_DISKLIKE:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "pool name is reserved"));
+ break;
+
+ case NAME_ERR_LEADING_SLASH:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "leading slash in name"));
+ break;
+
+ case NAME_ERR_EMPTY_COMPONENT:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "empty component in name"));
+ break;
+
+ case NAME_ERR_TRAILING_SLASH:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "trailing slash in name"));
+ break;
+
+ case NAME_ERR_MULTIPLE_AT:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "multiple '@' delimiters in name"));
+ break;
+
+ }
+ }
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+static int
+zpool_get_all_props(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+ return (-1);
+
+ while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
+ if (errno == ENOMEM) {
+ if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ } else {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ }
+
+ if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ return (0);
+}
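+
+/*
+ * The loop above shows the standard libzfs idiom for nvlist-returning
+ * ioctls: since the packed size of the result is not known in advance, a
+ * default-sized destination buffer is allocated and then grown with
+ * zcmd_expand_dst_nvlist() for as long as the kernel reports ENOMEM. The
+ * same pattern appears in the tryimport path in libzfs_import.c.
+ */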
+
+/*
+ * Open a handle to the given pool, even if the pool is currently in the FAULTED
+ * state.
+ */
+zpool_handle_t *
+zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
+{
+ zpool_handle_t *zhp;
+ boolean_t missing;
+
+ /*
+ * Make sure the pool name is valid.
+ */
+ if (!zpool_name_valid(hdl, B_TRUE, pool)) {
+ (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
+ dgettext(TEXT_DOMAIN, "cannot open '%s'"),
+ pool);
+ return (NULL);
+ }
+
+ if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
+ return (NULL);
+
+ zhp->zpool_hdl = hdl;
+ (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
+
+ if (zpool_refresh_stats(zhp, &missing) != 0) {
+ zpool_close(zhp);
+ return (NULL);
+ }
+
+ if (missing) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no such pool"));
+ (void) zfs_error_fmt(hdl, EZFS_NOENT,
+ dgettext(TEXT_DOMAIN, "cannot open '%s'"),
+ pool);
+ zpool_close(zhp);
+ return (NULL);
+ }
+
+ return (zhp);
+}
+
+/*
+ * Like the above, but silent on error. Used when iterating over pools (because
+ * the configuration cache may be out of date).
+ */
+int
+zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
+{
+ zpool_handle_t *zhp;
+ boolean_t missing;
+
+ if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
+ return (-1);
+
+ zhp->zpool_hdl = hdl;
+ (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
+
+ if (zpool_refresh_stats(zhp, &missing) != 0) {
+ zpool_close(zhp);
+ return (-1);
+ }
+
+ if (missing) {
+ zpool_close(zhp);
+ *ret = NULL;
+ return (0);
+ }
+
+ *ret = zhp;
+ return (0);
+}
+
+/*
+ * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
+ * state.
+ */
+zpool_handle_t *
+zpool_open(libzfs_handle_t *hdl, const char *pool)
+{
+ zpool_handle_t *zhp;
+
+ if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
+ return (NULL);
+
+ if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
+ (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
+ dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
+ zpool_close(zhp);
+ return (NULL);
+ }
+
+ return (zhp);
+}
+
+/*
+ * Close the handle. Simply frees the memory associated with the handle.
+ */
+void
+zpool_close(zpool_handle_t *zhp)
+{
+ if (zhp->zpool_config)
+ nvlist_free(zhp->zpool_config);
+ if (zhp->zpool_old_config)
+ nvlist_free(zhp->zpool_old_config);
+ if (zhp->zpool_props)
+ nvlist_free(zhp->zpool_props);
+ free(zhp);
+}
+
+/*
+ * Return the name of the pool.
+ */
+const char *
+zpool_get_name(zpool_handle_t *zhp)
+{
+ return (zhp->zpool_name);
+}
+
+/*
+ * Return the GUID of the pool.
+ */
+uint64_t
+zpool_get_guid(zpool_handle_t *zhp)
+{
+ uint64_t guid;
+
+ verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
+ &guid) == 0);
+ return (guid);
+}
+
+/*
+ * Return the version of the pool.
+ */
+uint64_t
+zpool_get_version(zpool_handle_t *zhp)
+{
+ uint64_t version;
+
+ verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
+ &version) == 0);
+
+ return (version);
+}
+
+/*
+ * Return the amount of space currently consumed by the pool.
+ */
+uint64_t
+zpool_get_space_used(zpool_handle_t *zhp)
+{
+ nvlist_t *nvroot;
+ vdev_stat_t *vs;
+ uint_t vsc;
+
+ verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+ verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&vs, &vsc) == 0);
+
+ return (vs->vs_alloc);
+}
+
+/*
+ * Return the total space in the pool.
+ */
+uint64_t
+zpool_get_space_total(zpool_handle_t *zhp)
+{
+ nvlist_t *nvroot;
+ vdev_stat_t *vs;
+ uint_t vsc;
+
+ verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+ verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&vs, &vsc) == 0);
+
+ return (vs->vs_space);
+}
+
+/*
+ * Return the alternate root for this pool, if any.
+ */
+int
+zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
+{
+ zfs_cmd_t zc = { 0 };
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
+ zc.zc_value[0] == '\0')
+ return (-1);
+
+ (void) strlcpy(buf, zc.zc_value, buflen);
+
+ return (0);
+}
+
+/*
+ * Return the state of the pool (ACTIVE or UNAVAILABLE)
+ */
+int
+zpool_get_state(zpool_handle_t *zhp)
+{
+ return (zhp->zpool_state);
+}
+
+/*
+ * Create the named pool, using the provided vdev list. It is assumed
+ * that the consumer has already validated the contents of the nvlist, so we
+ * don't have to worry about error semantics.
+ */
+int
+zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
+ const char *altroot)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot create '%s'"), pool);
+
+ if (!zpool_name_valid(hdl, B_FALSE, pool))
+ return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
+
+ if (altroot != NULL && altroot[0] != '/')
+ return (zfs_error_fmt(hdl, EZFS_BADPATH,
+ dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
+
+ if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
+
+ if (altroot != NULL)
+ (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
+
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+
+ switch (errno) {
+ case EBUSY:
+ /*
+ * This can happen if the user has specified the same
+ * device multiple times. We can't reliably detect this
+ * until we try to add it and see we already have a
+ * label.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "one or more vdevs refer to the same device"));
+ return (zfs_error(hdl, EZFS_BADDEV, msg));
+
+ case EOVERFLOW:
+ /*
+ * This occurs when one of the devices is below
+ * SPA_MINDEVSIZE. Unfortunately, we can't detect which
+ * device was the problem device since there's no
+ * reliable way to determine device size from userland.
+ */
+ {
+ char buf[64];
+
+ zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
+
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "one or more devices is less than the "
+ "minimum size (%s)"), buf);
+ }
+ return (zfs_error(hdl, EZFS_BADDEV, msg));
+
+ case ENOSPC:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "one or more devices is out of space"));
+ return (zfs_error(hdl, EZFS_BADDEV, msg));
+
+ default:
+ return (zpool_standard_error(hdl, errno, msg));
+ }
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ /*
+ * If this is an alternate root pool, then we automatically set the
+ * mountpoint of the root dataset to be '/'.
+ */
+ if (altroot != NULL) {
+ zfs_handle_t *zhp;
+
+ verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
+ verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
+ "/") == 0);
+
+ zfs_close(zhp);
+ }
+
+ return (0);
+}
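+
+/*
+ * Example (a sketch; constructing a well-formed 'nvroot' is normally done
+ * by the vdev-parsing code in zpool(1M) and is elided here):
+ *
+ *	if (zpool_create(hdl, "tank", nvroot, NULL) == 0)
+ *		... pool 'tank' now exists; with a NULL altroot, its ...
+ *		... root dataset keeps the default mountpoint ...
+ */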
+
+/*
+ * Destroy the given pool. It is up to the caller to ensure that there are no
+ * datasets left in the pool.
+ */
+int
+zpool_destroy(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+ zfs_handle_t *zfp = NULL;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ char msg[1024];
+
+ if (zhp->zpool_state == POOL_STATE_ACTIVE &&
+ (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
+ ZFS_TYPE_FILESYSTEM)) == NULL)
+ return (-1);
+
+ if (zpool_remove_zvol_links(zhp) != 0)
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s'"), zhp->zpool_name);
+
+ if (errno == EROFS) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "one or more devices is read only"));
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ } else {
+ (void) zpool_standard_error(hdl, errno, msg);
+ }
+
+ if (zfp)
+ zfs_close(zfp);
+ return (-1);
+ }
+
+ if (zfp) {
+ remove_mountpoint(zfp);
+ zfs_close(zfp);
+ }
+
+ return (0);
+}
+
+/*
+ * Add the given vdevs to the pool. The caller must have already performed the
+ * necessary verification to ensure that the vdev specification is well-formed.
+ */
+int
+zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ char msg[1024];
+ nvlist_t **spares;
+ uint_t nspares;
+
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot add to '%s'"), zhp->zpool_name);
+
+ if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
+ nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+ &spares, &nspares) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
+ "upgraded to add hot spares"));
+ return (zfs_error(hdl, EZFS_BADVERSION, msg));
+ }
+
+ if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
+ return (-1);
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
+ switch (errno) {
+ case EBUSY:
+ /*
+ * This can happen if the user has specified the same
+ * device multiple times. We can't reliably detect this
+ * until we try to add it and see we already have a
+ * label.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "one or more vdevs refer to the same device"));
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ break;
+
+ case EOVERFLOW:
+ /*
+			 * This occurs when one of the devices is below
+ * SPA_MINDEVSIZE. Unfortunately, we can't detect which
+ * device was the problem device since there's no
+ * reliable way to determine device size from userland.
+ */
+ {
+ char buf[64];
+
+ zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
+
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "device is less than the minimum "
+ "size (%s)"), buf);
+ }
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ break;
+
+ case ENOTSUP:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "pool must be upgraded to add raidz2 vdevs"));
+ (void) zfs_error(hdl, EZFS_BADVERSION, msg);
+ break;
+
+ case EDOM:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "root pool can not have concatenated devices"));
+ (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
+ break;
+
+ default:
+ (void) zpool_standard_error(hdl, errno, msg);
+ }
+
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ return (ret);
+}
+
+/*
+ * Exports the pool from the system. The caller must ensure that there are no
+ * mounted datasets in the pool.
+ */
+int
+zpool_export(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+
+ if (zpool_remove_zvol_links(zhp) != 0)
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
+ return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot export '%s'"),
+ zhp->zpool_name));
+ return (0);
+}
+
+/*
+ * Import the given pool using the known configuration. The configuration
+ * should have come from zpool_find_import(). The 'newname' and 'altroot'
+ * parameters control whether the pool is imported with a different name or with
+ * an alternate root, respectively.
+ */
+int
+zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
+ const char *altroot)
+{
+ zfs_cmd_t zc = { 0 };
+ char *thename;
+ char *origname;
+ int ret;
+
+ verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+ &origname) == 0);
+
+ if (newname != NULL) {
+ if (!zpool_name_valid(hdl, B_FALSE, newname))
+ return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
+ dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+ newname));
+ thename = (char *)newname;
+ } else {
+ thename = origname;
+ }
+
+ if (altroot != NULL && altroot[0] != '/')
+ return (zfs_error_fmt(hdl, EZFS_BADPATH,
+ dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
+ altroot));
+
+ (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
+
+ if (altroot != NULL)
+ (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
+ else
+ zc.zc_value[0] = '\0';
+
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+ &zc.zc_guid) == 0);
+
+ if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
+ return (-1);
+
+ ret = 0;
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
+ char desc[1024];
+ if (newname == NULL)
+ (void) snprintf(desc, sizeof (desc),
+ dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+ thename);
+ else
+ (void) snprintf(desc, sizeof (desc),
+ dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
+ origname, thename);
+
+ switch (errno) {
+ case ENOTSUP:
+ /*
+ * Unsupported version.
+ */
+ (void) zfs_error(hdl, EZFS_BADVERSION, desc);
+ break;
+
+ case EINVAL:
+ (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
+ break;
+
+ default:
+ (void) zpool_standard_error(hdl, errno, desc);
+ }
+
+ ret = -1;
+ } else {
+ zpool_handle_t *zhp;
+ /*
+ * This should never fail, but play it safe anyway.
+ */
+ if (zpool_open_silent(hdl, thename, &zhp) != 0) {
+ ret = -1;
+ } else if (zhp != NULL) {
+ ret = zpool_create_zvol_links(zhp);
+ zpool_close(zhp);
+ }
+ }
+
+ zcmd_free_nvlists(&zc);
+ return (ret);
+}
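+
+/*
+ * Typical import flow (a sketch; error handling elided): discover on-disk
+ * configurations with zpool_find_import() and hand one of them to
+ * zpool_import(). Each pair in the returned nvlist maps a pool name to its
+ * configuration:
+ *
+ *	nvlist_t *pools, *config;
+ *	nvpair_t *elem;
+ *
+ *	pools = zpool_find_import(hdl, 0, NULL);
+ *	if (pools != NULL &&
+ *	    (elem = nvlist_next_nvpair(pools, NULL)) != NULL &&
+ *	    nvpair_value_nvlist(elem, &config) == 0)
+ *		(void) zpool_import(hdl, config, NULL, NULL);
+ *	nvlist_free(pools);
+ */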
+
+/*
+ * Scrub the pool.
+ */
+int
+zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_cookie = type;
+
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
+ return (0);
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
+
+ if (errno == EBUSY)
+ return (zfs_error(hdl, EZFS_RESILVERING, msg));
+ else
+ return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * 'avail_spare' is set to B_TRUE if the provided GUID refers to an AVAIL
+ * spare, but B_FALSE if it's an INUSE spare.
+ */
+static nvlist_t *
+vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
+ boolean_t *avail_spare)
+{
+ uint_t c, children;
+ nvlist_t **child;
+ uint64_t theguid, present;
+ char *path;
+ uint64_t wholedisk = 0;
+ nvlist_t *ret;
+
+ verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
+
+ if (search == NULL &&
+ nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
+ /*
+ * If the device has never been present since import, the only
+ * reliable way to match the vdev is by GUID.
+ */
+ if (theguid == guid)
+ return (nv);
+ } else if (search != NULL &&
+ nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+ &wholedisk);
+ if (wholedisk) {
+ /*
+ * For whole disks, the internal path has 's0', but the
+ * path passed in by the user doesn't.
+ */
+ if (strlen(search) == strlen(path) - 2 &&
+ strncmp(search, path, strlen(search)) == 0)
+ return (nv);
+ } else if (strcmp(search, path) == 0) {
+ return (nv);
+ }
+ }
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) != 0)
+ return (NULL);
+
+ for (c = 0; c < children; c++)
+ if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
+ avail_spare)) != NULL)
+ return (ret);
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+ &child, &children) == 0) {
+ for (c = 0; c < children; c++) {
+ if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
+ avail_spare)) != NULL) {
+ *avail_spare = B_TRUE;
+ return (ret);
+ }
+ }
+ }
+
+ return (NULL);
+}
+
+nvlist_t *
+zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
+{
+ char buf[MAXPATHLEN];
+ const char *search;
+ char *end;
+ nvlist_t *nvroot;
+ uint64_t guid;
+
+ guid = strtoull(path, &end, 10);
+ if (guid != 0 && *end == '\0') {
+ search = NULL;
+ } else if (path[0] != '/') {
+ (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
+ search = buf;
+ } else {
+ search = path;
+ }
+
+ verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+
+ *avail_spare = B_FALSE;
+ return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
+}
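+
+/*
+ * Illustrative sketch (not part of the original source): zpool_find_vdev()
+ * accepts either a device name or a GUID rendered in decimal, so both of
+ * the following resolve to the same vdev nvlist (the GUID shown here is
+ * hypothetical):
+ *
+ *	boolean_t spare;
+ *	nvlist_t *nv1 = zpool_find_vdev(zhp, "da0", &spare);
+ *	nvlist_t *nv2 = zpool_find_vdev(zhp, "9036164894013231436", &spare);
+ *
+ * A name without a leading '/' is prefixed with _PATH_DEV before matching,
+ * so "da0" and "/dev/da0" are equivalent.
+ */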
+
+/*
+ * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
+ */
+static boolean_t
+is_spare(zpool_handle_t *zhp, uint64_t guid)
+{
+ uint64_t spare_guid;
+ nvlist_t *nvroot;
+ nvlist_t **spares;
+ uint_t nspares;
+ int i;
+
+ verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+ &spares, &nspares) == 0) {
+ for (i = 0; i < nspares; i++) {
+ verify(nvlist_lookup_uint64(spares[i],
+ ZPOOL_CONFIG_GUID, &spare_guid) == 0);
+ if (guid == spare_guid)
+ return (B_TRUE);
+ }
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Bring the specified vdev online
+ */
+int
+zpool_vdev_online(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ nvlist_t *tgt;
+ boolean_t avail_spare;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot online %s"), path);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+ verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+ if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
+ return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
+ return (0);
+
+ return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Take the specified vdev offline
+ */
+int
+zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ nvlist_t *tgt;
+ boolean_t avail_spare;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+ verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+ if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
+ return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+ zc.zc_cookie = istmp;
+
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
+ return (0);
+
+ switch (errno) {
+ case EBUSY:
+ /*
+ * There are no other replicas of this device.
+ */
+ return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
+
+ default:
+ return (zpool_standard_error(hdl, errno, msg));
+ }
+}
+
+/*
+ * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
+ * a hot spare.
+ */
+static boolean_t
+is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
+{
+ nvlist_t **child;
+ uint_t c, children;
+ char *type;
+
+ if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
+ &children) == 0) {
+ verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
+ &type) == 0);
+
+ if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
+ children == 2 && child[which] == tgt)
+ return (B_TRUE);
+
+ for (c = 0; c < children; c++)
+ if (is_replacing_spare(child[c], tgt, which))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Attach new_disk (fully described by nvroot) to old_disk.
+ * If 'replacing' is specified, the new disk will replace the old one.
+ */
+int
+zpool_vdev_attach(zpool_handle_t *zhp,
+ const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ int ret;
+ nvlist_t *tgt;
+ boolean_t avail_spare;
+ uint64_t val;
+ char *path;
+ nvlist_t **child;
+ uint_t children;
+ nvlist_t *config_root;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ if (replacing)
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot replace %s with %s"), old_disk, new_disk);
+ else
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot attach %s to %s"), new_disk, old_disk);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+ if (avail_spare)
+ return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+ verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+ zc.zc_cookie = replacing;
+
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) != 0 || children != 1) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "new device must be a single disk"));
+ return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
+ }
+
+ verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
+ ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
+
+ /*
+ * If the target is a hot spare that has been swapped in, we can only
+ * replace it with another hot spare.
+ */
+ if (replacing &&
+ nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
+ nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
+ (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
+ !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "can only be replaced by another hot spare"));
+ return (zfs_error(hdl, EZFS_BADTARGET, msg));
+ }
+
+ /*
+	 * If we are attempting to replace a spare, it cannot be applied to an
+ * already spared device.
+ */
+ if (replacing &&
+ nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
+ zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
+ is_replacing_spare(config_root, tgt, 0)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "device has already been replaced with a spare"));
+ return (zfs_error(hdl, EZFS_BADTARGET, msg));
+ }
+
+ if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
+ return (-1);
+
+ ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
+
+ zcmd_free_nvlists(&zc);
+
+ if (ret == 0)
+ return (0);
+
+ switch (errno) {
+ case ENOTSUP:
+ /*
+ * Can't attach to or replace this type of vdev.
+ */
+ if (replacing)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "cannot replace a replacing device"));
+ else
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "can only attach to mirrors and top-level "
+ "disks"));
+ (void) zfs_error(hdl, EZFS_BADTARGET, msg);
+ break;
+
+ case EINVAL:
+ /*
+ * The new device must be a single disk.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "new device must be a single disk"));
+ (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
+ break;
+
+ case EBUSY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
+ new_disk);
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ break;
+
+ case EOVERFLOW:
+ /*
+ * The new device is too small.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "device is too small"));
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ break;
+
+ case EDOM:
+ /*
+ * The new device has a different alignment requirement.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "devices have different sector alignment"));
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ break;
+
+ case ENAMETOOLONG:
+ /*
+ * The resulting top-level vdev spec won't fit in the label.
+ */
+ (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
+ break;
+
+ default:
+ (void) zpool_standard_error(hdl, errno, msg);
+ }
+
+ return (-1);
+}
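+
+/*
+ * Illustrative sketch (an assumption based on the checks above, not part of
+ * the original source): 'nvroot' is a root vdev with exactly one disk child.
+ * Error handling and the ZPOOL_CONFIG_WHOLE_DISK flag are omitted.
+ *
+ *	nvlist_t *root, *disk;
+ *
+ *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
+ *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
+ *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/da1");
+ *	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
+ *	(void) nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
+ *	(void) nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &disk, 1);
+ *
+ *	(void) zpool_vdev_attach(zhp, "da0", "da1", root, 1);
+ */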
+
+/*
+ * Detach the specified device.
+ */
+int
+zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ nvlist_t *tgt;
+ boolean_t avail_spare;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+ if (avail_spare)
+ return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+ verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
+ return (0);
+
+ switch (errno) {
+ case ENOTSUP:
+ /*
+ * Can't detach from this type of vdev.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
+ "applicable to mirror and replacing vdevs"));
+ (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
+ break;
+
+ case EBUSY:
+ /*
+ * There are no other replicas of this device.
+ */
+ (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
+ break;
+
+ default:
+ (void) zpool_standard_error(hdl, errno, msg);
+ }
+
+ return (-1);
+}
+
+/*
+ * Remove the given device. Currently, this is supported only for hot spares.
+ */
+int
+zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ nvlist_t *tgt;
+ boolean_t avail_spare;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+ if (!avail_spare) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "only inactive hot spares can be removed"));
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ }
+
+ verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
+ return (0);
+
+ return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Clear the errors for the pool, or the particular device if specified.
+ */
+int
+zpool_clear(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ nvlist_t *tgt;
+ boolean_t avail_spare;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ if (path)
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
+ path);
+ else
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
+ zhp->zpool_name);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if (path) {
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+ if (avail_spare)
+ return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+ verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
+ &zc.zc_guid) == 0);
+ }
+
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
+ return (0);
+
+ return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
+ * hierarchy.
+ */
+int
+zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
+ void *data)
+{
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ char (*paths)[MAXPATHLEN];
+ char path[MAXPATHLEN];
+ size_t size = 4;
+ int curr, fd, base, ret = 0;
+ DIR *dirp;
+ struct dirent *dp;
+ struct stat st;
+
+ if ((base = open(ZVOL_FULL_DEV_DIR, O_RDONLY)) < 0)
+ return (errno == ENOENT ? 0 : -1);
+
+ snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
+ zhp->zpool_name);
+ if (stat(path, &st) != 0) {
+ int err = errno;
+ (void) close(base);
+ return (err == ENOENT ? 0 : -1);
+ }
+
+ /*
+ * Oddly this wasn't a directory -- ignore that failure since we
+	 * know there are no links lower in the (non-existent) hierarchy.
+ */
+ if (!S_ISDIR(st.st_mode)) {
+ (void) close(base);
+ return (0);
+ }
+
+ if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
+ (void) close(base);
+ return (-1);
+ }
+
+ (void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
+ curr = 0;
+
+ while (curr >= 0) {
+ snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
+ paths[curr]);
+ if (lstat(path, &st) != 0)
+ goto err;
+
+ if (S_ISDIR(st.st_mode)) {
+ if ((dirp = opendir(path)) == NULL) {
+ goto err;
+ }
+
+ while ((dp = readdir(dirp)) != NULL) {
+ if (dp->d_name[0] == '.')
+ continue;
+
+ if (curr + 1 == size) {
+ paths = zfs_realloc(hdl, paths,
+ size * sizeof (paths[0]),
+ size * 2 * sizeof (paths[0]));
+ if (paths == NULL) {
+ (void) closedir(dirp);
+ goto err;
+ }
+
+ size *= 2;
+ }
+
+ (void) strlcpy(paths[curr + 1], paths[curr],
+ sizeof (paths[curr + 1]));
+ (void) strlcat(paths[curr], "/",
+ sizeof (paths[curr]));
+ (void) strlcat(paths[curr], dp->d_name,
+ sizeof (paths[curr]));
+ curr++;
+ }
+
+ (void) closedir(dirp);
+
+ } else {
+ if ((ret = cb(paths[curr], data)) != 0)
+ break;
+ }
+
+ curr--;
+ }
+
+ free(paths);
+ (void) close(base);
+
+ return (ret);
+
+err:
+ free(paths);
+ (void) close(base);
+ return (-1);
+}
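+
+/*
+ * Illustrative callback sketch (not part of the original source): print the
+ * name of every zvol found under the pool's /dev hierarchy.  A nonzero
+ * return value stops the walk.
+ *
+ *	static int
+ *	print_zvol(const char *dataset, void *data)
+ *	{
+ *		(void) printf("%s\n", dataset);
+ *		return (0);
+ *	}
+ *
+ *	ret = zpool_iter_zvol(zhp, print_zvol, NULL);
+ */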
+
+typedef struct zvol_cb {
+ zpool_handle_t *zcb_pool;
+ boolean_t zcb_create;
+} zvol_cb_t;
+
+/*ARGSUSED*/
+static int
+do_zvol_create(zfs_handle_t *zhp, void *data)
+{
+ int ret;
+
+ if (ZFS_IS_VOLUME(zhp))
+ (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
+
+ ret = zfs_iter_children(zhp, do_zvol_create, NULL);
+
+ zfs_close(zhp);
+
+ return (ret);
+}
+
+/*
+ * Iterate over all zvols in the pool and make any necessary minor nodes.
+ */
+int
+zpool_create_zvol_links(zpool_handle_t *zhp)
+{
+ zfs_handle_t *zfp;
+ int ret;
+
+ /*
+ * If the pool is unavailable, just return success.
+ */
+ if ((zfp = make_dataset_handle(zhp->zpool_hdl,
+ zhp->zpool_name)) == NULL)
+ return (0);
+
+ ret = zfs_iter_children(zfp, do_zvol_create, NULL);
+
+ zfs_close(zfp);
+ return (ret);
+}
+
+static int
+do_zvol_remove(const char *dataset, void *data)
+{
+ zpool_handle_t *zhp = data;
+
+ return (zvol_remove_link(zhp->zpool_hdl, dataset));
+}
+
+/*
+ * Iterate over all zvols in the pool and remove any minor nodes. We iterate
+ * by examining the /dev links so that a corrupted pool doesn't impede this
+ * operation.
+ */
+int
+zpool_remove_zvol_links(zpool_handle_t *zhp)
+{
+ return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
+}
+
+/*
+ * Convert from a devid string to a path.
+ */
+static char *
+devid_to_path(char *devid_str)
+{
+ ddi_devid_t devid;
+ char *minor;
+ char *path;
+ devid_nmlist_t *list = NULL;
+ int ret;
+
+ if (devid_str_decode(devid_str, &devid, &minor) != 0)
+ return (NULL);
+
+ ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
+
+ devid_str_free(minor);
+ devid_free(devid);
+
+ if (ret != 0)
+ return (NULL);
+
+	/* duplicate the name before freeing the list so it isn't leaked */
+	path = strdup(list[0].devname);
+	devid_free_nmlist(list);
+
+	return (path);
+}
+
+/*
+ * Convert from a path to a devid string.
+ */
+static char *
+path_to_devid(const char *path)
+{
+ int fd;
+ ddi_devid_t devid;
+ char *minor, *ret;
+
+ if ((fd = open(path, O_RDONLY)) < 0)
+ return (NULL);
+
+ minor = NULL;
+ ret = NULL;
+ if (devid_get(fd, &devid) == 0) {
+ if (devid_get_minor_name(fd, &minor) == 0)
+ ret = devid_str_encode(devid, minor);
+ if (minor != NULL)
+ devid_str_free(minor);
+ devid_free(devid);
+ }
+ (void) close(fd);
+
+ return (ret);
+}
+
+/*
+ * Issue the necessary ioctl() to update the stored path value for the vdev. We
+ * ignore any failure here, since a common case is for an unprivileged user to
+ * type 'zpool status', and we'll display the correct information anyway.
+ */
+static void
+set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+
+ (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
+ verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
+ &zc.zc_guid) == 0);
+
+ (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
+}
+
+/*
+ * Given a vdev, return the name to display in iostat. If the vdev has a path,
+ * we use that, stripping off any leading _PATH_DEV ("/dev/"); if not, we use
+ * the type.
+ * We also check if this is a whole disk, in which case we strip off the
+ * trailing 's0' slice name.
+ *
+ * This routine is also responsible for identifying when disks have been
+ * reconfigured in a new location. The kernel will have opened the device by
+ * devid, but the path will still refer to the old location. To catch this, we
+ * first do a path -> devid translation (which is fast for the common case). If
+ * the devid matches, we're done. If not, we do a reverse devid -> path
+ * translation and issue the appropriate ioctl() to update the path of the vdev.
+ * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
+ * of these checks.
+ */
+char *
+zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
+{
+ char *path, *devid;
+ uint64_t value;
+ char buf[64];
+
+ if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
+ &value) == 0) {
+ verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
+ &value) == 0);
+ (void) snprintf(buf, sizeof (buf), "%llu",
+ (u_longlong_t)value);
+ path = buf;
+ } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
+
+ if (zhp != NULL &&
+ nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
+ /*
+ * Determine if the current path is correct.
+ */
+ char *newdevid = path_to_devid(path);
+
+ if (newdevid == NULL ||
+ strcmp(devid, newdevid) != 0) {
+ char *newpath;
+
+ if ((newpath = devid_to_path(devid)) != NULL) {
+ /*
+ * Update the path appropriately.
+ */
+ set_path(zhp, nv, newpath);
+ if (nvlist_add_string(nv,
+ ZPOOL_CONFIG_PATH, newpath) == 0)
+ verify(nvlist_lookup_string(nv,
+ ZPOOL_CONFIG_PATH,
+ &path) == 0);
+ free(newpath);
+ }
+ }
+
+ if (newdevid)
+ devid_str_free(newdevid);
+ }
+
+ if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
+ path += sizeof(_PATH_DEV) - 1;
+
+ if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+ &value) == 0 && value) {
+ char *tmp = zfs_strdup(hdl, path);
+ if (tmp == NULL)
+ return (NULL);
+ tmp[strlen(path) - 2] = '\0';
+ return (tmp);
+ }
+ } else {
+ verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
+
+ /*
+ * If it's a raidz device, we need to stick in the parity level.
+ */
+ if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
+ verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
+ &value) == 0);
+ (void) snprintf(buf, sizeof (buf), "%s%llu", path,
+ (u_longlong_t)value);
+ path = buf;
+ }
+ }
+
+ return (zfs_strdup(hdl, path));
+}
+
+static int
+zbookmark_compare(const void *a, const void *b)
+{
+ return (memcmp(a, b, sizeof (zbookmark_t)));
+}
+
+/*
+ * Retrieve the persistent error log, uniquify the members, and return to the
+ * caller.
+ */
+int
+zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
+{
+ zfs_cmd_t zc = { 0 };
+ uint64_t count;
+ zbookmark_t *zb = NULL;
+ int i;
+
+ /*
+ * Retrieve the raw error list from the kernel. If the number of errors
+ * has increased, allocate more space and continue until we get the
+ * entire list.
+ */
+ verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
+ &count) == 0);
+ if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
+ count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
+ return (-1);
+ zc.zc_nvlist_dst_size = count;
+ (void) strcpy(zc.zc_name, zhp->zpool_name);
+ for (;;) {
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
+ &zc) != 0) {
+ free((void *)(uintptr_t)zc.zc_nvlist_dst);
+ if (errno == ENOMEM) {
+ count = zc.zc_nvlist_dst_size;
+ if ((zc.zc_nvlist_dst = (uintptr_t)
+ zfs_alloc(zhp->zpool_hdl, count *
+ sizeof (zbookmark_t))) == (uintptr_t)NULL)
+ return (-1);
+ } else {
+ return (-1);
+ }
+ } else {
+ break;
+ }
+ }
+
+ /*
+ * Sort the resulting bookmarks. This is a little confusing due to the
+ * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
+	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
+	 * _not_ copied as part of the process. So we point the start of our
+	 * array appropriately and decrement the total number of elements.
+ */
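+	/*
+	 * Worked example with hypothetical numbers: if we allocated room
+	 * for 10 bookmarks and the ioctl() left 'zc_nvlist_dst_size' at 3,
+	 * then 7 bookmarks were copied into the *last* 7 slots, so we start
+	 * at dst + 3 and treat the array as holding 10 - 3 = 7 elements.
+	 */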
+ zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
+ zc.zc_nvlist_dst_size;
+ count -= zc.zc_nvlist_dst_size;
+
+ qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
+
+ verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
+
+ /*
+ * Fill in the nverrlistp with nvlist's of dataset and object numbers.
+ */
+ for (i = 0; i < count; i++) {
+ nvlist_t *nv;
+
+ /* ignoring zb_blkid and zb_level for now */
+ if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
+ zb[i-1].zb_object == zb[i].zb_object)
+ continue;
+
+ if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
+ goto nomem;
+ if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
+ zb[i].zb_objset) != 0) {
+ nvlist_free(nv);
+ goto nomem;
+ }
+ if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
+ zb[i].zb_object) != 0) {
+ nvlist_free(nv);
+ goto nomem;
+ }
+ if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
+ nvlist_free(nv);
+ goto nomem;
+ }
+ nvlist_free(nv);
+ }
+
+ free((void *)(uintptr_t)zc.zc_nvlist_dst);
+ return (0);
+
+nomem:
+ free((void *)(uintptr_t)zc.zc_nvlist_dst);
+ return (no_memory(zhp->zpool_hdl));
+}
+
+/*
+ * Upgrade a ZFS pool to the latest on-disk version.
+ */
+int
+zpool_upgrade(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) strcpy(zc.zc_name, zhp->zpool_name);
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
+ return (zpool_standard_error_fmt(hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
+ zhp->zpool_name));
+
+ return (0);
+}
+
+/*
+ * Log command history.
+ *
+ * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
+ * otherwise ('zfs'). 'pool_create' is B_TRUE if we are logging the creation
+ * of the pool; B_FALSE otherwise. 'path' is the pathname containing the
+ * poolname. 'argc' and 'argv' are used to construct the command string.
+ */
+void
+zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
+ boolean_t pool, boolean_t pool_create)
+{
+ char cmd_buf[HIS_MAX_RECORD_LEN];
+ char *dspath;
+ zfs_cmd_t zc = { 0 };
+ int i;
+
+ /* construct the command string */
+ (void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
+ for (i = 0; i < argc; i++) {
+		/* the '+ 1' is the separating space; '>=' leaves room for the NUL */
+		if (strlen(cmd_buf) + 1 + strlen(argv[i]) >= HIS_MAX_RECORD_LEN)
+ break;
+ (void) strcat(cmd_buf, " ");
+ (void) strcat(cmd_buf, argv[i]);
+ }
+
+ /* figure out the poolname */
+ dspath = strpbrk(path, "/@");
+ if (dspath == NULL) {
+ (void) strcpy(zc.zc_name, path);
+ } else {
+ (void) strncpy(zc.zc_name, path, dspath - path);
+ zc.zc_name[dspath-path] = '\0';
+ }
+
+ zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
+ zc.zc_history_len = strlen(cmd_buf);
+
+ /* overloading zc_history_offset */
+ zc.zc_history_offset = pool_create;
+
+ (void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
+}
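+
+/*
+ * Illustrative usage sketch (not part of the original source): log the
+ * command "zfs create tank/home"; the pool name 'tank' is derived from the
+ * dataset path.
+ *
+ *	char *args[] = { "create", "tank/home" };
+ *
+ *	zpool_log_history(hdl, 2, args, "tank/home", B_FALSE, B_FALSE);
+ */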
+
+/*
+ * Perform ioctl to get some command history of a pool.
+ *
+ * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
+ * logical offset of the history buffer to start reading from.
+ *
+ * Upon return, 'off' is the next logical offset to read from and
+ * 'len' is the actual amount of bytes read into 'buf'.
+ */
+static int
+get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
+{
+ zfs_cmd_t zc = { 0 };
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ zc.zc_history = (uint64_t)(uintptr_t)buf;
+ zc.zc_history_len = *len;
+ zc.zc_history_offset = *off;
+
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
+ switch (errno) {
+ case EPERM:
+ return (zfs_error_fmt(hdl, EZFS_PERM,
+ dgettext(TEXT_DOMAIN,
+ "cannot show history for pool '%s'"),
+ zhp->zpool_name));
+ case ENOENT:
+ return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
+ dgettext(TEXT_DOMAIN, "cannot get history for pool "
+ "'%s'"), zhp->zpool_name));
+ case ENOTSUP:
+ return (zfs_error_fmt(hdl, EZFS_BADVERSION,
+ dgettext(TEXT_DOMAIN, "cannot get history for pool "
+ "'%s', pool must be upgraded"), zhp->zpool_name));
+ default:
+ return (zpool_standard_error_fmt(hdl, errno,
+ dgettext(TEXT_DOMAIN,
+ "cannot get history for '%s'"), zhp->zpool_name));
+ }
+ }
+
+ *len = zc.zc_history_len;
+ *off = zc.zc_history_offset;
+
+ return (0);
+}
+
+/*
+ * Process the buffer of nvlists, unpacking and storing each nvlist record
+ * into 'records'. 'leftover' is set to the number of bytes that weren't
+ * processed as there wasn't a complete record.
+ */
+static int
+zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
+ nvlist_t ***records, uint_t *numrecords)
+{
+ uint64_t reclen;
+ nvlist_t *nv;
+ int i;
+
+ while (bytes_read > sizeof (reclen)) {
+
+ /* get length of packed record (stored as little endian) */
+ for (i = 0, reclen = 0; i < sizeof (reclen); i++)
+ reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
+
+ if (bytes_read < sizeof (reclen) + reclen)
+ break;
+
+ /* unpack record */
+ if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
+ return (ENOMEM);
+ bytes_read -= sizeof (reclen) + reclen;
+ buf += sizeof (reclen) + reclen;
+
+		/* add record to nvlist array, growing it at powers of two */
+		if (ISP2(*numrecords + 2)) {
+			nvlist_t **tmp = realloc(*records,
+			    (*numrecords + 1) * 2 * sizeof (nvlist_t *));
+			/* don't leak the record or the old array on failure */
+			if (tmp == NULL) {
+				nvlist_free(nv);
+				return (ENOMEM);
+			}
+			*records = tmp;
+		}
+		(*records)[(*numrecords)++] = nv;
+ }
+
+ *leftover = bytes_read;
+ return (0);
+}
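+
+/*
+ * On-disk record framing, as read from the code above (not original
+ * documentation): each record is a 64-bit little-endian byte count
+ * followed by that many bytes of packed nvlist.  For a 24-byte record:
+ *
+ *	offset  0: 18 00 00 00 00 00 00 00	(reclen = 0x18 = 24)
+ *	offset  8: 24 bytes of packed nvlist
+ *	offset 32: length of the next record, and so on
+ *
+ * A partial record at the end of the buffer is reported through 'leftover'
+ * so the caller can re-read it from the right offset.
+ */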
+
+#define HIS_BUF_LEN (128*1024)
+
+/*
+ * Retrieve the command history of a pool.
+ */
+int
+zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
+{
+ char buf[HIS_BUF_LEN];
+ uint64_t off = 0;
+ nvlist_t **records = NULL;
+ uint_t numrecords = 0;
+ int err, i;
+
+ do {
+ uint64_t bytes_read = sizeof (buf);
+ uint64_t leftover;
+
+ if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
+ break;
+
+		/* if nothing else was read in, we're at EOF; just return */
+ if (!bytes_read)
+ break;
+
+ if ((err = zpool_history_unpack(buf, bytes_read,
+ &leftover, &records, &numrecords)) != 0)
+ break;
+ off -= leftover;
+
+ /* CONSTCOND */
+ } while (1);
+
+ if (!err) {
+ verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
+ verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
+ records, numrecords) == 0);
+ }
+ for (i = 0; i < numrecords; i++)
+ nvlist_free(records[i]);
+ free(records);
+
+ return (err);
+}
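+
+/*
+ * Illustrative consumer sketch (an assumption, not part of the original
+ * source): walk the returned records, which live under ZPOOL_HIST_RECORD
+ * as an nvlist array.  ZPOOL_HIST_CMD is assumed to be the per-record
+ * command string key from sys/fs/zfs.h.
+ *
+ *	nvlist_t *nvhis, **records;
+ *	uint_t nrec, i;
+ *	char *cmd;
+ *
+ *	if (zpool_get_history(zhp, &nvhis) == 0 &&
+ *	    nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
+ *	    &records, &nrec) == 0)
+ *		for (i = 0; i < nrec; i++)
+ *			if (nvlist_lookup_string(records[i],
+ *			    ZPOOL_HIST_CMD, &cmd) == 0)
+ *				(void) printf("%s\n", cmd);
+ */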
+
+void
+zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
+ char *pathname, size_t len)
+{
+ zfs_cmd_t zc = { 0 };
+ boolean_t mounted = B_FALSE;
+ char *mntpnt = NULL;
+ char dsname[MAXNAMELEN];
+
+ if (dsobj == 0) {
+ /* special case for the MOS */
+ (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
+ return;
+ }
+
+ /* get the dataset's name */
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_obj = dsobj;
+ if (ioctl(zhp->zpool_hdl->libzfs_fd,
+ ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
+ /* just write out a path of two object numbers */
+ (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
+ dsobj, obj);
+ return;
+ }
+ (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
+
+ /* find out if the dataset is mounted */
+ mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
+
+ /* get the corrupted object's path */
+ (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
+ zc.zc_obj = obj;
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
+ &zc) == 0) {
+ if (mounted) {
+ (void) snprintf(pathname, len, "%s%s", mntpnt,
+ zc.zc_value);
+ } else {
+ (void) snprintf(pathname, len, "%s:%s",
+ dsname, zc.zc_value);
+ }
+ } else {
+ (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
+ }
+ free(mntpnt);
+}
+
+int
+zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret = -1;
+ char errbuf[1024];
+ nvlist_t *nvl = NULL;
+ nvlist_t *realprops;
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
+ zhp->zpool_name);
+
+ if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
+ zfs_error_aux(zhp->zpool_hdl,
+ dgettext(TEXT_DOMAIN, "pool must be "
+ "upgraded to support pool properties"));
+ return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
+ }
+
+ if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
+ return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
+
+ if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
+ nvlist_add_string(nvl, propname, propval) != 0) {
+ return (no_memory(zhp->zpool_hdl));
+ }
+
+ if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
+ zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
+ nvlist_free(nvl);
+ return (-1);
+ }
+
+ nvlist_free(nvl);
+ nvl = realprops;
+
+ /*
+ * Execute the corresponding ioctl() to set this property.
+ */
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
+ return (-1);
+
+ ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
+ zcmd_free_nvlists(&zc);
+
+ if (ret)
+ (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
+
+ return (ret);
+}
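+
+/*
+ * Illustrative usage sketch (not part of the original source): at this pool
+ * version, 'bootfs' is the representative settable pool property.
+ *
+ *	if (zpool_set_prop(zhp, "bootfs", "tank/ROOT") != 0)
+ *		(void) fprintf(stderr, "%s\n",
+ *		    libzfs_error_description(zpool_get_handle(zhp)));
+ */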
+
+int
+zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
+ size_t proplen, zfs_source_t *srctype)
+{
+ uint64_t value;
+ char msg[1024], *strvalue;
+ nvlist_t *nvp;
+ zfs_source_t src = ZFS_SRC_NONE;
+
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot get property '%s'"), zpool_prop_to_name(prop));
+
+ if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
+ zfs_error_aux(zhp->zpool_hdl,
+ dgettext(TEXT_DOMAIN, "pool must be "
+ "upgraded to support pool properties"));
+ return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
+ }
+
+ if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
+ return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
+
+ /*
+	 * The "name" property is special-cased.
+ */
+ if (!zfs_prop_valid_for_type(prop, ZFS_TYPE_POOL) &&
+ prop != ZFS_PROP_NAME)
+ return (-1);
+
+ switch (prop) {
+ case ZFS_PROP_NAME:
+ (void) strlcpy(propbuf, zhp->zpool_name, proplen);
+ break;
+
+ case ZFS_PROP_BOOTFS:
+ if (nvlist_lookup_nvlist(zhp->zpool_props,
+ zpool_prop_to_name(prop), &nvp) != 0) {
+ strvalue = (char *)zfs_prop_default_string(prop);
+ if (strvalue == NULL)
+ strvalue = "-";
+ src = ZFS_SRC_DEFAULT;
+ } else {
+ VERIFY(nvlist_lookup_uint64(nvp,
+ ZFS_PROP_SOURCE, &value) == 0);
+ src = value;
+ VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
+ &strvalue) == 0);
+ if (strlen(strvalue) >= proplen)
+ return (-1);
+ }
+ (void) strcpy(propbuf, strvalue);
+ break;
+
+ default:
+ return (-1);
+ }
+ if (srctype)
+ *srctype = src;
+ return (0);
+}
+
+int
+zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
+{
+ return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
+}
+
+int
+zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
+{
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ zpool_proplist_t *entry;
+ char buf[ZFS_MAXPROPLEN];
+
+ if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
+ return (-1);
+
+ for (entry = *plp; entry != NULL; entry = entry->pl_next) {
+
+ if (entry->pl_fixed)
+ continue;
+
+ if (entry->pl_prop != ZFS_PROP_INVAL &&
+ zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
+ NULL) == 0) {
+ if (strlen(buf) > entry->pl_width)
+ entry->pl_width = strlen(buf);
+ }
+ }
+
+ return (0);
+}
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_status.c b/contrib/opensolaris/lib/libzfs/common/libzfs_status.c
new file mode 100644
index 0000000..2a41649
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_status.c
@@ -0,0 +1,289 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * This file contains the functions which analyze the status of a pool. This
+ * includes both the status of an active pool and the status of exported
+ * pools. Returns one of the ZPOOL_STATUS_* defines describing the status of
+ * the pool. This status is independent (to a certain degree) from the state of
+ * the pool. A pool's state describes only whether or not it is capable of
+ * providing the necessary fault tolerance for data. The status describes the
+ * overall status of devices. A pool that is online can still have a device
+ * that is experiencing errors.
+ *
+ * Only a subset of the possible faults can be detected using 'zpool status',
+ * and not all possible errors correspond to a FMA message ID. The explanation
+ * is left up to the caller, depending on whether it is a live pool or an
+ * import.
+ */
+
+#include <libzfs.h>
+#include <string.h>
+#include "libzfs_impl.h"
+
+/*
+ * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
+ * in libzfs.h. Note that there are some status results which go past the end
+ * of this table, and hence have no associated message ID.
+ */
+static char *msgid_table[] = {
+ "ZFS-8000-14",
+ "ZFS-8000-2Q",
+ "ZFS-8000-3C",
+ "ZFS-8000-4J",
+ "ZFS-8000-5E",
+ "ZFS-8000-6X",
+ "ZFS-8000-72",
+ "ZFS-8000-8A",
+ "ZFS-8000-9P",
+ "ZFS-8000-A5"
+};
+
+/*
+ * If the pool is active, a certain class of static errors is overridden by the
+ * faults as analyzed by FMA. These faults have separate knowledge articles,
+ * and the article referred to by 'zpool status' must match that indicated by
+ * the syslog error message. We override missing data as well as corrupt pool.
+ */
+static char *msgid_table_active[] = {
+ "ZFS-8000-14",
+ "ZFS-8000-D3", /* overridden */
+ "ZFS-8000-D3", /* overridden */
+ "ZFS-8000-4J",
+ "ZFS-8000-5E",
+ "ZFS-8000-6X",
+ "ZFS-8000-CS", /* overridden */
+ "ZFS-8000-8A",
+ "ZFS-8000-9P",
+ "ZFS-8000-CS", /* overridden */
+};
+
+#define NMSGID (sizeof (msgid_table) / sizeof (msgid_table[0]))
+
+/* ARGSUSED */
+static int
+vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (state == VDEV_STATE_CANT_OPEN &&
+ aux == VDEV_AUX_OPEN_FAILED);
+}
+
+/* ARGSUSED */
+static int
+vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (errs != 0);
+}
+
+/* ARGSUSED */
+static int
+vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (state == VDEV_STATE_CANT_OPEN);
+}
+
+/* ARGSUSED */
+static int
+vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (state == VDEV_STATE_OFFLINE);
+}
+
+/*
+ * Detect whether any leaf devices have seen errors or could not be opened.
+ */
+static boolean_t
+find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
+{
+ nvlist_t **child;
+ vdev_stat_t *vs;
+ uint_t c, children;
+ char *type;
+
+ /*
+ * Ignore problems within a 'replacing' vdev, since we're presumably in
+ * the process of repairing any such errors, and don't want to call them
+ * out again. We'll pick up the fact that a resilver is happening
+ * later.
+ */
+ verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
+ if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
+ return (B_FALSE);
+
+ if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
+ &children) == 0) {
+ for (c = 0; c < children; c++)
+ if (find_vdev_problem(child[c], func))
+ return (B_TRUE);
+ } else {
+ verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&vs, &c) == 0);
+
+ if (func(vs->vs_state, vs->vs_aux,
+ vs->vs_read_errors +
+ vs->vs_write_errors +
+ vs->vs_checksum_errors))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Active pool health status.
+ *
+ * To determine the status for a pool, we make several passes over the config,
+ * picking the most egregious error we find. In order of importance, we do the
+ * following:
+ *
+ * - Check for a complete and valid configuration
+ * - Look for any missing devices in a non-replicated config
+ * - Check for any data errors
+ * - Check for any missing devices in a replicated config
+ * - Look for any devices showing errors
+ * - Check for any resilvering devices
+ *
+ * There can obviously be multiple errors within a single pool, so this routine
+ * only picks the most damaging of all the current errors to report.
+ */
+static zpool_status_t
+check_status(nvlist_t *config, boolean_t isimport)
+{
+ nvlist_t *nvroot;
+ vdev_stat_t *vs;
+ uint_t vsc;
+ uint64_t nerr;
+ uint64_t version;
+
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+ &version) == 0);
+ verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) == 0);
+ verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&vs, &vsc) == 0);
+
+ /*
+ * Newer on-disk version.
+ */
+ if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+ vs->vs_aux == VDEV_AUX_VERSION_NEWER)
+ return (ZPOOL_STATUS_VERSION_NEWER);
+
+ /*
+ * Check that the config is complete.
+ */
+ if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+ vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
+ return (ZPOOL_STATUS_BAD_GUID_SUM);
+
+ /*
+ * Missing devices in non-replicated config.
+ */
+ if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+ find_vdev_problem(nvroot, vdev_missing))
+ return (ZPOOL_STATUS_MISSING_DEV_NR);
+
+ if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+ find_vdev_problem(nvroot, vdev_broken))
+ return (ZPOOL_STATUS_CORRUPT_LABEL_NR);
+
+ /*
+ * Corrupted pool metadata
+ */
+ if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+ vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
+ return (ZPOOL_STATUS_CORRUPT_POOL);
+
+ /*
+ * Persistent data errors.
+ */
+ if (!isimport) {
+ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
+ &nerr) == 0 && nerr != 0)
+ return (ZPOOL_STATUS_CORRUPT_DATA);
+ }
+
+ /*
+ * Missing devices in a replicated config.
+ */
+ if (find_vdev_problem(nvroot, vdev_missing))
+ return (ZPOOL_STATUS_MISSING_DEV_R);
+ if (find_vdev_problem(nvroot, vdev_broken))
+ return (ZPOOL_STATUS_CORRUPT_LABEL_R);
+
+ /*
+ * Devices with errors
+ */
+ if (!isimport && find_vdev_problem(nvroot, vdev_errors))
+ return (ZPOOL_STATUS_FAILING_DEV);
+
+ /*
+ * Offlined devices
+ */
+ if (find_vdev_problem(nvroot, vdev_offlined))
+ return (ZPOOL_STATUS_OFFLINE_DEV);
+
+ /*
+ * Currently resilvering
+ */
+ if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
+ return (ZPOOL_STATUS_RESILVERING);
+
+ /*
+ * Outdated, but usable, version
+ */
+ if (version < ZFS_VERSION)
+ return (ZPOOL_STATUS_VERSION_OLDER);
+
+ return (ZPOOL_STATUS_OK);
+}
+
+zpool_status_t
+zpool_get_status(zpool_handle_t *zhp, char **msgid)
+{
+ zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
+
+ if (ret >= NMSGID)
+ *msgid = NULL;
+ else
+ *msgid = msgid_table_active[ret];
+
+ return (ret);
+}
+
+zpool_status_t
+zpool_import_status(nvlist_t *config, char **msgid)
+{
+ zpool_status_t ret = check_status(config, B_TRUE);
+
+ if (ret >= NMSGID)
+ *msgid = NULL;
+ else
+ *msgid = msgid_table[ret];
+
+ return (ret);
+}
diff --git a/contrib/opensolaris/lib/libzfs/common/libzfs_util.c b/contrib/opensolaris/lib/libzfs/common/libzfs_util.c
new file mode 100644
index 0000000..279835b
--- /dev/null
+++ b/contrib/opensolaris/lib/libzfs/common/libzfs_util.c
@@ -0,0 +1,827 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Internal utility routines for the ZFS library.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <libintl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/mnttab.h>
+#include <sys/mntent.h>
+#include <sys/types.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+int
+libzfs_errno(libzfs_handle_t *hdl)
+{
+ return (hdl->libzfs_error);
+}
+
+const char *
+libzfs_error_action(libzfs_handle_t *hdl)
+{
+ return (hdl->libzfs_action);
+}
+
+const char *
+libzfs_error_description(libzfs_handle_t *hdl)
+{
+ if (hdl->libzfs_desc[0] != '\0')
+ return (hdl->libzfs_desc);
+
+ switch (hdl->libzfs_error) {
+ case EZFS_NOMEM:
+ return (dgettext(TEXT_DOMAIN, "out of memory"));
+ case EZFS_BADPROP:
+ return (dgettext(TEXT_DOMAIN, "invalid property value"));
+ case EZFS_PROPREADONLY:
+ return (dgettext(TEXT_DOMAIN, "read only property"));
+ case EZFS_PROPTYPE:
+ return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
+ "datasets of this type"));
+ case EZFS_PROPNONINHERIT:
+ return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
+ case EZFS_PROPSPACE:
+ return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
+ case EZFS_BADTYPE:
+ return (dgettext(TEXT_DOMAIN, "operation not applicable to "
+ "datasets of this type"));
+ case EZFS_BUSY:
+ return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
+ case EZFS_EXISTS:
+ return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
+ case EZFS_NOENT:
+ return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
+ case EZFS_BADSTREAM:
+ return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
+ case EZFS_DSREADONLY:
+ return (dgettext(TEXT_DOMAIN, "dataset is read only"));
+ case EZFS_VOLTOOBIG:
+ return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
+ "this system"));
+ case EZFS_VOLHASDATA:
+ return (dgettext(TEXT_DOMAIN, "volume has data"));
+ case EZFS_INVALIDNAME:
+ return (dgettext(TEXT_DOMAIN, "invalid name"));
+ case EZFS_BADRESTORE:
+ return (dgettext(TEXT_DOMAIN, "unable to restore to "
+ "destination"));
+ case EZFS_BADBACKUP:
+ return (dgettext(TEXT_DOMAIN, "backup failed"));
+ case EZFS_BADTARGET:
+ return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
+ case EZFS_NODEVICE:
+ return (dgettext(TEXT_DOMAIN, "no such device in pool"));
+ case EZFS_BADDEV:
+ return (dgettext(TEXT_DOMAIN, "invalid device"));
+ case EZFS_NOREPLICAS:
+ return (dgettext(TEXT_DOMAIN, "no valid replicas"));
+ case EZFS_RESILVERING:
+ return (dgettext(TEXT_DOMAIN, "currently resilvering"));
+ case EZFS_BADVERSION:
+ return (dgettext(TEXT_DOMAIN, "unsupported version"));
+ case EZFS_POOLUNAVAIL:
+ return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
+ case EZFS_DEVOVERFLOW:
+ return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
+ case EZFS_BADPATH:
+ return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
+ case EZFS_CROSSTARGET:
+ return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
+ "pools"));
+ case EZFS_ZONED:
+ return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
+ case EZFS_MOUNTFAILED:
+ return (dgettext(TEXT_DOMAIN, "mount failed"));
+ case EZFS_UMOUNTFAILED:
+ return (dgettext(TEXT_DOMAIN, "umount failed"));
+ case EZFS_UNSHARENFSFAILED:
+ return (dgettext(TEXT_DOMAIN, "unshare(1M) failed"));
+ case EZFS_SHARENFSFAILED:
+ return (dgettext(TEXT_DOMAIN, "share(1M) failed"));
+ case EZFS_DEVLINKS:
+ return (dgettext(TEXT_DOMAIN, "failed to create /dev links"));
+ case EZFS_PERM:
+ return (dgettext(TEXT_DOMAIN, "permission denied"));
+ case EZFS_NOSPC:
+ return (dgettext(TEXT_DOMAIN, "out of space"));
+ case EZFS_IO:
+ return (dgettext(TEXT_DOMAIN, "I/O error"));
+ case EZFS_INTR:
+ return (dgettext(TEXT_DOMAIN, "signal received"));
+ case EZFS_ISSPARE:
+ return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
+ "spare"));
+ case EZFS_INVALCONFIG:
+ return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
+ case EZFS_RECURSIVE:
+ return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
+ case EZFS_NOHISTORY:
+ return (dgettext(TEXT_DOMAIN, "no history available"));
+ case EZFS_UNSHAREISCSIFAILED:
+ return (dgettext(TEXT_DOMAIN,
+ "iscsitgtd failed request to unshare"));
+ case EZFS_SHAREISCSIFAILED:
+ return (dgettext(TEXT_DOMAIN,
+ "iscsitgtd failed request to share"));
+ case EZFS_POOLPROPS:
+ return (dgettext(TEXT_DOMAIN, "failed to retrieve "
+ "pool properties"));
+ case EZFS_POOL_NOTSUP:
+ return (dgettext(TEXT_DOMAIN, "operation not supported "
+ "on this type of pool"));
+ case EZFS_POOL_INVALARG:
+ return (dgettext(TEXT_DOMAIN, "invalid argument for "
+ "this pool operation"));
+ case EZFS_UNKNOWN:
+ return (dgettext(TEXT_DOMAIN, "unknown error"));
+ default:
+ assert(hdl->libzfs_error == 0);
+ return (dgettext(TEXT_DOMAIN, "no error"));
+ }
+}
+
+/*PRINTFLIKE2*/
+void
+zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ (void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
+ fmt, ap);
+ hdl->libzfs_desc_active = 1;
+
+ va_end(ap);
+}
+
+static void
+zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
+{
+ (void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
+ fmt, ap);
+ hdl->libzfs_error = error;
+
+ if (hdl->libzfs_desc_active)
+ hdl->libzfs_desc_active = 0;
+ else
+ hdl->libzfs_desc[0] = '\0';
+
+ if (hdl->libzfs_printerr) {
+ if (error == EZFS_UNKNOWN) {
+ (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
+ "error: %s\n"), libzfs_error_description(hdl));
+ abort();
+ }
+
+ (void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
+ libzfs_error_description(hdl));
+ if (error == EZFS_NOMEM)
+ exit(1);
+ }
+}
+
+int
+zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
+{
+ return (zfs_error_fmt(hdl, error, "%s", msg));
+}
+
+/*PRINTFLIKE3*/
+int
+zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ zfs_verror(hdl, error, fmt, ap);
+
+ va_end(ap);
+
+ return (-1);
+}
+
+static int
+zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
+ va_list ap)
+{
+ switch (error) {
+ case EPERM:
+ case EACCES:
+ zfs_verror(hdl, EZFS_PERM, fmt, ap);
+ return (-1);
+
+ case EIO:
+ zfs_verror(hdl, EZFS_IO, fmt, ap);
+ return (-1);
+
+ case EINTR:
+ zfs_verror(hdl, EZFS_INTR, fmt, ap);
+ return (-1);
+ }
+
+ return (0);
+}
+
+int
+zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
+{
+ return (zfs_standard_error_fmt(hdl, error, "%s", msg));
+}
+
+/*PRINTFLIKE3*/
+int
+zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ if (zfs_common_error(hdl, error, fmt, ap) != 0) {
+ va_end(ap);
+ return (-1);
+ }
+
+ switch (error) {
+ case ENXIO:
+ zfs_verror(hdl, EZFS_IO, fmt, ap);
+ break;
+
+ case ENOENT:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dataset does not exist"));
+ zfs_verror(hdl, EZFS_NOENT, fmt, ap);
+ break;
+
+ case ENOSPC:
+ case EDQUOT:
+ zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
+		break;
+
+ case EEXIST:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dataset already exists"));
+ zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
+ break;
+
+ case EBUSY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dataset is busy"));
+ zfs_verror(hdl, EZFS_BUSY, fmt, ap);
+ break;
+ default:
+ zfs_error_aux(hdl, strerror(errno));
+ zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
+ break;
+ }
+
+ va_end(ap);
+ return (-1);
+}
+
+int
+zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
+{
+ return (zpool_standard_error_fmt(hdl, error, "%s", msg));
+}
+
+/*PRINTFLIKE3*/
+int
+zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ if (zfs_common_error(hdl, error, fmt, ap) != 0) {
+ va_end(ap);
+ return (-1);
+ }
+
+ switch (error) {
+ case ENODEV:
+ zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
+ break;
+
+ case ENOENT:
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "no such pool or dataset"));
+ zfs_verror(hdl, EZFS_NOENT, fmt, ap);
+ break;
+
+ case EEXIST:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "pool already exists"));
+ zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
+ break;
+
+ case EBUSY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
+		zfs_verror(hdl, EZFS_BUSY, fmt, ap);
+ break;
+
+ case ENXIO:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "one or more devices is currently unavailable"));
+ zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
+ break;
+
+ case ENAMETOOLONG:
+ zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
+ break;
+
+ case ENOTSUP:
+ zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
+ break;
+
+ case EINVAL:
+ zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
+ break;
+
+ default:
+ zfs_error_aux(hdl, strerror(error));
+ zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
+ }
+
+ va_end(ap);
+ return (-1);
+}
+
+/*
+ * Display an out of memory error message; if error printing is enabled,
+ * the process exits.
+ */
+int
+no_memory(libzfs_handle_t *hdl)
+{
+ return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
+}
+
+/*
+ * A safe form of malloc() which will die if the allocation fails.
+ */
+void *
+zfs_alloc(libzfs_handle_t *hdl, size_t size)
+{
+ void *data;
+
+ if ((data = calloc(1, size)) == NULL)
+ (void) no_memory(hdl);
+
+ return (data);
+}
+
+/*
+ * A safe form of realloc(), which also zeroes newly allocated space.
+ */
+void *
+zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
+{
+ void *ret;
+
+ if ((ret = realloc(ptr, newsize)) == NULL) {
+ (void) no_memory(hdl);
+ free(ptr);
+ return (NULL);
+ }
+
+ bzero((char *)ret + oldsize, (newsize - oldsize));
+ return (ret);
+}
+
+/*
+ * A safe form of strdup() which will die if the allocation fails.
+ */
+char *
+zfs_strdup(libzfs_handle_t *hdl, const char *str)
+{
+ char *ret;
+
+ if ((ret = strdup(str)) == NULL)
+ (void) no_memory(hdl);
+
+ return (ret);
+}
+
+/*
+ * Convert a number to an appropriately human-readable output.
+ */
+void
+zfs_nicenum(uint64_t num, char *buf, size_t buflen)
+{
+ uint64_t n = num;
+ int index = 0;
+ char u;
+
+ while (n >= 1024) {
+ n /= 1024;
+ index++;
+ }
+
+ u = " KMGTPE"[index];
+
+ if (index == 0) {
+ (void) snprintf(buf, buflen, "%llu", n);
+ } else if ((num & ((1ULL << 10 * index) - 1)) == 0) {
+ /*
+ * If this is an even multiple of the base, always display
+ * without any decimal precision.
+ */
+ (void) snprintf(buf, buflen, "%llu%c", n, u);
+ } else {
+ /*
+ * We want to choose a precision that reflects the best choice
+ * for fitting in 5 characters. This can get rather tricky when
+ * we have numbers that are very close to an order of magnitude.
+ * For example, when displaying 10239 (which is really 9.999K),
+ * we want only a single place of precision for 10.0K. We could
+ * develop some complex heuristics for this, but it's much
+ * easier just to try each combination in turn.
+ */
+ int i;
+ for (i = 2; i >= 0; i--) {
+ (void) snprintf(buf, buflen, "%.*f%c", i,
+ (double)num / (1ULL << 10 * index), u);
+ if (strlen(buf) <= 5)
+ break;
+ }
+ }
+}
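+
+/*
+ * Worked example (illustrative): for num = 10239, index becomes 1 ('K')
+ * with n = 9.  Since 10239 is not an even multiple of 1024, the precision
+ * loop first tries "10.00K" (6 characters), then "10.0K" (5 characters),
+ * which fits, so the caller sees "10.0K" rather than a misleading "9K".
+ */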
+
+void
+libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
+{
+ hdl->libzfs_printerr = printerr;
+}
+
+libzfs_handle_t *
+libzfs_init(void)
+{
+ libzfs_handle_t *hdl;
+
+ if ((hdl = calloc(sizeof (libzfs_handle_t), 1)) == NULL) {
+ return (NULL);
+ }
+
+ if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR)) < 0) {
+ free(hdl);
+ return (NULL);
+ }
+
+ if ((hdl->libzfs_mnttab = fopen(MNTTAB, "r")) == NULL) {
+ (void) close(hdl->libzfs_fd);
+ free(hdl);
+ return (NULL);
+ }
+
+ hdl->libzfs_sharetab = fopen(ZFS_EXPORTS_PATH, "r");
+
+ return (hdl);
+}
+
+void
+libzfs_fini(libzfs_handle_t *hdl)
+{
+ (void) close(hdl->libzfs_fd);
+ if (hdl->libzfs_mnttab)
+ (void) fclose(hdl->libzfs_mnttab);
+ if (hdl->libzfs_sharetab)
+ (void) fclose(hdl->libzfs_sharetab);
+ namespace_clear(hdl);
+ free(hdl);
+}
+
+libzfs_handle_t *
+zpool_get_handle(zpool_handle_t *zhp)
+{
+ return (zhp->zpool_hdl);
+}
+
+libzfs_handle_t *
+zfs_get_handle(zfs_handle_t *zhp)
+{
+ return (zhp->zfs_hdl);
+}
+
+/*
+ * Given a name, determine whether or not it's a valid path
+ * (starts with '/' or "./"). If so, walk the mnttab trying
+ * to match the device number. If not, treat the path as an
+ * fs/vol/snap name.
+ */
+zfs_handle_t *
+zfs_path_to_zhandle(libzfs_handle_t *hdl, char *path, zfs_type_t argtype)
+{
+ struct statfs statbuf;
+
+ if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
+ /*
+ * It's not a valid path, assume it's a name of type 'argtype'.
+ */
+ return (zfs_open(hdl, path, argtype));
+ }
+
+ if (statfs(path, &statbuf) != 0) {
+ (void) fprintf(stderr, "%s: %s\n", path, strerror(errno));
+ return (NULL);
+ }
+
+ if (strcmp(statbuf.f_fstypename, MNTTYPE_ZFS) != 0) {
+ (void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
+ path);
+ return (NULL);
+ }
+
+ return (zfs_open(hdl, statbuf.f_mntfromname, ZFS_TYPE_FILESYSTEM));
+}
+
+/*
+ * Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from
+ * an ioctl().
+ */
+int
+zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
+{
+ if (len == 0)
+ len = 2048;
+ zc->zc_nvlist_dst_size = len;
+ if ((zc->zc_nvlist_dst = (uint64_t)(uintptr_t)
+ zfs_alloc(hdl, zc->zc_nvlist_dst_size)) == 0)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * Called when an ioctl() which returns an nvlist fails with ENOMEM. This will
+ * expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was
+ * filled in by the kernel to indicate the actual required size.
+ */
+int
+zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
+{
+ free((void *)(uintptr_t)zc->zc_nvlist_dst);
+ if ((zc->zc_nvlist_dst = (uint64_t)(uintptr_t)
+ zfs_alloc(hdl, zc->zc_nvlist_dst_size))
+ == 0)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * Called to free the src and dst nvlists stored in the command structure.
+ */
+void
+zcmd_free_nvlists(zfs_cmd_t *zc)
+{
+ free((void *)(uintptr_t)zc->zc_nvlist_src);
+ free((void *)(uintptr_t)zc->zc_nvlist_dst);
+}
+
+int
+zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl,
+ size_t *size)
+{
+ char *packed;
+ size_t len;
+
+ verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);
+
+ if ((packed = zfs_alloc(hdl, len)) == NULL)
+ return (-1);
+
+ verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
+
+ zc->zc_nvlist_src = (uint64_t)(uintptr_t)packed;
+ zc->zc_nvlist_src_size = len;
+
+ if (size)
+ *size = len;
+ return (0);
+}
+
+/*
+ * Unpacks an nvlist from the ZFS ioctl command structure.
+ */
+int
+zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
+{
+ if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
+ zc->zc_nvlist_dst_size, nvlp, 0) != 0)
+ return (no_memory(hdl));
+
+ return (0);
+}
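+
+/*
+ * Illustrative sketch (not part of the original source) of the usual
+ * pattern for ioctls that hand back an nvlist: allocate a destination
+ * buffer, retry with a larger one if the kernel reports ENOMEM, then
+ * unpack.  ZFS_IOC_POOL_GET_PROPS stands in for any such request.
+ *
+ *	nvlist_t *nvl;
+ *
+ *	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+ *		return (-1);
+ *	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
+ *		if (errno != ENOMEM ||
+ *		    zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ *			zcmd_free_nvlists(&zc);
+ *			return (-1);
+ *		}
+ *	}
+ *	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0)
+ *		nvl = NULL;
+ *	zcmd_free_nvlists(&zc);
+ */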
+
+static void
+zfs_print_prop_headers(libzfs_get_cbdata_t *cbp)
+{
+ zfs_proplist_t *pl = cbp->cb_proplist;
+ int i;
+ char *title;
+ size_t len;
+
+ cbp->cb_first = B_FALSE;
+ if (cbp->cb_scripted)
+ return;
+
+ /*
+ * Start with the length of the column headers.
+ */
+ cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
+ cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
+ "PROPERTY"));
+ cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
+ "VALUE"));
+ cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
+ "SOURCE"));
+
+ /*
+ * Go through and calculate the widths for each column. For the
+ * 'source' column, we kludge it up by taking the worst-case scenario of
+ * inheriting from the longest name. This is acceptable because in the
+ * majority of cases 'SOURCE' is the last column displayed, and we don't
+ * use the width anyway. Note that the 'VALUE' column can be oversized,
+ * if the name of the property is much longer than any values we find.
+ */
+ for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
+ /*
+ * 'PROPERTY' column
+ */
+ if (pl->pl_prop != ZFS_PROP_INVAL) {
+ len = strlen(zfs_prop_to_name(pl->pl_prop));
+ if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
+ cbp->cb_colwidths[GET_COL_PROPERTY] = len;
+ } else {
+ len = strlen(pl->pl_user_prop);
+ if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
+ cbp->cb_colwidths[GET_COL_PROPERTY] = len;
+ }
+
+ /*
+ * 'VALUE' column
+ */
+ if ((pl->pl_prop != ZFS_PROP_NAME || !pl->pl_all) &&
+ pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
+ cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
+
+ /*
+ * 'NAME' and 'SOURCE' columns
+ */
+ if (pl->pl_prop == ZFS_PROP_NAME &&
+ pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) {
+ cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
+ cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
+ strlen(dgettext(TEXT_DOMAIN, "inherited from"));
+ }
+ }
+
+ /*
+ * Now go through and print the headers.
+ */
+ for (i = 0; i < 4; i++) {
+ switch (cbp->cb_columns[i]) {
+ case GET_COL_NAME:
+ title = dgettext(TEXT_DOMAIN, "NAME");
+ break;
+ case GET_COL_PROPERTY:
+ title = dgettext(TEXT_DOMAIN, "PROPERTY");
+ break;
+ case GET_COL_VALUE:
+ title = dgettext(TEXT_DOMAIN, "VALUE");
+ break;
+ case GET_COL_SOURCE:
+ title = dgettext(TEXT_DOMAIN, "SOURCE");
+ break;
+ default:
+ title = NULL;
+ }
+
+ if (title != NULL) {
+ if (i == 3 || cbp->cb_columns[i + 1] == 0)
+ (void) printf("%s", title);
+ else
+ (void) printf("%-*s ",
+ cbp->cb_colwidths[cbp->cb_columns[i]],
+ title);
+ }
+ }
+ (void) printf("\n");
+}
+
+/*
+ * Display a single line of output, according to the settings in the callback
+ * structure.
+ */
+void
+libzfs_print_one_property(const char *name, libzfs_get_cbdata_t *cbp,
+ const char *propname, const char *value, zfs_source_t sourcetype,
+ const char *source)
+{
+ int i;
+ const char *str;
+ char buf[128];
+
+ /*
+ * Ignore those source types that the user has chosen to ignore.
+ */
+ if ((sourcetype & cbp->cb_sources) == 0)
+ return;
+
+ if (cbp->cb_first)
+ zfs_print_prop_headers(cbp);
+
+ for (i = 0; i < 4; i++) {
+ switch (cbp->cb_columns[i]) {
+ case GET_COL_NAME:
+ str = name;
+ break;
+
+ case GET_COL_PROPERTY:
+ str = propname;
+ break;
+
+ case GET_COL_VALUE:
+ str = value;
+ break;
+
+ case GET_COL_SOURCE:
+ switch (sourcetype) {
+ case ZFS_SRC_NONE:
+ str = "-";
+ break;
+
+ case ZFS_SRC_DEFAULT:
+ str = "default";
+ break;
+
+ case ZFS_SRC_LOCAL:
+ str = "local";
+ break;
+
+ case ZFS_SRC_TEMPORARY:
+ str = "temporary";
+ break;
+
+ case ZFS_SRC_INHERITED:
+ (void) snprintf(buf, sizeof (buf),
+ "inherited from %s", source);
+ str = buf;
+ break;
+ }
+ break;
+
+ default:
+ continue;
+ }
+
+ if (cbp->cb_columns[i + 1] == 0)
+ (void) printf("%s", str);
+ else if (cbp->cb_scripted)
+ (void) printf("%s\t", str);
+ else
+ (void) printf("%-*s ",
+ cbp->cb_colwidths[cbp->cb_columns[i]],
+ str);
+
+ }
+
+ (void) printf("\n");
+}