Diffstat (limited to 'cddl/contrib/opensolaris/lib/libzfs')
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h            |  271
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c |  214
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c    | 3026
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c      |  154
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h       |   82
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c     |  515
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c      |  769
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c       | 1691
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c   | 2103
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c     |   76
-rw-r--r--  cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c       |  616
11 files changed, 7344 insertions, 2173 deletions
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h
index 232324e..71aab1c 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h
@@ -20,21 +20,20 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _LIBZFS_H
#define _LIBZFS_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <assert.h>
#include <libnvpair.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/fs/zfs.h>
+#include <sys/avl.h>
#include <sys/zfs_ioctl.h>
#ifdef __cplusplus
@@ -47,6 +46,7 @@ extern "C" {
#define ZFS_MAXNAMELEN MAXNAMELEN
#define ZPOOL_MAXNAMELEN MAXNAMELEN
#define ZFS_MAXPROPLEN MAXPATHLEN
+#define ZPOOL_MAXPROPLEN MAXPATHLEN
/*
* libzfs errors
@@ -99,10 +99,62 @@ enum {
EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */
EZFS_POOL_INVALARG, /* invalid argument for this pool operation */
EZFS_NAMETOOLONG, /* dataset name is too long */
+ EZFS_OPENFAILED, /* open of device failed */
+ EZFS_NOCAP, /* couldn't get capacity */
+ EZFS_LABELFAILED, /* write of label failed */
+ EZFS_ISCSISVCUNAVAIL, /* iscsi service unavailable */
+ EZFS_BADWHO, /* invalid permission who */
+ EZFS_BADPERM, /* invalid permission */
+ EZFS_BADPERMSET, /* invalid permission set name */
+ EZFS_NODELEGATION, /* delegated administration is disabled */
+ EZFS_PERMRDONLY, /* pemissions are readonly */
+ EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */
+ EZFS_SHARESMBFAILED, /* failed to share over smb */
+ EZFS_BADCACHE, /* bad cache file */
+ EZFS_ISL2CACHE, /* device is for the level 2 ARC */
+ EZFS_VDEVNOTSUP, /* unsupported vdev type */
+ EZFS_NOTSUP, /* ops not supported on this dataset */
+ EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */
EZFS_UNKNOWN
};
/*
+ * The following data structures are all part
+ * of the zfs_allow_t data structure which is
+ * used for printing 'allow' permissions.
+ * It is a linked list of zfs_allow_t's which
+ * then contain avl tree's for user/group/sets/...
+ * and each one of the entries in those trees have
+ * avl tree's for the permissions they belong to and
+ * whether they are local,descendent or local+descendent
+ * permissions. The AVL trees are used primarily for
+ * sorting purposes, but also so that we can quickly find
+ * a given user and or permission.
+ */
+typedef struct zfs_perm_node {
+ avl_node_t z_node;
+ char z_pname[MAXPATHLEN];
+} zfs_perm_node_t;
+
+typedef struct zfs_allow_node {
+ avl_node_t z_node;
+ char z_key[MAXPATHLEN]; /* name, such as joe */
+ avl_tree_t z_localdescend; /* local+descendent perms */
+ avl_tree_t z_local; /* local permissions */
+ avl_tree_t z_descend; /* descendent permissions */
+} zfs_allow_node_t;
+
+typedef struct zfs_allow {
+ struct zfs_allow *z_next;
+ char z_setpoint[MAXPATHLEN];
+ avl_tree_t z_sets;
+ avl_tree_t z_crperms;
+ avl_tree_t z_user;
+ avl_tree_t z_group;
+ avl_tree_t z_everyone;
+} zfs_allow_t;
+
+/*
* Basic handle types
*/
typedef struct zfs_handle zfs_handle_t;
@@ -131,12 +183,9 @@ extern zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
extern zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
extern void zpool_close(zpool_handle_t *);
extern const char *zpool_get_name(zpool_handle_t *);
-extern uint64_t zpool_get_guid(zpool_handle_t *);
-extern uint64_t zpool_get_space_used(zpool_handle_t *);
-extern uint64_t zpool_get_space_total(zpool_handle_t *);
-extern int zpool_get_root(zpool_handle_t *, char *, size_t);
extern int zpool_get_state(zpool_handle_t *);
-extern uint64_t zpool_get_version(zpool_handle_t *);
+extern char *zpool_state_to_name(vdev_state_t, vdev_aux_t);
+extern void zpool_free_handles(libzfs_handle_t *);
/*
* Iterate over all active pools in the system.
@@ -148,7 +197,7 @@ extern int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
* Functions to create and destroy pools
*/
extern int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
- const char *);
+ nvlist_t *, nvlist_t *);
extern int zpool_destroy(zpool_handle_t *);
extern int zpool_add(zpool_handle_t *, nvlist_t *);
@@ -156,22 +205,33 @@ extern int zpool_add(zpool_handle_t *, nvlist_t *);
* Functions to manipulate pool and vdev state
*/
extern int zpool_scrub(zpool_handle_t *, pool_scrub_type_t);
+extern int zpool_clear(zpool_handle_t *, const char *);
-extern int zpool_vdev_online(zpool_handle_t *, const char *);
-extern int zpool_vdev_offline(zpool_handle_t *, const char *, int);
-extern int zpool_vdev_attach(zpool_handle_t *, const char *, const char *,
- nvlist_t *, int);
+extern int zpool_vdev_online(zpool_handle_t *, const char *, int,
+ vdev_state_t *);
+extern int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
+extern int zpool_vdev_attach(zpool_handle_t *, const char *,
+ const char *, nvlist_t *, int);
extern int zpool_vdev_detach(zpool_handle_t *, const char *);
extern int zpool_vdev_remove(zpool_handle_t *, const char *);
-extern int zpool_clear(zpool_handle_t *, const char *);
-extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *);
+
+extern int zpool_vdev_fault(zpool_handle_t *, uint64_t);
+extern int zpool_vdev_degrade(zpool_handle_t *, uint64_t);
+extern int zpool_vdev_clear(zpool_handle_t *, uint64_t);
+
+extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
+ boolean_t *, boolean_t *);
+extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, char *);
/*
* Functions to manage pool properties
*/
extern int zpool_set_prop(zpool_handle_t *, const char *, const char *);
-extern int zpool_get_prop(zpool_handle_t *, zfs_prop_t, char *,
- size_t proplen, zfs_source_t *);
+extern int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
+ size_t proplen, zprop_source_t *);
+extern uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
+ zprop_source_t *);
+
extern const char *zpool_prop_to_name(zpool_prop_t);
extern const char *zpool_prop_values(zpool_prop_t);
@@ -194,6 +254,11 @@ typedef enum {
ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */
ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */
+ ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */
+ ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
+ ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */
+ ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */
+ ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */
/*
* The following are not faults per se, but still an error possibly
@@ -223,26 +288,39 @@ extern int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
/*
* Import and export functions
*/
-extern int zpool_export(zpool_handle_t *);
+extern int zpool_export(zpool_handle_t *, boolean_t);
extern int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
- const char *);
+ char *altroot);
+extern int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
+ nvlist_t *, boolean_t);
/*
* Search for pools to import
*/
extern nvlist_t *zpool_find_import(libzfs_handle_t *, int, char **);
+extern nvlist_t *zpool_find_import_cached(libzfs_handle_t *, const char *,
+ char *, uint64_t);
+extern nvlist_t *zpool_find_import_byname(libzfs_handle_t *, int, char **,
+ char *);
+extern nvlist_t *zpool_find_import_byguid(libzfs_handle_t *, int, char **,
+ uint64_t);
+extern nvlist_t *zpool_find_import_activeok(libzfs_handle_t *, int, char **);
/*
* Miscellaneous pool functions
*/
+struct zfs_cmd;
+
extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *);
-extern int zpool_upgrade(zpool_handle_t *);
+extern int zpool_upgrade(zpool_handle_t *, uint64_t);
extern int zpool_get_history(zpool_handle_t *, nvlist_t **);
-extern void zpool_log_history(libzfs_handle_t *, int, char **, const char *,
- boolean_t, boolean_t);
+extern void zpool_set_history_str(const char *subcommand, int argc,
+ char **argv, char *history_str);
+extern int zpool_stage_history(libzfs_handle_t *, const char *);
extern void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
size_t len);
-
+extern int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
+extern int zpool_get_physpath(zpool_handle_t *, char *);
/*
* Basic handle manipulations. These functions do not create or destroy the
* underlying datasets, only the references to them.
@@ -251,65 +329,84 @@ extern zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
extern void zfs_close(zfs_handle_t *);
extern zfs_type_t zfs_get_type(const zfs_handle_t *);
extern const char *zfs_get_name(const zfs_handle_t *);
+extern zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *);
/*
* Property management functions. Some functions are shared with the kernel,
* and are found in sys/fs/zfs.h.
*/
+
+/*
+ * zfs dataset property management
+ */
+extern const char *zfs_prop_default_string(zfs_prop_t);
+extern uint64_t zfs_prop_default_numeric(zfs_prop_t);
+extern const char *zfs_prop_column_name(zfs_prop_t);
+extern boolean_t zfs_prop_align_right(zfs_prop_t);
+
+extern nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t,
+ nvlist_t *, uint64_t, zfs_handle_t *, const char *);
+
extern const char *zfs_prop_to_name(zfs_prop_t);
extern int zfs_prop_set(zfs_handle_t *, const char *, const char *);
extern int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
- zfs_source_t *, char *, size_t, boolean_t);
+ zprop_source_t *, char *, size_t, boolean_t);
extern int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
- zfs_source_t *, char *, size_t);
+ zprop_source_t *, char *, size_t);
extern uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
-extern const char *zfs_prop_get_string(zfs_handle_t *, zfs_prop_t);
extern int zfs_prop_inherit(zfs_handle_t *, const char *);
extern const char *zfs_prop_values(zfs_prop_t);
-extern int zfs_prop_valid_for_type(zfs_prop_t, int);
-extern const char *zfs_prop_default_string(zfs_prop_t prop);
-extern uint64_t zfs_prop_default_numeric(zfs_prop_t);
extern int zfs_prop_is_string(zfs_prop_t prop);
-extern const char *zfs_prop_column_name(zfs_prop_t);
-extern boolean_t zfs_prop_align_right(zfs_prop_t);
-extern void nicebool(int value, char *buf, size_t buflen);
+extern nvlist_t *zfs_get_user_props(zfs_handle_t *);
-typedef struct zfs_proplist {
- zfs_prop_t pl_prop;
+typedef struct zprop_list {
+ int pl_prop;
char *pl_user_prop;
- struct zfs_proplist *pl_next;
+ struct zprop_list *pl_next;
boolean_t pl_all;
size_t pl_width;
boolean_t pl_fixed;
-} zfs_proplist_t;
+} zprop_list_t;
-typedef zfs_proplist_t zpool_proplist_t;
-
-extern int zfs_get_proplist(libzfs_handle_t *, char *, zfs_proplist_t **);
-extern int zpool_get_proplist(libzfs_handle_t *, char *, zpool_proplist_t **);
-extern int zfs_expand_proplist(zfs_handle_t *, zfs_proplist_t **);
-extern int zpool_expand_proplist(zpool_handle_t *, zpool_proplist_t **);
-extern void zfs_free_proplist(zfs_proplist_t *);
-extern nvlist_t *zfs_get_user_props(zfs_handle_t *);
+extern int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **);
#define ZFS_MOUNTPOINT_NONE "none"
#define ZFS_MOUNTPOINT_LEGACY "legacy"
/*
- * Functions for printing properties from zfs/zpool
+ * zpool property management
+ */
+extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **);
+extern const char *zpool_prop_default_string(zpool_prop_t);
+extern uint64_t zpool_prop_default_numeric(zpool_prop_t);
+extern const char *zpool_prop_column_name(zpool_prop_t);
+extern boolean_t zpool_prop_align_right(zpool_prop_t);
+
+/*
+ * Functions shared by zfs and zpool property management.
*/
-typedef struct libzfs_get_cbdata {
+extern int zprop_iter(zprop_func func, void *cb, boolean_t show_all,
+ boolean_t ordered, zfs_type_t type);
+extern int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
+ zfs_type_t);
+extern void zprop_free_list(zprop_list_t *);
+
+/*
+ * Functions for printing zfs or zpool properties
+ */
+typedef struct zprop_get_cbdata {
int cb_sources;
int cb_columns[4];
int cb_colwidths[5];
boolean_t cb_scripted;
boolean_t cb_literal;
boolean_t cb_first;
- zfs_proplist_t *cb_proplist;
-} libzfs_get_cbdata_t;
+ zprop_list_t *cb_proplist;
+ zfs_type_t cb_type;
+} zprop_get_cbdata_t;
-void libzfs_print_one_property(const char *, libzfs_get_cbdata_t *,
- const char *, const char *, zfs_source_t, const char *);
+void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
+ const char *, const char *, zprop_source_t, const char *);
#define GET_COL_NAME 1
#define GET_COL_PROPERTY 2
@@ -331,26 +428,61 @@ extern int zfs_iter_snapshots(zfs_handle_t *, zfs_iter_f, void *);
*/
extern int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
nvlist_t *);
+extern int zfs_create_ancestors(libzfs_handle_t *, const char *);
extern int zfs_destroy(zfs_handle_t *);
extern int zfs_destroy_snaps(zfs_handle_t *, char *);
extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
-extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t);
-extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, int);
-extern int zfs_rename(zfs_handle_t *, const char *, int);
-extern int zfs_send(zfs_handle_t *, const char *, int);
-extern int zfs_receive(libzfs_handle_t *, const char *, int, int, int,
- boolean_t, int);
+extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t, nvlist_t *);
+extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t);
+extern int zfs_rename(zfs_handle_t *, const char *, boolean_t);
+extern int zfs_send(zfs_handle_t *, const char *, const char *,
+ boolean_t, boolean_t, boolean_t, boolean_t, int);
extern int zfs_promote(zfs_handle_t *);
+typedef struct recvflags {
+ /* print informational messages (ie, -v was specified) */
+ int verbose : 1;
+
+ /* the destination is a prefix, not the exact fs (ie, -d) */
+ int isprefix : 1;
+
+ /* do not actually do the recv, just check if it would work (ie, -n) */
+ int dryrun : 1;
+
+ /* rollback/destroy filesystems as necessary (eg, -F) */
+ int force : 1;
+
+ /* set "canmount=off" on all modified filesystems */
+ int canmountoff : 1;
+
+ /* byteswap flag is used internally; callers need not specify */
+ int byteswap : 1;
+} recvflags_t;
+
+extern int zfs_receive(libzfs_handle_t *, const char *, recvflags_t,
+ int, avl_tree_t *);
+
/*
* Miscellaneous functions.
*/
extern const char *zfs_type_to_name(zfs_type_t);
extern void zfs_refresh_properties(zfs_handle_t *);
extern int zfs_name_valid(const char *, zfs_type_t);
-extern int zfs_disable(zfs_handle_t *);
-extern int zfs_enable(zfs_handle_t *);
extern zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, char *, zfs_type_t);
+extern boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *,
+ zfs_type_t);
+extern int zfs_spa_version(zfs_handle_t *, int *);
+
+/*
+ * dataset permission functions.
+ */
+extern int zfs_perm_set(zfs_handle_t *, nvlist_t *);
+extern int zfs_perm_remove(zfs_handle_t *, nvlist_t *);
+extern int zfs_build_perms(zfs_handle_t *, char *, char *,
+ zfs_deleg_who_type_t, zfs_deleg_inherit_t, nvlist_t **nvlist_t);
+extern int zfs_perm_get(zfs_handle_t *, zfs_allow_t **);
+extern void zfs_free_allows(zfs_allow_t *);
+extern void zfs_deleg_permissions(void);
/*
* Mount support functions.
@@ -369,15 +501,27 @@ extern int zfs_share(zfs_handle_t *);
extern int zfs_unshare(zfs_handle_t *);
/*
- * Protocol-specifc share support functions.
+ * Protocol-specific share support functions.
*/
extern boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **);
+extern boolean_t zfs_is_shared_smb(zfs_handle_t *, char **);
extern int zfs_share_nfs(zfs_handle_t *);
+extern int zfs_share_smb(zfs_handle_t *);
+extern int zfs_shareall(zfs_handle_t *);
extern int zfs_unshare_nfs(zfs_handle_t *, const char *);
+extern int zfs_unshare_smb(zfs_handle_t *, const char *);
extern int zfs_unshareall_nfs(zfs_handle_t *);
+extern int zfs_unshareall_smb(zfs_handle_t *);
+extern int zfs_unshareall_bypath(zfs_handle_t *, const char *);
+extern int zfs_unshareall(zfs_handle_t *);
extern boolean_t zfs_is_shared_iscsi(zfs_handle_t *);
extern int zfs_share_iscsi(zfs_handle_t *);
extern int zfs_unshare_iscsi(zfs_handle_t *);
+#ifdef TODO
+extern int zfs_iscsi_perm_check(libzfs_handle_t *, char *, ucred_t *);
+#endif
+extern int zfs_deleg_share_nfs(libzfs_handle_t *, char *, char *,
+ void *, void *, int, zfs_share_op_t);
/*
* FreeBSD-specific jail support function.
@@ -402,12 +546,6 @@ extern void zfs_nicenum(uint64_t, char *, size_t);
extern int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
/*
- * Pool destroy special. Remove the device information without destroying
- * the underlying dataset.
- */
-extern int zfs_remove_link(zfs_handle_t *);
-
-/*
* Given a device or file, determine if it is part of a pool.
*/
extern int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
@@ -424,6 +562,9 @@ extern int zpool_read_label(int, nvlist_t **);
extern int zpool_create_zvol_links(zpool_handle_t *);
extern int zpool_remove_zvol_links(zpool_handle_t *);
+/* is this zvol valid for use as a dump device? */
+extern int zvol_check_dump_config(char *);
+
/*
* Enable and disable datasets within a pool by mounting/unmounting and
* sharing/unsharing them.
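
The libzfs.h changes above replace zfs_receive()'s bare flag arguments with the new recvflags_t structure. A minimal caller sketch of the new interface, assuming the send stream arrives on stdin; the dataset name "tank/backup" and the helper name are illustrative, not part of this change:

	#include <libzfs.h>
	#include <unistd.h>

	static int
	recv_stream_sketch(libzfs_handle_t *hdl)
	{
		recvflags_t flags = { 0 };

		flags.verbose = 1;	/* print progress, as if -v were given */
		flags.isprefix = 1;	/* destination is a prefix, as with -d */

		/* read the stream from stdin; no sendfs AVL tree is supplied */
		return (zfs_receive(hdl, "tank/backup", flags, STDIN_FILENO, NULL));
	}
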
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c
index 5e6de6d..b905bc6 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c
@@ -20,12 +20,12 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ *
+ * Portions Copyright 2007 Ramprakash Jelari
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <libintl.h>
#include <libuutil.h>
#include <stddef.h>
@@ -65,18 +65,21 @@ typedef struct prop_changenode {
int cn_shared;
int cn_mounted;
int cn_zoned;
+ boolean_t cn_needpost; /* is postfix() needed? */
uu_list_node_t cn_listnode;
} prop_changenode_t;
struct prop_changelist {
zfs_prop_t cl_prop;
zfs_prop_t cl_realprop;
+ zfs_prop_t cl_shareprop; /* used with sharenfs/sharesmb */
uu_list_pool_t *cl_pool;
uu_list_t *cl_list;
boolean_t cl_waslegacy;
boolean_t cl_allchildren;
boolean_t cl_alldependents;
- int cl_flags;
+ int cl_mflags; /* Mount flags */
+ int cl_gflags; /* Gather request flags */
boolean_t cl_haszonedchild;
boolean_t cl_sorted;
};
@@ -84,7 +87,8 @@ struct prop_changelist {
/*
* If the property is 'mountpoint', go through and unmount filesystems as
* necessary. We don't do the same for 'sharenfs', because we can just re-share
- * with different options without interrupting service.
+ * with different options without interrupting service. We do handle 'sharesmb'
+ * since there may be old resource names that need to be removed.
*/
int
changelist_prefix(prop_changelist_t *clp)
@@ -92,11 +96,19 @@ changelist_prefix(prop_changelist_t *clp)
prop_changenode_t *cn;
int ret = 0;
- if (clp->cl_prop != ZFS_PROP_MOUNTPOINT)
+ if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
+ clp->cl_prop != ZFS_PROP_SHARESMB)
return (0);
for (cn = uu_list_first(clp->cl_list); cn != NULL;
cn = uu_list_next(clp->cl_list, cn)) {
+
+ /* if a previous loop failed, set the remaining to false */
+ if (ret == -1) {
+ cn->cn_needpost = B_FALSE;
+ continue;
+ }
+
/*
* If we are in the global zone, but this dataset is exported
* to a local zone, do nothing.
@@ -114,8 +126,11 @@ changelist_prefix(prop_changelist_t *clp)
(void) zfs_unshare_iscsi(cn->cn_handle);
if (zvol_remove_link(cn->cn_handle->zfs_hdl,
- cn->cn_handle->zfs_name) != 0)
+ cn->cn_handle->zfs_name) != 0) {
ret = -1;
+ cn->cn_needpost = B_FALSE;
+ (void) zfs_share_iscsi(cn->cn_handle);
+ }
break;
case ZFS_PROP_VOLSIZE:
@@ -126,10 +141,28 @@ changelist_prefix(prop_changelist_t *clp)
(void) zfs_unshare_iscsi(cn->cn_handle);
break;
}
- } else if (zfs_unmount(cn->cn_handle, NULL, clp->cl_flags) != 0)
- ret = -1;
+ } else {
+ /*
+ * Do the property specific processing.
+ */
+ switch (clp->cl_prop) {
+ case ZFS_PROP_MOUNTPOINT:
+ if (zfs_unmount(cn->cn_handle, NULL,
+ clp->cl_mflags) != 0) {
+ ret = -1;
+ cn->cn_needpost = B_FALSE;
+ }
+ break;
+ case ZFS_PROP_SHARESMB:
+ (void) zfs_unshare_smb(cn->cn_handle, NULL);
+ break;
+ }
+ }
}
+ if (ret == -1)
+ (void) changelist_postfix(clp);
+
return (ret);
}
@@ -147,7 +180,8 @@ changelist_postfix(prop_changelist_t *clp)
{
prop_changenode_t *cn;
char shareopts[ZFS_MAXPROPLEN];
- int ret = 0;
+ int errors = 0;
+ libzfs_handle_t *hdl;
/*
* If we're changing the mountpoint, attempt to destroy the underlying
@@ -163,11 +197,28 @@ changelist_postfix(prop_changelist_t *clp)
remove_mountpoint(cn->cn_handle);
/*
+ * It is possible that the changelist_prefix() used libshare
+ * to unshare some entries. Since libshare caches data, an
+ * attempt to reshare during postfix can fail unless libshare
+ * is uninitialized here so that it will reinitialize later.
+ */
+ if (cn->cn_handle != NULL) {
+ hdl = cn->cn_handle->zfs_hdl;
+ assert(hdl != NULL);
+ zfs_uninit_libshare(hdl);
+ }
+
+ /*
* We walk the datasets in reverse, because we want to mount any parent
- * datasets before mounting the children.
+ * datasets before mounting the children. We walk all datasets even if
+ * there are errors.
*/
for (cn = uu_list_last(clp->cl_list); cn != NULL;
cn = uu_list_prev(clp->cl_list, cn)) {
+
+ boolean_t sharenfs;
+ boolean_t sharesmb;
+
/*
* If we are in the global zone, but this dataset is exported
* to a local zone, do nothing.
@@ -175,6 +226,11 @@ changelist_postfix(prop_changelist_t *clp)
if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
continue;
+ /* Only do post-processing if it's required */
+ if (!cn->cn_needpost)
+ continue;
+ cn->cn_needpost = B_FALSE;
+
zfs_refresh_properties(cn->cn_handle);
if (ZFS_IS_VOLUME(cn->cn_handle)) {
@@ -185,7 +241,7 @@ changelist_postfix(prop_changelist_t *clp)
if (clp->cl_realprop == ZFS_PROP_NAME &&
zvol_create_link(cn->cn_handle->zfs_hdl,
cn->cn_handle->zfs_name) != 0) {
- ret = -1;
+ errors++;
} else if (cn->cn_shared ||
clp->cl_prop == ZFS_PROP_SHAREISCSI) {
if (zfs_prop_get(cn->cn_handle,
@@ -193,43 +249,55 @@ changelist_postfix(prop_changelist_t *clp)
sizeof (shareopts), NULL, NULL, 0,
B_FALSE) == 0 &&
strcmp(shareopts, "off") == 0) {
- ret = zfs_unshare_iscsi(cn->cn_handle);
+ errors +=
+ zfs_unshare_iscsi(cn->cn_handle);
} else {
- ret = zfs_share_iscsi(cn->cn_handle);
+ errors +=
+ zfs_share_iscsi(cn->cn_handle);
}
}
continue;
}
- if ((clp->cl_waslegacy || cn->cn_mounted) &&
- !zfs_is_mounted(cn->cn_handle, NULL) &&
+ /*
+ * Remount if previously mounted or mountpoint was legacy,
+ * or sharenfs or sharesmb property is set.
+ */
+ sharenfs = ((zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARENFS,
+ shareopts, sizeof (shareopts), NULL, NULL, 0,
+ B_FALSE) == 0) && (strcmp(shareopts, "off") != 0));
+
+ sharesmb = ((zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARESMB,
+ shareopts, sizeof (shareopts), NULL, NULL, 0,
+ B_FALSE) == 0) && (strcmp(shareopts, "off") != 0));
+
+ if ((cn->cn_mounted || clp->cl_waslegacy || sharenfs ||
+ sharesmb) && !zfs_is_mounted(cn->cn_handle, NULL) &&
zfs_mount(cn->cn_handle, NULL, 0) != 0)
- ret = -1;
+ errors++;
/*
* We always re-share even if the filesystem is currently
* shared, so that we can adopt any new options.
*/
- if (cn->cn_shared ||
- (clp->cl_prop == ZFS_PROP_SHARENFS && clp->cl_waslegacy)) {
- if (zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARENFS,
- shareopts, sizeof (shareopts), NULL, NULL, 0,
- B_FALSE) == 0 && strcmp(shareopts, "off") == 0) {
- ret = zfs_unshare_nfs(cn->cn_handle, NULL);
- } else {
- ret = zfs_share_nfs(cn->cn_handle);
- }
- }
+ if (sharenfs)
+ errors += zfs_share_nfs(cn->cn_handle);
+ else if (cn->cn_shared || clp->cl_waslegacy)
+ errors += zfs_unshare_nfs(cn->cn_handle, NULL);
+ if (sharesmb)
+ errors += zfs_share_smb(cn->cn_handle);
+ else if (cn->cn_shared || clp->cl_waslegacy)
+ errors += zfs_unshare_smb(cn->cn_handle, NULL);
}
- return (ret);
+ return (errors ? -1 : 0);
}
/*
* Is this "dataset" a child of "parent"?
*/
-static boolean_t
+boolean_t
isa_child_of(const char *dataset, const char *parent)
{
int len;
@@ -280,21 +348,22 @@ changelist_rename(prop_changelist_t *clp, const char *src, const char *dst)
}
/*
- * Given a gathered changelist for the 'sharenfs' property, unshare all the
- * datasets in the list.
+ * Given a gathered changelist for the 'sharenfs' or 'sharesmb' property,
+ * unshare all the datasets in the list.
*/
int
-changelist_unshare(prop_changelist_t *clp)
+changelist_unshare(prop_changelist_t *clp, zfs_share_proto_t *proto)
{
prop_changenode_t *cn;
int ret = 0;
- if (clp->cl_prop != ZFS_PROP_SHARENFS)
+ if (clp->cl_prop != ZFS_PROP_SHARENFS &&
+ clp->cl_prop != ZFS_PROP_SHARESMB)
return (0);
for (cn = uu_list_first(clp->cl_list); cn != NULL;
cn = uu_list_next(clp->cl_list, cn)) {
- if (zfs_unshare_nfs(cn->cn_handle, NULL) != 0)
+ if (zfs_unshare_proto(cn->cn_handle, NULL, proto) != 0)
ret = -1;
}
@@ -316,14 +385,14 @@ changelist_haszonedchild(prop_changelist_t *clp)
* Remove a node from a gathered list.
*/
void
-changelist_remove(zfs_handle_t *zhp, prop_changelist_t *clp)
+changelist_remove(prop_changelist_t *clp, const char *name)
{
prop_changenode_t *cn;
for (cn = uu_list_first(clp->cl_list); cn != NULL;
cn = uu_list_next(clp->cl_list, cn)) {
- if (strcmp(cn->cn_handle->zfs_name, zhp->zfs_name) == 0) {
+ if (strcmp(cn->cn_handle->zfs_name, name) == 0) {
uu_list_remove(clp->cl_list, cn);
zfs_close(cn->cn_handle);
free(cn);
@@ -363,7 +432,8 @@ change_one(zfs_handle_t *zhp, void *data)
char property[ZFS_MAXPROPLEN];
char where[64];
prop_changenode_t *cn;
- zfs_source_t sourcetype;
+ zprop_source_t sourcetype;
+ zprop_source_t share_sourcetype;
/*
* We only want to unmount/unshare those filesystems that may inherit
@@ -383,8 +453,25 @@ change_one(zfs_handle_t *zhp, void *data)
return (0);
}
+ /*
+ * If we are "watching" sharenfs or sharesmb
+ * then check out the companion property which is tracked
+ * in cl_shareprop
+ */
+ if (clp->cl_shareprop != ZPROP_INVAL &&
+ zfs_prop_get(zhp, clp->cl_shareprop, property,
+ sizeof (property), &share_sourcetype, where, sizeof (where),
+ B_FALSE) != 0) {
+ zfs_close(zhp);
+ return (0);
+ }
+
if (clp->cl_alldependents || clp->cl_allchildren ||
- sourcetype == ZFS_SRC_DEFAULT || sourcetype == ZFS_SRC_INHERITED) {
+ sourcetype == ZPROP_SRC_DEFAULT ||
+ sourcetype == ZPROP_SRC_INHERITED ||
+ (clp->cl_shareprop != ZPROP_INVAL &&
+ (share_sourcetype == ZPROP_SRC_DEFAULT ||
+ share_sourcetype == ZPROP_SRC_INHERITED))) {
if ((cn = zfs_alloc(zfs_get_handle(zhp),
sizeof (prop_changenode_t))) == NULL) {
zfs_close(zhp);
@@ -392,9 +479,11 @@ change_one(zfs_handle_t *zhp, void *data)
}
cn->cn_handle = zhp;
- cn->cn_mounted = zfs_is_mounted(zhp, NULL);
+ cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
+ zfs_is_mounted(zhp, NULL);
cn->cn_shared = zfs_is_shared(zhp);
cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+ cn->cn_needpost = B_TRUE;
/* Indicate if any child is exported to a local zone. */
if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
@@ -467,7 +556,8 @@ compare_mountpoints(const void *a, const void *b, void *unused)
* mark whether it was shared beforehand.
*/
prop_changelist_t *
-changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
+changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
+ int mnt_flags)
{
prop_changelist_t *clp;
prop_changenode_t *cn;
@@ -484,7 +574,8 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
* order, regardless of their position in the hierarchy.
*/
if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
- prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS) {
+ prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS ||
+ prop == ZFS_PROP_SHARESMB) {
compare = compare_mountpoints;
clp->cl_sorted = B_TRUE;
}
@@ -502,7 +593,8 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
clp->cl_list = uu_list_create(clp->cl_pool, NULL,
clp->cl_sorted ? UU_LIST_SORTED : 0);
- clp->cl_flags = flags;
+ clp->cl_gflags = gather_flags;
+ clp->cl_mflags = mnt_flags;
if (clp->cl_list == NULL) {
assert(uu_error() == UU_ERROR_NO_MEMORY);
@@ -529,6 +621,8 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
clp->cl_prop = ZFS_PROP_MOUNTPOINT;
} else if (prop == ZFS_PROP_VOLSIZE) {
clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+ } else if (prop == ZFS_PROP_VERSION) {
+ clp->cl_prop = ZFS_PROP_MOUNTPOINT;
} else {
clp->cl_prop = prop;
}
@@ -536,9 +630,19 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
clp->cl_prop != ZFS_PROP_SHARENFS &&
+ clp->cl_prop != ZFS_PROP_SHARESMB &&
clp->cl_prop != ZFS_PROP_SHAREISCSI)
return (clp);
+ /*
+ * If watching SHARENFS or SHARESMB then
+ * also watch its companion property.
+ */
+ if (clp->cl_prop == ZFS_PROP_SHARENFS)
+ clp->cl_shareprop = ZFS_PROP_SHARESMB;
+ else if (clp->cl_prop == ZFS_PROP_SHARESMB)
+ clp->cl_shareprop = ZFS_PROP_SHARENFS;
+
if (clp->cl_alldependents) {
if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
changelist_free(clp);
@@ -554,7 +658,7 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
* and can't tell the difference.
*/
if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
- ZFS_TYPE_ANY)) == NULL) {
+ ZFS_TYPE_DATASET)) == NULL) {
changelist_free(clp);
return (NULL);
}
@@ -571,9 +675,11 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
}
cn->cn_handle = temp;
- cn->cn_mounted = zfs_is_mounted(temp, NULL);
+ cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
+ zfs_is_mounted(temp, NULL);
cn->cn_shared = zfs_is_shared(temp);
cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+ cn->cn_needpost = B_TRUE;
uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
if (clp->cl_sorted) {
@@ -586,14 +692,22 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
}
/*
- * If the property was previously 'legacy' or 'none', record this fact,
- * as the behavior of changelist_postfix() will be different.
+ * If the mountpoint property was previously 'legacy', or 'none',
+ * record it as the behavior of changelist_postfix() will be different.
*/
- if (zfs_prop_get(zhp, prop, property, sizeof (property),
+ if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) &&
+ (zfs_prop_get(zhp, prop, property, sizeof (property),
NULL, NULL, 0, B_FALSE) == 0 &&
- (strcmp(property, "legacy") == 0 || strcmp(property, "none") == 0 ||
- strcmp(property, "off") == 0))
- clp->cl_waslegacy = B_TRUE;
+ (strcmp(property, "legacy") == 0 ||
+ strcmp(property, "none") == 0))) {
+ /*
+ * do not automatically mount ex-legacy datasets if
+ * we specifically set canmount to noauto
+ */
+ if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) !=
+ ZFS_CANMOUNT_NOAUTO)
+ clp->cl_waslegacy = B_TRUE;
+ }
return (clp);
}
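
The libzfs_changelist.c changes above split the old flags argument of changelist_gather() into separate gather and mount flags and refine how changelist_prefix()/changelist_postfix() bracket a property change. A minimal sketch of that calling pattern for a mountpoint change; zhp, the use of MS_FORCE, and the helper name are illustrative assumptions, and these functions are libzfs-internal (declared in libzfs_impl.h):

	#include <sys/mount.h>
	#include "libzfs_impl.h"

	static int
	change_mountpoint_sketch(zfs_handle_t *zhp)
	{
		prop_changelist_t *clp;
		int ret;

		/* no special gather flags; force unmounts during prefix */
		if ((clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0,
		    MS_FORCE)) == NULL)
			return (-1);

		/* unmount (and, for sharesmb, unshare) the affected datasets */
		if (changelist_prefix(clp) != 0) {
			changelist_free(clp);
			return (-1);
		}

		/* ... apply the new mountpoint property here ... */

		ret = changelist_postfix(clp);	/* remount and re-share */
		changelist_free(clp);
		return (ret);
	}
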
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c
index 4fc441a..58ce6c8 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <assert.h>
#include <ctype.h>
#include <errno.h>
@@ -35,20 +33,26 @@
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
+#include <stddef.h>
#include <zone.h>
#include <fcntl.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/mount.h>
+#include <sys/avl.h>
+#include <priv.h>
+#include <pwd.h>
+#include <grp.h>
+#include <stddef.h>
#include <sys/spa.h>
-#include <sys/zio.h>
#include <sys/zap.h>
#include <libzfs.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
+#include "zfs_deleg.h"
static int zvol_create_link_common(libzfs_handle_t *, const char *, int);
@@ -121,7 +125,8 @@ path_to_str(const char *path, int types)
* 'buf' detailing exactly why the name was not valid.
*/
static int
-zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type)
+zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
+ boolean_t modifying)
{
namecheck_err_t why;
char what;
@@ -194,43 +199,118 @@ zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type)
return (0);
}
+ if (modifying && strchr(path, '%') != NULL) {
+ if (hdl != NULL)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid character %c in name"), '%');
+ return (0);
+ }
+
return (-1);
}
int
zfs_name_valid(const char *name, zfs_type_t type)
{
- return (zfs_validate_name(NULL, name, type));
+ if (type == ZFS_TYPE_POOL)
+ return (zpool_name_valid(NULL, B_FALSE, name));
+ return (zfs_validate_name(NULL, name, type, B_FALSE));
}
/*
* This function takes the raw DSL properties, and filters out the user-defined
* properties into a separate nvlist.
*/
-static int
-process_user_props(zfs_handle_t *zhp)
+static nvlist_t *
+process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvpair_t *elem;
nvlist_t *propval;
+ nvlist_t *nvl;
- nvlist_free(zhp->zfs_user_props);
-
- if (nvlist_alloc(&zhp->zfs_user_props, NV_UNIQUE_NAME, 0) != 0)
- return (no_memory(hdl));
+ if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
+ (void) no_memory(hdl);
+ return (NULL);
+ }
elem = NULL;
- while ((elem = nvlist_next_nvpair(zhp->zfs_props, elem)) != NULL) {
+ while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
if (!zfs_prop_user(nvpair_name(elem)))
continue;
verify(nvpair_value_nvlist(elem, &propval) == 0);
- if (nvlist_add_nvlist(zhp->zfs_user_props,
- nvpair_name(elem), propval) != 0)
- return (no_memory(hdl));
+ if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
+ nvlist_free(nvl);
+ (void) no_memory(hdl);
+ return (NULL);
+ }
}
- return (0);
+ return (nvl);
+}
+
+static zpool_handle_t *
+zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
+{
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ zpool_handle_t *zph;
+
+ if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
+ if (hdl->libzfs_pool_handles != NULL)
+ zph->zpool_next = hdl->libzfs_pool_handles;
+ hdl->libzfs_pool_handles = zph;
+ }
+ return (zph);
+}
+
+static zpool_handle_t *
+zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
+{
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ zpool_handle_t *zph = hdl->libzfs_pool_handles;
+
+ while ((zph != NULL) &&
+ (strncmp(pool_name, zpool_get_name(zph), len) != 0))
+ zph = zph->zpool_next;
+ return (zph);
+}
+
+/*
+ * Returns a handle to the pool that contains the provided dataset.
+ * If a handle to that pool already exists then that handle is returned.
+ * Otherwise, a new handle is created and added to the list of handles.
+ */
+static zpool_handle_t *
+zpool_handle(zfs_handle_t *zhp)
+{
+ char *pool_name;
+ int len;
+ zpool_handle_t *zph;
+
+ len = strcspn(zhp->zfs_name, "/@") + 1;
+ pool_name = zfs_alloc(zhp->zfs_hdl, len);
+ (void) strlcpy(pool_name, zhp->zfs_name, len);
+
+ zph = zpool_find_handle(zhp, pool_name, len);
+ if (zph == NULL)
+ zph = zpool_add_handle(zhp, pool_name);
+
+ free(pool_name);
+ return (zph);
+}
+
+void
+zpool_free_handles(libzfs_handle_t *hdl)
+{
+ zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;
+
+ while (zph != NULL) {
+ next = zph->zpool_next;
+ zpool_close(zph);
+ zph = next;
+ }
+ hdl->libzfs_pool_handles = NULL;
}
/*
@@ -241,6 +321,7 @@ get_stats(zfs_handle_t *zhp)
{
zfs_cmd_t zc = { 0 };
libzfs_handle_t *hdl = zhp->zfs_hdl;
+ nvlist_t *allprops, *userprops;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
@@ -261,22 +342,23 @@ get_stats(zfs_handle_t *zhp)
zhp->zfs_dmustats = zc.zc_objset_stats; /* structure assignment */
- (void) strlcpy(zhp->zfs_root, zc.zc_value, sizeof (zhp->zfs_root));
-
- if (zhp->zfs_props) {
- nvlist_free(zhp->zfs_props);
- zhp->zfs_props = NULL;
- }
-
- if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zfs_props) != 0) {
+ if (zcmd_read_dst_nvlist(hdl, &zc, &allprops) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zcmd_free_nvlists(&zc);
- if (process_user_props(zhp) != 0)
+ if ((userprops = process_user_props(zhp, allprops)) == NULL) {
+ nvlist_free(allprops);
return (-1);
+ }
+
+ nvlist_free(zhp->zfs_props);
+ nvlist_free(zhp->zfs_user_props);
+
+ zhp->zfs_props = allprops;
+ zhp->zfs_user_props = userprops;
return (0);
}
@@ -298,16 +380,25 @@ zfs_handle_t *
make_dataset_handle(libzfs_handle_t *hdl, const char *path)
{
zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
+ char *logstr;
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
+ /*
+ * Preserve history log string.
+ * any changes performed here will be
+ * logged as an internal event.
+ */
+ logstr = zhp->zfs_hdl->libzfs_log_str;
+ zhp->zfs_hdl->libzfs_log_str = NULL;
top:
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
if (get_stats(zhp) != 0) {
+ zhp->zfs_hdl->libzfs_log_str = logstr;
free(zhp);
return (NULL);
}
@@ -339,18 +430,19 @@ top:
zc.zc_objset_type = DMU_OST_ZFS;
}
- /* If we can successfully roll it back, reget the stats */
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_ROLLBACK, &zc) == 0)
- goto top;
/*
- * If we can sucessfully destroy it, pretend that it
+ * If we can successfully destroy it, pretend that it
* never existed.
*/
if (ioctl(hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc) == 0) {
+ zhp->zfs_hdl->libzfs_log_str = logstr;
free(zhp);
errno = ENOENT;
return (NULL);
}
+ /* If we can successfully roll it back, reget the stats */
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_ROLLBACK, &zc) == 0)
+ goto top;
}
/*
@@ -373,6 +465,8 @@ top:
else
abort(); /* we should never see any other types */
+ zhp->zfs_hdl->libzfs_log_str = logstr;
+ zhp->zpool_hdl = zpool_handle(zhp);
return (zhp);
}
@@ -393,7 +487,7 @@ zfs_open(libzfs_handle_t *hdl, const char *path, int types)
/*
* Validate the name before we even try to open it.
*/
- if (!zfs_validate_name(hdl, path, ZFS_TYPE_ANY)) {
+ if (!zfs_validate_name(hdl, path, ZFS_TYPE_DATASET, B_FALSE)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid dataset name"));
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
@@ -431,350 +525,92 @@ zfs_close(zfs_handle_t *zhp)
free(zhp);
}
-/*
- * Given a numeric suffix, convert the value into a number of bits that the
- * resulting value must be shifted.
- */
-static int
-str2shift(libzfs_handle_t *hdl, const char *buf)
-{
- const char *ends = "BKMGTPEZ";
- int i;
-
- if (buf[0] == '\0')
- return (0);
- for (i = 0; i < strlen(ends); i++) {
- if (toupper(buf[0]) == ends[i])
- break;
- }
- if (i == strlen(ends)) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "invalid numeric suffix '%s'"), buf);
- return (-1);
- }
-
- /*
- * We want to allow trailing 'b' characters for 'GB' or 'Mb'. But don't
- * allow 'BB' - that's just weird.
- */
- if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0' &&
- toupper(buf[0]) != 'B'))
- return (10*i);
-
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "invalid numeric suffix '%s'"), buf);
- return (-1);
-}
-
-/*
- * Convert a string of the form '100G' into a real number. Used when setting
- * properties or creating a volume. 'buf' is used to place an extended error
- * message for the caller to use.
- */
-static int
-nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
+int
+zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
{
- char *end;
- int shift;
-
- *num = 0;
-
- /* Check to see if this looks like a number. */
- if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
- if (hdl)
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "bad numeric value '%s'"), value);
- return (-1);
- }
-
- /* Rely on stroll() to process the numeric portion. */
- errno = 0;
- *num = strtoll(value, &end, 10);
+ zpool_handle_t *zpool_handle = zhp->zpool_hdl;
- /*
- * Check for ERANGE, which indicates that the value is too large to fit
- * in a 64-bit value.
- */
- if (errno == ERANGE) {
- if (hdl)
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "numeric value is too large"));
+ if (zpool_handle == NULL)
return (-1);
- }
-
- /*
- * If we have a decimal value, then do the computation with floating
- * point arithmetic. Otherwise, use standard arithmetic.
- */
- if (*end == '.') {
- double fval = strtod(value, &end);
-
- if ((shift = str2shift(hdl, end)) == -1)
- return (-1);
-
- fval *= pow(2, shift);
-
- if (fval > UINT64_MAX) {
- if (hdl)
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "numeric value is too large"));
- return (-1);
- }
-
- *num = (uint64_t)fval;
- } else {
- if ((shift = str2shift(hdl, end)) == -1)
- return (-1);
-
- /* Check for overflow */
- if (shift >= 64 || (*num << shift) >> shift != *num) {
- if (hdl)
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "numeric value is too large"));
- return (-1);
- }
-
- *num <<= shift;
- }
+ *spa_version = zpool_get_prop_int(zpool_handle,
+ ZPOOL_PROP_VERSION, NULL);
return (0);
}
-int
-zfs_nicestrtonum(libzfs_handle_t *hdl, const char *str, uint64_t *val)
-{
- return (nicestrtonum(hdl, str, val));
-}
-
/*
- * The prop_parse_*() functions are designed to allow flexibility in callers
- * when setting properties. At the DSL layer, all properties are either 64-bit
- * numbers or strings. We want the user to be able to ignore this fact and
- * specify properties as native values (boolean, for example) or as strings (to
- * simplify command line utilities). This also handles converting index types
- * (compression, checksum, etc) from strings to their on-disk index.
+ * The choice of reservation property depends on the SPA version.
*/
-
static int
-prop_parse_boolean(libzfs_handle_t *hdl, nvpair_t *elem, uint64_t *val)
+zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
{
- uint64_t ret;
+ int spa_version;
- switch (nvpair_type(elem)) {
- case DATA_TYPE_STRING:
- {
- char *value;
- verify(nvpair_value_string(elem, &value) == 0);
-
- if (strcmp(value, "on") == 0) {
- ret = 1;
- } else if (strcmp(value, "off") == 0) {
- ret = 0;
- } else {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "property '%s' must be 'on' or 'off'"),
- nvpair_name(elem));
- return (-1);
- }
- break;
- }
-
- case DATA_TYPE_UINT64:
- {
- verify(nvpair_value_uint64(elem, &ret) == 0);
- if (ret > 1) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be a boolean value"),
- nvpair_name(elem));
- return (-1);
- }
- break;
- }
-
- case DATA_TYPE_BOOLEAN_VALUE:
- {
- boolean_t value;
- verify(nvpair_value_boolean_value(elem, &value) == 0);
- ret = value;
- break;
- }
-
- default:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be a boolean value"),
- nvpair_name(elem));
+ if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
- }
-
- *val = ret;
- return (0);
-}
-
-static int
-prop_parse_number(libzfs_handle_t *hdl, nvpair_t *elem, zfs_prop_t prop,
- uint64_t *val)
-{
- uint64_t ret;
- boolean_t isnone = B_FALSE;
- switch (nvpair_type(elem)) {
- case DATA_TYPE_STRING:
- {
- char *value;
- (void) nvpair_value_string(elem, &value);
- if (strcmp(value, "none") == 0) {
- isnone = B_TRUE;
- ret = 0;
- } else if (nicestrtonum(hdl, value, &ret) != 0) {
- return (-1);
- }
- break;
- }
-
- case DATA_TYPE_UINT64:
- (void) nvpair_value_uint64(elem, &ret);
- break;
-
- default:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be a number"),
- nvpair_name(elem));
- return (-1);
- }
-
- /*
- * Quota special: force 'none' and don't allow 0.
- */
- if (ret == 0 && !isnone && prop == ZFS_PROP_QUOTA) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "use 'none' to disable quota"));
- return (-1);
- }
-
- *val = ret;
- return (0);
-}
-
-static int
-prop_parse_index(libzfs_handle_t *hdl, nvpair_t *elem, zfs_prop_t prop,
- uint64_t *val)
-{
- char *propname = nvpair_name(elem);
- char *value;
-
- if (nvpair_type(elem) != DATA_TYPE_STRING) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be a string"), propname);
- return (-1);
- }
-
- (void) nvpair_value_string(elem, &value);
-
- if (zfs_prop_string_to_index(prop, value, val) != 0) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be one of '%s'"), propname,
- zfs_prop_values(prop));
- return (-1);
- }
+ if (spa_version >= SPA_VERSION_REFRESERVATION)
+ *resv_prop = ZFS_PROP_REFRESERVATION;
+ else
+ *resv_prop = ZFS_PROP_RESERVATION;
return (0);
}
/*
- * Check if the bootfs name has the same pool name as it is set to.
- * Assuming bootfs is a valid dataset name.
- */
-static boolean_t
-bootfs_poolname_valid(char *pool, char *bootfs)
-{
- char ch, *pname;
-
- /* get the pool name from the bootfs name */
- pname = bootfs;
- while (*bootfs && !isspace(*bootfs) && *bootfs != '/')
- bootfs++;
-
- ch = *bootfs;
- *bootfs = 0;
-
- if (strcmp(pool, pname) == 0) {
- *bootfs = ch;
- return (B_TRUE);
- }
-
- *bootfs = ch;
- return (B_FALSE);
-}
-
-/*
* Given an nvlist of properties to set, validates that they are correct, and
* parses any numeric properties (index, boolean, etc) if they are specified as
* strings.
*/
nvlist_t *
-zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
- nvlist_t *nvl, uint64_t zoned, zfs_handle_t *zhp, const char *errbuf)
+zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
+ uint64_t zoned, zfs_handle_t *zhp, const char *errbuf)
{
nvpair_t *elem;
- const char *propname;
- zfs_prop_t prop;
uint64_t intval;
char *strval;
+ zfs_prop_t prop;
nvlist_t *ret;
- int isuser;
+ int chosen_normal = -1;
+ int chosen_utf = -1;
if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
- if (type == ZFS_TYPE_SNAPSHOT) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "snapshot properties cannot be modified"));
- (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
- goto error;
- }
-
elem = NULL;
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
- propname = nvpair_name(elem);
+ const char *propname = nvpair_name(elem);
/*
* Make sure this property is valid and applies to this type.
*/
- if ((prop = zfs_name_to_prop_common(propname, type))
- == ZFS_PROP_INVAL) {
- isuser = zfs_prop_user(propname);
- if (!isuser || (isuser && (type & ZFS_TYPE_POOL))) {
+ if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) {
+ if (!zfs_prop_user(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "invalid property '%s'"),
- propname);
+ "invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
- } else {
- /*
- * If this is a user property, make sure it's a
- * string, and that it's less than
- * ZAP_MAXNAMELEN.
- */
- if (nvpair_type(elem) != DATA_TYPE_STRING) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be a string"),
- propname);
- (void) zfs_error(hdl, EZFS_BADPROP,
- errbuf);
- goto error;
- }
+ }
- if (strlen(nvpair_name(elem)) >=
- ZAP_MAXNAMELEN) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "property name '%s' is too long"),
- propname);
- (void) zfs_error(hdl, EZFS_BADPROP,
- errbuf);
- goto error;
- }
+ /*
+ * If this is a user property, make sure it's a
+ * string, and that it's less than ZAP_MAXNAMELEN.
+ */
+ if (nvpair_type(elem) != DATA_TYPE_STRING) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a string"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property name '%s' is too long"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
}
(void) nvpair_value_string(elem, &strval);
@@ -785,10 +621,12 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
continue;
}
- /*
- * Normalize the name, to get rid of shorthand abbrevations.
- */
- propname = zfs_prop_to_name(prop);
+ if (type == ZFS_TYPE_SNAPSHOT) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "this property can not be modified for snapshots"));
+ (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
+ goto error;
+ }
if (!zfs_prop_valid_for_type(prop, type)) {
zfs_error_aux(hdl,
@@ -799,7 +637,7 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
}
if (zfs_prop_readonly(prop) &&
- (prop != ZFS_PROP_VOLBLOCKSIZE || zhp != NULL)) {
+ (!zfs_prop_setonce(prop) || zhp != NULL)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
@@ -807,70 +645,31 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
goto error;
}
+ if (zprop_parse_value(hdl, elem, prop, type, ret,
+ &strval, &intval, errbuf) != 0)
+ goto error;
+
/*
- * Convert any properties to the internal DSL value types.
+ * Perform some additional checks for specific properties.
*/
- strval = NULL;
- switch (zfs_prop_get_type(prop)) {
- case prop_type_boolean:
- if (prop_parse_boolean(hdl, elem, &intval) != 0) {
- (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
- goto error;
- }
- break;
+ switch (prop) {
+ case ZFS_PROP_VERSION:
+ {
+ int version;
- case prop_type_string:
- if (nvpair_type(elem) != DATA_TYPE_STRING) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be a string"),
- propname);
- (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
- goto error;
- }
- (void) nvpair_value_string(elem, &strval);
- if (strlen(strval) >= ZFS_MAXPROPLEN) {
+ if (zhp == NULL)
+ break;
+ version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
+ if (intval < version) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' is too long"), propname);
+ "Can not downgrade; already at version %u"),
+ version);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
-
- case prop_type_number:
- if (prop_parse_number(hdl, elem, prop, &intval) != 0) {
- (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
- goto error;
- }
- break;
-
- case prop_type_index:
- if (prop_parse_index(hdl, elem, prop, &intval) != 0) {
- (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
- goto error;
- }
- break;
-
- default:
- abort();
}
- /*
- * Add the result to our return set of properties.
- */
- if (strval) {
- if (nvlist_add_string(ret, propname, strval) != 0) {
- (void) no_memory(hdl);
- goto error;
- }
- } else if (nvlist_add_uint64(ret, propname, intval) != 0) {
- (void) no_memory(hdl);
- goto error;
- }
-
- /*
- * Perform some additional checks for specific properties.
- */
- switch (prop) {
case ZFS_PROP_RECORDSIZE:
case ZFS_PROP_VOLBLOCKSIZE:
/* must be power of two within SPA_{MIN,MAX}BLOCKSIZE */
@@ -900,32 +699,52 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
break;
case ZFS_PROP_MOUNTPOINT:
+ {
+ namecheck_err_t why;
+
if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
break;
- if (strval[0] != '/') {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "'%s' must be an absolute path, "
- "'none', or 'legacy'"), propname);
+ if (mountpoint_namecheck(strval, &why)) {
+ switch (why) {
+ case NAME_ERR_LEADING_SLASH:
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN,
+ "'%s' must be an absolute path, "
+ "'none', or 'legacy'"), propname);
+ break;
+ case NAME_ERR_TOOLONG:
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN,
+ "component of '%s' is too long"),
+ propname);
+ break;
+ }
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
+ }
+
/*FALLTHRU*/
+ case ZFS_PROP_SHARESMB:
case ZFS_PROP_SHARENFS:
/*
- * For the mountpoint and sharenfs properties, check if
- * it can be set in a global/non-global zone based on
+ * For the mountpoint and sharenfs or sharesmb
+ * properties, check if it can be set in a
+ * global/non-global zone based on
* the zoned property value:
*
* global zone non-global zone
* --------------------------------------------------
* zoned=on mountpoint (no) mountpoint (yes)
* sharenfs (no) sharenfs (no)
+ * sharesmb (no) sharesmb (no)
*
* zoned=off mountpoint (yes) N/A
* sharenfs (yes)
+ * sharesmb (yes)
*/
if (zoned) {
if (getzoneid() == GLOBAL_ZONEID) {
@@ -936,7 +755,8 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
- } else if (prop == ZFS_PROP_SHARENFS) {
+ } else if (prop == ZFS_PROP_SHARENFS ||
+ prop == ZFS_PROP_SHARESMB) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set in "
"a non-global zone"), propname);
@@ -956,22 +776,73 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
goto error;
}
- break;
-
- case ZFS_PROP_BOOTFS:
/*
- * bootfs property value has to be a dataset name and
- * the dataset has to be in the same pool as it sets to.
+ * At this point, it is legitimate to set the
+ * property. Now we want to make sure that the
+ * property value is valid if it is sharenfs.
*/
- if (strval[0] != '\0' && (!zfs_name_valid(strval,
- ZFS_TYPE_FILESYSTEM) || !bootfs_poolname_valid(
- pool_name, strval))) {
+ if ((prop == ZFS_PROP_SHARENFS ||
+ prop == ZFS_PROP_SHARESMB) &&
+ strcmp(strval, "on") != 0 &&
+ strcmp(strval, "off") != 0) {
+ zfs_share_proto_t proto;
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
- "is an invalid name"), strval);
- (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
- goto error;
+ if (prop == ZFS_PROP_SHARESMB)
+ proto = PROTO_SMB;
+ else
+ proto = PROTO_NFS;
+
+ /*
+ * Must be an valid sharing protocol
+ * option string so init the libshare
+ * in order to enable the parser and
+ * then parse the options. We use the
+ * control API since we don't care about
+ * the current configuration and don't
+ * want the overhead of loading it
+ * until we actually do something.
+ */
+
+ if (zfs_init_libshare(hdl,
+ SA_INIT_CONTROL_API) != SA_OK) {
+ /*
+ * An error occurred so we can't do
+ * anything
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' cannot be set: problem "
+ "in share initialization"),
+ propname);
+ (void) zfs_error(hdl, EZFS_BADPROP,
+ errbuf);
+ goto error;
+ }
+
+ if (zfs_parse_options(strval, proto) != SA_OK) {
+ /*
+ * There was an error in parsing so
+ * deal with it by issuing an error
+ * message and leaving after
+ * uninitializing the the libshare
+ * interface.
+ */
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' cannot be set to invalid "
+ "options"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP,
+ errbuf);
+ zfs_uninit_libshare(hdl);
+ goto error;
+ }
+ zfs_uninit_libshare(hdl);
}
+
+ break;
+ case ZFS_PROP_UTF8ONLY:
+ chosen_utf = (int)intval;
+ break;
+ case ZFS_PROP_NORMALIZE:
+ chosen_normal = (int)intval;
break;
}
@@ -988,6 +859,7 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
switch (prop) {
case ZFS_PROP_RESERVATION:
+ case ZFS_PROP_REFRESERVATION:
if (intval > volsize) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is greater than current "
@@ -1025,6 +897,27 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
}
/*
+ * If normalization was chosen, but no UTF8 choice was made,
+ * enforce rejection of non-UTF8 names.
+ *
+ * If normalization was chosen, but rejecting non-UTF8 names
+ * was explicitly not chosen, it is an error.
+ */
+ if (chosen_normal > 0 && chosen_utf < 0) {
+ if (nvlist_add_uint64(ret,
+ zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
+ (void) no_memory(hdl);
+ goto error;
+ }
+ } else if (chosen_normal > 0 && chosen_utf == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be set 'on' if normalization chosen"),
+ zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ /*
* If this is an existing volume, and someone is setting the volsize,
* make sure that it matches the reservation, or add it if necessary.
*/
@@ -1033,23 +926,24 @@ zfs_validate_properties(libzfs_handle_t *hdl, zfs_type_t type, char *pool_name,
&intval) == 0) {
uint64_t old_volsize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLSIZE);
- uint64_t old_reservation = zfs_prop_get_int(zhp,
- ZFS_PROP_RESERVATION);
+ uint64_t old_reservation;
uint64_t new_reservation;
+ zfs_prop_t resv_prop;
+
+ if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
+ goto error;
+ old_reservation = zfs_prop_get_int(zhp, resv_prop);
if (old_volsize == old_reservation &&
- nvlist_lookup_uint64(ret,
- zfs_prop_to_name(ZFS_PROP_RESERVATION),
+ nvlist_lookup_uint64(ret, zfs_prop_to_name(resv_prop),
&new_reservation) != 0) {
if (nvlist_add_uint64(ret,
- zfs_prop_to_name(ZFS_PROP_RESERVATION),
- intval) != 0) {
+ zfs_prop_to_name(resv_prop), intval) != 0) {
(void) no_memory(hdl);
goto error;
}
}
}
-
return (ret);
error:
@@ -1057,6 +951,808 @@ error:
return (NULL);
}
+static int
+zfs_get_perm_who(const char *who, zfs_deleg_who_type_t *who_type,
+ uint64_t *ret_who)
+{
+ struct passwd *pwd;
+ struct group *grp;
+ uid_t id;
+
+ if (*who_type == ZFS_DELEG_EVERYONE || *who_type == ZFS_DELEG_CREATE ||
+ *who_type == ZFS_DELEG_NAMED_SET) {
+ *ret_who = -1;
+ return (0);
+ }
+ if (who == NULL && !(*who_type == ZFS_DELEG_EVERYONE))
+ return (EZFS_BADWHO);
+
+ if (*who_type == ZFS_DELEG_WHO_UNKNOWN &&
+ strcmp(who, "everyone") == 0) {
+ *ret_who = -1;
+ *who_type = ZFS_DELEG_EVERYONE;
+ return (0);
+ }
+
+ pwd = getpwnam(who);
+ grp = getgrnam(who);
+
+ if ((*who_type == ZFS_DELEG_USER) && pwd) {
+ *ret_who = pwd->pw_uid;
+ } else if ((*who_type == ZFS_DELEG_GROUP) && grp) {
+ *ret_who = grp->gr_gid;
+ } else if (pwd) {
+ *ret_who = pwd->pw_uid;
+ *who_type = ZFS_DELEG_USER;
+ } else if (grp) {
+ *ret_who = grp->gr_gid;
+ *who_type = ZFS_DELEG_GROUP;
+ } else {
+ char *end;
+
+ id = strtol(who, &end, 10);
+ if (errno != 0 || *end != '\0') {
+ return (EZFS_BADWHO);
+ } else {
+ *ret_who = id;
+ if (*who_type == ZFS_DELEG_WHO_UNKNOWN)
+ *who_type = ZFS_DELEG_USER;
+ }
+ }
+
+ return (0);
+}
+
+static void
+zfs_perms_add_to_nvlist(nvlist_t *who_nvp, char *name, nvlist_t *perms_nvp)
+{
+ if (perms_nvp != NULL) {
+ verify(nvlist_add_nvlist(who_nvp,
+ name, perms_nvp) == 0);
+ } else {
+ verify(nvlist_add_boolean(who_nvp, name) == 0);
+ }
+}
+
+static void
+helper(zfs_deleg_who_type_t who_type, uint64_t whoid, char *whostr,
+ zfs_deleg_inherit_t inherit, nvlist_t *who_nvp, nvlist_t *perms_nvp,
+ nvlist_t *sets_nvp)
+{
+ boolean_t do_perms, do_sets;
+ char name[ZFS_MAX_DELEG_NAME];
+
+ do_perms = (nvlist_next_nvpair(perms_nvp, NULL) != NULL);
+ do_sets = (nvlist_next_nvpair(sets_nvp, NULL) != NULL);
+
+ if (!do_perms && !do_sets)
+ do_perms = do_sets = B_TRUE;
+
+ if (do_perms) {
+ zfs_deleg_whokey(name, who_type, inherit,
+ (who_type == ZFS_DELEG_NAMED_SET) ?
+ whostr : (void *)&whoid);
+ zfs_perms_add_to_nvlist(who_nvp, name, perms_nvp);
+ }
+ if (do_sets) {
+ zfs_deleg_whokey(name, toupper(who_type), inherit,
+ (who_type == ZFS_DELEG_NAMED_SET) ?
+ whostr : (void *)&whoid);
+ zfs_perms_add_to_nvlist(who_nvp, name, sets_nvp);
+ }
+}
+
+static void
+zfs_perms_add_who_nvlist(nvlist_t *who_nvp, uint64_t whoid, void *whostr,
+ nvlist_t *perms_nvp, nvlist_t *sets_nvp,
+ zfs_deleg_who_type_t who_type, zfs_deleg_inherit_t inherit)
+{
+ if (who_type == ZFS_DELEG_NAMED_SET || who_type == ZFS_DELEG_CREATE) {
+ helper(who_type, whoid, whostr, 0,
+ who_nvp, perms_nvp, sets_nvp);
+ } else {
+ if (inherit & ZFS_DELEG_PERM_LOCAL) {
+ helper(who_type, whoid, whostr, ZFS_DELEG_LOCAL,
+ who_nvp, perms_nvp, sets_nvp);
+ }
+ if (inherit & ZFS_DELEG_PERM_DESCENDENT) {
+ helper(who_type, whoid, whostr, ZFS_DELEG_DESCENDENT,
+ who_nvp, perms_nvp, sets_nvp);
+ }
+ }
+}
+
+/*
+ * Construct nvlist to pass down to kernel for setting/removing permissions.
+ *
+ * The nvlist is constructed as a series of nvpairs with an optional embedded
+ * nvlist of permissions to remove or set. The topmost nvpairs are the actual
+ * base attribute names stored in the DSL.
+ * Arguments:
+ *
+ * whostr: is a comma separated list of users, groups, or a single set name.
+ * whostr may be null for everyone or create perms.
+ * who_type: is the type of entry in whostr. Typically this will be
+ * ZFS_DELEG_WHO_UNKNOWN.
+ * perms: comma separated list of permissions. May be null if the user
+ * is requesting to remove permissions by who.
+ * inherit: Specifies the inheritance of the permissions. Will be either
+ * ZFS_DELEG_PERM_LOCAL and/or ZFS_DELEG_PERM_DESCENDENT.
+ * nvp: The constructed nvlist to pass to zfs_perm_set().
+ * The output nvp will look something like this.
+ * ul$1234 -> {create ; destroy }
+ * Ul$1234 -> { @myset }
+ * s-$@myset - { snapshot; checksum; compression }
+ */
+int
+zfs_build_perms(zfs_handle_t *zhp, char *whostr, char *perms,
+ zfs_deleg_who_type_t who_type, zfs_deleg_inherit_t inherit, nvlist_t **nvp)
+{
+ nvlist_t *who_nvp;
+ nvlist_t *perms_nvp = NULL;
+ nvlist_t *sets_nvp = NULL;
+ char errbuf[1024];
+ char *who_tok, *perm;
+ int error;
+
+ *nvp = NULL;
+
+ if (perms) {
+ if ((error = nvlist_alloc(&perms_nvp,
+ NV_UNIQUE_NAME, 0)) != 0) {
+ return (1);
+ }
+ if ((error = nvlist_alloc(&sets_nvp,
+ NV_UNIQUE_NAME, 0)) != 0) {
+ nvlist_free(perms_nvp);
+ return (1);
+ }
+ }
+
+ if ((error = nvlist_alloc(&who_nvp, NV_UNIQUE_NAME, 0)) != 0) {
+ if (perms_nvp)
+ nvlist_free(perms_nvp);
+ if (sets_nvp)
+ nvlist_free(sets_nvp);
+ return (1);
+ }
+
+ if (who_type == ZFS_DELEG_NAMED_SET) {
+ namecheck_err_t why;
+ char what;
+
+ if ((error = permset_namecheck(whostr, &why, &what)) != 0) {
+ nvlist_free(who_nvp);
+ if (perms_nvp)
+ nvlist_free(perms_nvp);
+ if (sets_nvp)
+ nvlist_free(sets_nvp);
+
+ switch (why) {
+ case NAME_ERR_NO_AT:
+ zfs_error_aux(zhp->zfs_hdl,
+ dgettext(TEXT_DOMAIN,
+ "set definition must begin with an '@' "
+ "character"));
+ }
+ return (zfs_error(zhp->zfs_hdl,
+ EZFS_BADPERMSET, whostr));
+ }
+ }
+
+ /*
+ * Build up nvlist(s) of permissions. Two nvlists are maintained.
+ * The first nvlist perms_nvp will have normal permissions and the
+ * other sets_nvp will have only permission set names in it.
+ */
+ for (perm = strtok(perms, ","); perm; perm = strtok(NULL, ",")) {
+ const char *perm_canonical = zfs_deleg_canonicalize_perm(perm);
+
+ if (perm_canonical) {
+ verify(nvlist_add_boolean(perms_nvp,
+ perm_canonical) == 0);
+ } else if (perm[0] == '@') {
+ verify(nvlist_add_boolean(sets_nvp, perm) == 0);
+ } else {
+ nvlist_free(who_nvp);
+ nvlist_free(perms_nvp);
+ nvlist_free(sets_nvp);
+ return (zfs_error(zhp->zfs_hdl, EZFS_BADPERM, perm));
+ }
+ }
+
+ if (whostr && who_type != ZFS_DELEG_CREATE) {
+ who_tok = strtok(whostr, ",");
+ if (who_tok == NULL) {
+ nvlist_free(who_nvp);
+ if (perms_nvp)
+ nvlist_free(perms_nvp);
+ if (sets_nvp)
+ nvlist_free(sets_nvp);
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "Who string is NULL"),
+ whostr);
+ return (zfs_error(zhp->zfs_hdl, EZFS_BADWHO, errbuf));
+ }
+ }
+
+ /*
+ * Now create the nvlist(s)
+ */
+ do {
+ uint64_t who_id;
+
+ error = zfs_get_perm_who(who_tok, &who_type,
+ &who_id);
+ if (error) {
+ nvlist_free(who_nvp);
+ if (perms_nvp)
+ nvlist_free(perms_nvp);
+ if (sets_nvp)
+ nvlist_free(sets_nvp);
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN,
+ "Unable to determine uid/gid for "
+ "%s "), who_tok);
+ return (zfs_error(zhp->zfs_hdl, EZFS_BADWHO, errbuf));
+ }
+
+ /*
+ * add entries for both local and descendent when required
+ */
+ zfs_perms_add_who_nvlist(who_nvp, who_id, who_tok,
+ perms_nvp, sets_nvp, who_type, inherit);
+
+ } while (who_tok = strtok(NULL, ","));
+ *nvp = who_nvp;
+ return (0);
+}
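For orientation, a hedged sketch of driving zfs_build_perms() and zfs_perm_set() from a caller (not part of the patch; the dataset name "tank/home" and the user names are hypothetical). Note that whostr and perms must be writable buffers, since they are tokenized with strtok().

#include <libnvpair.h>
#include <libzfs.h>

/*
 * Hypothetical example: delegate 'snapshot' and 'mount' to two users,
 * both locally and on descendents.
 */
int
allow_snapshot(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp;
	nvlist_t *nvp = NULL;
	char whostr[] = "alice,bob";
	char perms[] = "snapshot,mount";
	int ret = -1;

	if ((zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zfs_build_perms(zhp, whostr, perms, ZFS_DELEG_WHO_UNKNOWN,
	    ZFS_DELEG_PERM_LOCAL | ZFS_DELEG_PERM_DESCENDENT, &nvp) == 0) {
		ret = zfs_perm_set(zhp, nvp);
		nvlist_free(nvp);
	}
	zfs_close(zhp);
	return (ret);
}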
+
+static int
+zfs_perm_set_common(zfs_handle_t *zhp, nvlist_t *nvp, boolean_t unset)
+{
+ zfs_cmd_t zc = { 0 };
+ int error;
+ char errbuf[1024];
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "Cannot update 'allows' for '%s'"),
+ zhp->zfs_name);
+
+ if (zcmd_write_src_nvlist(zhp->zfs_hdl, &zc, nvp))
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ zc.zc_perm_action = unset;
+
+ error = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SET_FSACL, &zc);
+ if (error && errno == ENOTSUP) {
+ (void) snprintf(errbuf, sizeof (errbuf),
+ gettext("Pool must be upgraded to use 'allow/unallow'"));
+ zcmd_free_nvlists(&zc);
+ return (zfs_error(zhp->zfs_hdl, EZFS_BADVERSION, errbuf));
+ } else if (error) {
+ return (zfs_standard_error(zhp->zfs_hdl, errno, errbuf));
+ }
+ zcmd_free_nvlists(&zc);
+
+ return (error);
+}
+
+int
+zfs_perm_set(zfs_handle_t *zhp, nvlist_t *nvp)
+{
+ return (zfs_perm_set_common(zhp, nvp, B_FALSE));
+}
+
+int
+zfs_perm_remove(zfs_handle_t *zhp, nvlist_t *perms)
+{
+ return (zfs_perm_set_common(zhp, perms, B_TRUE));
+}
+
+static int
+perm_compare(const void *arg1, const void *arg2)
+{
+ const zfs_perm_node_t *node1 = arg1;
+ const zfs_perm_node_t *node2 = arg2;
+ int ret;
+
+ ret = strcmp(node1->z_pname, node2->z_pname);
+
+ if (ret > 0)
+ return (1);
+ if (ret < 0)
+ return (-1);
+ else
+ return (0);
+}
+
+static void
+zfs_destroy_perm_tree(avl_tree_t *tree)
+{
+ zfs_perm_node_t *permnode;
+ void *cookie = NULL;
+
+ while ((permnode = avl_destroy_nodes(tree, &cookie)) != NULL)
+ free(permnode);
+ avl_destroy(tree);
+}
+
+static void
+zfs_destroy_tree(avl_tree_t *tree)
+{
+ zfs_allow_node_t *allownode;
+ void *cookie = NULL;
+
+ while ((allownode = avl_destroy_nodes(tree, &cookie)) != NULL) {
+ zfs_destroy_perm_tree(&allownode->z_localdescend);
+ zfs_destroy_perm_tree(&allownode->z_local);
+ zfs_destroy_perm_tree(&allownode->z_descend);
+ free(allownode);
+ }
+ avl_destroy(tree);
+}
+
+void
+zfs_free_allows(zfs_allow_t *allow)
+{
+ zfs_allow_t *allownext;
+ zfs_allow_t *freeallow;
+
+ allownext = allow;
+ while (allownext) {
+ zfs_destroy_tree(&allownext->z_sets);
+ zfs_destroy_tree(&allownext->z_crperms);
+ zfs_destroy_tree(&allownext->z_user);
+ zfs_destroy_tree(&allownext->z_group);
+ zfs_destroy_tree(&allownext->z_everyone);
+ freeallow = allownext;
+ allownext = allownext->z_next;
+ free(freeallow);
+ }
+}
+
+static zfs_allow_t *
+zfs_alloc_perm_tree(zfs_handle_t *zhp, zfs_allow_t *prev, char *setpoint)
+{
+ zfs_allow_t *ptree;
+
+ if ((ptree = zfs_alloc(zhp->zfs_hdl,
+ sizeof (zfs_allow_t))) == NULL) {
+ return (NULL);
+ }
+
+ (void) strlcpy(ptree->z_setpoint, setpoint, sizeof (ptree->z_setpoint));
+ avl_create(&ptree->z_sets,
+ perm_compare, sizeof (zfs_allow_node_t),
+ offsetof(zfs_allow_node_t, z_node));
+ avl_create(&ptree->z_crperms,
+ perm_compare, sizeof (zfs_allow_node_t),
+ offsetof(zfs_allow_node_t, z_node));
+ avl_create(&ptree->z_user,
+ perm_compare, sizeof (zfs_allow_node_t),
+ offsetof(zfs_allow_node_t, z_node));
+ avl_create(&ptree->z_group,
+ perm_compare, sizeof (zfs_allow_node_t),
+ offsetof(zfs_allow_node_t, z_node));
+ avl_create(&ptree->z_everyone,
+ perm_compare, sizeof (zfs_allow_node_t),
+ offsetof(zfs_allow_node_t, z_node));
+
+ if (prev)
+ prev->z_next = ptree;
+ ptree->z_next = NULL;
+ return (ptree);
+}
+
+/*
+ * Add permissions to the appropriate AVL permission tree.
+ * The appropriate tree may not be the requested tree.
+ * For example, if ld indicates a local permission, but the
+ * same permission also exists as a descendent permission,
+ * then the permission will be removed from the descendent
+ * tree and added to the local+descendent tree.
+ */
+static int
+zfs_coalesce_perm(zfs_handle_t *zhp, zfs_allow_node_t *allownode,
+ char *perm, char ld)
+{
+ zfs_perm_node_t pnode, *permnode, *permnode2;
+ zfs_perm_node_t *newnode;
+ avl_index_t where, where2;
+ avl_tree_t *tree, *altree;
+
+ (void) strlcpy(pnode.z_pname, perm, sizeof (pnode.z_pname));
+
+ if (ld == ZFS_DELEG_NA) {
+ tree = &allownode->z_localdescend;
+ altree = &allownode->z_descend;
+ } else if (ld == ZFS_DELEG_LOCAL) {
+ tree = &allownode->z_local;
+ altree = &allownode->z_descend;
+ } else {
+ tree = &allownode->z_descend;
+ altree = &allownode->z_local;
+ }
+ permnode = avl_find(tree, &pnode, &where);
+ permnode2 = avl_find(altree, &pnode, &where2);
+
+ if (permnode2) {
+ avl_remove(altree, permnode2);
+ free(permnode2);
+ if (permnode == NULL) {
+ tree = &allownode->z_localdescend;
+ }
+ }
+
+ /*
+ * Now insert the new permission in either the requested location
+ * (local/descendent) or into local+descendent when it exists in both.
+ */
+ if (permnode == NULL) {
+ if ((newnode = zfs_alloc(zhp->zfs_hdl,
+ sizeof (zfs_perm_node_t))) == NULL) {
+ return (-1);
+ }
+ *newnode = pnode;
+ avl_add(tree, newnode);
+ }
+ return (0);
+}
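A standalone sketch of the coalescing pattern above, using the same libavl calls (avl_create/avl_find/avl_remove/avl_add) on a simplified stand-in node type rather than the real zfs_perm_node_t; the three trees are assumed to be initialized with demo_tree_init() and names are assumed to fit in 64 bytes.

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/avl.h>

/* Simplified stand-in for zfs_perm_node_t (hypothetical demo type). */
typedef struct demo_node {
	char		d_name[64];
	avl_node_t	d_link;
} demo_node_t;

static int
demo_compare(const void *a, const void *b)
{
	int c = strcmp(((const demo_node_t *)a)->d_name,
	    ((const demo_node_t *)b)->d_name);
	return (c > 0 ? 1 : (c < 0 ? -1 : 0));
}

static void
demo_tree_init(avl_tree_t *tree)
{
	avl_create(tree, demo_compare, sizeof (demo_node_t),
	    offsetof(demo_node_t, d_link));
}

/*
 * Insert 'name' into 'tree'; if it already exists in 'alttree', move it
 * to 'bothtree' instead, mirroring what zfs_coalesce_perm() does with
 * the local, descendent and local+descendent trees.
 */
static void
demo_coalesce(avl_tree_t *tree, avl_tree_t *alttree, avl_tree_t *bothtree,
    const char *name)
{
	demo_node_t key, *node, *altnode;
	avl_index_t where, altwhere;

	(void) strlcpy(key.d_name, name, sizeof (key.d_name));
	node = avl_find(tree, &key, &where);
	altnode = avl_find(alttree, &key, &altwhere);

	if (altnode != NULL) {
		avl_remove(alttree, altnode);
		free(altnode);
		if (node == NULL)
			tree = bothtree;
	}
	if (node == NULL) {
		if ((node = malloc(sizeof (demo_node_t))) == NULL)
			return;
		*node = key;
		avl_add(tree, node);
	}
}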
+
+/*
+ * Uggh, this is going to be a bit complicated.
+ * We have an nvlist coming out of the kernel that
+ * will indicate where the permission is set and then
+ * it will contain all of the various "who's", and what
+ * their permissions are. To further complicate this
+ * we will then have to coalesce the local, descendent
+ * and local+descendent permissions where appropriate.
+ * The kernel only knows about a permission as being local
+ * or descendent, but not both.
+ *
+ * In order to make this easier for zfs_main to deal with
+ * a series of AVL trees will be used to maintain
+ * all of this, primarily for sorting purposes as well
+ * as the ability to quickly locate a specific entry.
+ *
+ * What we end up with are trees for sets, create perms,
+ * user, groups and everyone. With each of those trees
+ * we have subtrees for local, descendent and local+descendent
+ * permissions.
+ */
+int
+zfs_perm_get(zfs_handle_t *zhp, zfs_allow_t **zfs_perms)
+{
+ zfs_cmd_t zc = { 0 };
+ int error;
+ nvlist_t *nvlist;
+ nvlist_t *permnv, *sourcenv;
+ nvpair_t *who_pair, *source_pair;
+ nvpair_t *perm_pair;
+ char errbuf[1024];
+ zfs_allow_t *zallowp, *newallowp;
+ char ld;
+ char *nvpname;
+ uid_t uid;
+ gid_t gid;
+ avl_tree_t *tree;
+ avl_index_t where;
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
+ return (-1);
+
+ while (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_GET_FSACL, &zc) != 0) {
+ if (errno == ENOMEM) {
+ if (zcmd_expand_dst_nvlist(zhp->zfs_hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ } else if (errno == ENOTSUP) {
+ zcmd_free_nvlists(&zc);
+ (void) snprintf(errbuf, sizeof (errbuf),
+ gettext("Pool must be upgraded to use 'allow'"));
+ return (zfs_error(zhp->zfs_hdl,
+ EZFS_BADVERSION, errbuf));
+ } else {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ }
+
+ if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &nvlist) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ source_pair = nvlist_next_nvpair(nvlist, NULL);
+
+ if (source_pair == NULL) {
+ *zfs_perms = NULL;
+ return (0);
+ }
+
+ *zfs_perms = zfs_alloc_perm_tree(zhp, NULL, nvpair_name(source_pair));
+ if (*zfs_perms == NULL) {
+ return (0);
+ }
+
+ zallowp = *zfs_perms;
+
+ for (;;) {
+ struct passwd *pwd;
+ struct group *grp;
+ zfs_allow_node_t *allownode;
+ zfs_allow_node_t findallownode;
+ zfs_allow_node_t *newallownode;
+
+ (void) strlcpy(zallowp->z_setpoint,
+ nvpair_name(source_pair),
+ sizeof (zallowp->z_setpoint));
+
+ if ((error = nvpair_value_nvlist(source_pair, &sourcenv)) != 0)
+ goto abort;
+
+ /*
+ * Make sure nvlist is composed correctly
+ */
+ if (zfs_deleg_verify_nvlist(sourcenv)) {
+ goto abort;
+ }
+
+ who_pair = nvlist_next_nvpair(sourcenv, NULL);
+ if (who_pair == NULL) {
+ goto abort;
+ }
+
+ do {
+ error = nvpair_value_nvlist(who_pair, &permnv);
+ if (error) {
+ goto abort;
+ }
+
+ /*
+ * First build up the key to use
+ * for looking up in the various
+ * who trees.
+ */
+ ld = nvpair_name(who_pair)[1];
+ nvpname = nvpair_name(who_pair);
+ switch (nvpair_name(who_pair)[0]) {
+ case ZFS_DELEG_USER:
+ case ZFS_DELEG_USER_SETS:
+ tree = &zallowp->z_user;
+ uid = atol(&nvpname[3]);
+ pwd = getpwuid(uid);
+ (void) snprintf(findallownode.z_key,
+ sizeof (findallownode.z_key), "user %s",
+ (pwd) ? pwd->pw_name :
+ &nvpair_name(who_pair)[3]);
+ break;
+ case ZFS_DELEG_GROUP:
+ case ZFS_DELEG_GROUP_SETS:
+ tree = &zallowp->z_group;
+ gid = atol(&nvpname[3]);
+ grp = getgrgid(gid);
+ (void) snprintf(findallownode.z_key,
+ sizeof (findallownode.z_key), "group %s",
+ (grp) ? grp->gr_name :
+ &nvpair_name(who_pair)[3]);
+ break;
+ case ZFS_DELEG_CREATE:
+ case ZFS_DELEG_CREATE_SETS:
+ tree = &zallowp->z_crperms;
+ (void) strlcpy(findallownode.z_key, "",
+ sizeof (findallownode.z_key));
+ break;
+ case ZFS_DELEG_EVERYONE:
+ case ZFS_DELEG_EVERYONE_SETS:
+ (void) snprintf(findallownode.z_key,
+ sizeof (findallownode.z_key), "everyone");
+ tree = &zallowp->z_everyone;
+ break;
+ case ZFS_DELEG_NAMED_SET:
+ case ZFS_DELEG_NAMED_SET_SETS:
+ (void) snprintf(findallownode.z_key,
+ sizeof (findallownode.z_key), "%s",
+ &nvpair_name(who_pair)[3]);
+ tree = &zallowp->z_sets;
+ break;
+ }
+
+ /*
+ * Place who in tree
+ */
+ allownode = avl_find(tree, &findallownode, &where);
+ if (allownode == NULL) {
+ if ((newallownode = zfs_alloc(zhp->zfs_hdl,
+ sizeof (zfs_allow_node_t))) == NULL) {
+ goto abort;
+ }
+ avl_create(&newallownode->z_localdescend,
+ perm_compare,
+ sizeof (zfs_perm_node_t),
+ offsetof(zfs_perm_node_t, z_node));
+ avl_create(&newallownode->z_local,
+ perm_compare,
+ sizeof (zfs_perm_node_t),
+ offsetof(zfs_perm_node_t, z_node));
+ avl_create(&newallownode->z_descend,
+ perm_compare,
+ sizeof (zfs_perm_node_t),
+ offsetof(zfs_perm_node_t, z_node));
+ (void) strlcpy(newallownode->z_key,
+ findallownode.z_key,
+ sizeof (findallownode.z_key));
+ avl_insert(tree, newallownode, where);
+ allownode = newallownode;
+ }
+
+ /*
+ * Now iterate over the permissions and
+ * place them in the appropriate local,
+ * descendent or local+descendent tree.
+ *
+ * The permissions are added to the tree
+ * via zfs_coalesce_perm().
+ */
+ perm_pair = nvlist_next_nvpair(permnv, NULL);
+ if (perm_pair == NULL)
+ goto abort;
+ do {
+ if (zfs_coalesce_perm(zhp, allownode,
+ nvpair_name(perm_pair), ld) != 0)
+ goto abort;
+ } while (perm_pair = nvlist_next_nvpair(permnv,
+ perm_pair));
+ } while (who_pair = nvlist_next_nvpair(sourcenv, who_pair));
+
+ source_pair = nvlist_next_nvpair(nvlist, source_pair);
+ if (source_pair == NULL)
+ break;
+
+ /*
+ * allocate another node from the linked list of
+ * zfs_allow_t structures
+ */
+ newallowp = zfs_alloc_perm_tree(zhp, zallowp,
+ nvpair_name(source_pair));
+ if (newallowp == NULL) {
+ goto abort;
+ }
+ zallowp = newallowp;
+ }
+ nvlist_free(nvlist);
+ return (0);
+abort:
+ zfs_free_allows(*zfs_perms);
+ nvlist_free(nvlist);
+ return (-1);
+}
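A hedged sketch of consuming the result (not part of the patch): walk the zfs_allow_t linked list returned by zfs_perm_get(), print each set point's user entries and their local permissions, and release everything with zfs_free_allows().

#include <stdio.h>
#include <sys/avl.h>
#include <libzfs.h>

/*
 * Hypothetical example: print each set point's "user" entries and
 * their local permissions from the trees built by zfs_perm_get().
 */
static void
print_user_perms(zfs_handle_t *zhp)
{
	zfs_allow_t *allows, *curr;

	if (zfs_perm_get(zhp, &allows) != 0 || allows == NULL)
		return;

	for (curr = allows; curr != NULL; curr = curr->z_next) {
		zfs_allow_node_t *an;
		zfs_perm_node_t *pn;

		(void) printf("---- %s ----\n", curr->z_setpoint);
		for (an = avl_first(&curr->z_user); an != NULL;
		    an = AVL_NEXT(&curr->z_user, an)) {
			(void) printf("  %s:", an->z_key);
			for (pn = avl_first(&an->z_local); pn != NULL;
			    pn = AVL_NEXT(&an->z_local, pn))
				(void) printf(" %s", pn->z_pname);
			(void) printf("\n");
		}
	}
	zfs_free_allows(allows);
}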
+
+static char *
+zfs_deleg_perm_note(zfs_deleg_note_t note)
+{
+ /*
+ * Don't put newlines on end of lines
+ */
+ switch (note) {
+ case ZFS_DELEG_NOTE_CREATE:
+ return (dgettext(TEXT_DOMAIN,
+ "Must also have the 'mount' ability"));
+ case ZFS_DELEG_NOTE_DESTROY:
+ return (dgettext(TEXT_DOMAIN,
+ "Must also have the 'mount' ability"));
+ case ZFS_DELEG_NOTE_SNAPSHOT:
+ return (dgettext(TEXT_DOMAIN,
+ "Must also have the 'mount' ability"));
+ case ZFS_DELEG_NOTE_ROLLBACK:
+ return (dgettext(TEXT_DOMAIN,
+ "Must also have the 'mount' ability"));
+ case ZFS_DELEG_NOTE_CLONE:
+ return (dgettext(TEXT_DOMAIN, "Must also have the 'create' "
+ "ability and 'mount'\n"
+ "\t\t\t\tability in the origin file system"));
+ case ZFS_DELEG_NOTE_PROMOTE:
+ return (dgettext(TEXT_DOMAIN, "Must also have the 'mount'\n"
+ "\t\t\t\tand 'promote' ability in the origin file system"));
+ case ZFS_DELEG_NOTE_RENAME:
+ return (dgettext(TEXT_DOMAIN, "Must also have the 'mount' "
+ "and 'create' \n\t\t\t\tability in the new parent"));
+ case ZFS_DELEG_NOTE_RECEIVE:
+ return (dgettext(TEXT_DOMAIN, "Must also have the 'mount'"
+ " and 'create' ability"));
+ case ZFS_DELEG_NOTE_USERPROP:
+ return (dgettext(TEXT_DOMAIN,
+ "Allows changing any user property"));
+ case ZFS_DELEG_NOTE_ALLOW:
+ return (dgettext(TEXT_DOMAIN,
+ "Must also have the permission that is being\n"
+ "\t\t\t\tallowed"));
+ case ZFS_DELEG_NOTE_MOUNT:
+ return (dgettext(TEXT_DOMAIN,
+ "Allows mount/umount of ZFS datasets"));
+ case ZFS_DELEG_NOTE_SHARE:
+ return (dgettext(TEXT_DOMAIN,
+ "Allows sharing file systems over NFS or SMB\n"
+ "\t\t\t\tprotocols"));
+ case ZFS_DELEG_NOTE_NONE:
+ default:
+ return (dgettext(TEXT_DOMAIN, ""));
+ }
+}
+
+typedef enum {
+ ZFS_DELEG_SUBCOMMAND,
+ ZFS_DELEG_PROP,
+ ZFS_DELEG_OTHER
+} zfs_deleg_perm_type_t;
+
+/*
+ * is the permission a subcommand or other?
+ */
+zfs_deleg_perm_type_t
+zfs_deleg_perm_type(const char *perm)
+{
+ if (strcmp(perm, "userprop") == 0)
+ return (ZFS_DELEG_OTHER);
+ else
+ return (ZFS_DELEG_SUBCOMMAND);
+}
+
+static char *
+zfs_deleg_perm_type_str(zfs_deleg_perm_type_t type)
+{
+ switch (type) {
+ case ZFS_DELEG_SUBCOMMAND:
+ return (dgettext(TEXT_DOMAIN, "subcommand"));
+ case ZFS_DELEG_PROP:
+ return (dgettext(TEXT_DOMAIN, "property"));
+ case ZFS_DELEG_OTHER:
+ return (dgettext(TEXT_DOMAIN, "other"));
+ }
+ return ("");
+}
+
+/*ARGSUSED*/
+static int
+zfs_deleg_prop_cb(int prop, void *cb)
+{
+ if (zfs_prop_delegatable(prop))
+ (void) fprintf(stderr, "%-15s %-15s\n", zfs_prop_to_name(prop),
+ zfs_deleg_perm_type_str(ZFS_DELEG_PROP));
+
+ return (ZPROP_CONT);
+}
+
+void
+zfs_deleg_permissions(void)
+{
+ int i;
+
+ (void) fprintf(stderr, "\n%-15s %-15s\t%s\n\n", "NAME",
+ "TYPE", "NOTES");
+
+ /*
+ * First print out the subcommands
+ */
+ for (i = 0; zfs_deleg_perm_tab[i].z_perm != NULL; i++) {
+ (void) fprintf(stderr, "%-15s %-15s\t%s\n",
+ zfs_deleg_perm_tab[i].z_perm,
+ zfs_deleg_perm_type_str(
+ zfs_deleg_perm_type(zfs_deleg_perm_tab[i].z_perm)),
+ zfs_deleg_perm_note(zfs_deleg_perm_tab[i].z_note));
+ }
+
+ (void) zprop_iter(zfs_deleg_prop_cb, NULL, B_FALSE, B_TRUE,
+ ZFS_TYPE_DATASET|ZFS_TYPE_VOLUME);
+}
+
/*
* Given a property name and value, set the property for the given dataset.
*/
@@ -1070,6 +1766,8 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl = NULL, *realprops;
zfs_prop_t prop;
+ boolean_t do_prefix;
+ uint64_t idx;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
@@ -1081,9 +1779,10 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
goto error;
}
- if ((realprops = zfs_validate_properties(hdl, zhp->zfs_type, NULL, nvl,
+ if ((realprops = zfs_valid_proplist(hdl, zhp->zfs_type, nvl,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, errbuf)) == NULL)
goto error;
+
nvlist_free(nvl);
nvl = realprops;
@@ -1102,7 +1801,7 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
goto error;
}
- if ((cl = changelist_gather(zhp, prop, 0)) == NULL)
+ if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
goto error;
if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
@@ -1113,7 +1812,15 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
goto error;
}
- if ((ret = changelist_prefix(cl)) != 0)
+ /*
+ * If the dataset's canmount property is being set to noauto,
+ * then we want to prevent unmounting & remounting it.
+ */
+ do_prefix = !((prop == ZFS_PROP_CANMOUNT) &&
+ (zprop_string_to_index(prop, propval, &idx,
+ ZFS_TYPE_DATASET) == 0) && (idx == ZFS_CANMOUNT_NOAUTO));
+
+ if (do_prefix && (ret = changelist_prefix(cl)) != 0)
goto error;
/*
@@ -1121,11 +1828,10 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
*/
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
- if (zcmd_write_src_nvlist(hdl, &zc, nvl, NULL) != 0)
+ if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0)
goto error;
- ret = ioctl(hdl->libzfs_fd, ZFS_IOC_SET_PROP, &zc);
-
+ ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
if (ret != 0) {
switch (errno) {
@@ -1137,6 +1843,7 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
*/
switch (prop) {
case ZFS_PROP_QUOTA:
+ case ZFS_PROP_REFQUOTA:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is less than current used or "
"reserved space"));
@@ -1144,6 +1851,7 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
break;
case ZFS_PROP_RESERVATION:
+ case ZFS_PROP_REFRESERVATION:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is greater than available space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
@@ -1168,10 +1876,22 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "pool must be upgraded to allow gzip compression"));
+ "pool and or dataset must be upgraded to set this "
+ "property or value"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
+ case ERANGE:
+ if (prop == ZFS_PROP_COMPRESSION) {
+ (void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property setting is not allowed on "
+ "bootable datasets"));
+ (void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
+ } else {
+ (void) zfs_standard_error(hdl, errno, errbuf);
+ }
+ break;
+
case EOVERFLOW:
/*
* This platform can't address a volume this big.
@@ -1187,11 +1907,14 @@ zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
(void) zfs_standard_error(hdl, errno, errbuf);
}
} else {
+ if (do_prefix)
+ ret = changelist_postfix(cl);
+
/*
* Refresh the statistics so the new property value
* is reflected.
*/
- if ((ret = changelist_postfix(cl)) == 0)
+ if (ret == 0)
(void) get_stats(zhp);
}
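Illustrative caller sketch for the canmount special case above (not part of the patch; "tank/home" is a hypothetical dataset): setting canmount to "noauto" now skips the changelist_prefix()/changelist_postfix() unmount and remount cycle, so the dataset stays mounted.

#include <libzfs.h>

/*
 * Hypothetical example: switch an existing dataset to canmount=noauto.
 * With the do_prefix logic above, the dataset is not unmounted and
 * remounted as a side effect of the property change.
 */
int
disable_automount(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp;
	int ret;

	if ((zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);
	ret = zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_CANMOUNT), "noauto");
	zfs_close(zhp);
	return (ret);
}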
@@ -1219,7 +1942,7 @@ zfs_prop_inherit(zfs_handle_t *zhp, const char *propname)
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot inherit %s for '%s'"), propname, zhp->zfs_name);
- if ((prop = zfs_name_to_prop(propname)) == ZFS_PROP_INVAL) {
+ if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) {
/*
* For user properties, the amount of work we have to do is very
* small, so just do it here.
@@ -1233,8 +1956,7 @@ zfs_prop_inherit(zfs_handle_t *zhp, const char *propname)
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
- if (ioctl(zhp->zfs_hdl->libzfs_fd,
- ZFS_IOC_SET_PROP, &zc) != 0)
+ if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
return (zfs_standard_error(hdl, errno, errbuf));
return (0);
@@ -1272,7 +1994,7 @@ zfs_prop_inherit(zfs_handle_t *zhp, const char *propname)
/*
* Determine datasets which will be affected by this change, if any.
*/
- if ((cl = changelist_gather(zhp, prop, 0)) == NULL)
+ if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
return (-1);
if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
@@ -1286,8 +2008,7 @@ zfs_prop_inherit(zfs_handle_t *zhp, const char *propname)
if ((ret = changelist_prefix(cl)) != 0)
goto error;
- if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd,
- ZFS_IOC_SET_PROP, &zc)) != 0) {
+ if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) {
return (zfs_standard_error(hdl, errno, errbuf));
} else {
@@ -1305,15 +2026,6 @@ error:
return (ret);
}
-void
-nicebool(int value, char *buf, size_t buflen)
-{
- if (value)
- (void) strlcpy(buf, "on", buflen);
- else
- (void) strlcpy(buf, "off", buflen);
-}
-
/*
* True DSL properties are stored in an nvlist. The following two functions
* extract them appropriately.
@@ -1327,8 +2039,8 @@ getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
- verify(nvlist_lookup_uint64(nv, ZFS_PROP_VALUE, &value) == 0);
- (void) nvlist_lookup_string(nv, ZFS_PROP_SOURCE, source);
+ verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
+ (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
value = zfs_prop_default_numeric(prop);
*source = "";
@@ -1346,8 +2058,8 @@ getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
- verify(nvlist_lookup_string(nv, ZFS_PROP_VALUE, &value) == 0);
- (void) nvlist_lookup_string(nv, ZFS_PROP_SOURCE, source);
+ verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
+ (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
if ((value = (char *)zfs_prop_default_string(prop)) == NULL)
value = "";
@@ -1367,9 +2079,11 @@ getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
* the source "temporary".
*/
static int
-get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zfs_source_t *src,
+get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
char **source, uint64_t *val)
{
+ zfs_cmd_t zc = { 0 };
+ nvlist_t *zplprops = NULL;
struct mnttab mnt;
char *mntopt_on = NULL;
char *mntopt_off = NULL;
@@ -1406,6 +2120,11 @@ get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zfs_source_t *src,
mntopt_on = MNTOPT_XATTR;
mntopt_off = MNTOPT_NOXATTR;
break;
+
+ case ZFS_PROP_NBMAND:
+ mntopt_on = MNTOPT_NBMAND;
+ mntopt_off = MNTOPT_NONBMAND;
+ break;
}
/*
@@ -1444,43 +2163,32 @@ get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zfs_source_t *src,
case ZFS_PROP_READONLY:
case ZFS_PROP_SETUID:
case ZFS_PROP_XATTR:
+ case ZFS_PROP_NBMAND:
*val = getprop_uint64(zhp, prop, source);
if (hasmntopt(&mnt, mntopt_on) && !*val) {
*val = B_TRUE;
if (src)
- *src = ZFS_SRC_TEMPORARY;
+ *src = ZPROP_SRC_TEMPORARY;
} else if (hasmntopt(&mnt, mntopt_off) && *val) {
*val = B_FALSE;
if (src)
- *src = ZFS_SRC_TEMPORARY;
+ *src = ZPROP_SRC_TEMPORARY;
}
break;
- case ZFS_PROP_RECORDSIZE:
- case ZFS_PROP_COMPRESSION:
- case ZFS_PROP_ZONED:
- case ZFS_PROP_CREATION:
- case ZFS_PROP_COMPRESSRATIO:
- case ZFS_PROP_REFERENCED:
- case ZFS_PROP_USED:
- case ZFS_PROP_CREATETXG:
- case ZFS_PROP_AVAILABLE:
- case ZFS_PROP_VOLSIZE:
- case ZFS_PROP_VOLBLOCKSIZE:
- *val = getprop_uint64(zhp, prop, source);
- break;
-
case ZFS_PROP_CANMOUNT:
*val = getprop_uint64(zhp, prop, source);
- if (*val == 0)
+ if (*val != ZFS_CANMOUNT_ON)
*source = zhp->zfs_name;
else
*source = ""; /* default */
break;
case ZFS_PROP_QUOTA:
+ case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
+ case ZFS_PROP_REFRESERVATION:
*val = getprop_uint64(zhp, prop, source);
if (*val == 0)
*source = ""; /* default */
@@ -1496,11 +2204,60 @@ get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zfs_source_t *src,
*val = zhp->zfs_dmustats.dds_num_clones;
break;
+ case ZFS_PROP_VERSION:
+ case ZFS_PROP_NORMALIZE:
+ case ZFS_PROP_UTF8ONLY:
+ case ZFS_PROP_CASE:
+ if (!zfs_prop_valid_for_type(prop, zhp->zfs_head_type) ||
+ zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
+ return (-1);
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
+ zcmd_free_nvlists(&zc);
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "unable to get %s property"),
+ zfs_prop_to_name(prop));
+ return (zfs_error(zhp->zfs_hdl, EZFS_BADVERSION,
+ dgettext(TEXT_DOMAIN, "internal error")));
+ }
+ if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
+ nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
+ val) != 0) {
+ zcmd_free_nvlists(&zc);
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "unable to get %s property"),
+ zfs_prop_to_name(prop));
+ return (zfs_error(zhp->zfs_hdl, EZFS_NOMEM,
+ dgettext(TEXT_DOMAIN, "internal error")));
+ }
+ if (zplprops)
+ nvlist_free(zplprops);
+ zcmd_free_nvlists(&zc);
+ break;
+
default:
- zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
- "cannot get non-numeric property"));
- return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
- dgettext(TEXT_DOMAIN, "internal error")));
+ switch (zfs_prop_get_type(prop)) {
+ case PROP_TYPE_NUMBER:
+ case PROP_TYPE_INDEX:
+ *val = getprop_uint64(zhp, prop, source);
+ /*
+ * If we tried to use a default value for a
+ * readonly property, it means that it was not
+ * present; return an error.
+ */
+ if (zfs_prop_readonly(prop) &&
+ *source && (*source)[0] == '\0') {
+ return (-1);
+ }
+ break;
+
+ case PROP_TYPE_STRING:
+ default:
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "cannot get non-numeric property"));
+ return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
+ dgettext(TEXT_DOMAIN, "internal error")));
+ }
}
return (0);
@@ -1510,22 +2267,22 @@ get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zfs_source_t *src,
* Calculate the source type, given the raw source string.
*/
static void
-get_source(zfs_handle_t *zhp, zfs_source_t *srctype, char *source,
+get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source,
char *statbuf, size_t statlen)
{
- if (statbuf == NULL || *srctype == ZFS_SRC_TEMPORARY)
+ if (statbuf == NULL || *srctype == ZPROP_SRC_TEMPORARY)
return;
if (source == NULL) {
- *srctype = ZFS_SRC_NONE;
+ *srctype = ZPROP_SRC_NONE;
} else if (source[0] == '\0') {
- *srctype = ZFS_SRC_DEFAULT;
+ *srctype = ZPROP_SRC_DEFAULT;
} else {
if (strcmp(source, zhp->zfs_name) == 0) {
- *srctype = ZFS_SRC_LOCAL;
+ *srctype = ZPROP_SRC_LOCAL;
} else {
(void) strlcpy(statbuf, source, statlen);
- *srctype = ZFS_SRC_INHERITED;
+ *srctype = ZPROP_SRC_INHERITED;
}
}
@@ -1540,12 +2297,11 @@ get_source(zfs_handle_t *zhp, zfs_source_t *srctype, char *source,
*/
int
zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
- zfs_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
+ zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
{
char *source = NULL;
uint64_t val;
char *str;
- const char *root;
const char *strval;
/*
@@ -1555,66 +2311,9 @@ zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
return (-1);
if (src)
- *src = ZFS_SRC_NONE;
+ *src = ZPROP_SRC_NONE;
switch (prop) {
- case ZFS_PROP_ATIME:
- case ZFS_PROP_READONLY:
- case ZFS_PROP_SETUID:
- case ZFS_PROP_ZONED:
- case ZFS_PROP_DEVICES:
- case ZFS_PROP_EXEC:
- case ZFS_PROP_CANMOUNT:
- case ZFS_PROP_XATTR:
- /*
- * Basic boolean values are built on top of
- * get_numeric_property().
- */
- if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
- return (-1);
- nicebool(val, propbuf, proplen);
-
- break;
-
- case ZFS_PROP_AVAILABLE:
- case ZFS_PROP_RECORDSIZE:
- case ZFS_PROP_CREATETXG:
- case ZFS_PROP_REFERENCED:
- case ZFS_PROP_USED:
- case ZFS_PROP_VOLSIZE:
- case ZFS_PROP_VOLBLOCKSIZE:
- case ZFS_PROP_NUMCLONES:
- /*
- * Basic numeric values are built on top of
- * get_numeric_property().
- */
- if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
- return (-1);
- if (literal)
- (void) snprintf(propbuf, proplen, "%llu",
- (u_longlong_t)val);
- else
- zfs_nicenum(val, propbuf, proplen);
- break;
-
- case ZFS_PROP_COMPRESSION:
- case ZFS_PROP_CHECKSUM:
- case ZFS_PROP_SNAPDIR:
-#ifdef ZFS_NO_ACL
- case ZFS_PROP_ACLMODE:
- case ZFS_PROP_ACLINHERIT:
- case ZFS_PROP_COPIES:
- val = getprop_uint64(zhp, prop, &source);
- verify(zfs_prop_index_to_string(prop, val, &strval) == 0);
- (void) strlcpy(propbuf, strval, proplen);
- break;
-#else /* ZFS_NO_ACL */
- case ZFS_PROP_ACLMODE:
- case ZFS_PROP_ACLINHERIT:
- (void) strlcpy(propbuf, "<unsupported>", proplen);
- break;
-#endif /* ZFS_NO_ACL */
-
case ZFS_PROP_CREATION:
/*
* 'creation' is a time_t stored in the statistics. We convert
@@ -1638,25 +2337,42 @@ zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
* Getting the precise mountpoint can be tricky.
*
* - for 'none' or 'legacy', return those values.
- * - for default mountpoints, construct it as /zfs/<dataset>
* - for inherited mountpoints, we want to take everything
* after our ancestor and append it to the inherited value.
*
* If the pool has an alternate root, we want to prepend that
* root to any values we return.
*/
- root = zhp->zfs_root;
+
str = getprop_string(zhp, prop, &source);
- if (str[0] == '\0') {
- (void) snprintf(propbuf, proplen, "%s/zfs/%s",
- root, zhp->zfs_name);
- } else if (str[0] == '/') {
+ if (str[0] == '/') {
+ char buf[MAXPATHLEN];
+ char *root = buf;
const char *relpath = zhp->zfs_name + strlen(source);
if (relpath[0] == '/')
relpath++;
- if (str[1] == '\0')
+
+ if ((zpool_get_prop(zhp->zpool_hdl,
+ ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL)) ||
+ (strcmp(root, "-") == 0))
+ root[0] = '\0';
+ /*
+ * Special case an alternate root of '/'. This will
+ * avoid having multiple leading slashes in the
+ * mountpoint path.
+ */
+ if (strcmp(root, "/") == 0)
+ root++;
+
+ /*
+ * If the mountpoint is '/' then skip over this
+ * if we are obtaining either an alternate root or
+ * an inherited mountpoint.
+ */
+ if (str[1] == '\0' && (root[0] != '\0' ||
+ relpath[0] != '\0'))
str++;
if (relpath[0] == '\0')
@@ -1673,13 +2389,6 @@ zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
break;
- case ZFS_PROP_SHARENFS:
- case ZFS_PROP_SHAREISCSI:
- case ZFS_PROP_ISCSIOPTIONS:
- (void) strlcpy(propbuf, getprop_string(zhp, prop, &source),
- proplen);
- break;
-
case ZFS_PROP_ORIGIN:
(void) strlcpy(propbuf, getprop_string(zhp, prop, &source),
proplen);
@@ -1692,7 +2401,10 @@ zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
break;
case ZFS_PROP_QUOTA:
+ case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
+ case ZFS_PROP_REFRESERVATION:
+
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
@@ -1766,7 +2478,35 @@ zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
break;
default:
- abort();
+ switch (zfs_prop_get_type(prop)) {
+ case PROP_TYPE_NUMBER:
+ if (get_numeric_property(zhp, prop, src,
+ &source, &val) != 0)
+ return (-1);
+ if (literal)
+ (void) snprintf(propbuf, proplen, "%llu",
+ (u_longlong_t)val);
+ else
+ zfs_nicenum(val, propbuf, proplen);
+ break;
+
+ case PROP_TYPE_STRING:
+ (void) strlcpy(propbuf,
+ getprop_string(zhp, prop, &source), proplen);
+ break;
+
+ case PROP_TYPE_INDEX:
+ if (get_numeric_property(zhp, prop, src,
+ &source, &val) != 0)
+ return (-1);
+ if (zfs_prop_index_to_string(prop, val, &strval) != 0)
+ return (-1);
+ (void) strlcpy(propbuf, strval, proplen);
+ break;
+
+ default:
+ abort();
+ }
}
get_source(zhp, src, source, statbuf, statlen);
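A small, hedged usage sketch for the reworked zfs_prop_get() (not part of the patch): retrieving the mountpoint now reflects the altroot handling shown earlier in this function, and index/number/string properties fall through the generic property-type switch above.

#include <stdio.h>
#include <sys/param.h>
#include <libzfs.h>

/*
 * Hypothetical example: fetch the mountpoint through the reworked
 * zfs_prop_get(); any pool altroot is now folded into the result.
 */
void
show_mountpoint(zfs_handle_t *zhp)
{
	char mntpt[MAXPATHLEN];

	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mntpt, sizeof (mntpt),
	    NULL, NULL, 0, B_FALSE) == 0)
		(void) printf("%s is mounted at %s\n",
		    zfs_get_name(zhp), mntpt);
}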
@@ -1783,33 +2523,42 @@ uint64_t
zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
{
char *source;
- zfs_source_t sourcetype = ZFS_SRC_NONE;
uint64_t val;
- (void) get_numeric_property(zhp, prop, &sourcetype, &source, &val);
+ (void) get_numeric_property(zhp, prop, NULL, &source, &val);
return (val);
}
+int
+zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
+{
+ char buf[64];
+
+ zfs_nicenum(val, buf, sizeof (buf));
+ return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
+}
+
/*
* Similar to zfs_prop_get(), but returns the value as an integer.
*/
int
zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
- zfs_source_t *src, char *statbuf, size_t statlen)
+ zprop_source_t *src, char *statbuf, size_t statlen)
{
char *source;
/*
* Check to see if this property applies to our object
*/
- if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) {
return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
zfs_prop_to_name(prop)));
+ }
if (src)
- *src = ZFS_SRC_NONE;
+ *src = ZPROP_SRC_NONE;
if (get_numeric_property(zhp, prop, src, &source, value) != 0)
return (-1);
@@ -1847,6 +2596,9 @@ zfs_iter_filesystems(zfs_handle_t *zhp, zfs_iter_f func, void *data)
zfs_handle_t *nzhp;
int ret;
+ if (zhp->zfs_type != ZFS_TYPE_FILESYSTEM)
+ return (0);
+
for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name))) {
@@ -1890,6 +2642,9 @@ zfs_iter_snapshots(zfs_handle_t *zhp, zfs_iter_f func, void *data)
zfs_handle_t *nzhp;
int ret;
+ if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
+ return (0);
+
for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT,
&zc) == 0;
@@ -1948,12 +2703,16 @@ parent_name(const char *path, char *buf, size_t buflen)
}
/*
- * Checks to make sure that the given path has a parent, and that it exists. We
- * also fetch the 'zoned' property, which is used to validate property settings
- * when creating new datasets.
+ * If accept_ancestor is false, then check to make sure that the given path has
+ * a parent, and that it exists. If accept_ancestor is true, then find the
+ * closest existing ancestor for the given path. In prefixlen return the
+ * length of already existing prefix of the given path. We also fetch the
+ * 'zoned' property, which is used to validate property settings when creating
+ * new datasets.
*/
static int
-check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned)
+check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
+ boolean_t accept_ancestor, int *prefixlen)
{
zfs_cmd_t zc = { 0 };
char parent[ZFS_MAXNAMELEN];
@@ -1984,16 +2743,22 @@ check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned)
}
/* check to see if the parent dataset exists */
- if ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
- switch (errno) {
- case ENOENT:
+ while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
+ if (errno == ENOENT && accept_ancestor) {
+ /*
+ * Go deeper to find an ancestor, give up on top level.
+ */
+ if (parent_name(parent, parent, sizeof (parent)) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no such pool '%s'"), zc.zc_name);
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+ }
+ } else if (errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent does not exist"));
return (zfs_error(hdl, EZFS_NOENT, errbuf));
-
- default:
+ } else
return (zfs_standard_error(hdl, errno, errbuf));
- }
}
*zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
@@ -2014,6 +2779,136 @@ check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned)
}
zfs_close(zhp);
+ if (prefixlen != NULL)
+ *prefixlen = strlen(parent);
+ return (0);
+}
+
+/*
+ * Finds whether the dataset of the given type(s) exists.
+ */
+boolean_t
+zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
+{
+ zfs_handle_t *zhp;
+
+ if (!zfs_validate_name(hdl, path, types, B_FALSE))
+ return (B_FALSE);
+
+ /*
+ * Try to get stats for the dataset, which will tell us if it exists.
+ */
+ if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
+ int ds_type = zhp->zfs_type;
+
+ zfs_close(zhp);
+ if (types & ds_type)
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+/*
+ * Given a path to 'target', create all the ancestors between
+ * the prefixlen portion of the path, and the target itself.
+ * Fail if the initial prefixlen-ancestor does not already exist.
+ */
+int
+create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
+{
+ zfs_handle_t *h;
+ char *cp;
+ const char *opname;
+
+ /* make sure prefix exists */
+ cp = target + prefixlen;
+ if (*cp != '/') {
+ assert(strchr(cp, '/') == NULL);
+ h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+ } else {
+ *cp = '\0';
+ h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+ *cp = '/';
+ }
+ if (h == NULL)
+ return (-1);
+ zfs_close(h);
+
+ /*
+ * Attempt to create, mount, and share any ancestor filesystems,
+ * up to the prefixlen-long one.
+ */
+ for (cp = target + prefixlen + 1;
+ cp = strchr(cp, '/'); *cp = '/', cp++) {
+ char *logstr;
+
+ *cp = '\0';
+
+ h = make_dataset_handle(hdl, target);
+ if (h) {
+ /* it already exists, nothing to do here */
+ zfs_close(h);
+ continue;
+ }
+
+ logstr = hdl->libzfs_log_str;
+ hdl->libzfs_log_str = NULL;
+ if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
+ NULL) != 0) {
+ hdl->libzfs_log_str = logstr;
+ opname = dgettext(TEXT_DOMAIN, "create");
+ goto ancestorerr;
+ }
+
+ hdl->libzfs_log_str = logstr;
+ h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+ if (h == NULL) {
+ opname = dgettext(TEXT_DOMAIN, "open");
+ goto ancestorerr;
+ }
+
+ if (zfs_mount(h, NULL, 0) != 0) {
+ opname = dgettext(TEXT_DOMAIN, "mount");
+ goto ancestorerr;
+ }
+
+ if (zfs_share(h) != 0) {
+ opname = dgettext(TEXT_DOMAIN, "share");
+ goto ancestorerr;
+ }
+
+ zfs_close(h);
+ }
+
+ return (0);
+
+ancestorerr:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "failed to %s ancestor '%s'"), opname, target);
+ return (-1);
+}
+
+/*
+ * Creates non-existing ancestors of the given path.
+ */
+int
+zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
+{
+ int prefix;
+ uint64_t zoned;
+ char *path_copy;
+ int rc;
+
+ if (check_parents(hdl, path, &zoned, B_TRUE, &prefix) != 0)
+ return (-1);
+
+ if ((path_copy = strdup(path)) != NULL) {
+ rc = create_parents(hdl, path_copy, prefix);
+ free(path_copy);
+ }
+ if (path_copy == NULL || rc != 0)
+ return (-1);
+
return (0);
}
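A hedged sketch of the "create with ancestors" flow that the new zfs_create_ancestors() enables (not part of the patch; the caller supplies the libzfs handle and dataset path): check whether the dataset exists, create any missing ancestors, then create the leaf filesystem, roughly what "zfs create -p" does.

#include <libzfs.h>

/*
 * Hypothetical example: the "zfs create -p" style flow.  Missing
 * ancestors are created (and mounted/shared) first, then the leaf
 * filesystem itself.
 */
int
create_with_parents(libzfs_handle_t *hdl, const char *path)
{
	if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET))
		return (0);
	if (zfs_create_ancestors(hdl, path) != 0)
		return (-1);
	return (zfs_create(hdl, path, ZFS_TYPE_FILESYSTEM, NULL));
}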
@@ -2035,11 +2930,11 @@ zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
"cannot create '%s'"), path);
/* validate the path, taking care to note the extended error message */
- if (!zfs_validate_name(hdl, path, type))
+ if (!zfs_validate_name(hdl, path, type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
- if (check_parents(hdl, path, &zoned) != 0)
+ if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
return (-1);
/*
@@ -2050,7 +2945,7 @@ zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
* first try to see if the dataset exists.
*/
(void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name));
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0) {
+ if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
@@ -2061,7 +2956,7 @@ zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
else
zc.zc_objset_type = DMU_OST_ZFS;
- if (props && (props = zfs_validate_properties(hdl, type, NULL, props,
+ if (props && (props = zfs_valid_proplist(hdl, type, props,
zoned, NULL, errbuf)) == 0)
return (-1);
@@ -2111,13 +3006,12 @@ zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
}
}
- if (props &&
- zcmd_write_src_nvlist(hdl, &zc, props, NULL) != 0)
+ if (props && zcmd_write_src_nvlist(hdl, &zc, props) != 0)
return (-1);
nvlist_free(props);
/* create the dataset */
- ret = ioctl(hdl->libzfs_fd, ZFS_IOC_CREATE, &zc);
+ ret = zfs_ioctl(hdl, ZFS_IOC_CREATE, &zc);
if (ret == 0 && type == ZFS_TYPE_VOLUME) {
ret = zvol_create_link(hdl, path);
@@ -2158,6 +3052,11 @@ zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+ case ENOTSUP:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "pool must be upgraded to set this "
+ "property or value"));
+ return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
#ifdef _ILP32
case EOVERFLOW:
/*
@@ -2189,10 +3088,13 @@ zfs_destroy(zfs_handle_t *zhp)
if (ZFS_IS_VOLUME(zhp)) {
/*
- * Unconditionally unshare this zvol ignoring failure as it
- * indicates only that the volume wasn't shared initially.
+ * If user doesn't have permissions to unshare volume, then
+ * abort the request. This would only happen for a
+ * non-privileged user.
*/
- (void) zfs_unshare_iscsi(zhp);
+ if (zfs_unshare_iscsi(zhp) != 0) {
+ return (-1);
+ }
if (zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name) != 0)
return (-1);
@@ -2202,7 +3104,7 @@ zfs_destroy(zfs_handle_t *zhp)
zc.zc_objset_type = DMU_OST_ZFS;
}
- if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc) != 0) {
+ if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY, &zc) != 0) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
@@ -2276,7 +3178,7 @@ zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname)
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
- ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DESTROY_SNAPS, &zc);
+ ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY_SNAPS, &zc);
if (ret != 0) {
char errbuf[1024];
@@ -2318,11 +3220,11 @@ zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
"cannot create '%s'"), target);
/* validate the target name */
- if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM))
+ if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
- if (check_parents(hdl, target, &zoned) != 0)
+ if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
return (-1);
(void) parent_name(target, parent, sizeof (parent));
@@ -2337,11 +3239,11 @@ zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
}
if (props) {
- if ((props = zfs_validate_properties(hdl, type, NULL, props,
- zoned, zhp, errbuf)) == NULL)
+ if ((props = zfs_valid_proplist(hdl, type, props, zoned,
+ zhp, errbuf)) == NULL)
return (-1);
- if (zcmd_write_src_nvlist(hdl, &zc, props, NULL) != 0) {
+ if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
nvlist_free(props);
return (-1);
}
@@ -2351,7 +3253,7 @@ zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
(void) strlcpy(zc.zc_name, target, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, zhp->zfs_name, sizeof (zc.zc_value));
- ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_CREATE, &zc);
+ ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_CREATE, &zc);
zcmd_free_nvlists(&zc);
@@ -2470,7 +3372,7 @@ zfs_promote(zfs_handle_t *zhp)
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
- (void) strlcpy(parent, zhp->zfs_dmustats.dds_clone_of, sizeof (parent));
+ (void) strlcpy(parent, zhp->zfs_dmustats.dds_origin, sizeof (parent));
if (parent[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a cloned filesystem"));
@@ -2480,14 +3382,14 @@ zfs_promote(zfs_handle_t *zhp)
*cp = '\0';
/* Walk the snapshots we will be moving */
- pzhp = zfs_open(hdl, zhp->zfs_dmustats.dds_clone_of, ZFS_TYPE_SNAPSHOT);
+ pzhp = zfs_open(hdl, zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
if (pzhp == NULL)
return (-1);
pd.cb_pivot_txg = zfs_prop_get_int(pzhp, ZFS_PROP_CREATETXG);
zfs_close(pzhp);
pd.cb_target = zhp->zfs_name;
pd.cb_errbuf = errbuf;
- pzhp = zfs_open(hdl, parent, ZFS_TYPE_ANY);
+ pzhp = zfs_open(hdl, parent, ZFS_TYPE_DATASET);
if (pzhp == NULL)
return (-1);
(void) zfs_prop_get(pzhp, ZFS_PROP_MOUNTPOINT, pd.cb_mountpoint,
@@ -2499,10 +3401,10 @@ zfs_promote(zfs_handle_t *zhp)
}
/* issue the ioctl */
- (void) strlcpy(zc.zc_value, zhp->zfs_dmustats.dds_clone_of,
+ (void) strlcpy(zc.zc_value, zhp->zfs_dmustats.dds_origin,
sizeof (zc.zc_value));
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
- ret = ioctl(hdl->libzfs_fd, ZFS_IOC_PROMOTE, &zc);
+ ret = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
if (ret != 0) {
int save_errno = errno;
@@ -2570,10 +3472,11 @@ zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
* Takes a snapshot of the given dataset.
*/
int
-zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive)
+zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
+ nvlist_t *props)
{
const char *delim;
- char *parent;
+ char parent[ZFS_MAXNAMELEN];
zfs_handle_t *zhp;
zfs_cmd_t zc = { 0 };
int ret;
@@ -2583,33 +3486,52 @@ zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive)
"cannot snapshot '%s'"), path);
/* validate the target name */
- if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT))
+ if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+ if (props) {
+ if ((props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
+ props, B_FALSE, NULL, errbuf)) == NULL)
+ return (-1);
+
+ if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
+ nvlist_free(props);
+ return (-1);
+ }
+
+ nvlist_free(props);
+ }
+
/* make sure the parent exists and is of the appropriate type */
delim = strchr(path, '@');
- if ((parent = zfs_alloc(hdl, delim - path + 1)) == NULL)
- return (-1);
(void) strncpy(parent, path, delim - path);
parent[delim - path] = '\0';
if ((zhp = zfs_open(hdl, parent, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
- free(parent);
+ zcmd_free_nvlists(&zc);
return (-1);
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, delim+1, sizeof (zc.zc_value));
+ if (ZFS_IS_VOLUME(zhp))
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
zc.zc_cookie = recursive;
- ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SNAPSHOT, &zc);
+ ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SNAPSHOT, &zc);
+
+ zcmd_free_nvlists(&zc);
/*
* if it was recursive, the one that actually failed will be in
* zc.zc_name.
*/
- (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
- "cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value);
+ if (ret != 0)
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value);
+
if (ret == 0 && recursive) {
struct createdata cd;
@@ -2620,408 +3542,24 @@ zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive)
if (ret == 0 && zhp->zfs_type == ZFS_TYPE_VOLUME) {
ret = zvol_create_link(zhp->zfs_hdl, path);
if (ret != 0) {
- (void) ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DESTROY,
- &zc);
+ (void) zfs_standard_error(hdl, errno,
+ dgettext(TEXT_DOMAIN,
+ "Volume successfully snapshotted, but device links "
+ "were not created"));
+ zfs_close(zhp);
+ return (-1);
}
}
if (ret != 0)
(void) zfs_standard_error(hdl, errno, errbuf);
- free(parent);
zfs_close(zhp);
return (ret);
}
/*
- * Dumps a backup of the given snapshot (incremental from fromsnap if it's not
- * NULL) to the file descriptor specified by outfd.
- */
-int
-zfs_send(zfs_handle_t *zhp, const char *fromsnap, int outfd)
-{
- zfs_cmd_t zc = { 0 };
- char errbuf[1024];
- libzfs_handle_t *hdl = zhp->zfs_hdl;
-
- assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
-
- (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
- if (fromsnap)
- (void) strlcpy(zc.zc_value, fromsnap, sizeof (zc.zc_name));
- zc.zc_cookie = outfd;
-
- if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SENDBACKUP, &zc) != 0) {
- (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
- "cannot send '%s'"), zhp->zfs_name);
-
- switch (errno) {
-
- case EXDEV:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "not an earlier snapshot from the same fs"));
- return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
-
- case EDQUOT:
- case EFBIG:
- case EIO:
- case ENOLINK:
- case ENOSPC:
- case ENXIO:
- case EPIPE:
- case ERANGE:
- case EFAULT:
- case EROFS:
- zfs_error_aux(hdl, strerror(errno));
- return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
-
- default:
- return (zfs_standard_error(hdl, errno, errbuf));
- }
- }
-
- return (0);
-}
-
-/*
- * Create ancestors of 'target', but not target itself, and not
- * ancestors whose names are shorter than prefixlen. Die if
- * prefixlen-ancestor does not exist.
- */
-static int
-create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
-{
- zfs_handle_t *h;
- char *cp;
-
- /* make sure prefix exists */
- cp = strchr(target + prefixlen, '/');
- *cp = '\0';
- h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
- *cp = '/';
- if (h == NULL)
- return (-1);
- zfs_close(h);
-
- /*
- * Attempt to create, mount, and share any ancestor filesystems,
- * up to the prefixlen-long one.
- */
- for (cp = target + prefixlen + 1;
- cp = strchr(cp, '/'); *cp = '/', cp++) {
- const char *opname;
-
- *cp = '\0';
-
- h = make_dataset_handle(hdl, target);
- if (h) {
- /* it already exists, nothing to do here */
- zfs_close(h);
- continue;
- }
-
- opname = dgettext(TEXT_DOMAIN, "create");
- if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
- NULL) != 0)
- goto ancestorerr;
-
- opname = dgettext(TEXT_DOMAIN, "open");
- h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
- if (h == NULL)
- goto ancestorerr;
-
- opname = dgettext(TEXT_DOMAIN, "mount");
- if (zfs_mount(h, NULL, 0) != 0)
- goto ancestorerr;
-
- opname = dgettext(TEXT_DOMAIN, "share");
- if (zfs_share(h) != 0)
- goto ancestorerr;
-
- zfs_close(h);
-
- continue;
-ancestorerr:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "failed to %s ancestor '%s'"), opname, target);
- return (-1);
- }
-
- return (0);
-}
-
-/*
- * Restores a backup of tosnap from the file descriptor specified by infd.
- */
-int
-zfs_receive(libzfs_handle_t *hdl, const char *tosnap, int isprefix,
- int verbose, int dryrun, boolean_t force, int infd)
-{
- zfs_cmd_t zc = { 0 };
- time_t begin_time;
- int ioctl_err, err, bytes, size, choplen;
- char *cp;
- dmu_replay_record_t drr;
- struct drr_begin *drrb = &zc.zc_begin_record;
- char errbuf[1024];
- prop_changelist_t *clp;
- char chopprefix[ZFS_MAXNAMELEN];
-
- begin_time = time(NULL);
-
- (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
- "cannot receive"));
-
- /* read in the BEGIN record */
- cp = (char *)&drr;
- bytes = 0;
- do {
- size = read(infd, cp, sizeof (drr) - bytes);
- cp += size;
- bytes += size;
- } while (size > 0);
-
- if (size < 0 || bytes != sizeof (drr)) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
- "stream (failed to read first record)"));
- return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
- }
-
- zc.zc_begin_record = drr.drr_u.drr_begin;
-
- if (drrb->drr_magic != DMU_BACKUP_MAGIC &&
- drrb->drr_magic != BSWAP_64(DMU_BACKUP_MAGIC)) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
- "stream (bad magic number)"));
- return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
- }
-
- if (drrb->drr_version != DMU_BACKUP_VERSION &&
- drrb->drr_version != BSWAP_64(DMU_BACKUP_VERSION)) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only version "
- "0x%llx is supported (stream is version 0x%llx)"),
- DMU_BACKUP_VERSION, drrb->drr_version);
- return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
- }
-
- if (strchr(drr.drr_u.drr_begin.drr_toname, '@') == NULL) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
- "stream (bad snapshot name)"));
- return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
- }
- /*
- * Determine how much of the snapshot name stored in the stream
- * we are going to tack on to the name they specified on the
- * command line, and how much we are going to chop off.
- *
- * If they specified a snapshot, chop the entire name stored in
- * the stream.
- */
- (void) strcpy(chopprefix, drr.drr_u.drr_begin.drr_toname);
- if (isprefix) {
- /*
- * They specified a fs with -d, we want to tack on
- * everything but the pool name stored in the stream
- */
- if (strchr(tosnap, '@')) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
- "argument - snapshot not allowed with -d"));
- return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
- }
- cp = strchr(chopprefix, '/');
- if (cp == NULL)
- cp = strchr(chopprefix, '@');
- *cp = '\0';
- } else if (strchr(tosnap, '@') == NULL) {
- /*
- * If they specified a filesystem without -d, we want to
- * tack on everything after the fs specified in the
- * first name from the stream.
- */
- cp = strchr(chopprefix, '@');
- *cp = '\0';
- }
- choplen = strlen(chopprefix);
-
- /*
- * Determine name of destination snapshot, store in zc_value.
- */
- (void) strcpy(zc.zc_value, tosnap);
- (void) strncat(zc.zc_value, drr.drr_u.drr_begin.drr_toname+choplen,
- sizeof (zc.zc_value));
- if (!zfs_validate_name(hdl, zc.zc_value, ZFS_TYPE_SNAPSHOT))
- return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
-
- (void) strcpy(zc.zc_name, zc.zc_value);
- if (drrb->drr_fromguid) {
- /* incremental backup stream */
- zfs_handle_t *h;
-
- /* do the recvbackup ioctl to the containing fs */
- *strchr(zc.zc_name, '@') = '\0';
-
- /* make sure destination fs exists */
- h = zfs_open(hdl, zc.zc_name,
- ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
- if (h == NULL)
- return (-1);
- if (!dryrun) {
- /*
- * We need to unmount all the dependents of the dataset
- * and the dataset itself. If it's a volume
- * then remove device link.
- */
- if (h->zfs_type == ZFS_TYPE_FILESYSTEM) {
- clp = changelist_gather(h, ZFS_PROP_NAME, 0);
- if (clp == NULL)
- return (-1);
- if (changelist_prefix(clp) != 0) {
- changelist_free(clp);
- return (-1);
- }
- } else {
- (void) zvol_remove_link(hdl, h->zfs_name);
- }
- }
- zfs_close(h);
- } else {
- /* full backup stream */
-
- /* Make sure destination fs does not exist */
- *strchr(zc.zc_name, '@') = '\0';
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "destination '%s' exists"), zc.zc_name);
- return (zfs_error(hdl, EZFS_EXISTS, errbuf));
- }
-
- if (strchr(zc.zc_name, '/') == NULL) {
- /*
- * they're trying to do a recv into a
- * nonexistant topmost filesystem.
- */
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "destination does not exist"), zc.zc_name);
- return (zfs_error(hdl, EZFS_EXISTS, errbuf));
- }
-
- /* Do the recvbackup ioctl to the fs's parent. */
- *strrchr(zc.zc_name, '/') = '\0';
-
- if (isprefix && (err = create_parents(hdl,
- zc.zc_value, strlen(tosnap))) != 0) {
- return (zfs_error(hdl, EZFS_BADRESTORE, errbuf));
- }
-
- }
-
- zc.zc_cookie = infd;
- zc.zc_guid = force;
- if (verbose) {
- (void) printf("%s %s stream of %s into %s\n",
- dryrun ? "would receive" : "receiving",
- drrb->drr_fromguid ? "incremental" : "full",
- drr.drr_u.drr_begin.drr_toname,
- zc.zc_value);
- (void) fflush(stdout);
- }
- if (dryrun)
- return (0);
- err = ioctl_err = ioctl(hdl->libzfs_fd, ZFS_IOC_RECVBACKUP, &zc);
- if (ioctl_err != 0) {
- switch (errno) {
- case ENODEV:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "most recent snapshot does not match incremental "
- "source"));
- (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
- break;
- case ETXTBSY:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "destination has been modified since most recent "
- "snapshot"));
- (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
- break;
- case EEXIST:
- if (drrb->drr_fromguid == 0) {
- /* it's the containing fs that exists */
- cp = strchr(zc.zc_value, '@');
- *cp = '\0';
- }
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "destination already exists"));
- (void) zfs_error_fmt(hdl, EZFS_EXISTS,
- dgettext(TEXT_DOMAIN, "cannot restore to %s"),
- zc.zc_value);
- break;
- case EINVAL:
- (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
- break;
- case ECKSUM:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "invalid stream (checksum mismatch)"));
- (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
- break;
- default:
- (void) zfs_standard_error(hdl, errno, errbuf);
- }
- }
-
- /*
- * Mount or recreate the /dev links for the target filesystem
- * (if created, or if we tore them down to do an incremental
- * restore), and the /dev links for the new snapshot (if
- * created). Also mount any children of the target filesystem
- * if we did an incremental receive.
- */
- cp = strchr(zc.zc_value, '@');
- if (cp && (ioctl_err == 0 || drrb->drr_fromguid)) {
- zfs_handle_t *h;
-
- *cp = '\0';
- h = zfs_open(hdl, zc.zc_value,
- ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
- *cp = '@';
- if (h) {
- if (h->zfs_type == ZFS_TYPE_VOLUME) {
- err = zvol_create_link(hdl, h->zfs_name);
- if (err == 0 && ioctl_err == 0)
- err = zvol_create_link(hdl,
- zc.zc_value);
- } else {
- if (drrb->drr_fromguid) {
- err = changelist_postfix(clp);
- changelist_free(clp);
- } else {
- err = zfs_mount(h, NULL, 0);
- }
- }
- zfs_close(h);
- }
- }
-
- if (err || ioctl_err)
- return (-1);
-
- if (verbose) {
- char buf1[64];
- char buf2[64];
- uint64_t bytes = zc.zc_cookie;
- time_t delta = time(NULL) - begin_time;
- if (delta == 0)
- delta = 1;
- zfs_nicenum(bytes, buf1, sizeof (buf1));
- zfs_nicenum(bytes/delta, buf2, sizeof (buf1));
-
- (void) printf("received %sb stream in %lu seconds (%sb/sec)\n",
- buf1, delta, buf2);
- }
-
- return (0);
-}
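The chop/tack logic above is easier to see with concrete names. A small illustrative sketch (plain C only, not part of the patch; the snapshot and destination names are examples) of how the destination snapshot name is derived from the stream's drr_toname:

#include <stdio.h>
#include <string.h>

static void
dest_name(const char *toname, const char *tosnap, int isprefix)
{
	char chop[256], dest[256];
	char *cp;

	(void) strcpy(chop, toname);
	if (isprefix) {
		/* -d: keep everything from the stream except the pool name */
		cp = strchr(chop, '/');
		if (cp == NULL)
			cp = strchr(chop, '@');
		*cp = '\0';
	} else if (strchr(tosnap, '@') == NULL) {
		/* fs given without -d: keep only the stream's snapshot suffix */
		*strchr(chop, '@') = '\0';
	}
	/* else: a full snapshot was given, chop the entire stream name */
	(void) snprintf(dest, sizeof (dest), "%s%s",
	    tosnap, toname + strlen(chop));
	(void) printf("%s\n", dest);
}

int
main(void)
{
	dest_name("pool/fs@snap", "tank/backup", 1);	/* tank/backup/fs@snap */
	dest_name("pool/fs@snap", "tank/copy", 0);	/* tank/copy@snap */
	return (0);
}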
-
-/*
* Destroy any more recent snapshots. We invoke this callback on any dependents
* of the snapshot first. If the 'cb_dependent' member is non-zero, then this
* is a dependent and we should just destroy it without checking the transaction
@@ -3030,9 +3568,9 @@ zfs_receive(libzfs_handle_t *hdl, const char *tosnap, int isprefix,
typedef struct rollback_data {
const char *cb_target; /* the snapshot */
uint64_t cb_create; /* creation time reference */
- prop_changelist_t *cb_clp; /* changelist pointer */
- int cb_error;
+ boolean_t cb_error;
boolean_t cb_dependent;
+ boolean_t cb_force;
} rollback_data_t;
static int
@@ -3045,23 +3583,35 @@ rollback_destroy(zfs_handle_t *zhp, void *data)
zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT &&
zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) >
cbp->cb_create) {
+ char *logstr;
cbp->cb_dependent = B_TRUE;
- if (zfs_iter_dependents(zhp, B_FALSE, rollback_destroy,
- cbp) != 0)
- cbp->cb_error = 1;
+ cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE,
+ rollback_destroy, cbp);
cbp->cb_dependent = B_FALSE;
- if (zfs_destroy(zhp) != 0)
- cbp->cb_error = 1;
- else
- changelist_remove(zhp, cbp->cb_clp);
+ logstr = zhp->zfs_hdl->libzfs_log_str;
+ zhp->zfs_hdl->libzfs_log_str = NULL;
+ cbp->cb_error |= zfs_destroy(zhp);
+ zhp->zfs_hdl->libzfs_log_str = logstr;
}
} else {
+ /* We must destroy this clone; first unmount it */
+ prop_changelist_t *clp;
+
+ clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
+ cbp->cb_force ? MS_FORCE: 0);
+ if (clp == NULL || changelist_prefix(clp) != 0) {
+ cbp->cb_error = B_TRUE;
+ zfs_close(zhp);
+ return (0);
+ }
if (zfs_destroy(zhp) != 0)
- cbp->cb_error = 1;
+ cbp->cb_error = B_TRUE;
else
- changelist_remove(zhp, cbp->cb_clp);
+ changelist_remove(clp, zhp->zfs_name);
+ (void) changelist_postfix(clp);
+ changelist_free(clp);
}
zfs_close(zhp);
@@ -3069,48 +3619,6 @@ rollback_destroy(zfs_handle_t *zhp, void *data)
}
/*
- * Rollback the dataset to its latest snapshot.
- */
-static int
-do_rollback(zfs_handle_t *zhp)
-{
- int ret;
- zfs_cmd_t zc = { 0 };
-
- assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
- zhp->zfs_type == ZFS_TYPE_VOLUME);
-
- if (zhp->zfs_type == ZFS_TYPE_VOLUME &&
- zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name) != 0)
- return (-1);
-
- (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
-
- if (ZFS_IS_VOLUME(zhp))
- zc.zc_objset_type = DMU_OST_ZVOL;
- else
- zc.zc_objset_type = DMU_OST_ZFS;
-
- /*
- * We rely on the consumer to verify that there are no newer snapshots
- * for the given dataset. Given these constraints, we can simply pass
- * the name on to the ioctl() call. There is still an unlikely race
- * condition where the user has taken a snapshot since we verified that
- * this was the most recent.
- */
- if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_ROLLBACK,
- &zc)) != 0) {
- (void) zfs_standard_error_fmt(zhp->zfs_hdl, errno,
- dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
- zhp->zfs_name);
- } else if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
- ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
- }
-
- return (ret);
-}
-
-/*
* Given a dataset, rollback to a specific snapshot, discarding any
* data changes since then and making it the active dataset.
*
@@ -3118,56 +3626,87 @@ do_rollback(zfs_handle_t *zhp)
* their dependents.
*/
int
-zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, int flag)
+zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
{
- int ret;
rollback_data_t cb = { 0 };
- prop_changelist_t *clp;
-
- /*
- * Unmount all dependendents of the dataset and the dataset itself.
- * The list we need to gather is the same as for doing rename
- */
- clp = changelist_gather(zhp, ZFS_PROP_NAME, flag ? MS_FORCE: 0);
- if (clp == NULL)
- return (-1);
+ int err;
+ zfs_cmd_t zc = { 0 };
+ boolean_t restore_resv = 0;
+ uint64_t old_volsize, new_volsize;
+ zfs_prop_t resv_prop;
- if ((ret = changelist_prefix(clp)) != 0)
- goto out;
+ assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
+ zhp->zfs_type == ZFS_TYPE_VOLUME);
/*
* Destroy all recent snapshots and their dependents.
*/
+ cb.cb_force = force;
cb.cb_target = snap->zfs_name;
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
- cb.cb_clp = clp;
(void) zfs_iter_children(zhp, rollback_destroy, &cb);
- if ((ret = cb.cb_error) != 0) {
- (void) changelist_postfix(clp);
- goto out;
- }
+ if (cb.cb_error)
+ return (-1);
/*
* Now that we have verified that the snapshot is the latest,
* rollback to the given snapshot.
*/
- ret = do_rollback(zhp);
- if (ret != 0) {
- (void) changelist_postfix(clp);
- goto out;
+ if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
+ if (zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name) != 0)
+ return (-1);
+ if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
+ return (-1);
+ old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
+ restore_resv =
+ (old_volsize == zfs_prop_get_int(zhp, resv_prop));
}
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (ZFS_IS_VOLUME(zhp))
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
/*
- * We only want to re-mount the filesystem if it was mounted in the
- * first place.
+ * We rely on zfs_iter_children() to verify that there are no
+ * newer snapshots for the given dataset. Therefore, we can
+ * simply pass the name on to the ioctl() call. There is still
+ * an unlikely race condition where the user has taken a
+ * snapshot since we verified that this was the most recent.
+ *
*/
- ret = changelist_postfix(clp);
+ if ((err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_ROLLBACK, &zc)) != 0) {
+ (void) zfs_standard_error_fmt(zhp->zfs_hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
+ zhp->zfs_name);
+ return (err);
+ }
-out:
- changelist_free(clp);
- return (ret);
+ /*
+ * For volumes, if the pre-rollback volsize matched the pre-
+ * rollback reservation and the volsize has changed then set
+ * the reservation property to the post-rollback volsize.
+ * Make a new handle since the rollback closed the dataset.
+ */
+ if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
+ (zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
+ if (err = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name)) {
+ zfs_close(zhp);
+ return (err);
+ }
+ if (restore_resv) {
+ new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
+ if (old_volsize != new_volsize)
+ err = zfs_prop_set_int(zhp, resv_prop,
+ new_volsize);
+ }
+ zfs_close(zhp);
+ }
+ return (err);
}
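A usage sketch of the new interface (assumptions: libzfs_init() is available and the dataset/snapshot names are placeholders); error handling is minimal and the reservation adjustment for volumes happens inside zfs_rollback() as described above:

#include <libzfs.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	zfs_handle_t *fs, *snap;
	int err = -1;

	if (hdl == NULL)
		return (1);
	fs = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM);
	snap = zfs_open(hdl, "tank/fs@yesterday", ZFS_TYPE_SNAPSHOT);
	if (fs != NULL && snap != NULL)
		err = zfs_rollback(fs, snap, B_FALSE);	/* no forced unmounts */
	if (err != 0)
		(void) fprintf(stderr, "rollback failed\n");
	if (snap != NULL)
		zfs_close(snap);
	if (fs != NULL)
		zfs_close(fs);
	libzfs_fini(hdl);
	return (err != 0);
}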
/*
@@ -3210,7 +3749,7 @@ zfs_iter_dependents(zfs_handle_t *zhp, boolean_t allowrecursion,
* Renames the given dataset.
*/
int
-zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
+zfs_rename(zfs_handle_t *zhp, const char *target, boolean_t recursive)
{
int ret;
zfs_cmd_t zc = { 0 };
@@ -3262,7 +3801,7 @@ zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
errbuf));
}
}
- if (!zfs_validate_name(hdl, target, zhp->zfs_type))
+ if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (recursive) {
@@ -3271,12 +3810,12 @@ zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
- if (!zfs_validate_name(hdl, target, zhp->zfs_type))
+ if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
uint64_t unused;
/* validate parents */
- if (check_parents(hdl, target, &unused) != 0)
+ if (check_parents(hdl, target, &unused, B_FALSE, NULL) != 0)
return (-1);
(void) parent_name(target, parent, sizeof (parent));
@@ -3313,17 +3852,22 @@ zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
if (recursive) {
struct destroydata dd;
- parentname = strdup(zhp->zfs_name);
+ parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
+ if (parentname == NULL) {
+ ret = -1;
+ goto error;
+ }
delim = strchr(parentname, '@');
*delim = '\0';
- zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_ANY);
+ zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_DATASET);
if (zhrp == NULL) {
- return (-1);
+ ret = -1;
+ goto error;
}
dd.snapname = delim + 1;
dd.gotone = B_FALSE;
- dd.closezhp = B_FALSE;
+ dd.closezhp = B_TRUE;
/* We remove any zvol links prior to renaming them */
ret = zfs_iter_filesystems(zhrp, zfs_remove_link_cb, &dd);
@@ -3331,7 +3875,7 @@ zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
goto error;
}
} else {
- if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0)) == NULL)
+ if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0, 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
@@ -3356,19 +3900,19 @@ zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
zc.zc_cookie = recursive;
- if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_RENAME, &zc)) != 0) {
+ if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
/*
* if it was recursive, the one that actually failed will
* be in zc.zc_name
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
- "cannot rename to '%s'"), zc.zc_name);
+ "cannot rename '%s'"), zc.zc_name);
if (recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
- (void) zfs_error(hdl, EZFS_CROSSTARGET, errbuf);
+ (void) zfs_error(hdl, EZFS_EXISTS, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
@@ -3432,6 +3976,8 @@ zvol_create_link_common(libzfs_handle_t *hdl, const char *dataset, int ifexists)
zfs_cmd_t zc = { 0 };
#if 0
di_devlink_handle_t dhdl;
+ priv_set_t *priv_effective;
+ int privileged;
#endif
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
@@ -3470,17 +4016,51 @@ zvol_create_link_common(libzfs_handle_t *hdl, const char *dataset, int ifexists)
#if 0
/*
- * Call devfsadm and wait for the links to magically appear.
+ * If privileged call devfsadm and wait for the links to
+ * magically appear.
+ * Otherwise, print out an informational message.
*/
- if ((dhdl = di_devlink_init(ZFS_DRIVER, DI_MAKE_LINK)) == NULL) {
- zfs_error_aux(hdl, strerror(errno));
- (void) zfs_error_fmt(hdl, EZFS_DEVLINKS,
- dgettext(TEXT_DOMAIN, "cannot create device links "
- "for '%s'"), dataset);
- (void) ioctl(hdl->libzfs_fd, ZFS_IOC_REMOVE_MINOR, &zc);
- return (-1);
+
+ priv_effective = priv_allocset();
+ (void) getppriv(PRIV_EFFECTIVE, priv_effective);
+ privileged = (priv_isfullset(priv_effective) == B_TRUE);
+ priv_freeset(priv_effective);
+
+ if (privileged) {
+ if ((dhdl = di_devlink_init(ZFS_DRIVER,
+ DI_MAKE_LINK)) == NULL) {
+ zfs_error_aux(hdl, strerror(errno));
+ (void) zfs_error_fmt(hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot create device links "
+ "for '%s'"), dataset);
+ (void) ioctl(hdl->libzfs_fd, ZFS_IOC_REMOVE_MINOR, &zc);
+ return (-1);
+ } else {
+ (void) di_devlink_fini(&dhdl);
+ }
} else {
- (void) di_devlink_fini(&dhdl);
+ char pathname[MAXPATHLEN];
+ struct stat64 statbuf;
+ int i;
+
+#define MAX_WAIT 10
+
+ /*
+ * This is the poor man's way of waiting for the link
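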
+ * to show up. If after 10 seconds we still don't
+ * have it, then print out a message.
+ */
+ (void) snprintf(pathname, sizeof (pathname), "/dev/zvol/dsk/%s",
+ dataset);
+
+ for (i = 0; i != MAX_WAIT; i++) {
+ if (stat64(pathname, &statbuf) == 0)
+ break;
+ (void) sleep(1);
+ }
+ if (i == MAX_WAIT)
+ (void) printf(gettext("%s may not be immediately "
+ "available\n"), pathname);
}
#endif
@@ -3524,200 +4104,6 @@ zfs_get_user_props(zfs_handle_t *zhp)
}
/*
- * Given a comma-separated list of properties, contruct a property list
- * containing both user-defined and native properties. This function will
- * return a NULL list if 'all' is specified, which can later be expanded on a
- * per-dataset basis by zfs_expand_proplist().
- */
-int
-zfs_get_proplist_common(libzfs_handle_t *hdl, char *fields,
- zfs_proplist_t **listp, zfs_type_t type)
-{
- size_t len;
- char *s, *p;
- char c;
- zfs_prop_t prop;
- zfs_proplist_t *entry;
- zfs_proplist_t **last;
-
- *listp = NULL;
- last = listp;
-
- /*
- * If 'all' is specified, return a NULL list.
- */
- if (strcmp(fields, "all") == 0)
- return (0);
-
- /*
- * If no fields were specified, return an error.
- */
- if (fields[0] == '\0') {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "no properties specified"));
- return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
- "bad property list")));
- }
-
- /*
- * It would be nice to use getsubopt() here, but the inclusion of column
- * aliases makes this more effort than it's worth.
- */
- s = fields;
- while (*s != '\0') {
- if ((p = strchr(s, ',')) == NULL) {
- len = strlen(s);
- p = s + len;
- } else {
- len = p - s;
- }
-
- /*
- * Check for empty options.
- */
- if (len == 0) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "empty property name"));
- return (zfs_error(hdl, EZFS_BADPROP,
- dgettext(TEXT_DOMAIN, "bad property list")));
- }
-
- /*
- * Check all regular property names.
- */
- c = s[len];
- s[len] = '\0';
- prop = zfs_name_to_prop_common(s, type);
-
- if (prop != ZFS_PROP_INVAL &&
- !zfs_prop_valid_for_type(prop, type))
- prop = ZFS_PROP_INVAL;
-
- /*
- * When no property table entry can be found, return failure if
- * this is a pool property or if this isn't a user-defined
- * dataset property,
- */
- if (prop == ZFS_PROP_INVAL &&
- (type & ZFS_TYPE_POOL || !zfs_prop_user(s))) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "invalid property '%s'"), s);
- return (zfs_error(hdl, EZFS_BADPROP,
- dgettext(TEXT_DOMAIN, "bad property list")));
- }
-
- if ((entry = zfs_alloc(hdl, sizeof (zfs_proplist_t))) == NULL)
- return (-1);
-
- entry->pl_prop = prop;
- if (prop == ZFS_PROP_INVAL) {
- if ((entry->pl_user_prop =
- zfs_strdup(hdl, s)) == NULL) {
- free(entry);
- return (-1);
- }
- entry->pl_width = strlen(s);
- } else {
- entry->pl_width = zfs_prop_width(prop,
- &entry->pl_fixed);
- }
-
- *last = entry;
- last = &entry->pl_next;
-
- s = p;
- if (c == ',')
- s++;
- }
-
- return (0);
-}
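The in-place comma splitting used above is a common pattern; a minimal standalone sketch (plain C only, not part of the patch; the property list is an example) of the same technique, terminating each field in place instead of copying it:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char fields[] = "name,used,compressratio";
	char *s = fields, *p;
	size_t len;
	char c;

	while (*s != '\0') {
		if ((p = strchr(s, ',')) == NULL)
			p = s + strlen(s);
		len = p - s;
		c = s[len];
		s[len] = '\0';		/* terminate the field in place */
		if (len == 0)
			(void) printf("empty property name\n");
		else
			(void) printf("property: %s\n", s);
		s[len] = c;		/* restore the ',' (or '\0') */
		s = p;
		if (c == ',')
			s++;
	}
	return (0);
}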
-
-int
-zfs_get_proplist(libzfs_handle_t *hdl, char *fields, zfs_proplist_t **listp)
-{
- return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_ANY));
-}
-
-void
-zfs_free_proplist(zfs_proplist_t *pl)
-{
- zfs_proplist_t *next;
-
- while (pl != NULL) {
- next = pl->pl_next;
- free(pl->pl_user_prop);
- free(pl);
- pl = next;
- }
-}
-
-typedef struct expand_data {
- zfs_proplist_t **last;
- libzfs_handle_t *hdl;
-} expand_data_t;
-
-static zfs_prop_t
-zfs_expand_proplist_cb(zfs_prop_t prop, void *cb)
-{
- zfs_proplist_t *entry;
- expand_data_t *edp = cb;
-
- if ((entry = zfs_alloc(edp->hdl, sizeof (zfs_proplist_t))) == NULL)
- return (ZFS_PROP_INVAL);
-
- entry->pl_prop = prop;
- entry->pl_width = zfs_prop_width(prop, &entry->pl_fixed);
- entry->pl_all = B_TRUE;
-
- *(edp->last) = entry;
- edp->last = &entry->pl_next;
-
- return (ZFS_PROP_CONT);
-}
-
-int
-zfs_expand_proplist_common(libzfs_handle_t *hdl, zfs_proplist_t **plp,
- zfs_type_t type)
-{
- zfs_proplist_t *entry;
- zfs_proplist_t **last;
- expand_data_t exp;
-
- if (*plp == NULL) {
- /*
- * If this is the very first time we've been called for an 'all'
- * specification, expand the list to include all native
- * properties.
- */
- last = plp;
-
- exp.last = last;
- exp.hdl = hdl;
-
- if (zfs_prop_iter_common(zfs_expand_proplist_cb, &exp, type,
- B_FALSE) == ZFS_PROP_INVAL)
- return (-1);
-
- /*
- * Add 'name' to the beginning of the list, which is handled
- * specially.
- */
- if ((entry = zfs_alloc(hdl,
- sizeof (zfs_proplist_t))) == NULL)
- return (-1);
-
- entry->pl_prop = ZFS_PROP_NAME;
- entry->pl_width = zfs_prop_width(ZFS_PROP_NAME,
- &entry->pl_fixed);
- entry->pl_all = B_TRUE;
- entry->pl_next = *plp;
- *plp = entry;
- }
- return (0);
-}
-
-/*
* This function is used by 'zfs list' to determine the exact set of columns to
* display, and their maximum widths. This does two main things:
*
@@ -3729,17 +4115,17 @@ zfs_expand_proplist_common(libzfs_handle_t *hdl, zfs_proplist_t **plp,
* so that we can size the column appropriately.
*/
int
-zfs_expand_proplist(zfs_handle_t *zhp, zfs_proplist_t **plp)
+zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
- zfs_proplist_t *entry;
- zfs_proplist_t **last, **start;
+ zprop_list_t *entry;
+ zprop_list_t **last, **start;
nvlist_t *userprops, *propval;
nvpair_t *elem;
char *strval;
char buf[ZFS_MAXPROPLEN];
- if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_ANY) != 0)
+ if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
return (-1);
userprops = zfs_get_user_props(zhp);
@@ -3753,7 +4139,7 @@ zfs_expand_proplist(zfs_handle_t *zhp, zfs_proplist_t **plp)
*/
start = plp;
while (*start != NULL) {
- if ((*start)->pl_prop == ZFS_PROP_INVAL)
+ if ((*start)->pl_prop == ZPROP_INVAL)
break;
start = &(*start)->pl_next;
}
@@ -3772,14 +4158,14 @@ zfs_expand_proplist(zfs_handle_t *zhp, zfs_proplist_t **plp)
if (*last == NULL) {
if ((entry = zfs_alloc(hdl,
- sizeof (zfs_proplist_t))) == NULL ||
+ sizeof (zprop_list_t))) == NULL ||
((entry->pl_user_prop = zfs_strdup(hdl,
nvpair_name(elem)))) == NULL) {
free(entry);
return (-1);
}
- entry->pl_prop = ZFS_PROP_INVAL;
+ entry->pl_prop = ZPROP_INVAL;
entry->pl_width = strlen(nvpair_name(elem));
entry->pl_all = B_TRUE;
*last = entry;
@@ -3794,7 +4180,7 @@ zfs_expand_proplist(zfs_handle_t *zhp, zfs_proplist_t **plp)
if (entry->pl_fixed)
continue;
- if (entry->pl_prop != ZFS_PROP_INVAL) {
+ if (entry->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, entry->pl_prop,
buf, sizeof (buf), NULL, NULL, 0, B_FALSE) == 0) {
if (strlen(buf) > entry->pl_width)
@@ -3803,7 +4189,7 @@ zfs_expand_proplist(zfs_handle_t *zhp, zfs_proplist_t **plp)
} else if (nvlist_lookup_nvlist(userprops,
entry->pl_user_prop, &propval) == 0) {
verify(nvlist_lookup_string(propval,
- ZFS_PROP_VALUE, &strval) == 0);
+ ZPROP_VALUE, &strval) == 0);
if (strlen(strval) > entry->pl_width)
entry->pl_width = strlen(strval);
}
@@ -3812,6 +4198,72 @@ zfs_expand_proplist(zfs_handle_t *zhp, zfs_proplist_t **plp)
return (0);
}
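A usage sketch of the public entry point (the dataset name is a placeholder; list teardown and most error handling are omitted): starting from a NULL list, zfs_expand_proplist() fills in the full column set and widths that "zfs list" would use for one dataset.

#include <libzfs.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	zfs_handle_t *zhp;
	zprop_list_t *plp = NULL, *pl;

	if (hdl == NULL)
		return (1);
	if ((zhp = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM)) == NULL)
		return (1);
	if (zfs_expand_proplist(zhp, &plp) == 0) {
		for (pl = plp; pl != NULL; pl = pl->pl_next)
			(void) printf("%-20s width %u\n",
			    pl->pl_prop == ZPROP_INVAL ?
			    pl->pl_user_prop : zfs_prop_to_name(pl->pl_prop),
			    (unsigned)pl->pl_width);
	}
	zfs_close(zhp);
	libzfs_fini(hdl);
	return (0);
}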
+#ifdef TODO
+int
+zfs_iscsi_perm_check(libzfs_handle_t *hdl, char *dataset, ucred_t *cred)
+{
+ zfs_cmd_t zc = { 0 };
+ nvlist_t *nvp;
+ gid_t gid;
+ uid_t uid;
+ const gid_t *groups;
+ int group_cnt;
+ int error;
+
+ if (nvlist_alloc(&nvp, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(hdl));
+
+ uid = ucred_geteuid(cred);
+ gid = ucred_getegid(cred);
+ group_cnt = ucred_getgroups(cred, &groups);
+
+ if (uid == (uid_t)-1 || gid == (uid_t)-1 || group_cnt == (uid_t)-1)
+ return (1);
+
+ if (nvlist_add_uint32(nvp, ZFS_DELEG_PERM_UID, uid) != 0) {
+ nvlist_free(nvp);
+ return (1);
+ }
+
+ if (nvlist_add_uint32(nvp, ZFS_DELEG_PERM_GID, gid) != 0) {
+ nvlist_free(nvp);
+ return (1);
+ }
+
+ if (nvlist_add_uint32_array(nvp,
+ ZFS_DELEG_PERM_GROUPS, (uint32_t *)groups, group_cnt) != 0) {
+ nvlist_free(nvp);
+ return (1);
+ }
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+
+ if (zcmd_write_src_nvlist(hdl, &zc, nvp))
+ return (-1);
+
+ error = ioctl(hdl->libzfs_fd, ZFS_IOC_ISCSI_PERM_CHECK, &zc);
+ nvlist_free(nvp);
+ return (error);
+}
+#endif
+
+int
+zfs_deleg_share_nfs(libzfs_handle_t *hdl, char *dataset, char *path,
+ void *export, void *sharetab, int sharemax, zfs_share_op_t operation)
+{
+ zfs_cmd_t zc = { 0 };
+ int error;
+
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
+ zc.zc_share.z_sharedata = (uint64_t)(uintptr_t)sharetab;
+ zc.zc_share.z_exportdata = (uint64_t)(uintptr_t)export;
+ zc.zc_share.z_sharetype = operation;
+ zc.zc_share.z_sharemax = sharemax;
+
+ error = ioctl(hdl->libzfs_fd, ZFS_IOC_SHARE, &zc);
+ return (error);
+}
+
/*
* Attach/detach the given filesystem to/from the given jail.
*/
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c
index c283016..e7cbf23 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_graph.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -126,6 +126,8 @@ typedef struct zfs_graph {
zfs_vertex_t **zg_hash;
size_t zg_size;
size_t zg_nvertex;
+ const char *zg_root;
+ int zg_clone_count;
} zfs_graph_t;
/*
@@ -255,7 +257,7 @@ zfs_vertex_sort_edges(zfs_vertex_t *zvp)
* datasets in the pool.
*/
static zfs_graph_t *
-zfs_graph_create(libzfs_handle_t *hdl, size_t size)
+zfs_graph_create(libzfs_handle_t *hdl, const char *dataset, size_t size)
{
zfs_graph_t *zgp = zfs_alloc(hdl, sizeof (zfs_graph_t));
@@ -269,6 +271,9 @@ zfs_graph_create(libzfs_handle_t *hdl, size_t size)
return (NULL);
}
+ zgp->zg_root = dataset;
+ zgp->zg_clone_count = 0;
+
return (zgp);
}
@@ -367,17 +372,16 @@ zfs_graph_add(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *source,
}
/*
- * Iterate over all children of the given dataset, adding any vertices as
- * necessary. Returns 0 if no cloned snapshots were seen, -1 if there was an
- * error, or 1 otherwise. This is a simple recursive algorithm - the ZFS
- * namespace typically is very flat. We manually invoke the necessary ioctl()
- * calls to avoid the overhead and additional semantics of zfs_open().
+ * Iterate over all children of the given dataset, adding any vertices
+ * as necessary. Returns -1 if there was an error, or 0 otherwise.
+ * This is a simple recursive algorithm - the ZFS namespace typically
+ * is very flat. We manually invoke the necessary ioctl() calls to
+ * avoid the overhead and additional semantics of zfs_open().
*/
static int
iterate_children(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset)
{
zfs_cmd_t zc = { 0 };
- int ret = 0, err;
zfs_vertex_t *zvp;
/*
@@ -390,18 +394,8 @@ iterate_children(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset)
return (0);
/*
- * We check the clone parent here instead of within the loop, so that if
- * the root dataset has been promoted from a clone, we find its parent
- * appropriately.
+ * Iterate over all children
*/
- (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0 &&
- zc.zc_objset_stats.dds_clone_of[0] != '\0') {
- if (zfs_graph_add(hdl, zgp, zc.zc_objset_stats.dds_clone_of,
- zc.zc_name, zc.zc_objset_stats.dds_creation_txg) != 0)
- return (-1);
- }
-
for ((void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
ioctl(hdl->libzfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0;
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name))) {
@@ -417,9 +411,23 @@ iterate_children(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset)
* dataset and clone statistics. If this fails, the dataset has
* since been removed, and we're pretty much screwed anyway.
*/
+ zc.zc_objset_stats.dds_origin[0] = '\0';
if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0)
continue;
+ if (zc.zc_objset_stats.dds_origin[0] != '\0') {
+ if (zfs_graph_add(hdl, zgp,
+ zc.zc_objset_stats.dds_origin, zc.zc_name,
+ zc.zc_objset_stats.dds_creation_txg) != 0)
+ return (-1);
+ /*
+ * Count origins only if they are contained in the graph
+ */
+ if (isa_child_of(zc.zc_objset_stats.dds_origin,
+ zgp->zg_root))
+ zgp->zg_clone_count--;
+ }
+
/*
* Add an edge between the parent and the child.
*/
@@ -428,19 +436,10 @@ iterate_children(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset)
return (-1);
/*
- * Iterate over all children
+ * Recursively visit child
*/
- err = iterate_children(hdl, zgp, zc.zc_name);
- if (err == -1)
+ if (iterate_children(hdl, zgp, zc.zc_name))
return (-1);
- else if (err == 1)
- ret = 1;
-
- /*
- * Indicate if we found a dataset with a non-zero clone count.
- */
- if (zc.zc_objset_stats.dds_num_clones != 0)
- ret = 1;
}
/*
@@ -467,67 +466,84 @@ iterate_children(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset)
zc.zc_objset_stats.dds_creation_txg) != 0)
return (-1);
- /*
- * Indicate if we found a dataset with a non-zero clone count.
- */
- if (zc.zc_objset_stats.dds_num_clones != 0)
- ret = 1;
+ zgp->zg_clone_count += zc.zc_objset_stats.dds_num_clones;
}
zvp->zv_visited = VISIT_SEEN;
- return (ret);
+ return (0);
}
/*
- * Construct a complete graph of all necessary vertices. First, we iterate over
- * only our object's children. If we don't find any cloned snapshots, then we
- * simple return that. Otherwise, we have to start at the pool root and iterate
- * over all datasets.
+ * Returns false if there are no snapshots with dependent clones in this
+ * subtree or if all of those clones are also in this subtree. Returns
+ * true if there is an error or there are external dependents.
+ */
+static boolean_t
+external_dependents(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset)
+{
+ zfs_cmd_t zc = { 0 };
+
+ /*
+ * Check whether this dataset is a clone or has clones since
+ * iterate_children() only checks the children.
+ */
+ (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0)
+ return (B_TRUE);
+
+ if (zc.zc_objset_stats.dds_origin[0] != '\0') {
+ if (zfs_graph_add(hdl, zgp,
+ zc.zc_objset_stats.dds_origin, zc.zc_name,
+ zc.zc_objset_stats.dds_creation_txg) != 0)
+ return (B_TRUE);
+ if (isa_child_of(zc.zc_objset_stats.dds_origin, dataset))
+ zgp->zg_clone_count--;
+ }
+
+ if ((zc.zc_objset_stats.dds_num_clones) ||
+ iterate_children(hdl, zgp, dataset))
+ return (B_TRUE);
+
+ return (zgp->zg_clone_count != 0);
+}
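The clone-count bookkeeping above leans on isa_child_of(), whose body is not part of this hunk. A hypothetical sketch (plain C, illustrative only) of what such a check amounts to: the parent name must be a prefix that ends on a dataset-name component boundary.

#include <stdio.h>
#include <string.h>

static int
isa_child_of_sketch(const char *dataset, const char *parent)
{
	size_t len = strlen(parent);

	return (strncmp(dataset, parent, len) == 0 &&
	    (dataset[len] == '\0' || dataset[len] == '/' ||
	    dataset[len] == '@'));
}

int
main(void)
{
	(void) printf("%d\n", isa_child_of_sketch("tank/a/b", "tank/a"));	/* 1 */
	(void) printf("%d\n", isa_child_of_sketch("tank/ab", "tank/a"));	/* 0 */
	return (0);
}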
+
+/*
+ * Construct a complete graph of all necessary vertices. First, iterate over
+ * only our object's children. If no cloned snapshots are found, or all of
+ * the cloned snapshots are in this subtree then return a graph of the subtree.
+ * Otherwise, start at the root of the pool and iterate over all datasets.
*/
static zfs_graph_t *
construct_graph(libzfs_handle_t *hdl, const char *dataset)
{
- zfs_graph_t *zgp = zfs_graph_create(hdl, ZFS_GRAPH_SIZE);
- zfs_cmd_t zc = { 0 };
+ zfs_graph_t *zgp = zfs_graph_create(hdl, dataset, ZFS_GRAPH_SIZE);
int ret = 0;
if (zgp == NULL)
return (zgp);
- /*
- * We need to explicitly check whether this dataset has clones or not,
- * since iterate_children() only checks the children.
- */
- (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
- (void) ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc);
-
- if (zc.zc_objset_stats.dds_num_clones != 0 ||
- (ret = iterate_children(hdl, zgp, dataset)) != 0) {
+ if ((strchr(dataset, '/') == NULL) ||
+ (external_dependents(hdl, zgp, dataset))) {
/*
* Determine pool name and try again.
*/
- char *pool, *slash;
-
- if ((slash = strchr(dataset, '/')) != NULL ||
- (slash = strchr(dataset, '@')) != NULL) {
- pool = zfs_alloc(hdl, slash - dataset + 1);
- if (pool == NULL) {
- zfs_graph_destroy(zgp);
- return (NULL);
- }
- (void) strncpy(pool, dataset, slash - dataset);
- pool[slash - dataset] = '\0';
-
- if (iterate_children(hdl, zgp, pool) == -1 ||
- zfs_graph_add(hdl, zgp, pool, NULL, 0) != 0) {
- free(pool);
- zfs_graph_destroy(zgp);
- return (NULL);
- }
+ int len = strcspn(dataset, "/@") + 1;
+ char *pool = zfs_alloc(hdl, len);
+
+ if (pool == NULL) {
+ zfs_graph_destroy(zgp);
+ return (NULL);
+ }
+ (void) strlcpy(pool, dataset, len);
+ if (iterate_children(hdl, zgp, pool) == -1 ||
+ zfs_graph_add(hdl, zgp, pool, NULL, 0) != 0) {
free(pool);
+ zfs_graph_destroy(zgp);
+ return (NULL);
}
+ free(pool);
}
if (ret == -1 || zfs_graph_add(hdl, zgp, dataset, NULL, 0) != 0) {
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h
index 9581331..afe71f3 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h
@@ -1,5 +1,5 @@
/*
- * CDDL HEADER START
+ * CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
@@ -20,21 +20,21 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _LIBFS_IMPL_H
#define _LIBFS_IMPL_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/dmu.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_acl.h>
+#include <sys/spa.h>
#include <sys/nvpair.h>
+#include <libshare.h>
#include <libuutil.h>
#include <libzfs.h>
@@ -42,22 +42,33 @@
extern "C" {
#endif
+#ifdef VERIFY
+#undef VERIFY
+#endif
+#define VERIFY verify
+
struct libzfs_handle {
int libzfs_error;
int libzfs_fd;
FILE *libzfs_mnttab;
FILE *libzfs_sharetab;
+ zpool_handle_t *libzfs_pool_handles;
uu_avl_pool_t *libzfs_ns_avlpool;
uu_avl_t *libzfs_ns_avl;
uint64_t libzfs_ns_gen;
int libzfs_desc_active;
char libzfs_action[1024];
char libzfs_desc[1024];
+ char *libzfs_log_str;
int libzfs_printerr;
+ void *libzfs_sharehdl; /* libshare handle */
+ uint_t libzfs_shareflags;
};
+#define ZFSSHARE_MISS 0x01 /* Didn't find entry in cache */
struct zfs_handle {
libzfs_handle_t *zfs_hdl;
+ zpool_handle_t *zpool_hdl;
char zfs_name[ZFS_MAXNAMELEN];
zfs_type_t zfs_type; /* type including snapshot */
zfs_type_t zfs_head_type; /* type excluding snapshot */
@@ -66,7 +77,6 @@ struct zfs_handle {
nvlist_t *zfs_user_props;
boolean_t zfs_mntcheck;
char *zfs_mntopts;
- char zfs_root[MAXPATHLEN];
};
/*
@@ -77,14 +87,33 @@ struct zfs_handle {
struct zpool_handle {
libzfs_handle_t *zpool_hdl;
+ zpool_handle_t *zpool_next;
char zpool_name[ZPOOL_MAXNAMELEN];
int zpool_state;
size_t zpool_config_size;
nvlist_t *zpool_config;
nvlist_t *zpool_old_config;
nvlist_t *zpool_props;
+ diskaddr_t zpool_start_block;
};
+typedef enum {
+ PROTO_NFS = 0,
+ PROTO_SMB = 1,
+ PROTO_END = 2
+} zfs_share_proto_t;
+
+/*
+ * The following can be used as a bitmask and any new values
+ * added must preserve that capability.
+ */
+typedef enum {
+ SHARED_NOT_SHARED = 0x0,
+ SHARED_ISCSI = 0x1,
+ SHARED_NFS = 0x2,
+ SHARED_SMB = 0x4
+} zfs_share_type_t;
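Because the values are distinct bits, a single word can carry every protocol a dataset is shared over; a small illustrative sketch:

static boolean_t
shared_over_nfs(zfs_share_type_t shared)
{
	/* a combined value such as (SHARED_NFS | SHARED_SMB) still tests true */
	return ((shared & SHARED_NFS) != 0);
}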
+
int zfs_error(libzfs_handle_t *, int, const char *);
int zfs_error_fmt(libzfs_handle_t *, int, const char *, ...);
void zfs_error_aux(libzfs_handle_t *, const char *, ...);
@@ -101,20 +130,24 @@ int zpool_standard_error_fmt(libzfs_handle_t *, int, const char *, ...);
int get_dependents(libzfs_handle_t *, boolean_t, const char *, char ***,
size_t *);
-int zfs_expand_proplist_common(libzfs_handle_t *, zfs_proplist_t **,
- zfs_type_t);
-int zfs_get_proplist_common(libzfs_handle_t *, char *, zfs_proplist_t **,
- zfs_type_t);
-zfs_prop_t zfs_prop_iter_common(zfs_prop_f, void *, zfs_type_t, boolean_t);
-zfs_prop_t zfs_name_to_prop_common(const char *, zfs_type_t);
-nvlist_t *zfs_validate_properties(libzfs_handle_t *, zfs_type_t, char *,
- nvlist_t *, uint64_t, zfs_handle_t *zhp, const char *errbuf);
+int zprop_parse_value(libzfs_handle_t *, nvpair_t *, int, zfs_type_t,
+ nvlist_t *, char **, uint64_t *, const char *);
+int zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp,
+ zfs_type_t type);
+
+/*
+ * Use this changelist_gather() flag to force attempting mounts
+ * on each change node regardless of whether or not it is currently
+ * mounted.
+ */
+#define CL_GATHER_MOUNT_ALWAYS 1
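A sketch of how this flag is meant to be used (internal interface; zhp is assumed to be an open handle supplied by the caller). The flag is passed in the third, gather-flags argument of the new changelist_gather() signature; the fourth argument carries mount flags such as MS_FORCE.

#include "libzfs_impl.h"

static int
apply_with_remount(zfs_handle_t *zhp, zfs_prop_t prop)
{
	prop_changelist_t *clp;
	int ret;

	clp = changelist_gather(zhp, prop, CL_GATHER_MOUNT_ALWAYS, 0);
	if (clp == NULL)
		return (-1);
	ret = changelist_prefix(clp);		/* unmount/unshare as needed */
	/* ... apply the property change here ... */
	if (ret == 0)
		ret = changelist_postfix(clp);	/* remount/reshare everything */
	changelist_free(clp);
	return (ret);
}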
typedef struct prop_changelist prop_changelist_t;
int zcmd_alloc_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, size_t);
-int zcmd_write_src_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *, size_t *);
+int zcmd_write_src_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *);
+int zcmd_write_conf_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *);
int zcmd_expand_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *);
int zcmd_read_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t **);
void zcmd_free_nvlists(zfs_cmd_t *);
@@ -122,13 +155,15 @@ void zcmd_free_nvlists(zfs_cmd_t *);
int changelist_prefix(prop_changelist_t *);
int changelist_postfix(prop_changelist_t *);
void changelist_rename(prop_changelist_t *, const char *, const char *);
-void changelist_remove(zfs_handle_t *, prop_changelist_t *);
+void changelist_remove(prop_changelist_t *, const char *);
void changelist_free(prop_changelist_t *);
-prop_changelist_t *changelist_gather(zfs_handle_t *, zfs_prop_t, int);
-int changelist_unshare(prop_changelist_t *);
+prop_changelist_t *changelist_gather(zfs_handle_t *, zfs_prop_t, int, int);
+int changelist_unshare(prop_changelist_t *, zfs_share_proto_t *);
int changelist_haszonedchild(prop_changelist_t *);
void remove_mountpoint(zfs_handle_t *);
+int create_parents(libzfs_handle_t *, char *, int);
+boolean_t isa_child_of(const char *dataset, const char *parent);
zfs_handle_t *make_dataset_handle(libzfs_handle_t *, const char *);
@@ -137,10 +172,23 @@ int zpool_open_silent(libzfs_handle_t *, const char *, zpool_handle_t **);
int zvol_create_link(libzfs_handle_t *, const char *);
int zvol_remove_link(libzfs_handle_t *, const char *);
int zpool_iter_zvol(zpool_handle_t *, int (*)(const char *, void *), void *);
+boolean_t zpool_name_valid(libzfs_handle_t *, boolean_t, const char *);
void namespace_clear(libzfs_handle_t *);
+/*
+ * libshare (sharemgr) interfaces used internally.
+ */
+
+extern int zfs_init_libshare(libzfs_handle_t *, int);
+extern void zfs_uninit_libshare(libzfs_handle_t *);
+extern int zfs_parse_options(char *, zfs_share_proto_t);
+
+extern int zfs_unshare_proto(zfs_handle_t *zhp,
+ const char *, zfs_share_proto_t *);
+
#ifdef __FreeBSD__
+
/*
* This is FreeBSD version of ioctl, because Solaris' ioctl() updates
* zc_nvlist_dst_size even if an error is returned, on FreeBSD if an
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c
index 1c77045..166c831 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -213,11 +213,13 @@ add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
name_entry_t *ne;
/*
- * If this is a hot spare not currently in use, add it to the list of
- * names to translate, but don't do anything else.
+ * If this is a hot spare not currently in use or level 2 cache
+ * device, add it to the list of names to translate, but don't do
+ * anything else.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
- &state) == 0 && state == POOL_STATE_SPARE &&
+ &state) == 0 &&
+ (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
@@ -361,6 +363,46 @@ pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
return (0);
}
+static nvlist_t *
+refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
+{
+ nvlist_t *nvl;
+ zfs_cmd_t zc = { 0 };
+ int err;
+
+ if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
+ return (NULL);
+
+ if (zcmd_alloc_dst_nvlist(hdl, &zc,
+ zc.zc_nvlist_conf_size * 2) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (NULL);
+ }
+
+ while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
+ &zc)) != 0 && errno == ENOMEM) {
+ if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (NULL);
+ }
+ }
+
+ if (err) {
+ (void) zpool_standard_error(hdl, errno,
+ dgettext(TEXT_DOMAIN, "cannot discover pools"));
+ zcmd_free_nvlists(&zc);
+ return (NULL);
+ }
+
+ if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (NULL);
+ }
+
+ zcmd_free_nvlists(&zc);
+ return (nvl);
+}
+
/*
* Convert our list of pools into the definitive set of configurations. We
* start by picking the best config for each toplevel vdev. Once that's done,
@@ -369,26 +411,25 @@ pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
* return to the user.
*/
static nvlist_t *
-get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
+get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
- nvlist_t **spares;
- uint_t i, nspares;
+ nvlist_t **spares, **l2cache;
+ uint_t i, nspares, nl2cache;
boolean_t config_seen;
uint64_t best_txg;
char *name, *hostname;
- zfs_cmd_t zc = { 0 };
uint64_t version, guid;
- size_t len;
- int err;
uint_t children = 0;
nvlist_t **child = NULL;
uint_t c;
boolean_t isactive;
uint64_t hostid;
+ nvlist_t *nvl;
+ boolean_t found_one = B_FALSE;
if (nvlist_alloc(&ret, 0, 0) != 0)
goto nomem;
@@ -571,6 +612,13 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
nvlist_free(nvroot);
/*
+ * zdb uses this path to report on active pools that were
+ * imported or created using -R.
+ */
+ if (active_ok)
+ goto add_pool;
+
+ /*
* Determine if this pool is currently active, in which case we
* can't actually import it.
*/
@@ -588,41 +636,11 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
continue;
}
- /*
- * Try to do the import in order to get vdev state.
- */
- if (zcmd_write_src_nvlist(hdl, &zc, config, &len) != 0)
+ if ((nvl = refresh_config(hdl, config)) == NULL)
goto error;
nvlist_free(config);
- config = NULL;
-
- if (zcmd_alloc_dst_nvlist(hdl, &zc, len * 2) != 0) {
- zcmd_free_nvlists(&zc);
- goto error;
- }
-
- while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
- &zc)) != 0 && errno == ENOMEM) {
- if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
- zcmd_free_nvlists(&zc);
- goto error;
- }
- }
-
- if (err) {
- (void) zpool_standard_error(hdl, errno,
- dgettext(TEXT_DOMAIN, "cannot discover pools"));
- zcmd_free_nvlists(&zc);
- goto error;
- }
-
- if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
- zcmd_free_nvlists(&zc);
- goto error;
- }
-
- zcmd_free_nvlists(&zc);
+ config = nvl;
/*
* Go through and update the paths for spares, now that we have
@@ -639,6 +657,17 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
}
/*
+ * Update the paths for l2cache devices.
+ */
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
+ &l2cache, &nl2cache) == 0) {
+ for (i = 0; i < nl2cache; i++) {
+ if (fix_paths(l2cache[i], pl->names) != 0)
+ goto nomem;
+ }
+ }
+
+ /*
* Restore the original information read from the actual label.
*/
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
@@ -652,6 +681,7 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
hostname) == 0);
}
+add_pool:
/*
* Add this pool to the list of configs.
*/
@@ -660,10 +690,16 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
if (nvlist_add_nvlist(ret, name, config) != 0)
goto nomem;
+ found_one = B_TRUE;
nvlist_free(config);
config = NULL;
}
+ if (!found_one) {
+ nvlist_free(ret);
+ ret = NULL;
+ }
+
return (ret);
nomem:
@@ -682,8 +718,9 @@ error:
* Return the offset of the given label.
*/
static uint64_t
-label_offset(size_t size, int l)
+label_offset(uint64_t size, int l)
{
+ ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
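A worked example of the arithmetic (plain C, not part of the patch; it assumes the customary 256 KiB vdev label size): labels 0 and 1 sit at the front of the device, labels 2 and 3 at the aligned end.

#include <stdio.h>
#include <stdint.h>

#define	LABELS		4
#define	LABEL_SIZE	(256ULL * 1024)

static uint64_t
label_off(uint64_t size, int l)
{
	return (l * LABEL_SIZE +
	    (l < LABELS / 2 ? 0 : size - LABELS * LABEL_SIZE));
}

int
main(void)
{
	uint64_t size = 1ULL << 30;	/* 1 GiB, already label-aligned */
	int l;

	for (l = 0; l < LABELS; l++)
		(void) printf("label %d at offset %llu\n", l,
		    (unsigned long long)label_off(size, l));
	return (0);
}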
@@ -698,19 +735,20 @@ zpool_read_label(int fd, nvlist_t **config)
struct stat64 statbuf;
int l;
vdev_label_t *label;
- uint64_t state, txg;
+ uint64_t state, txg, size;
*config = NULL;
if (fstat64(fd, &statbuf) == -1)
return (0);
+ size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
if ((label = malloc(sizeof (vdev_label_t))) == NULL)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
- if (pread(fd, label, sizeof (vdev_label_t),
- label_offset(statbuf.st_size, l)) != sizeof (vdev_label_t))
+ if (pread64(fd, label, sizeof (vdev_label_t),
+ label_offset(size, l)) != sizeof (vdev_label_t))
continue;
if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
@@ -718,12 +756,12 @@ zpool_read_label(int fd, nvlist_t **config)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
- &state) != 0 || state > POOL_STATE_SPARE) {
+ &state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
- if (state != POOL_STATE_SPARE &&
+ if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
@@ -739,31 +777,20 @@ zpool_read_label(int fd, nvlist_t **config)
return (0);
}
-/*
- * Given a list of directories to search, find all pools stored on disk. This
- * includes partial pools which are not available to import. If no args are
- * given (argc is 0), then the default directory (/dev) is searched.
- */
-nvlist_t *
-zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
+static int
+geom_find_import(libzfs_handle_t *hdl, pool_list_t *pools)
{
- int i;
char path[MAXPATHLEN];
- nvlist_t *ret = NULL, *config;
- int fd;
- pool_list_t pools = { 0 };
- pool_entry_t *pe, *penext;
- vdev_entry_t *ve, *venext;
- config_entry_t *ce, *cenext;
- name_entry_t *ne, *nenext;
struct gmesh mesh;
struct gclass *mp;
struct ggeom *gp;
struct gprovider *pp;
+ nvlist_t *config;
+ int fd, ret = 0;
/*
* Go through and read the label configuration information from every
- * possible device, organizing the information according to pool GUID
+ * GEOM provider, organizing the information according to pool GUID
* and toplevel GUID.
*/
@@ -773,32 +800,183 @@ zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
+ if ((fd = g_open(pp->lg_name, 0)) < 0)
+ continue;
(void) snprintf(path, sizeof (path), "%s%s",
_PATH_DEV, pp->lg_name);
- if ((fd = open64(path, O_RDONLY)) < 0)
- continue;
-
if ((zpool_read_label(fd, &config)) != 0) {
+ (void) g_close(fd);
(void) no_memory(hdl);
goto error;
}
- (void) close(fd);
+ (void) g_close(fd);
if (config == NULL)
continue;
- if (add_config(hdl, &pools, path, config) != 0)
+ if (add_config(hdl, pools, path, config) != 0) {
+ ret = -1;
goto error;
+ }
}
}
}
-
+error:
geom_deletetree(&mesh);
+ return (ret);
+}
+
+/*
+ * Given a list of directories to search, find all pools stored on disk. This
+ * includes partial pools which are not available to import. If no args are
+ * given (argc is 0), then the default directory (/dev/dsk) is searched.
+ * poolname or guid (but not both) are provided by the caller when trying
+ * to import a specific pool.
+ */
+static nvlist_t *
+zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
+ boolean_t active_ok, char *poolname, uint64_t guid)
+{
+ int i;
+ DIR *dirp = NULL;
+ struct dirent64 *dp;
+ char path[MAXPATHLEN];
+ char *end;
+ size_t pathleft;
+ struct stat64 statbuf;
+ nvlist_t *ret = NULL, *config;
+ static char *default_dir = "/dev/dsk";
+ int fd;
+ pool_list_t pools = { 0 };
+ pool_entry_t *pe, *penext;
+ vdev_entry_t *ve, *venext;
+ config_entry_t *ce, *cenext;
+ name_entry_t *ne, *nenext;
+
+ verify(poolname == NULL || guid == 0);
+
+ if (argc == 0) {
+ argc = 1;
+ argv = &default_dir;
+ }
+
+ /*
+ * Go through and read the label configuration information from every
+ * possible device, organizing the information according to pool GUID
+ * and toplevel GUID.
+ */
+ for (i = 0; i < argc; i++) {
+ char *rdsk;
+ int dfd;
+
+ /* use realpath to normalize the path */
+ if (realpath(argv[i], path) == 0) {
+ (void) zfs_error_fmt(hdl, EZFS_BADPATH,
+ dgettext(TEXT_DOMAIN, "cannot open '%s'"),
+ argv[i]);
+ goto error;
+ }
+ end = &path[strlen(path)];
+ *end++ = '/';
+ *end = 0;
+ pathleft = &path[sizeof (path)] - end;
- ret = get_configs(hdl, &pools);
+ if (strcmp(argv[i], default_dir) == 0) {
+ geom_find_import(hdl, &pools);
+ continue;
+ }
+
+ /*
+ * Using raw devices instead of block devices when we're
+ * reading the labels skips a bunch of slow operations during
+ * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
+ */
+ if (strcmp(path, "/dev/dsk/") == 0)
+ rdsk = "/dev/rdsk/";
+ else
+ rdsk = path;
+
+ if ((dirp = opendir(rdsk)) == NULL) {
+ zfs_error_aux(hdl, strerror(errno));
+ (void) zfs_error_fmt(hdl, EZFS_BADPATH,
+ dgettext(TEXT_DOMAIN, "cannot open '%s'"),
+ rdsk);
+ goto error;
+ }
+
+ /*
+ * This is not MT-safe, but we have no MT consumers of libzfs
+ */
+ while ((dp = readdir64(dirp)) != NULL) {
+ const char *name = dp->d_name;
+ if (name[0] == '.' &&
+ (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
+ continue;
+
+ (void) snprintf(path, sizeof (path), "%s/%s", rdsk,
+ dp->d_name);
+
+ if ((fd = open64(path, O_RDONLY)) < 0)
+ continue;
+
+ /*
+ * Ignore failed stats. We only want regular
+ * files, character devs and block devs.
+ */
+ if (fstat64(fd, &statbuf) != 0 ||
+ (!S_ISREG(statbuf.st_mode) &&
+ !S_ISCHR(statbuf.st_mode) &&
+ !S_ISBLK(statbuf.st_mode))) {
+ (void) close(fd);
+ continue;
+ }
+
+ if ((zpool_read_label(fd, &config)) != 0) {
+ (void) close(fd);
+ (void) no_memory(hdl);
+ goto error;
+ }
+
+ (void) close(fd);
+
+ if (config != NULL) {
+ boolean_t matched = B_TRUE;
+
+ if (poolname != NULL) {
+ char *pname;
+
+ matched = nvlist_lookup_string(config,
+ ZPOOL_CONFIG_POOL_NAME,
+ &pname) == 0 &&
+ strcmp(poolname, pname) == 0;
+ } else if (guid != 0) {
+ uint64_t this_guid;
+
+ matched = nvlist_lookup_uint64(config,
+ ZPOOL_CONFIG_POOL_GUID,
+ &this_guid) == 0 &&
+ guid == this_guid;
+ }
+ if (!matched) {
+ nvlist_free(config);
+ config = NULL;
+ continue;
+ }
+ /* use the non-raw path for the config */
+ (void) strlcpy(end, name, pathleft);
+ if (add_config(hdl, &pools, path, config) != 0)
+ goto error;
+ }
+ }
+
+ (void) closedir(dirp);
+ dirp = NULL;
+ }
+
+ ret = get_configs(hdl, &pools, active_ok);
error:
for (pe = pools.pools; pe != NULL; pe = penext) {
@@ -823,9 +1001,158 @@ error:
free(ne);
}
+ if (dirp)
+ (void) closedir(dirp);
+
return (ret);
}
+nvlist_t *
+zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
+{
+ return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, 0));
+}
+
+nvlist_t *
+zpool_find_import_byname(libzfs_handle_t *hdl, int argc, char **argv,
+ char *pool)
+{
+ return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, pool, 0));
+}
+
+nvlist_t *
+zpool_find_import_byguid(libzfs_handle_t *hdl, int argc, char **argv,
+ uint64_t guid)
+{
+ return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, guid));
+}
+
+nvlist_t *
+zpool_find_import_activeok(libzfs_handle_t *hdl, int argc, char **argv)
+{
+ return (zpool_find_import_impl(hdl, argc, argv, B_TRUE, NULL, 0));
+}
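A usage sketch of the new by-name wrapper (the pool name is an example; with argc == 0 the default device directory is scanned, which on FreeBSD goes through GEOM):

#include <libzfs.h>
#include <libnvpair.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	nvlist_t *pools;
	nvpair_t *elem = NULL;

	if (hdl == NULL)
		return (1);
	pools = zpool_find_import_byname(hdl, 0, NULL, "tank");
	while (pools != NULL &&
	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
		(void) printf("importable: %s\n", nvpair_name(elem));
	if (pools != NULL)
		nvlist_free(pools);
	libzfs_fini(hdl);
	return (0);
}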
+
+/*
+ * Given a cache file, return the contents as a list of importable pools.
+ * poolname or guid (but not both) are provided by the caller when trying
+ * to import a specific pool.
+ */
+nvlist_t *
+zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
+ char *poolname, uint64_t guid)
+{
+ char *buf;
+ int fd;
+ struct stat64 statbuf;
+ nvlist_t *raw, *src, *dst;
+ nvlist_t *pools;
+ nvpair_t *elem;
+ char *name;
+ uint64_t this_guid;
+ boolean_t active;
+
+ verify(poolname == NULL || guid == 0);
+
+ if ((fd = open(cachefile, O_RDONLY)) < 0) {
+ zfs_error_aux(hdl, "%s", strerror(errno));
+ (void) zfs_error(hdl, EZFS_BADCACHE,
+ dgettext(TEXT_DOMAIN, "failed to open cache file"));
+ return (NULL);
+ }
+
+ if (fstat64(fd, &statbuf) != 0) {
+ zfs_error_aux(hdl, "%s", strerror(errno));
+ (void) close(fd);
+ (void) zfs_error(hdl, EZFS_BADCACHE,
+ dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
+ return (NULL);
+ }
+
+ if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
+ (void) close(fd);
+ return (NULL);
+ }
+
+ if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
+ (void) close(fd);
+ free(buf);
+ (void) zfs_error(hdl, EZFS_BADCACHE,
+ dgettext(TEXT_DOMAIN,
+ "failed to read cache file contents"));
+ return (NULL);
+ }
+
+ (void) close(fd);
+
+ if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
+ free(buf);
+ (void) zfs_error(hdl, EZFS_BADCACHE,
+ dgettext(TEXT_DOMAIN,
+ "invalid or corrupt cache file contents"));
+ return (NULL);
+ }
+
+ free(buf);
+
+ /*
+ * Go through and get the current state of the pools and refresh their
+ * state.
+ */
+ if (nvlist_alloc(&pools, 0, 0) != 0) {
+ (void) no_memory(hdl);
+ nvlist_free(raw);
+ return (NULL);
+ }
+
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
+ verify(nvpair_value_nvlist(elem, &src) == 0);
+
+ verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
+ &name) == 0);
+ if (poolname != NULL && strcmp(poolname, name) != 0)
+ continue;
+
+ verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
+ &this_guid) == 0);
+ if (guid != 0) {
+ verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
+ &this_guid) == 0);
+ if (guid != this_guid)
+ continue;
+ }
+
+ if (pool_active(hdl, name, this_guid, &active) != 0) {
+ nvlist_free(raw);
+ nvlist_free(pools);
+ return (NULL);
+ }
+
+ if (active)
+ continue;
+
+ if ((dst = refresh_config(hdl, src)) == NULL) {
+ nvlist_free(raw);
+ nvlist_free(pools);
+ return (NULL);
+ }
+
+ if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
+ (void) no_memory(hdl);
+ nvlist_free(dst);
+ nvlist_free(raw);
+ nvlist_free(pools);
+ return (NULL);
+ }
+ nvlist_free(dst);
+ }
+
+ nvlist_free(raw);
+ return (pools);
+}
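A usage sketch of the cache-file path (the cache file location is an assumption; FreeBSD typically keeps it under /boot/zfs): unlike the directory scan above, this only refreshes configurations already recorded in the cache and skips pools that are currently active.

#include <libzfs.h>
#include <libnvpair.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	nvlist_t *pools;
	nvpair_t *elem = NULL;

	if (hdl == NULL)
		return (1);
	pools = zpool_find_import_cached(hdl, "/boot/zfs/zpool.cache",
	    NULL, 0);
	while (pools != NULL &&
	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
		(void) printf("importable pool from cache: %s\n",
		    nvpair_name(elem));
	if (pools != NULL)
		nvlist_free(pools);
	libzfs_fini(hdl);
	return (0);
}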
+
+
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
@@ -847,27 +1174,28 @@ find_guid(nvlist_t *nv, uint64_t guid)
return (B_FALSE);
}
-typedef struct spare_cbdata {
+typedef struct aux_cbdata {
+ const char *cb_type;
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
-} spare_cbdata_t;
+} aux_cbdata_t;
static int
-find_spare(zpool_handle_t *zhp, void *data)
+find_aux(zpool_handle_t *zhp, void *data)
{
- spare_cbdata_t *cbp = data;
- nvlist_t **spares;
- uint_t i, nspares;
+ aux_cbdata_t *cbp = data;
+ nvlist_t **list;
+ uint_t i, count;
uint64_t guid;
nvlist_t *nvroot;
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
- if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
- &spares, &nspares) == 0) {
- for (i = 0; i < nspares; i++) {
- verify(nvlist_lookup_uint64(spares[i],
+ if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
+ &list, &count) == 0) {
+ for (i = 0; i < count; i++) {
+ verify(nvlist_lookup_uint64(list[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
if (guid == cbp->cb_guid) {
cbp->cb_zhp = zhp;
@@ -896,7 +1224,7 @@ zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
zpool_handle_t *zhp;
nvlist_t *pool_config;
uint64_t stateval, isspare;
- spare_cbdata_t cb = { 0 };
+ aux_cbdata_t cb = { 0 };
boolean_t isactive;
*inuse = B_FALSE;
@@ -914,7 +1242,7 @@ zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&vdev_guid) == 0);
- if (stateval != POOL_STATE_SPARE) {
+ if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
@@ -993,7 +1321,24 @@ zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
*/
cb.cb_zhp = NULL;
cb.cb_guid = vdev_guid;
- if (zpool_iter(hdl, find_spare, &cb) == 1) {
+ cb.cb_type = ZPOOL_CONFIG_SPARES;
+ if (zpool_iter(hdl, find_aux, &cb) == 1) {
+ name = (char *)zpool_get_name(cb.cb_zhp);
+ ret = TRUE;
+ } else {
+ ret = FALSE;
+ }
+ break;
+
+ case POOL_STATE_L2CACHE:
+
+ /*
+ * Check if any pool is currently using this l2cache device.
+ */
+ cb.cb_zhp = NULL;
+ cb.cb_guid = vdev_guid;
+ cb.cb_type = ZPOOL_CONFIG_L2CACHE;
+ if (zpool_iter(hdl, find_aux, &cb) == 1) {
name = (char *)zpool_get_name(cb.cb_zhp);
ret = TRUE;
} else {
@@ -1008,6 +1353,8 @@ zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
if (ret) {
if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
+ if (cb.cb_zhp)
+ zpool_close(cb.cb_zhp);
nvlist_free(config);
return (-1);
}
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c
index b4bc945..84a8a57 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Routines to manage ZFS mounts. We separate all the nasty routines that have
* to deal with the OS. The following functions are the main entry points --
@@ -45,11 +43,17 @@
* zfs_unshare()
*
* zfs_is_shared_nfs()
- * zfs_share_nfs()
- * zfs_unshare_nfs()
- * zfs_unshareall_nfs()
+ * zfs_is_shared_smb()
* zfs_is_shared_iscsi()
+ * zfs_share_proto()
+ * zfs_shareall();
* zfs_share_iscsi()
+ * zfs_unshare_nfs()
+ * zfs_unshare_smb()
+ * zfs_unshareall_nfs()
+ * zfs_unshareall_smb()
+ * zfs_unshareall()
+ * zfs_unshareall_bypath()
* zfs_unshare_iscsi()
*
* The following functions are available for pool consumers, and will
@@ -78,9 +82,49 @@
#include "libzfs_impl.h"
+#include <libshare.h>
+
+#define MAXISALEN 257 /* based on sysinfo(2) man page */
+
+static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *);
+zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
+ zfs_share_proto_t);
+
static int (*iscsitgt_zfs_share)(const char *);
static int (*iscsitgt_zfs_unshare)(const char *);
static int (*iscsitgt_zfs_is_shared)(const char *);
+static int (*iscsitgt_svc_online)();
+
+/*
+ * The share protocols table must be in the same order as the zfs_share_proto_t
+ * enum in libzfs_impl.h
+ */
+typedef struct {
+ zfs_prop_t p_prop;
+ char *p_name;
+ int p_share_err;
+ int p_unshare_err;
+} proto_table_t;
+
+proto_table_t proto_table[PROTO_END] = {
+ {ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
+ {ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
+};
+
+zfs_share_proto_t nfs_only[] = {
+ PROTO_NFS,
+ PROTO_END
+};
+
+zfs_share_proto_t smb_only[] = {
+ PROTO_SMB,
+ PROTO_END
+};
+zfs_share_proto_t share_all_proto[] = {
+ PROTO_NFS,
+ PROTO_SMB,
+ PROTO_END
+};
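Every consumer of these tables walks a PROTO_END-terminated array and indexes proto_table with the enum value, as zfs_share_proto() and zfs_unshare_proto() do below. A minimal sketch of that idiom, assuming it lives in this file (the printing itself is illustrative only):

static void
dump_proto_errcodes(zfs_share_proto_t *protos)
{
	zfs_share_proto_t *curr;

	/* The list carries no count; PROTO_END terminates it. */
	for (curr = protos; *curr != PROTO_END; curr++)
		(void) printf("%s: share error %d, unshare error %d\n",
		    proto_table[*curr].p_name,
		    proto_table[*curr].p_share_err,
		    proto_table[*curr].p_unshare_err);
}

Called as dump_proto_errcodes(share_all_proto), this visits NFS and SMB in the order the table above defines.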
#pragma init(zfs_iscsi_init)
static void
@@ -95,37 +139,65 @@ zfs_iscsi_init(void)
(iscsitgt_zfs_unshare = (int (*)(const char *))dlsym(libiscsitgt,
"iscsitgt_zfs_unshare")) == NULL ||
(iscsitgt_zfs_is_shared = (int (*)(const char *))dlsym(libiscsitgt,
- "iscsitgt_zfs_is_shared")) == NULL) {
+ "iscsitgt_zfs_is_shared")) == NULL ||
+ (iscsitgt_svc_online = (int (*)(const char *))dlsym(libiscsitgt,
+ "iscsitgt_svc_online")) == NULL) {
iscsitgt_zfs_share = NULL;
iscsitgt_zfs_unshare = NULL;
iscsitgt_zfs_is_shared = NULL;
+ iscsitgt_svc_online = NULL;
}
}
/*
- * Search the sharetab for the given mountpoint, returning true if it is found.
+ * Search the sharetab for the given mountpoint and protocol, returning
+ * a zfs_share_type_t value.
*/
-static boolean_t
-is_shared(libzfs_handle_t *hdl, const char *mountpoint)
+static zfs_share_type_t
+is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
{
char buf[MAXPATHLEN], *tab;
+ char *ptr;
if (hdl->libzfs_sharetab == NULL)
- return (0);
+ return (SHARED_NOT_SHARED);
(void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
/* the mountpoint is the first entry on each line */
- if ((tab = strchr(buf, '\t')) != NULL) {
+ if ((tab = strchr(buf, '\t')) == NULL)
+ continue;
+
+ *tab = '\0';
+ if (strcmp(buf, mountpoint) == 0) {
+ /*
+ * the protocol field is the third field
+ * skip over second field
+ */
+ ptr = ++tab;
+ if ((tab = strchr(ptr, '\t')) == NULL)
+ continue;
+ ptr = ++tab;
+ if ((tab = strchr(ptr, '\t')) == NULL)
+ continue;
*tab = '\0';
- if (strcmp(buf, mountpoint) == 0)
- return (B_TRUE);
+ if (strcmp(ptr,
+ proto_table[proto].p_name) == 0) {
+ switch (proto) {
+ case PROTO_NFS:
+ return (SHARED_NFS);
+ case PROTO_SMB:
+ return (SHARED_SMB);
+ default:
+ return (0);
+ }
+ }
}
}
- return (B_FALSE);
+ return (SHARED_NOT_SHARED);
}
#if 0
@@ -198,10 +270,10 @@ zfs_is_mounted(zfs_handle_t *zhp, char **where)
*/
static boolean_t
zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
- zfs_source_t *source)
+ zprop_source_t *source)
{
char sourceloc[ZFS_MAXNAMELEN];
- zfs_source_t sourcetype;
+ zprop_source_t sourcetype;
if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
return (B_FALSE);
@@ -213,7 +285,7 @@ zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
return (B_FALSE);
- if (!zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT))
+ if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
@@ -281,11 +353,15 @@ zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
* from mount(), and they're well-understood. We pick a few
* common ones to improve upon.
*/
- if (errno == EBUSY)
+ if (errno == EBUSY) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mountpoint or dataset is busy"));
- else
+ } else if (errno == EPERM) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Insufficient privileges"));
+ } else {
zfs_error_aux(hdl, strerror(errno));
+ }
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
@@ -318,6 +394,7 @@ int
zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
struct mnttab search = { 0 }, entry;
+ char *mntpt = NULL;
/* check to see if need to unmount the filesystem */
search.mnt_special = zhp->zfs_name;
@@ -326,15 +403,29 @@ zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
getmntany(zhp->zfs_hdl->libzfs_mnttab, &entry, &search) == 0)) {
+ /*
+ * The mountpoint, if non-NULL, may itself have come from
+ * a getmnt/getmntany buffer; if it is NULL we take it from
+ * getmntany() here, and either buffer can be overwritten
+ * later, so we strdup() it to play it safe.
+ */
if (mountpoint == NULL)
- mountpoint = entry.mnt_mountp;
+ mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
+ else
+ mntpt = zfs_strdup(zhp->zfs_hdl, mountpoint);
/*
* Unshare and unmount the filesystem
*/
- if (zfs_unshare_nfs(zhp, mountpoint) != 0 ||
- unmount_one(zhp->zfs_hdl, mountpoint, flags) != 0)
+ if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0)
+ return (-1);
+
+ if (unmount_one(zhp->zfs_hdl, mntpt, flags) != 0) {
+ free(mntpt);
+ (void) zfs_shareall(zhp);
return (-1);
+ }
+ free(mntpt);
}
return (0);
@@ -351,7 +442,7 @@ zfs_unmountall(zfs_handle_t *zhp, int flags)
prop_changelist_t *clp;
int ret;
- clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, flags);
+ clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags);
if (clp == NULL)
return (-1);
@@ -364,10 +455,17 @@ zfs_unmountall(zfs_handle_t *zhp, int flags)
boolean_t
zfs_is_shared(zfs_handle_t *zhp)
{
+ zfs_share_type_t rc = 0;
+ zfs_share_proto_t *curr_proto;
+
if (ZFS_IS_VOLUME(zhp))
return (zfs_is_shared_iscsi(zhp));
- return (zfs_is_shared_nfs(zhp, NULL));
+ for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
+ curr_proto++)
+ rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
+
+ return (rc ? B_TRUE : B_FALSE);
}
int
@@ -376,7 +474,7 @@ zfs_share(zfs_handle_t *zhp)
if (ZFS_IS_VOLUME(zhp))
return (zfs_share_iscsi(zhp));
- return (zfs_share_nfs(zhp));
+ return (zfs_share_proto(zhp, share_all_proto));
}
int
@@ -385,139 +483,399 @@ zfs_unshare(zfs_handle_t *zhp)
if (ZFS_IS_VOLUME(zhp))
return (zfs_unshare_iscsi(zhp));
- return (zfs_unshare_nfs(zhp, NULL));
+ return (zfs_unshareall(zhp));
}
/*
* Check to see if the filesystem is currently shared.
*/
-boolean_t
-zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
+zfs_share_type_t
+zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
{
char *mountpoint;
+ zfs_share_type_t rc;
if (!zfs_is_mounted(zhp, &mountpoint))
- return (B_FALSE);
+ return (SHARED_NOT_SHARED);
- if (is_shared(zhp->zfs_hdl, mountpoint)) {
+ if (rc = is_shared(zhp->zfs_hdl, mountpoint, proto)) {
if (where != NULL)
*where = mountpoint;
else
free(mountpoint);
- return (B_TRUE);
+ return (rc);
} else {
free(mountpoint);
- return (B_FALSE);
+ return (SHARED_NOT_SHARED);
+ }
+}
+
+boolean_t
+zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
+{
+ return (zfs_is_shared_proto(zhp, where,
+ PROTO_NFS) != SHARED_NOT_SHARED);
+}
+
+boolean_t
+zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
+{
+ return (zfs_is_shared_proto(zhp, where,
+ PROTO_SMB) != SHARED_NOT_SHARED);
+}
+
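Callers normally use the boolean wrappers above rather than zfs_is_shared_proto() directly. A hedged usage fragment, where 'zhp' is an already opened zfs_handle_t:

	char *where = NULL;

	if (zfs_is_shared_nfs(zhp, &where)) {
		/* 'where' now holds the mountpoint and must be freed. */
		(void) printf("%s is NFS-shared at %s\n",
		    zfs_get_name(zhp), where);
		free(where);
	}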
+/*
+ * Make sure things will work if libshare isn't installed by using
+ * wrapper functions that check to see that the pointers to functions
+ * initialized in _zfs_init_libshare() are actually present.
+ */
+
+#if 0
+static sa_handle_t (*_sa_init)(int);
+static void (*_sa_fini)(sa_handle_t);
+static sa_share_t (*_sa_find_share)(sa_handle_t, char *);
+static int (*_sa_enable_share)(sa_share_t, char *);
+static int (*_sa_disable_share)(sa_share_t, char *);
+static char *(*_sa_errorstr)(int);
+static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *);
+static boolean_t (*_sa_needs_refresh)(sa_handle_t *);
+static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t);
+static int (*_sa_zfs_process_share)(sa_handle_t, sa_group_t, sa_share_t,
+ char *, char *, zprop_source_t, char *, char *, char *);
+static void (*_sa_update_sharetab_ts)(sa_handle_t);
+#endif
+
+/*
+ * _zfs_init_libshare()
+ *
+ * Find the libshare.so.1 entry points that we use here and save the
+ * values to be used later. This is triggered by the runtime loader.
+ * Make sure the correct ISA version is loaded.
+ */
+
+#pragma init(_zfs_init_libshare)
+static void
+_zfs_init_libshare(void)
+{
+#if 0
+ void *libshare;
+ char path[MAXPATHLEN];
+ char isa[MAXISALEN];
+
+#if defined(_LP64)
+ if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1)
+ isa[0] = '\0';
+#else
+ isa[0] = '\0';
+#endif
+ (void) snprintf(path, MAXPATHLEN,
+ "/usr/lib/%s/libshare.so.1", isa);
+
+ if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) {
+ _sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init");
+ _sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini");
+ _sa_find_share = (sa_share_t (*)(sa_handle_t, char *))
+ dlsym(libshare, "sa_find_share");
+ _sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
+ "sa_enable_share");
+ _sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
+ "sa_disable_share");
+ _sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr");
+ _sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *))
+ dlsym(libshare, "sa_parse_legacy_options");
+ _sa_needs_refresh = (boolean_t (*)(sa_handle_t *))
+ dlsym(libshare, "sa_needs_refresh");
+ _sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t))
+ dlsym(libshare, "sa_get_zfs_handle");
+ _sa_zfs_process_share = (int (*)(sa_handle_t, sa_group_t,
+ sa_share_t, char *, char *, zprop_source_t, char *,
+ char *, char *))dlsym(libshare, "sa_zfs_process_share");
+ _sa_update_sharetab_ts = (void (*)(sa_handle_t))
+ dlsym(libshare, "sa_update_sharetab_ts");
+ if (_sa_init == NULL || _sa_fini == NULL ||
+ _sa_find_share == NULL || _sa_enable_share == NULL ||
+ _sa_disable_share == NULL || _sa_errorstr == NULL ||
+ _sa_parse_legacy_options == NULL ||
+ _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL ||
+ _sa_zfs_process_share == NULL ||
+ _sa_update_sharetab_ts == NULL) {
+ _sa_init = NULL;
+ _sa_fini = NULL;
+ _sa_disable_share = NULL;
+ _sa_enable_share = NULL;
+ _sa_errorstr = NULL;
+ _sa_parse_legacy_options = NULL;
+ (void) dlclose(libshare);
+ _sa_needs_refresh = NULL;
+ _sa_get_zfs_handle = NULL;
+ _sa_zfs_process_share = NULL;
+ _sa_update_sharetab_ts = NULL;
+ }
}
+#endif
}
/*
- * Share the given filesystem according to the options in 'sharenfs'. We rely
- * on share(1M) to the dirty work for us.
+ * zfs_init_libshare(zhandle, service)
+ *
+ * Initialize the libshare API if it hasn't already been initialized.
+ * In all cases it returns 0 if it succeeded and an error if not. The
+ * service value is which part(s) of the API to initialize and is a
+ * direct map to the libshare sa_init(service) interface.
*/
int
-zfs_share_nfs(zfs_handle_t *zhp)
+zfs_init_libshare(libzfs_handle_t *zhandle, int service)
{
- char mountpoint[ZFS_MAXPROPLEN];
- char shareopts[ZFS_MAXPROPLEN];
- char buf[MAXPATHLEN];
- FILE *fp;
- libzfs_handle_t *hdl = zhp->zfs_hdl;
+ int ret = SA_OK;
- if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
- return (0);
+#if 0
+ if (_sa_init == NULL)
+ ret = SA_CONFIG_ERR;
- /*
- * Return success if there are no share options.
- */
- if (zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts, sizeof (shareopts),
- NULL, NULL, 0, B_FALSE) != 0 ||
- strcmp(shareopts, "off") == 0)
- return (0);
+ if (ret == SA_OK && zhandle->libzfs_shareflags & ZFSSHARE_MISS) {
+ /*
+ * We had a cache miss. Most likely it is a new ZFS
+ * dataset that was just created. To be sure, check the
+ * timestamps to see if a different process
+ * has updated any of the configuration. If there was
+ * some non-ZFS change, we need to re-initialize the
+ * internal cache.
+ */
+ zhandle->libzfs_shareflags &= ~ZFSSHARE_MISS;
+ if (_sa_needs_refresh != NULL &&
+ _sa_needs_refresh(zhandle->libzfs_sharehdl)) {
+ zfs_uninit_libshare(zhandle);
+ zhandle->libzfs_sharehdl = _sa_init(service);
+ }
+ }
- /*
- * If the 'zoned' property is set, then zfs_is_mountable() will have
- * already bailed out if we are in the global zone. But local
- * zones cannot be NFS servers, so we ignore it for local zones as well.
- */
- if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
- return (0);
+ if (ret == SA_OK && zhandle && zhandle->libzfs_sharehdl == NULL)
+ zhandle->libzfs_sharehdl = _sa_init(service);
-#ifdef __FreeBSD__
- {
- int error;
+ if (ret == SA_OK && zhandle->libzfs_sharehdl == NULL)
+ ret = SA_NO_MEMORY;
+#endif
- if (strcmp(shareopts, "on") == 0)
- error = fsshare(ZFS_EXPORTS_PATH, mountpoint, "");
- else
- error = fsshare(ZFS_EXPORTS_PATH, mountpoint, shareopts);
- if (error != 0) {
- zfs_error_aux(hdl, "%s", strerror(error));
- (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
- dgettext(TEXT_DOMAIN, "cannot share '%s'"),
- zfs_get_name(zhp));
- return (-1);
+ return (ret);
+}
+
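The share paths are expected to call this before touching any libshare state; on FreeBSD the body is compiled out, so the call degenerates to returning SA_OK. A hedged fragment, where 'hdl' is the libzfs_handle_t and SA_INIT_SHARE_API is assumed to be the libshare service flag used on Solaris:

	if (zfs_init_libshare(hdl, SA_INIT_SHARE_API) != SA_OK) {
		(void) zfs_error(hdl, EZFS_SHARENFSFAILED,
		    dgettext(TEXT_DOMAIN,
		    "internal error: failed to initialize share support"));
		return (-1);
	}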
+/*
+ * zfs_uninit_libshare(zhandle)
+ *
+ * Uninitialize the libshare API if it hasn't already been
+ * uninitialized. It is OK to call multiple times.
+ */
+void
+zfs_uninit_libshare(libzfs_handle_t *zhandle)
+{
+ if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) {
+#if 0
+ if (_sa_fini != NULL)
+ _sa_fini(zhandle->libzfs_sharehdl);
+#endif
+ zhandle->libzfs_sharehdl = NULL;
}
+}
+
+/*
+ * zfs_parse_options(options, proto)
+ *
+ * Call the legacy parse interface to get the protocol specific
+ * options using the NULL arg to indicate that this is a "parse" only.
+ */
+int
+zfs_parse_options(char *options, zfs_share_proto_t proto)
+{
+#if 0
+ if (_sa_parse_legacy_options != NULL) {
+ return (_sa_parse_legacy_options(NULL, options,
+ proto_table[proto].p_name));
}
+ return (SA_CONFIG_ERR);
#else
- /*
- * Invoke the share(1M) command. We always do this, even if it's
- * currently shared, as the options may have changed.
- */
- if (strcmp(shareopts, "on") == 0)
- (void) snprintf(buf, sizeof (buf), "/usr/sbin/share "
- "-F nfs \"%s\" 2>&1", mountpoint);
- else
- (void) snprintf(buf, sizeof (buf), "/usr/sbin/share "
- "-F nfs -o \"%s\" \"%s\" 2>&1", shareopts,
- mountpoint);
+ return (SA_OK);
+#endif
+}
- if ((fp = popen(buf, "r")) == NULL)
- return (zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
- dgettext(TEXT_DOMAIN, "cannot share '%s'"),
- zfs_get_name(zhp)));
+#if 0
+/*
+ * zfs_sa_find_share(handle, path)
+ *
+ * wrapper around sa_find_share to find a share path in the
+ * configuration.
+ */
+static sa_share_t
+zfs_sa_find_share(sa_handle_t handle, char *path)
+{
+ if (_sa_find_share != NULL)
+ return (_sa_find_share(handle, path));
+ return (NULL);
+}
- /*
- * share(1M) should only produce output if there is some kind
- * of error. All output begins with "share_nfs: ", so we trim
- * this off to get to the real error.
- */
- if (fgets(buf, sizeof (buf), fp) != NULL) {
- char *colon = strchr(buf, ':');
+/*
+ * zfs_sa_enable_share(share, proto)
+ *
+ * Wrapper for sa_enable_share which enables a share for a specified
+ * protocol.
+ */
+static int
+zfs_sa_enable_share(sa_share_t share, char *proto)
+{
+ if (_sa_enable_share != NULL)
+ return (_sa_enable_share(share, proto));
+ return (SA_CONFIG_ERR);
+}
- while (buf[strlen(buf) - 1] == '\n')
- buf[strlen(buf) - 1] = '\0';
+/*
+ * zfs_sa_disable_share(share, proto)
+ *
+ * Wrapper for sa_disable_share which disables a share for a specified
+ * protocol.
+ */
+static int
+zfs_sa_disable_share(sa_share_t share, char *proto)
+{
+ if (_sa_disable_share != NULL)
+ return (_sa_disable_share(share, proto));
+ return (SA_CONFIG_ERR);
+}
+#endif
- if (colon != NULL)
- zfs_error_aux(hdl, colon + 2);
+/*
+ * Share the given filesystem according to the options in the specified
+ * protocol specific properties (sharenfs, sharesmb). We rely
+ * on "libshare" to the dirty work for us.
+ */
+static int
+zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
+{
+ char mountpoint[ZFS_MAXPROPLEN];
+ char shareopts[ZFS_MAXPROPLEN];
+ char sourcestr[ZFS_MAXPROPLEN];
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+ zfs_share_proto_t *curr_proto;
+ zprop_source_t sourcetype;
+ int error, ret;
- (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
- dgettext(TEXT_DOMAIN, "cannot share '%s'"),
- zfs_get_name(zhp));
+ if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
+ return (0);
- verify(pclose(fp) != 0);
- return (-1);
- }
+ for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
+ /*
+ * Return success if there are no share options.
+ */
+ if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
+ shareopts, sizeof (shareopts), &sourcetype, sourcestr,
+ ZFS_MAXPROPLEN, B_FALSE) != 0 ||
+ strcmp(shareopts, "off") == 0)
+ continue;
- verify(pclose(fp) == 0);
-#endif
+ /*
+ * If the 'zoned' property is set, then zfs_is_mountable()
+ * will have already bailed out if we are in the global zone.
+ * But local zones cannot be NFS servers, so we ignore it for
+ * local zones as well.
+ */
+ if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
+ continue;
+ if (*curr_proto != PROTO_NFS) {
+ fprintf(stderr, "Unsupported share protocol: %d.\n",
+ *curr_proto);
+ continue;
+ }
+
+#if 0
+ share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint);
+ if (share == NULL) {
+ /*
+ * This may be a new file system that was just
+ * created so isn't in the internal cache
+ * (second time through). Rather than
+ * reloading the entire configuration, we can
+ * assume ZFS has done the checking and it is
+ * safe to add this to the internal
+ * configuration.
+ */
+ if (_sa_zfs_process_share(hdl->libzfs_sharehdl,
+ NULL, NULL, mountpoint,
+ proto_table[*curr_proto].p_name, sourcetype,
+ shareopts, sourcestr, zhp->zfs_name) != SA_OK) {
+ (void) zfs_error_fmt(hdl,
+ proto_table[*curr_proto].p_share_err,
+ dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+ zfs_get_name(zhp));
+ return (-1);
+ }
+ hdl->libzfs_shareflags |= ZFSSHARE_MISS;
+ share = zfs_sa_find_share(hdl->libzfs_sharehdl,
+ mountpoint);
+ }
+ if (share != NULL) {
+ int err;
+ err = zfs_sa_enable_share(share,
+ proto_table[*curr_proto].p_name);
+ if (err != SA_OK) {
+ (void) zfs_error_fmt(hdl,
+ proto_table[*curr_proto].p_share_err,
+ dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+ zfs_get_name(zhp));
+ return (-1);
+ }
+ } else
+#else
+ if (strcmp(shareopts, "on") == 0)
+ error = fsshare(ZFS_EXPORTS_PATH, mountpoint, "");
+ else
+ error = fsshare(ZFS_EXPORTS_PATH, mountpoint, shareopts);
+ if (error != 0)
+#endif
+ {
+ (void) zfs_error_fmt(hdl,
+ proto_table[*curr_proto].p_share_err,
+ dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+ zfs_get_name(zhp));
+ return (-1);
+ }
+ }
return (0);
}
+
+int
+zfs_share_nfs(zfs_handle_t *zhp)
+{
+ return (zfs_share_proto(zhp, nfs_only));
+}
+
+int
+zfs_share_smb(zfs_handle_t *zhp)
+{
+ return (zfs_share_proto(zhp, smb_only));
+}
+
+int
+zfs_shareall(zfs_handle_t *zhp)
+{
+ return (zfs_share_proto(zhp, share_all_proto));
+}
+
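With the protocol table in place, the public entry points reduce to one-line wrappers around zfs_share_proto(). A hedged consumer-side sketch; 'tank/home' is a hypothetical dataset and error reporting uses the standard libzfs error string:

	libzfs_handle_t *hdl = libzfs_init();
	zfs_handle_t *zhp;

	if (hdl != NULL &&
	    (zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM)) != NULL) {
		if (zfs_shareall(zhp) != 0)
			(void) fprintf(stderr, "share failed: %s\n",
			    libzfs_error_description(hdl));
		zfs_close(zhp);
	}
	if (hdl != NULL)
		libzfs_fini(hdl);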
/*
* Unshare a filesystem by mountpoint.
*/
static int
-unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint)
+unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
+ zfs_share_proto_t proto)
{
char buf[MAXPATHLEN];
FILE *fp;
-
-#ifdef __FreeBSD__
- {
int error;
+ if (proto != PROTO_NFS) {
+ fprintf(stderr, "No SMB support in FreeBSD yet.\n");
+ return (EOPNOTSUPP);
+ }
+
error = fsunshare(ZFS_EXPORTS_PATH, mountpoint);
if (error != 0) {
zfs_error_aux(hdl, "%s", strerror(error));
@@ -525,40 +883,6 @@ unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint)
dgettext(TEXT_DOMAIN,
"cannot unshare '%s'"), name));
}
- }
-#else
- (void) snprintf(buf, sizeof (buf),
- "/usr/sbin/unshare \"%s\" 2>&1",
- mountpoint);
-
- if ((fp = popen(buf, "r")) == NULL)
- return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
- dgettext(TEXT_DOMAIN,
- "cannot unshare '%s'"), name));
-
- /*
- * unshare(1M) should only produce output if there is
- * some kind of error. All output begins with "unshare
- * nfs: ", so we trim this off to get to the real error.
- */
- if (fgets(buf, sizeof (buf), fp) != NULL) {
- char *colon = strchr(buf, ':');
-
- while (buf[strlen(buf) - 1] == '\n')
- buf[strlen(buf) - 1] = '\0';
-
- if (colon != NULL)
- zfs_error_aux(hdl, colon + 2);
-
- verify(pclose(fp) != 0);
-
- return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
- dgettext(TEXT_DOMAIN,
- "cannot unshare '%s'"), name));
- }
-
- verify(pclose(fp) == 0);
-#endif
return (0);
}
@@ -567,47 +891,99 @@ unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint)
* Unshare the given filesystem.
*/
int
-zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
+zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
+ zfs_share_proto_t *proto)
{
struct mnttab search = { 0 }, entry;
+ char *mntpt = NULL;
/* check to see if need to unmount the filesystem */
search.mnt_special = (char *)zfs_get_name(zhp);
search.mnt_fstype = MNTTYPE_ZFS;
rewind(zhp->zfs_hdl->libzfs_mnttab);
+ if (mountpoint != NULL)
+ mntpt = zfs_strdup(zhp->zfs_hdl, mountpoint);
+
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
getmntany(zhp->zfs_hdl->libzfs_mnttab, &entry, &search) == 0)) {
+ zfs_share_proto_t *curr_proto;
if (mountpoint == NULL)
- mountpoint = entry.mnt_mountp;
+ mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
- if (is_shared(zhp->zfs_hdl, mountpoint) &&
- unshare_one(zhp->zfs_hdl, zhp->zfs_name, mountpoint) != 0)
- return (-1);
+ for (curr_proto = proto; *curr_proto != PROTO_END;
+ curr_proto++) {
+
+ if (is_shared(zhp->zfs_hdl, mntpt, *curr_proto) &&
+ unshare_one(zhp->zfs_hdl, zhp->zfs_name,
+ mntpt, *curr_proto) != 0) {
+ if (mntpt != NULL)
+ free(mntpt);
+ return (-1);
+ }
+ }
}
+ if (mntpt != NULL)
+ free(mntpt);
return (0);
}
+int
+zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
+{
+ return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
+}
+
+int
+zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
+{
+ return (zfs_unshare_proto(zhp, mountpoint, smb_only));
+}
+
/*
- * Same as zfs_unmountall(), but for NFS unshares.
+ * Same as zfs_unmountall(), but for NFS and SMB unshares.
*/
int
-zfs_unshareall_nfs(zfs_handle_t *zhp)
+zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
{
prop_changelist_t *clp;
int ret;
- clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0);
+ clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
if (clp == NULL)
return (-1);
- ret = changelist_unshare(clp);
+ ret = changelist_unshare(clp, proto);
changelist_free(clp);
return (ret);
}
+int
+zfs_unshareall_nfs(zfs_handle_t *zhp)
+{
+ return (zfs_unshareall_proto(zhp, nfs_only));
+}
+
+int
+zfs_unshareall_smb(zfs_handle_t *zhp)
+{
+ return (zfs_unshareall_proto(zhp, smb_only));
+}
+
+int
+zfs_unshareall(zfs_handle_t *zhp)
+{
+ return (zfs_unshareall_proto(zhp, share_all_proto));
+}
+
+int
+zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
+{
+ return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
+}
+
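The *_bypath variant is for callers that already hold the mountpoint string and want every protocol dropped at once. A hedged one-liner; the path is purely illustrative:

	/* Drop any NFS/SMB share rooted at this (hypothetical) path. */
	if (zfs_unshareall_bypath(zhp, "/export/home") != 0)
		return (-1);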
/*
* Remove the mountpoint associated with the current dataset, if necessary.
* We only remove the underlying directory if:
@@ -623,14 +999,14 @@ void
remove_mountpoint(zfs_handle_t *zhp)
{
char mountpoint[ZFS_MAXPROPLEN];
- zfs_source_t source;
+ zprop_source_t source;
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
&source))
return;
- if (source == ZFS_SRC_DEFAULT ||
- source == ZFS_SRC_INHERITED) {
+ if (source == ZPROP_SRC_DEFAULT ||
+ source == ZPROP_SRC_INHERITED) {
/*
* Try to remove the directory, silently ignoring any errors.
* The filesystem may have since been removed or moved around,
@@ -644,8 +1020,15 @@ remove_mountpoint(zfs_handle_t *zhp)
boolean_t
zfs_is_shared_iscsi(zfs_handle_t *zhp)
{
- return (iscsitgt_zfs_is_shared != NULL &&
- iscsitgt_zfs_is_shared(zhp->zfs_name) != 0);
+
+ /*
+ * If the iscsi daemon isn't running then we aren't shared
+ */
+ if (iscsitgt_svc_online && iscsitgt_svc_online() == 1)
+ return (B_FALSE);
+ else
+ return (iscsitgt_zfs_is_shared != NULL &&
+ iscsitgt_zfs_is_shared(zhp->zfs_name) != 0);
}
int
@@ -665,9 +1048,20 @@ zfs_share_iscsi(zfs_handle_t *zhp)
/* We don't support iSCSI on FreeBSD yet. */
#ifdef TODO
- if (iscsitgt_zfs_share == NULL || iscsitgt_zfs_share(dataset) != 0)
- return (zfs_error_fmt(hdl, EZFS_SHAREISCSIFAILED,
+ if (iscsitgt_zfs_share == NULL || iscsitgt_zfs_share(dataset) != 0) {
+ int error = EZFS_SHAREISCSIFAILED;
+
+ /*
+ * If the service isn't available and EPERM was
+ * returned then use a special error.
+ */
+ if (iscsitgt_svc_online && errno == EPERM &&
+ (iscsitgt_svc_online() != 0))
+ error = EZFS_ISCSISVCUNAVAIL;
+
+ return (zfs_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN, "cannot share '%s'"), dataset));
+ }
#endif
return (0);
@@ -684,7 +1078,7 @@ zfs_unshare_iscsi(zfs_handle_t *zhp)
/*
* Return if the volume is not shared
*/
- if (!zfs_is_shared_iscsi(zhp))
+ if (zfs_is_shared_iscsi(zhp) != SHARED_ISCSI)
return (0);
/*
@@ -692,9 +1086,13 @@ zfs_unshare_iscsi(zfs_handle_t *zhp)
* we should return success in that case.
*/
if (iscsitgt_zfs_unshare == NULL ||
- (iscsitgt_zfs_unshare(dataset) != 0 && errno != ENODEV))
+ (iscsitgt_zfs_unshare(dataset) != 0 && errno != ENODEV)) {
+ if (errno == EPERM)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Insufficient privileges to unshare iscsi"));
return (zfs_error_fmt(hdl, EZFS_UNSHAREISCSIFAILED,
dgettext(TEXT_DOMAIN, "cannot unshare '%s'"), dataset));
+ }
#endif
return (0);
@@ -716,6 +1114,11 @@ mount_cb(zfs_handle_t *zhp, void *data)
return (0);
}
+ if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
+ zfs_close(zhp);
+ return (0);
+ }
+
if (cbp->cb_alloc == cbp->cb_used) {
void *ptr;
@@ -730,7 +1133,7 @@ mount_cb(zfs_handle_t *zhp, void *data)
cbp->cb_datasets[cbp->cb_used++] = zhp;
- return (zfs_iter_children(zhp, mount_cb, cbp));
+ return (zfs_iter_filesystems(zhp, mount_cb, cbp));
}
static int
@@ -776,21 +1179,22 @@ zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
libzfs_handle_t *hdl = zhp->zpool_hdl;
zfs_handle_t *zfsp;
int i, ret = -1;
+ int *good;
/*
- * Gather all datasets within the pool.
+ * Gather all non-snap datasets within the pool.
*/
if ((cb.cb_datasets = zfs_alloc(hdl, 4 * sizeof (void *))) == NULL)
return (-1);
cb.cb_alloc = 4;
- if ((zfsp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_ANY)) == NULL)
+ if ((zfsp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_DATASET)) == NULL)
goto out;
cb.cb_datasets[0] = zfsp;
cb.cb_used = 1;
- if (zfs_iter_children(zfsp, mount_cb, &cb) != 0)
+ if (zfs_iter_filesystems(zfsp, mount_cb, &cb) != 0)
goto out;
/*
@@ -799,15 +1203,32 @@ zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
qsort(cb.cb_datasets, cb.cb_used, sizeof (void *), dataset_cmp);
/*
- * And mount all the datasets.
+ * And mount all the datasets, keeping track of which ones
+ * succeeded or failed. By using zfs_alloc(), the good pointer
+ * will always be non-NULL.
*/
+ good = zfs_alloc(zhp->zpool_hdl, cb.cb_used * sizeof (int));
ret = 0;
for (i = 0; i < cb.cb_used; i++) {
- if (zfs_mount(cb.cb_datasets[i], mntopts, flags) != 0 ||
- zfs_share(cb.cb_datasets[i]) != 0)
+ if (zfs_mount(cb.cb_datasets[i], mntopts, flags) != 0)
+ ret = -1;
+ else
+ good[i] = 1;
+ }
+
+ /*
+ * Then share all the ones that need to be shared. This needs
+ * to be a separate pass in order to avoid excessive reloading
+ * of the configuration. The 'good' array should never be NULL since
+ * zfs_alloc is supposed to exit if memory isn't available.
+ */
+ for (i = 0; i < cb.cb_used; i++) {
+ if (good[i] && zfs_share(cb.cb_datasets[i]) != 0)
ret = -1;
}
+ free(good);
+
out:
for (i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_datasets[i]);
@@ -830,7 +1251,8 @@ zvol_cb(const char *dataset, void *data)
(zhp = zfs_open(hdl, dataset, ZFS_TYPE_VOLUME)) == NULL)
return (0);
- (void) zfs_unshare_iscsi(zhp);
+ if (zfs_unshare_iscsi(zhp) != 0)
+ return (-1);
zfs_close(zhp);
@@ -953,9 +1375,14 @@ zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
* Walk through and first unshare everything.
*/
for (i = 0; i < used; i++) {
- if (is_shared(hdl, mountpoints[i]) &&
- unshare_one(hdl, mountpoints[i], mountpoints[i]) != 0)
- goto out;
+ zfs_share_proto_t *curr_proto;
+ for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
+ curr_proto++) {
+ if (is_shared(hdl, mountpoints[i], *curr_proto) &&
+ unshare_one(hdl, mountpoints[i],
+ mountpoints[i], *curr_proto) != 0)
+ goto out;
+ }
}
/*
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
index 8580837..c3cb547 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
@@ -39,6 +37,7 @@
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
+#include <zone.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>
#include <strings.h>
@@ -48,11 +47,554 @@
#include "zfs_prop.h"
#include "libzfs_impl.h"
+static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
+
+/*
+ * ====================================================================
+ * zpool property functions
+ * ====================================================================
+ */
+
+static int
+zpool_get_all_props(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+ return (-1);
+
+ while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
+ if (errno == ENOMEM) {
+ if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ } else {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ }
+
+ if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+
+ zcmd_free_nvlists(&zc);
+
+ return (0);
+}
+
+static int
+zpool_props_refresh(zpool_handle_t *zhp)
+{
+ nvlist_t *old_props;
+
+ old_props = zhp->zpool_props;
+
+ if (zpool_get_all_props(zhp) != 0)
+ return (-1);
+
+ nvlist_free(old_props);
+ return (0);
+}
+
+static char *
+zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
+ zprop_source_t *src)
+{
+ nvlist_t *nv, *nvl;
+ uint64_t ival;
+ char *value;
+ zprop_source_t source;
+
+ nvl = zhp->zpool_props;
+ if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
+ verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
+ source = ival;
+ verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
+ } else {
+ source = ZPROP_SRC_DEFAULT;
+ if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
+ value = "-";
+ }
+
+ if (src)
+ *src = source;
+
+ return (value);
+}
+
+uint64_t
+zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
+{
+ nvlist_t *nv, *nvl;
+ uint64_t value;
+ zprop_source_t source;
+
+ if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
+ /*
+ * zpool_get_all_props() has most likely failed because
+ * the pool is faulted, but if all we need is the top level
+ * vdev's guid then get it from the zhp config nvlist.
+ */
+ if ((prop == ZPOOL_PROP_GUID) &&
+ (nvlist_lookup_nvlist(zhp->zpool_config,
+ ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
+ (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
+ == 0)) {
+ return (value);
+ }
+ return (zpool_prop_default_numeric(prop));
+ }
+
+ nvl = zhp->zpool_props;
+ if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
+ verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
+ source = value;
+ verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
+ } else {
+ source = ZPROP_SRC_DEFAULT;
+ value = zpool_prop_default_numeric(prop);
+ }
+
+ if (src)
+ *src = source;
+
+ return (value);
+}
+
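zpool_get_prop_int() is how the rest of libzfs asks version-gating questions. A short sketch mirroring the checks in zpool_add() further down; 'hdl' and 'msg' are assumed to be in scope as in those callers:

	uint64_t version;

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (version < SPA_VERSION_L2CACHE) {
		/* Pool must be upgraded before cache vdevs are allowed. */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}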
+/*
+ * Map VDEV STATE to printed strings.
+ */
+char *
+zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
+{
+ switch (state) {
+ case VDEV_STATE_CLOSED:
+ case VDEV_STATE_OFFLINE:
+ return (gettext("OFFLINE"));
+ case VDEV_STATE_REMOVED:
+ return (gettext("REMOVED"));
+ case VDEV_STATE_CANT_OPEN:
+ if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
+ return (gettext("FAULTED"));
+ else
+ return (gettext("UNAVAIL"));
+ case VDEV_STATE_FAULTED:
+ return (gettext("FAULTED"));
+ case VDEV_STATE_DEGRADED:
+ return (gettext("DEGRADED"));
+ case VDEV_STATE_HEALTHY:
+ return (gettext("ONLINE"));
+ }
+
+ return (gettext("UNKNOWN"));
+}
+
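The state/aux pair normally comes from the vdev stats array in the pool config, exactly as the ZPOOL_PROP_HEALTH case of zpool_get_prop() below does. A hedged fragment for an open zpool_handle_t 'zhp':

	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	(void) printf("%s: %s\n", zpool_get_name(zhp),
	    zpool_state_to_name(vs->vs_state, vs->vs_aux));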
+/*
+ * Get a zpool property value for 'prop' and return the value in
+ * a pre-allocated buffer.
+ */
+int
+zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
+ zprop_source_t *srctype)
+{
+ uint64_t intval;
+ const char *strval;
+ zprop_source_t src = ZPROP_SRC_NONE;
+ nvlist_t *nvroot;
+ vdev_stat_t *vs;
+ uint_t vsc;
+
+ if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
+ if (prop == ZPOOL_PROP_NAME)
+ (void) strlcpy(buf, zpool_get_name(zhp), len);
+ else if (prop == ZPOOL_PROP_HEALTH)
+ (void) strlcpy(buf, "FAULTED", len);
+ else
+ (void) strlcpy(buf, "-", len);
+ return (0);
+ }
+
+ if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
+ prop != ZPOOL_PROP_NAME)
+ return (-1);
+
+ switch (zpool_prop_get_type(prop)) {
+ case PROP_TYPE_STRING:
+ (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
+ len);
+ break;
+
+ case PROP_TYPE_NUMBER:
+ intval = zpool_get_prop_int(zhp, prop, &src);
+
+ switch (prop) {
+ case ZPOOL_PROP_SIZE:
+ case ZPOOL_PROP_USED:
+ case ZPOOL_PROP_AVAILABLE:
+ (void) zfs_nicenum(intval, buf, len);
+ break;
+
+ case ZPOOL_PROP_CAPACITY:
+ (void) snprintf(buf, len, "%llu%%",
+ (u_longlong_t)intval);
+ break;
+
+ case ZPOOL_PROP_HEALTH:
+ verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
+ ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+ verify(nvlist_lookup_uint64_array(nvroot,
+ ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
+
+ (void) strlcpy(buf, zpool_state_to_name(intval,
+ vs->vs_aux), len);
+ break;
+ default:
+ (void) snprintf(buf, len, "%llu", intval);
+ }
+ break;
+
+ case PROP_TYPE_INDEX:
+ intval = zpool_get_prop_int(zhp, prop, &src);
+ if (zpool_prop_index_to_string(prop, intval, &strval)
+ != 0)
+ return (-1);
+ (void) strlcpy(buf, strval, len);
+ break;
+
+ default:
+ abort();
+ }
+
+ if (srctype)
+ *srctype = src;
+
+ return (0);
+}
+
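A usage fragment for the buffer-based getter; ZPOOL_MAXPROPLEN comes from libzfs.h and the output formatting is illustrative:

	char value[ZPOOL_MAXPROPLEN];
	zprop_source_t src;

	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, value, sizeof (value),
	    &src) == 0)
		(void) printf("%s\thealth: %s\n", zpool_get_name(zhp), value);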
+/*
+ * Check that the bootfs name belongs to the pool it is being set on.
+ * Assuming bootfs is a valid dataset name.
+ */
+static boolean_t
+bootfs_name_valid(const char *pool, char *bootfs)
+{
+ int len = strlen(pool);
+
+ if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
+ return (B_FALSE);
+
+ if (strncmp(pool, bootfs, len) == 0 &&
+ (bootfs[len] == '/' || bootfs[len] == '\0'))
+ return (B_TRUE);
+
+ return (B_FALSE);
+}
+
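In other words, for a pool named 'tank' both 'tank' itself and any dataset below it qualify, while a name that merely shares the prefix does not. A hedged illustration (bootfs_name_valid() is static, so this only makes sense as an in-file check; the names are hypothetical):

	char be[] = "tank/ROOT/default";
	char alien[] = "tanker/ROOT";

	assert(bootfs_name_valid("tank", be) == B_TRUE);
	assert(bootfs_name_valid("tank", alien) == B_FALSE);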
+#if defined(sun)
+/*
+ * Inspect the configuration to determine if any of the devices contain
+ * an EFI label.
+ */
+static boolean_t
+pool_uses_efi(nvlist_t *config)
+{
+ nvlist_t **child;
+ uint_t c, children;
+
+ if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) != 0)
+ return (read_efi_label(config, NULL) >= 0);
+
+ for (c = 0; c < children; c++) {
+ if (pool_uses_efi(child[c]))
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+#endif
+
+/*
+ * Given an nvlist of zpool properties to be set, validate that they are
+ * correct, and parse any numeric properties (index, boolean, etc) if they are
+ * specified as strings.
+ */
+static nvlist_t *
+zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
+ nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
+{
+ nvpair_t *elem;
+ nvlist_t *retprops;
+ zpool_prop_t prop;
+ char *strval;
+ uint64_t intval;
+ char *slash;
+ struct stat64 statbuf;
+ zpool_handle_t *zhp;
+ nvlist_t *nvroot;
+
+ if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
+ (void) no_memory(hdl);
+ return (NULL);
+ }
+
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
+ const char *propname = nvpair_name(elem);
+
+ /*
+ * Make sure this property is valid and applies to this type.
+ */
+ if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid property '%s'"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ if (zpool_prop_readonly(prop)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
+ "is readonly"), propname);
+ (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
+ goto error;
+ }
+
+ if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
+ &strval, &intval, errbuf) != 0)
+ goto error;
+
+ /*
+ * Perform additional checking for specific properties.
+ */
+ switch (prop) {
+ case ZPOOL_PROP_VERSION:
+ if (intval < version || intval > SPA_VERSION) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' number %d is invalid."),
+ propname, intval);
+ (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
+ goto error;
+ }
+ break;
+
+ case ZPOOL_PROP_BOOTFS:
+ if (create_or_import) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' cannot be set at creation "
+ "or import time"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ if (version < SPA_VERSION_BOOTFS) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "pool must be upgraded to support "
+ "'%s' property"), propname);
+ (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
+ goto error;
+ }
+
+ /*
+ * bootfs property value has to be a dataset name and
+ * the dataset has to be in the same pool that it is set on.
+ */
+ if (strval[0] != '\0' && !bootfs_name_valid(poolname,
+ strval)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
+ "is an invalid name"), strval);
+ (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
+ goto error;
+ }
+
+ if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "could not open pool '%s'"), poolname);
+ (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
+ goto error;
+ }
+ verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
+ ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+
+#if defined(sun)
+ /*
+ * bootfs property cannot be set on a disk which has
+ * been EFI labeled.
+ */
+ if (pool_uses_efi(nvroot)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' not supported on "
+ "EFI labeled devices"), propname);
+ (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
+ zpool_close(zhp);
+ goto error;
+ }
+#endif
+ zpool_close(zhp);
+ break;
+
+ case ZPOOL_PROP_ALTROOT:
+ if (!create_or_import) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' can only be set during pool "
+ "creation or import"), propname);
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ goto error;
+ }
+
+ if (strval[0] != '/') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "bad alternate root '%s'"), strval);
+ (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+ goto error;
+ }
+ break;
+
+ case ZPOOL_PROP_CACHEFILE:
+ if (strval[0] == '\0')
+ break;
+
+ if (strcmp(strval, "none") == 0)
+ break;
+
+ if (strval[0] != '/') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "property '%s' must be empty, an "
+ "absolute path, or 'none'"), propname);
+ (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+ goto error;
+ }
+
+ slash = strrchr(strval, '/');
+
+ if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
+ strcmp(slash, "/..") == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' is not a valid file"), strval);
+ (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+ goto error;
+ }
+
+ *slash = '\0';
+
+ if (strval[0] != '\0' &&
+ (stat64(strval, &statbuf) != 0 ||
+ !S_ISDIR(statbuf.st_mode))) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' is not a valid directory"),
+ strval);
+ (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+ goto error;
+ }
+
+ *slash = '/';
+ break;
+ }
+ }
+
+ return (retprops);
+error:
+ nvlist_free(retprops);
+ return (NULL);
+}
+
+/*
+ * Set zpool property : propname=propval.
+ */
+int
+zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret = -1;
+ char errbuf[1024];
+ nvlist_t *nvl = NULL;
+ nvlist_t *realprops;
+ uint64_t version;
+
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
+ zhp->zpool_name);
+
+ if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
+ return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
+
+ if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
+ return (no_memory(zhp->zpool_hdl));
+
+ if (nvlist_add_string(nvl, propname, propval) != 0) {
+ nvlist_free(nvl);
+ return (no_memory(zhp->zpool_hdl));
+ }
+
+ version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
+ if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
+ zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
+ nvlist_free(nvl);
+ return (-1);
+ }
+
+ nvlist_free(nvl);
+ nvl = realprops;
+
+ /*
+ * Execute the corresponding ioctl() to set this property.
+ */
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
+ nvlist_free(nvl);
+ return (-1);
+ }
+
+ ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
+
+ zcmd_free_nvlists(&zc);
+ nvlist_free(nvl);
+
+ if (ret)
+ (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
+ else
+ (void) zpool_props_refresh(zhp);
+
+ return (ret);
+}
+
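Setting a pool property from a consumer is then a single call. A hedged sketch; the pool name and the 'autoreplace' property are illustrative, and errors come back through the usual libzfs error buffer:

	libzfs_handle_t *hdl = libzfs_init();
	zpool_handle_t *zhp;

	if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
		if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
			(void) fprintf(stderr, "%s\n",
			    libzfs_error_description(hdl));
		zpool_close(zhp);
	}
	if (hdl != NULL)
		libzfs_fini(hdl);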
+int
+zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
+{
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+ zprop_list_t *entry;
+ char buf[ZFS_MAXPROPLEN];
+
+ if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
+ return (-1);
+
+ for (entry = *plp; entry != NULL; entry = entry->pl_next) {
+
+ if (entry->pl_fixed)
+ continue;
+
+ if (entry->pl_prop != ZPROP_INVAL &&
+ zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
+ NULL) == 0) {
+ if (strlen(buf) > entry->pl_width)
+ entry->pl_width = strlen(buf);
+ }
+ }
+
+ return (0);
+}
+
+
/*
* Validate the given pool name, optionally putting an extended error message in
* 'buf'.
*/
-static boolean_t
+boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
namecheck_err_t why;
@@ -70,9 +612,11 @@ zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
- strncmp(pool, "spare", 5) == 0)) {
- zfs_error_aux(hdl,
- dgettext(TEXT_DOMAIN, "name is reserved"));
+ strncmp(pool, "spare", 5) == 0 ||
+ strcmp(pool, "log") == 0)) {
+ if (hdl != NULL)
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "name is reserved"));
return (B_FALSE);
}
@@ -134,39 +678,6 @@ zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
return (B_TRUE);
}
-static int
-zpool_get_all_props(zpool_handle_t *zhp)
-{
- zfs_cmd_t zc = { 0 };
- libzfs_handle_t *hdl = zhp->zpool_hdl;
-
- (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
-
- if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
- return (-1);
-
- while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
- if (errno == ENOMEM) {
- if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
- } else {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
- }
-
- if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
- zcmd_free_nvlists(&zc);
- return (-1);
- }
-
- zcmd_free_nvlists(&zc);
-
- return (0);
-}
-
/*
* Open a handle to the given pool, even if the pool is currently in the FAULTED
* state.
@@ -199,11 +710,9 @@ zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
}
if (missing) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "no such pool"));
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
(void) zfs_error_fmt(hdl, EZFS_NOENT,
- dgettext(TEXT_DOMAIN, "cannot open '%s'"),
- pool);
+ dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
zpool_close(zhp);
return (NULL);
}
@@ -288,86 +797,6 @@ zpool_get_name(zpool_handle_t *zhp)
return (zhp->zpool_name);
}
-/*
- * Return the GUID of the pool.
- */
-uint64_t
-zpool_get_guid(zpool_handle_t *zhp)
-{
- uint64_t guid;
-
- verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
- &guid) == 0);
- return (guid);
-}
-
-/*
- * Return the version of the pool.
- */
-uint64_t
-zpool_get_version(zpool_handle_t *zhp)
-{
- uint64_t version;
-
- verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
- &version) == 0);
-
- return (version);
-}
-
-/*
- * Return the amount of space currently consumed by the pool.
- */
-uint64_t
-zpool_get_space_used(zpool_handle_t *zhp)
-{
- nvlist_t *nvroot;
- vdev_stat_t *vs;
- uint_t vsc;
-
- verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
- verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
- (uint64_t **)&vs, &vsc) == 0);
-
- return (vs->vs_alloc);
-}
-
-/*
- * Return the total space in the pool.
- */
-uint64_t
-zpool_get_space_total(zpool_handle_t *zhp)
-{
- nvlist_t *nvroot;
- vdev_stat_t *vs;
- uint_t vsc;
-
- verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
- verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
- (uint64_t **)&vs, &vsc) == 0);
-
- return (vs->vs_space);
-}
-
-/*
- * Return the alternate root for this pool, if any.
- */
-int
-zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
-{
- zfs_cmd_t zc = { 0 };
-
- (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
- zc.zc_value[0] == '\0')
- return (-1);
-
- (void) strlcpy(buf, zc.zc_value, buflen);
-
- return (0);
-}
/*
* Return the state of the pool (ACTIVE or UNAVAILABLE)
@@ -385,10 +814,14 @@ zpool_get_state(zpool_handle_t *zhp)
*/
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
- const char *altroot)
+ nvlist_t *props, nvlist_t *fsprops)
{
zfs_cmd_t zc = { 0 };
+ nvlist_t *zc_fsprops = NULL;
+ nvlist_t *zc_props = NULL;
char msg[1024];
+ char *altroot;
+ int ret = -1;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
@@ -396,20 +829,48 @@ zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
if (!zpool_name_valid(hdl, B_FALSE, pool))
return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
- if (altroot != NULL && altroot[0] != '/')
- return (zfs_error_fmt(hdl, EZFS_BADPATH,
- dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
-
- if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
+ if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
+ if (props) {
+ if ((zc_props = zpool_valid_proplist(hdl, pool, props,
+ SPA_VERSION_1, B_TRUE, msg)) == NULL) {
+ goto create_failed;
+ }
+ }
+
+ if (fsprops) {
+ uint64_t zoned;
+ char *zonestr;
+
+ zoned = ((nvlist_lookup_string(fsprops,
+ zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
+ strcmp(zonestr, "on") == 0);
+
+ if ((zc_fsprops = zfs_valid_proplist(hdl,
+ ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
+ goto create_failed;
+ }
+ if (!zc_props &&
+ (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
+ goto create_failed;
+ }
+ if (nvlist_add_nvlist(zc_props,
+ ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
+ goto create_failed;
+ }
+ }
+
+ if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
+ goto create_failed;
+
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
- if (altroot != NULL)
- (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
+ if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
zcmd_free_nvlists(&zc);
+ nvlist_free(zc_props);
+ nvlist_free(zc_fsprops);
switch (errno) {
case EBUSY:
@@ -446,28 +907,36 @@ zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
"one or more devices is out of space"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
+ case ENOTBLK:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "cache device must be a disk or disk slice"));
+ return (zfs_error(hdl, EZFS_BADDEV, msg));
+
default:
return (zpool_standard_error(hdl, errno, msg));
}
}
- zcmd_free_nvlists(&zc);
-
/*
* If this is an alternate root pool, then we automatically set the
* mountpoint of the root dataset to be '/'.
*/
- if (altroot != NULL) {
+ if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
+ &altroot) == 0) {
zfs_handle_t *zhp;
- verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
+ verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
"/") == 0);
zfs_close(zhp);
}
- return (0);
+create_failed:
+ zcmd_free_nvlists(&zc);
+ nvlist_free(zc_props);
+ nvlist_free(zc_fsprops);
+ return (ret);
}
/*
@@ -492,7 +961,7 @@ zpool_destroy(zpool_handle_t *zhp)
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
@@ -528,13 +997,14 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
int ret;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
- nvlist_t **spares;
- uint_t nspares;
+ nvlist_t **spares, **l2cache;
+ uint_t nspares, nl2cache;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot add to '%s'"), zhp->zpool_name);
- if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
+ if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
+ SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
@@ -542,11 +1012,20 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
return (zfs_error(hdl, EZFS_BADVERSION, msg));
}
- if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
+ if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
+ SPA_VERSION_L2CACHE &&
+ nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
+ &l2cache, &nl2cache) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
+ "upgraded to add cache devices"));
+ return (zfs_error(hdl, EZFS_BADVERSION, msg));
+ }
+
+ if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
switch (errno) {
case EBUSY:
/*
@@ -581,16 +1060,23 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "pool must be upgraded to add raidz2 vdevs"));
+ "pool must be upgraded to add these vdevs"));
(void) zfs_error(hdl, EZFS_BADVERSION, msg);
break;
case EDOM:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "root pool can not have concatenated devices"));
+ "root pool can not have multiple vdevs"
+ " or separate logs"));
(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
break;
+ case ENOTBLK:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "cache device must be a disk or disk slice"));
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ break;
+
default:
(void) zpool_standard_error(hdl, errno, msg);
}
@@ -610,40 +1096,97 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
* mounted datasets in the pool.
*/
int
-zpool_export(zpool_handle_t *zhp)
+zpool_export(zpool_handle_t *zhp, boolean_t force)
{
zfs_cmd_t zc = { 0 };
+ char msg[1024];
if (zpool_remove_zvol_links(zhp) != 0)
return (-1);
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot export '%s'"), zhp->zpool_name);
+
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_cookie = force;
+
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
+ switch (errno) {
+ case EXDEV:
+ zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
+ "use '-f' to override the following errors:\n"
+ "'%s' has an active shared spare which could be"
+ " used by other pools once '%s' is exported."),
+ zhp->zpool_name, zhp->zpool_name);
+ return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
+ msg));
+ default:
+ return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
+ msg));
+ }
+ }
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
- return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
- dgettext(TEXT_DOMAIN, "cannot export '%s'"),
- zhp->zpool_name));
return (0);
}
/*
- * Import the given pool using the known configuration. The configuration
- * should have come from zpool_find_import(). The 'newname' and 'altroot'
- * parameters control whether the pool is imported with a different name or with
- * an alternate root, respectively.
+ * zpool_import() is a contracted interface. Should be kept the same
+ * if possible.
+ *
+ * Applications should use zpool_import_props() to import a pool with
+ * new property values to be set.
*/
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
- const char *altroot)
+ char *altroot)
+{
+ nvlist_t *props = NULL;
+ int ret;
+
+ if (altroot != NULL) {
+ if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
+ return (zfs_error_fmt(hdl, EZFS_NOMEM,
+ dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+ newname));
+ }
+
+ if (nvlist_add_string(props,
+ zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
+ nvlist_free(props);
+ return (zfs_error_fmt(hdl, EZFS_NOMEM,
+ dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+ newname));
+ }
+ }
+
+ ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
+ if (props)
+ nvlist_free(props);
+ return (ret);
+}
+
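New callers are expected to build a property nvlist themselves and go through zpool_import_props(). A hedged sketch, where 'config' is one of the configs returned by the import-discovery code and the altroot value is illustrative:

	nvlist_t *props = NULL;

	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0 &&
	    nvlist_add_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/mnt") == 0) {
		if (zpool_import_props(hdl, config, NULL, props,
		    B_FALSE) != 0)
			(void) fprintf(stderr, "import failed: %s\n",
			    libzfs_error_description(hdl));
	}
	nvlist_free(props);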
+/*
+ * Import the given pool using the known configuration and a list of
+ * properties to be set. The configuration should have come from
+ * zpool_find_import(). The 'newname' parameter controls whether the pool
+ * is imported with a different name.
+ */
+int
+zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
+ nvlist_t *props, boolean_t importfaulted)
{
zfs_cmd_t zc = { 0 };
char *thename;
char *origname;
int ret;
+ char errbuf[1024];
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&origname) == 0);
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot import pool '%s'"), origname);
+
if (newname != NULL) {
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
@@ -654,26 +1197,34 @@ zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
thename = origname;
}
- if (altroot != NULL && altroot[0] != '/')
- return (zfs_error_fmt(hdl, EZFS_BADPATH,
- dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
- altroot));
+ if (props) {
+ uint64_t version;
- (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+ &version) == 0);
- if (altroot != NULL)
- (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
- else
- zc.zc_value[0] = '\0';
+ if ((props = zpool_valid_proplist(hdl, origname,
+ props, version, B_TRUE, errbuf)) == NULL) {
+ return (-1);
+ } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
+ nvlist_free(props);
+ return (-1);
+ }
+ }
+
+ (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&zc.zc_guid) == 0);
- if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
+ if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
+ nvlist_free(props);
return (-1);
+ }
+ zc.zc_cookie = (uint64_t)importfaulted;
ret = 0;
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
+ if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
char desc[1024];
if (newname == NULL)
(void) snprintf(desc, sizeof (desc),
@@ -703,6 +1254,7 @@ zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
ret = -1;
} else {
zpool_handle_t *zhp;
+
/*
* This should never fail, but play it safe anyway.
*/
@@ -712,9 +1264,12 @@ zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
ret = zpool_create_zvol_links(zhp);
zpool_close(zhp);
}
+
}
zcmd_free_nvlists(&zc);
+ nvlist_free(props);
+
return (ret);
}
@@ -731,7 +1286,7 @@ zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = type;
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
return (0);
(void) snprintf(msg, sizeof (msg),
@@ -749,7 +1304,7 @@ zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
*/
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
- boolean_t *avail_spare)
+ boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
uint_t c, children;
nvlist_t **child;
@@ -757,6 +1312,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
char *path;
uint64_t wholedisk = 0;
nvlist_t *ret;
+ uint64_t is_log;
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
@@ -789,27 +1345,53 @@ vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
&child, &children) != 0)
return (NULL);
- for (c = 0; c < children; c++)
+ for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
- avail_spare)) != NULL)
+ avail_spare, l2cache, NULL)) != NULL) {
+ /*
+ * The 'is_log' value is only set for the toplevel
+ * vdev, not the leaf vdevs. So we always lookup the
+ * log device from the root of the vdev tree (where
+ * 'log' is non-NULL).
+ */
+ if (log != NULL &&
+ nvlist_lookup_uint64(child[c],
+ ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
+ is_log) {
+ *log = B_TRUE;
+ }
return (ret);
+ }
+ }
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
- avail_spare)) != NULL) {
+ avail_spare, l2cache, NULL)) != NULL) {
*avail_spare = B_TRUE;
return (ret);
}
}
}
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+ &child, &children) == 0) {
+ for (c = 0; c < children; c++) {
+ if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
+ avail_spare, l2cache, NULL)) != NULL) {
+ *l2cache = B_TRUE;
+ return (ret);
+ }
+ }
+ }
+
return (NULL);
}
nvlist_t *
-zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
+zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
+ boolean_t *l2cache, boolean_t *log)
{
char buf[MAXPATHLEN];
const char *search;
@@ -831,29 +1413,124 @@ zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
&nvroot) == 0);
*avail_spare = B_FALSE;
- return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
+ *l2cache = B_FALSE;
+ if (log != NULL)
+ *log = B_FALSE;
+ return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
+ l2cache, log));
+}
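+
+/*
+ * Illustrative sketch (not part of the original change): classifying a named
+ * device using the extended zpool_find_vdev() signature above.
+ */
+static const char *
+example_classify_vdev(zpool_handle_t *zhp, const char *path)
+{
+    boolean_t spare, l2cache, islog;
+
+    if (zpool_find_vdev(zhp, path, &spare, &l2cache, &islog) == NULL)
+        return ("not found");
+    if (spare)
+        return ("hot spare");
+    if (l2cache)
+        return ("cache device");
+    if (islog)
+        return ("log device");
+    return ("regular vdev");
+}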
+
+static int
+vdev_online(nvlist_t *nv)
+{
+ uint64_t ival;
+
+ if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
+ nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
+ nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
+ return (0);
+
+ return (1);
+}
+
+/*
+ * Get phys_path for a root pool
+ * Return 0 on success; non-zero on failure.
+ */
+int
+zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
+{
+ char bootfs[ZPOOL_MAXNAMELEN];
+ nvlist_t *vdev_root;
+ nvlist_t **child;
+ uint_t count;
+ int i;
+
+ /*
+ * Make sure this is a root pool, as phys_path doesn't mean
+ * anything to a non-root pool.
+ */
+ if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
+ sizeof (bootfs), NULL) != 0)
+ return (-1);
+
+ verify(nvlist_lookup_nvlist(zhp->zpool_config,
+ ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
+
+ if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
+ &child, &count) != 0)
+ return (-2);
+
+ for (i = 0; i < count; i++) {
+ nvlist_t **child2;
+ uint_t count2;
+ char *type;
+ char *tmppath;
+ int j;
+
+ if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
+ != 0)
+ return (-3);
+
+ if (strcmp(type, VDEV_TYPE_DISK) == 0) {
+ if (!vdev_online(child[i]))
+ return (-8);
+ verify(nvlist_lookup_string(child[i],
+ ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
+ (void) strncpy(physpath, tmppath, strlen(tmppath));
+ } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
+ if (nvlist_lookup_nvlist_array(child[i],
+ ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
+ return (-4);
+
+ for (j = 0; j < count2; j++) {
+ if (!vdev_online(child2[j]))
+ return (-8);
+ if (nvlist_lookup_string(child2[j],
+ ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
+ return (-5);
+
+ if ((strlen(physpath) + strlen(tmppath)) >
+ MAXNAMELEN)
+ return (-6);
+
+ if (strlen(physpath) == 0) {
+ (void) strncpy(physpath, tmppath,
+ strlen(tmppath));
+ } else {
+ (void) strcat(physpath, " ");
+ (void) strcat(physpath, tmppath);
+ }
+ }
+ } else {
+ return (-7);
+ }
+ }
+
+ return (0);
}
/*
- * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
+ * Returns TRUE if the given guid corresponds to the given type.
+ * This is used to check for hot spares (INUSE or not), and level 2 cache
+ * devices.
*/
static boolean_t
-is_spare(zpool_handle_t *zhp, uint64_t guid)
+is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
{
- uint64_t spare_guid;
+ uint64_t target_guid;
nvlist_t *nvroot;
- nvlist_t **spares;
- uint_t nspares;
+ nvlist_t **list;
+ uint_t count;
int i;
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
- if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
- &spares, &nspares) == 0) {
- for (i = 0; i < nspares; i++) {
- verify(nvlist_lookup_uint64(spares[i],
- ZPOOL_CONFIG_GUID, &spare_guid) == 0);
- if (guid == spare_guid)
+ if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
+ for (i = 0; i < count; i++) {
+ verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
+ &target_guid) == 0);
+ if (guid == target_guid)
return (B_TRUE);
}
}
@@ -862,62 +1539,106 @@ is_spare(zpool_handle_t *zhp, uint64_t guid)
}
/*
- * Bring the specified vdev online
+ * Bring the specified vdev online. The 'flags' parameter is a set of the
+ * ZFS_ONLINE_* flags.
*/
int
-zpool_vdev_online(zpool_handle_t *zhp, const char *path)
+zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
+ vdev_state_t *newstate)
{
zfs_cmd_t zc = { 0 };
char msg[1024];
nvlist_t *tgt;
- boolean_t avail_spare;
+ boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
- if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
+ if (avail_spare ||
+ is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
- return (0);
+ zc.zc_cookie = VDEV_STATE_ONLINE;
+ zc.zc_obj = flags;
- return (zpool_standard_error(hdl, errno, msg));
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
+ return (zpool_standard_error(hdl, errno, msg));
+
+ *newstate = zc.zc_cookie;
+ return (0);
}
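+
+/*
+ * Illustrative sketch (not part of the original change): onlining a device
+ * with no special ZFS_ONLINE_* flags and checking the resulting state.
+ */
+static int
+example_online(zpool_handle_t *zhp, const char *path)
+{
+    vdev_state_t newstate;
+
+    if (zpool_vdev_online(zhp, path, 0, &newstate) != 0)
+        return (-1);
+
+    /* The device may come back DEGRADED or FAULTED rather than HEALTHY. */
+    return (newstate == VDEV_STATE_HEALTHY ? 0 : 1);
+}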
/*
* Take the specified vdev offline
*/
int
-zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
+zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
zfs_cmd_t zc = { 0 };
char msg[1024];
nvlist_t *tgt;
- boolean_t avail_spare;
+ boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
- if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
+ if (avail_spare ||
+ is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
- zc.zc_cookie = istmp;
+ zc.zc_cookie = VDEV_STATE_OFFLINE;
+ zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
+
+ if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+ return (0);
+
+ switch (errno) {
+ case EBUSY:
+
+ /*
+ * There are no other replicas of this device.
+ */
+ return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
+
+ default:
+ return (zpool_standard_error(hdl, errno, msg));
+ }
+}
+
+/*
+ * Mark the given vdev faulted.
+ */
+int
+zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_guid = guid;
+ zc.zc_cookie = VDEV_STATE_FAULTED;
- if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
@@ -931,6 +1652,30 @@ zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
default:
return (zpool_standard_error(hdl, errno, msg));
}
+
+}
+
+/*
+ * Mark the given vdev degraded.
+ */
+int
+zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_guid = guid;
+ zc.zc_cookie = VDEV_STATE_DEGRADED;
+
+ if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+ return (0);
+
+ return (zpool_standard_error(hdl, errno, msg));
}
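+
+/*
+ * Illustrative sketch (not part of the original change): how a diagnosis
+ * agent such as fmd might mark a device faulted by guid and later clear it
+ * with zpool_vdev_clear() (defined further below).
+ */
+static void
+example_fault_and_clear(zpool_handle_t *zhp, uint64_t guid)
+{
+    (void) zpool_vdev_fault(zhp, guid);
+    /* ... after the device has been repaired or replaced ... */
+    (void) zpool_vdev_clear(zhp, guid);
+}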
/*
@@ -963,7 +1708,7 @@ is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
/*
* Attach new_disk (fully described by nvroot) to old_disk.
- * If 'replacing' is specified, tne new disk will replace the old one.
+ * If 'replacing' is specified, the new disk will replace the old one.
*/
int
zpool_vdev_attach(zpool_handle_t *zhp,
@@ -973,9 +1718,9 @@ zpool_vdev_attach(zpool_handle_t *zhp,
char msg[1024];
int ret;
nvlist_t *tgt;
- boolean_t avail_spare;
+ boolean_t avail_spare, l2cache, islog;
uint64_t val;
- char *path;
+ char *path, *newname;
nvlist_t **child;
uint_t children;
nvlist_t *config_root;
@@ -989,12 +1734,16 @@ zpool_vdev_attach(zpool_handle_t *zhp,
"cannot attach %s to %s"), new_disk, old_disk);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
+ if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
+ &islog)) == 0)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
+ if (l2cache)
+ return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
+
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
zc.zc_cookie = replacing;
@@ -1008,17 +1757,21 @@ zpool_vdev_attach(zpool_handle_t *zhp,
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
+ if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
+ return (-1);
+
/*
* If the target is a hot spare that has been swapped in, we can only
* replace it with another hot spare.
*/
if (replacing &&
nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
- nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
- (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
- !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
+ (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
+ NULL) == NULL || !avail_spare) &&
+ is_replacing_spare(config_root, tgt, 1)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only be replaced by another hot spare"));
+ free(newname);
return (zfs_error(hdl, EZFS_BADTARGET, msg));
}
@@ -1028,17 +1781,21 @@ zpool_vdev_attach(zpool_handle_t *zhp,
*/
if (replacing &&
nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
- zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
+ zpool_find_vdev(zhp, newname, &avail_spare,
+ &l2cache, NULL) != NULL && avail_spare &&
is_replacing_spare(config_root, tgt, 0)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device has already been replaced with a spare"));
+ free(newname);
return (zfs_error(hdl, EZFS_BADTARGET, msg));
}
- if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
+ free(newname);
+
+ if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
- ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
+ ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
zcmd_free_nvlists(&zc);
@@ -1050,13 +1807,18 @@ zpool_vdev_attach(zpool_handle_t *zhp,
/*
* Can't attach to or replace this type of vdev.
*/
- if (replacing)
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "cannot replace a replacing device"));
- else
+ if (replacing) {
+ if (islog)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "cannot replace a log with a spare"));
+ else
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "cannot replace a replacing device"));
+ } else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only attach to mirrors and top-level "
"disks"));
+ }
(void) zfs_error(hdl, EZFS_BADTARGET, msg);
break;
@@ -1116,22 +1878,26 @@ zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
zfs_cmd_t zc = { 0 };
char msg[1024];
nvlist_t *tgt;
- boolean_t avail_spare;
+ boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ NULL)) == 0)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
+ if (l2cache)
+ return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
+
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
+ if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
return (0);
switch (errno) {
@@ -1160,7 +1926,8 @@ zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
}
/*
- * Remove the given device. Currently, this is supported only for hot spares.
+ * Remove the given device. Currently, this is supported only for hot spares
+ * and level 2 cache devices.
*/
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
@@ -1168,25 +1935,27 @@ zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
zfs_cmd_t zc = { 0 };
char msg[1024];
nvlist_t *tgt;
- boolean_t avail_spare;
+ boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
- if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ NULL)) == 0)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
- if (!avail_spare) {
+ if (!avail_spare && !l2cache) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "only inactive hot spares can be removed"));
+ "only inactive hot spares or cache devices "
+ "can be removed"));
return (zfs_error(hdl, EZFS_NODEVICE, msg));
}
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
+ if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
@@ -1201,7 +1970,7 @@ zpool_clear(zpool_handle_t *zhp, const char *path)
zfs_cmd_t zc = { 0 };
char msg[1024];
nvlist_t *tgt;
- boolean_t avail_spare;
+ boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (path)
@@ -1215,9 +1984,14 @@ zpool_clear(zpool_handle_t *zhp, const char *path)
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (path) {
- if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
+ &l2cache, NULL)) == 0)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
+ /*
+ * Don't allow error clearing for hot spares. Do allow
+ * error clearing for l2cache devices.
+ */
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
@@ -1225,6 +1999,29 @@ zpool_clear(zpool_handle_t *zhp, const char *path)
&zc.zc_guid) == 0);
}
+ if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
+ return (0);
+
+ return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Similar to zpool_clear(), but takes a GUID (used by fmd).
+ */
+int
+zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
+ guid);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_guid = guid;
+
if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
return (0);
@@ -1342,12 +2139,15 @@ typedef struct zvol_cb {
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
- int ret;
+ int ret = 0;
- if (ZFS_IS_VOLUME(zhp))
+ if (ZFS_IS_VOLUME(zhp)) {
(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
+ ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
+ }
- ret = zfs_iter_children(zhp, do_zvol_create, NULL);
+ if (ret == 0)
+ ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
zfs_close(zhp);
@@ -1370,7 +2170,7 @@ zpool_create_zvol_links(zpool_handle_t *zhp)
zhp->zpool_name)) == NULL)
return (0);
- ret = zfs_iter_children(zfp, do_zvol_create, NULL);
+ ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
zfs_close(zfp);
return (ret);
@@ -1492,6 +2292,8 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
char *path, *devid;
uint64_t value;
char buf[64];
+ vdev_stat_t *vs;
+ uint_t vsc;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&value) == 0) {
@@ -1502,7 +2304,16 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
path = buf;
} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
- if (zhp != NULL &&
+ /*
+ * If the device is dead (faulted, offline, etc) then don't
+ * bother opening it. Otherwise we may be forcing the user to
+ * open a misbehaving device, which can have undesirable
+ * effects.
+ */
+ if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&vs, &vsc) != 0 ||
+ vs->vs_state >= VDEV_STATE_DEGRADED) &&
+ zhp != NULL &&
nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
/*
* Determine if the current path is correct.
@@ -1585,6 +2396,8 @@ zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
*/
verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
&count) == 0);
+ if (count == 0)
+ return (0);
if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
return (-1);
@@ -1665,62 +2478,56 @@ nomem:
* Upgrade a ZFS pool to the latest on-disk version.
*/
int
-zpool_upgrade(zpool_handle_t *zhp)
+zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
zfs_cmd_t zc = { 0 };
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strcpy(zc.zc_name, zhp->zpool_name);
- if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
+ zc.zc_cookie = new_version;
+
+ if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
zhp->zpool_name));
-
return (0);
}
-/*
- * Log command history.
- *
- * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
- * otherwise ('zfs'). 'pool_create' is B_TRUE if we are logging the creation
- * of the pool; B_FALSE otherwise. 'path' is the pathanme containing the
- * poolname. 'argc' and 'argv' are used to construct the command string.
- */
void
-zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
- boolean_t pool, boolean_t pool_create)
+zpool_set_history_str(const char *subcommand, int argc, char **argv,
+ char *history_str)
{
- char cmd_buf[HIS_MAX_RECORD_LEN];
- char *dspath;
- zfs_cmd_t zc = { 0 };
int i;
- /* construct the command string */
- (void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
- for (i = 0; i < argc; i++) {
- if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
+ (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
+ for (i = 1; i < argc; i++) {
+ if (strlen(history_str) + 1 + strlen(argv[i]) >
+ HIS_MAX_RECORD_LEN)
break;
- (void) strcat(cmd_buf, " ");
- (void) strcat(cmd_buf, argv[i]);
+ (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
+ (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
}
+}
- /* figure out the poolname */
- dspath = strpbrk(path, "/@");
- if (dspath == NULL) {
- (void) strcpy(zc.zc_name, path);
- } else {
- (void) strncpy(zc.zc_name, path, dspath - path);
- zc.zc_name[dspath-path] = '\0';
- }
+/*
+ * Stage command history for logging.
+ */
+int
+zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
+{
+ if (history_str == NULL)
+ return (EINVAL);
+
+ if (strlen(history_str) > HIS_MAX_RECORD_LEN)
+ return (EINVAL);
- zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
- zc.zc_history_len = strlen(cmd_buf);
+ if (hdl->libzfs_log_str != NULL)
+ free(hdl->libzfs_log_str);
- /* overloading zc_history_offset */
- zc.zc_history_offset = pool_create;
+ if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
+ return (no_memory(hdl));
- (void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
+ return (0);
}
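+
+/*
+ * Illustrative sketch (not part of the original change): how a command-line
+ * front end might stage its invocation string for the pool history log using
+ * the two helpers above.
+ */
+static void
+example_stage_history(libzfs_handle_t *hdl, int argc, char **argv)
+{
+    char history_str[HIS_MAX_RECORD_LEN];
+
+    zpool_set_history_str("zpool", argc, argv, history_str);
+    (void) zpool_stage_history(hdl, history_str);
+}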
/*
@@ -1906,150 +2713,296 @@ zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
free(mntpnt);
}
-int
-zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
+#define RDISK_ROOT "/dev/rdsk"
+#define BACKUP_SLICE "s2"
+/*
+ * Don't start the slice at the default block of 34; many storage
+ * devices will use a stripe width of 128k, so start there instead.
+ */
+#define NEW_START_BLOCK 256
+
+#if defined(sun)
+/*
+ * Read the EFI label from the config, if a label does not exist then
+ * pass back the error to the caller. If the caller has passed a non-NULL
+ * diskaddr argument then we set it to the starting address of the EFI
+ * partition.
+ */
+static int
+read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
- zfs_cmd_t zc = { 0 };
- int ret = -1;
- char errbuf[1024];
- nvlist_t *nvl = NULL;
- nvlist_t *realprops;
+ char *path;
+ int fd;
+ char diskname[MAXPATHLEN];
+ int err = -1;
- (void) snprintf(errbuf, sizeof (errbuf),
- dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
- zhp->zpool_name);
+ if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
+ return (err);
- if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
- zfs_error_aux(zhp->zpool_hdl,
- dgettext(TEXT_DOMAIN, "pool must be "
- "upgraded to support pool properties"));
- return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
- }
+ (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
+ strrchr(path, '/'));
+ if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
+ struct dk_gpt *vtoc;
- if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
- return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
+ if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
+ if (sb != NULL)
+ *sb = vtoc->efi_parts[0].p_start;
+ efi_free(vtoc);
+ }
+ (void) close(fd);
+ }
+ return (err);
+}
- if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
- nvlist_add_string(nvl, propname, propval) != 0) {
- return (no_memory(zhp->zpool_hdl));
+/*
+ * determine where a partition starts on a disk in the current
+ * configuration
+ */
+static diskaddr_t
+find_start_block(nvlist_t *config)
+{
+ nvlist_t **child;
+ uint_t c, children;
+ diskaddr_t sb = MAXOFFSET_T;
+ uint64_t wholedisk;
+
+ if (nvlist_lookup_nvlist_array(config,
+ ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
+ if (nvlist_lookup_uint64(config,
+ ZPOOL_CONFIG_WHOLE_DISK,
+ &wholedisk) != 0 || !wholedisk) {
+ return (MAXOFFSET_T);
+ }
+ if (read_efi_label(config, &sb) < 0)
+ sb = MAXOFFSET_T;
+ return (sb);
}
- if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
- zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
- nvlist_free(nvl);
- return (-1);
+ for (c = 0; c < children; c++) {
+ sb = find_start_block(child[c]);
+ if (sb != MAXOFFSET_T) {
+ return (sb);
+ }
}
+ return (MAXOFFSET_T);
+}
+#endif /* sun */
- nvlist_free(nvl);
- nvl = realprops;
+/*
+ * Label an individual disk. The name provided is the short name,
+ * stripped of any leading /dev path.
+ */
+int
+zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
+{
+#if defined(sun)
+ char path[MAXPATHLEN];
+ struct dk_gpt *vtoc;
+ int fd;
+ size_t resv = EFI_MIN_RESV_SIZE;
+ uint64_t slice_size;
+ diskaddr_t start_block;
+ char errbuf[1024];
- /*
- * Execute the corresponding ioctl() to set this property.
- */
- (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ /* prepare an error message just in case */
+ (void) snprintf(errbuf, sizeof (errbuf),
+ dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
- if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
- return (-1);
+ if (zhp) {
+ nvlist_t *nvroot;
- ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
- zcmd_free_nvlists(&zc);
+ verify(nvlist_lookup_nvlist(zhp->zpool_config,
+ ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
- if (ret)
- (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
+ if (zhp->zpool_start_block == 0)
+ start_block = find_start_block(nvroot);
+ else
+ start_block = zhp->zpool_start_block;
+ zhp->zpool_start_block = start_block;
+ } else {
+ /* new pool */
+ start_block = NEW_START_BLOCK;
+ }
- return (ret);
-}
+ (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
+ BACKUP_SLICE);
-int
-zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
- size_t proplen, zfs_source_t *srctype)
-{
- uint64_t value;
- char msg[1024], *strvalue;
- nvlist_t *nvp;
- zfs_source_t src = ZFS_SRC_NONE;
+ if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
+ /*
+ * This shouldn't happen. We've long since verified that this
+ * is a valid device.
+ */
+ zfs_error_aux(hdl,
+ dgettext(TEXT_DOMAIN, "unable to open device"));
+ return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
+ }
- (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
- "cannot get property '%s'"), zpool_prop_to_name(prop));
+ if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
+ /*
+ * The only way this can fail is if we run out of memory, or we
+ * were unable to read the disk's capacity
+ */
+ if (errno == ENOMEM)
+ (void) no_memory(hdl);
+
+ (void) close(fd);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "unable to read disk capacity"), name);
- if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
- zfs_error_aux(zhp->zpool_hdl,
- dgettext(TEXT_DOMAIN, "pool must be "
- "upgraded to support pool properties"));
- return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
+ return (zfs_error(hdl, EZFS_NOCAP, errbuf));
}
- if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
- return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
+ slice_size = vtoc->efi_last_u_lba + 1;
+ slice_size -= EFI_MIN_RESV_SIZE;
+ if (start_block == MAXOFFSET_T)
+ start_block = NEW_START_BLOCK;
+ slice_size -= start_block;
+
+ vtoc->efi_parts[0].p_start = start_block;
+ vtoc->efi_parts[0].p_size = slice_size;
/*
- * the "name" property is special cased
+ * Why we use V_USR: V_BACKUP confuses users, and is considered
+ * disposable by some EFI utilities (since EFI doesn't have a backup
+ * slice). V_UNASSIGNED is supposed to be used only for zero size
+ * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
+ * etc. were all pretty specific. V_USR is as close to reality as we
+ * can get, in the absence of V_OTHER.
*/
- if (!zfs_prop_valid_for_type(prop, ZFS_TYPE_POOL) &&
- prop != ZFS_PROP_NAME)
- return (-1);
+ vtoc->efi_parts[0].p_tag = V_USR;
+ (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
- switch (prop) {
- case ZFS_PROP_NAME:
- (void) strlcpy(propbuf, zhp->zpool_name, proplen);
- break;
+ vtoc->efi_parts[8].p_start = slice_size + start_block;
+ vtoc->efi_parts[8].p_size = resv;
+ vtoc->efi_parts[8].p_tag = V_RESERVED;
- case ZFS_PROP_BOOTFS:
- if (nvlist_lookup_nvlist(zhp->zpool_props,
- zpool_prop_to_name(prop), &nvp) != 0) {
- strvalue = (char *)zfs_prop_default_string(prop);
- if (strvalue == NULL)
- strvalue = "-";
- src = ZFS_SRC_DEFAULT;
- } else {
- VERIFY(nvlist_lookup_uint64(nvp,
- ZFS_PROP_SOURCE, &value) == 0);
- src = value;
- VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
- &strvalue) == 0);
- if (strlen(strvalue) >= proplen)
- return (-1);
- }
- (void) strcpy(propbuf, strvalue);
- break;
+ if (efi_write(fd, vtoc) != 0) {
+ /*
+ * Some block drivers (like pcata) may not support EFI
+ * GPT labels. Print out a helpful error message directing
+ * the user to manually label the disk and give a specific
+ * slice.
+ */
+ (void) close(fd);
+ efi_free(vtoc);
- default:
- return (-1);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "try using fdisk(1M) and then provide a specific slice"));
+ return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
}
- if (srctype)
- *srctype = src;
+
+ (void) close(fd);
+ efi_free(vtoc);
+#endif /* sun */
return (0);
}
-int
-zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
+static boolean_t
+supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
- return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
-}
+ char *type;
+ nvlist_t **child;
+ uint_t children, c;
+ verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
+ if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
+ strcmp(type, VDEV_TYPE_FILE) == 0 ||
+ strcmp(type, VDEV_TYPE_LOG) == 0 ||
+ strcmp(type, VDEV_TYPE_MISSING) == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "vdev type '%s' is not supported"), type);
+ (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
+ return (B_FALSE);
+ }
+ if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0) {
+ for (c = 0; c < children; c++) {
+ if (!supported_dump_vdev_type(hdl, child[c], errbuf))
+ return (B_FALSE);
+ }
+ }
+ return (B_TRUE);
+}
+/*
+ * Check whether this zvol is allowable for use as a dump device; returns
+ * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol.
+ */
int
-zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
+zvol_check_dump_config(char *arg)
{
- libzfs_handle_t *hdl = zhp->zpool_hdl;
- zpool_proplist_t *entry;
- char buf[ZFS_MAXPROPLEN];
+ zpool_handle_t *zhp = NULL;
+ nvlist_t *config, *nvroot;
+ char *p, *volname;
+ nvlist_t **top;
+ uint_t toplevels;
+ libzfs_handle_t *hdl;
+ char errbuf[1024];
+ char poolname[ZPOOL_MAXNAMELEN];
+ int pathlen = strlen(ZVOL_FULL_DEV_DIR);
+ int ret = 1;
- if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
+ if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
return (-1);
+ }
- for (entry = *plp; entry != NULL; entry = entry->pl_next) {
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "dump is not supported on device '%s'"), arg);
- if (entry->pl_fixed)
- continue;
+ if ((hdl = libzfs_init()) == NULL)
+ return (1);
+ libzfs_print_on_error(hdl, B_TRUE);
- if (entry->pl_prop != ZFS_PROP_INVAL &&
- zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
- NULL) == 0) {
- if (strlen(buf) > entry->pl_width)
- entry->pl_width = strlen(buf);
- }
+ volname = arg + pathlen;
+
+ /* check the configuration of the pool */
+ if ((p = strchr(volname, '/')) == NULL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "malformed dataset name"));
+ (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
+ return (1);
+ } else if (p - volname >= ZFS_MAXNAMELEN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "dataset name is too long"));
+ (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
+ return (1);
+ } else {
+ (void) strncpy(poolname, volname, p - volname);
+ poolname[p - volname] = '\0';
}
- return (0);
+ if ((zhp = zpool_open(hdl, poolname)) == NULL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "could not open pool '%s'"), poolname);
+ (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
+ goto out;
+ }
+ config = zpool_get_config(zhp, NULL);
+ if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvroot) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "could not obtain vdev configuration for '%s'"), poolname);
+ (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
+ goto out;
+ }
+
+ verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &top, &toplevels) == 0);
+ if (toplevels != 1) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' has multiple top level vdevs"), poolname);
+ (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
+ goto out;
+ }
+
+ if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
+ goto out;
+ }
+ ret = 0;
+
+out:
+ if (zhp)
+ zpool_close(zhp);
+ libzfs_fini(hdl);
+ return (ret);
}
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c
new file mode 100644
index 0000000..d1163ce
--- /dev/null
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c
@@ -0,0 +1,2103 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <stddef.h>
+#include <fcntl.h>
+#include <sys/mount.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+#include <sys/avl.h>
+#include <stddef.h>
+
+#include <libzfs.h>
+
+#include "zfs_namecheck.h"
+#include "zfs_prop.h"
+#include "libzfs_impl.h"
+
+#include <fletcher.c> /* XXX */
+
+/* We need to use something for ENODATA. */
+#define ENODATA EIDRM
+
+static int zfs_receive_impl(libzfs_handle_t *, const char *, recvflags_t,
+ int, avl_tree_t *, char **);
+
+/*
+ * Routines for dealing with the AVL tree of fs-nvlists
+ */
+typedef struct fsavl_node {
+ avl_node_t fn_node;
+ nvlist_t *fn_nvfs;
+ char *fn_snapname;
+ uint64_t fn_guid;
+} fsavl_node_t;
+
+static int
+fsavl_compare(const void *arg1, const void *arg2)
+{
+ const fsavl_node_t *fn1 = arg1;
+ const fsavl_node_t *fn2 = arg2;
+
+ if (fn1->fn_guid > fn2->fn_guid)
+ return (+1);
+ else if (fn1->fn_guid < fn2->fn_guid)
+ return (-1);
+ else
+ return (0);
+}
+
+/*
+ * Given the GUID of a snapshot, find its containing filesystem and
+ * (optionally) name.
+ */
+static nvlist_t *
+fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname)
+{
+ fsavl_node_t fn_find;
+ fsavl_node_t *fn;
+
+ fn_find.fn_guid = snapguid;
+
+ fn = avl_find(avl, &fn_find, NULL);
+ if (fn) {
+ if (snapname)
+ *snapname = fn->fn_snapname;
+ return (fn->fn_nvfs);
+ }
+ return (NULL);
+}
+
+static void
+fsavl_destroy(avl_tree_t *avl)
+{
+ fsavl_node_t *fn;
+ void *cookie;
+
+ if (avl == NULL)
+ return;
+
+ cookie = NULL;
+ while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL)
+ free(fn);
+ avl_destroy(avl);
+ free(avl);
+}
+
+static avl_tree_t *
+fsavl_create(nvlist_t *fss)
+{
+ avl_tree_t *fsavl;
+ nvpair_t *fselem = NULL;
+
+ if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
+ return (NULL);
+
+ avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
+ offsetof(fsavl_node_t, fn_node));
+
+ while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
+ nvlist_t *nvfs, *snaps;
+ nvpair_t *snapelem = NULL;
+
+ VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs));
+ VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps));
+
+ while ((snapelem =
+ nvlist_next_nvpair(snaps, snapelem)) != NULL) {
+ fsavl_node_t *fn;
+ uint64_t guid;
+
+ VERIFY(0 == nvpair_value_uint64(snapelem, &guid));
+ if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
+ fsavl_destroy(fsavl);
+ return (NULL);
+ }
+ fn->fn_nvfs = nvfs;
+ fn->fn_snapname = nvpair_name(snapelem);
+ fn->fn_guid = guid;
+
+ /*
+ * Note: if there are multiple snaps with the
+ * same GUID, we ignore all but one.
+ */
+ if (avl_find(fsavl, fn, NULL) == NULL)
+ avl_add(fsavl, fn);
+ else
+ free(fn);
+ }
+ }
+
+ return (fsavl);
+}
+
+/*
+ * Routines for dealing with the giant nvlist of fs-nvlists, etc.
+ */
+typedef struct send_data {
+ uint64_t parent_fromsnap_guid;
+ nvlist_t *parent_snaps;
+ nvlist_t *fss;
+ nvlist_t *snapprops;
+ const char *fromsnap;
+ const char *tosnap;
+
+ /*
+ * The header nvlist is of the following format:
+ * {
+ * "tosnap" -> string
+ * "fromsnap" -> string (if incremental)
+ * "fss" -> {
+ * id -> {
+ *
+ * "name" -> string (full name; for debugging)
+ * "parentfromsnap" -> number (guid of fromsnap in parent)
+ *
+ * "props" -> { name -> value (only if set here) }
+ * "snaps" -> { name (lastname) -> number (guid) }
+ * "snapprops" -> { name (lastname) -> { name -> value } }
+ *
+ * "origin" -> number (guid) (if clone)
+ * "sent" -> boolean (not on-disk)
+ * }
+ * }
+ * }
+ *
+ */
+} send_data_t;
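+
+/*
+ * Illustrative sketch (not part of the original change): walking the "fss"
+ * portion of a header nvlist in the format described above, printing each
+ * snapshot as name@snap together with its guid.
+ */
+static void
+example_print_fss(nvlist_t *hdrnv)
+{
+    nvlist_t *fss;
+    nvpair_t *fselem = NULL;
+
+    if (nvlist_lookup_nvlist(hdrnv, "fss", &fss) != 0)
+        return;
+
+    while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
+        nvlist_t *nvfs, *snaps;
+        nvpair_t *snapelem = NULL;
+        char *fsname;
+        uint64_t guid;
+
+        VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs));
+        VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname));
+        VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps));
+
+        while ((snapelem = nvlist_next_nvpair(snaps, snapelem)) != NULL) {
+            VERIFY(0 == nvpair_value_uint64(snapelem, &guid));
+            (void) printf("%s@%s\t0x%llx\n", fsname,
+                nvpair_name(snapelem), (longlong_t)guid);
+        }
+    }
+}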
+
+static void send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv);
+
+static int
+send_iterate_snap(zfs_handle_t *zhp, void *arg)
+{
+ send_data_t *sd = arg;
+ uint64_t guid = zhp->zfs_dmustats.dds_guid;
+ char *snapname;
+ nvlist_t *nv;
+
+ snapname = strrchr(zhp->zfs_name, '@')+1;
+
+ VERIFY(0 == nvlist_add_uint64(sd->parent_snaps, snapname, guid));
+ /*
+ * NB: if there is no fromsnap here (it's a newly created fs in
+ * an incremental replication), we will substitute the tosnap.
+ */
+ if ((sd->fromsnap && strcmp(snapname, sd->fromsnap) == 0) ||
+ (sd->parent_fromsnap_guid == 0 && sd->tosnap &&
+ strcmp(snapname, sd->tosnap) == 0)) {
+ sd->parent_fromsnap_guid = guid;
+ }
+
+ VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0));
+ send_iterate_prop(zhp, nv);
+ VERIFY(0 == nvlist_add_nvlist(sd->snapprops, snapname, nv));
+ nvlist_free(nv);
+
+ zfs_close(zhp);
+ return (0);
+}
+
+static void
+send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv)
+{
+ nvpair_t *elem = NULL;
+
+ while ((elem = nvlist_next_nvpair(zhp->zfs_props, elem)) != NULL) {
+ char *propname = nvpair_name(elem);
+ zfs_prop_t prop = zfs_name_to_prop(propname);
+ nvlist_t *propnv;
+
+ if (!zfs_prop_user(propname) && zfs_prop_readonly(prop))
+ continue;
+
+ verify(nvpair_value_nvlist(elem, &propnv) == 0);
+ if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION) {
+ /* these are modifiable, but have no source */
+ uint64_t value;
+ verify(nvlist_lookup_uint64(propnv,
+ ZPROP_VALUE, &value) == 0);
+ if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
+ continue;
+ } else {
+ char *source;
+ if (nvlist_lookup_string(propnv,
+ ZPROP_SOURCE, &source) != 0)
+ continue;
+ if (strcmp(source, zhp->zfs_name) != 0)
+ continue;
+ }
+
+ if (zfs_prop_user(propname) ||
+ zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
+ char *value;
+ verify(nvlist_lookup_string(propnv,
+ ZPROP_VALUE, &value) == 0);
+ VERIFY(0 == nvlist_add_string(nv, propname, value));
+ } else {
+ uint64_t value;
+ verify(nvlist_lookup_uint64(propnv,
+ ZPROP_VALUE, &value) == 0);
+ VERIFY(0 == nvlist_add_uint64(nv, propname, value));
+ }
+ }
+}
+
+static int
+send_iterate_fs(zfs_handle_t *zhp, void *arg)
+{
+ send_data_t *sd = arg;
+ nvlist_t *nvfs, *nv;
+ int rv;
+ uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
+ uint64_t guid = zhp->zfs_dmustats.dds_guid;
+ char guidstring[64];
+
+ VERIFY(0 == nvlist_alloc(&nvfs, NV_UNIQUE_NAME, 0));
+ VERIFY(0 == nvlist_add_string(nvfs, "name", zhp->zfs_name));
+ VERIFY(0 == nvlist_add_uint64(nvfs, "parentfromsnap",
+ sd->parent_fromsnap_guid));
+
+ if (zhp->zfs_dmustats.dds_origin[0]) {
+ zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
+ zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
+ if (origin == NULL)
+ return (-1);
+ VERIFY(0 == nvlist_add_uint64(nvfs, "origin",
+ origin->zfs_dmustats.dds_guid));
+ }
+
+ /* iterate over props */
+ VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0));
+ send_iterate_prop(zhp, nv);
+ VERIFY(0 == nvlist_add_nvlist(nvfs, "props", nv));
+ nvlist_free(nv);
+
+ /* iterate over snaps, and set sd->parent_fromsnap_guid */
+ sd->parent_fromsnap_guid = 0;
+ VERIFY(0 == nvlist_alloc(&sd->parent_snaps, NV_UNIQUE_NAME, 0));
+ VERIFY(0 == nvlist_alloc(&sd->snapprops, NV_UNIQUE_NAME, 0));
+ (void) zfs_iter_snapshots(zhp, send_iterate_snap, sd);
+ VERIFY(0 == nvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps));
+ VERIFY(0 == nvlist_add_nvlist(nvfs, "snapprops", sd->snapprops));
+ nvlist_free(sd->parent_snaps);
+ nvlist_free(sd->snapprops);
+
+ /* add this fs to nvlist */
+ (void) snprintf(guidstring, sizeof (guidstring),
+ "0x%llx", (longlong_t)guid);
+ VERIFY(0 == nvlist_add_nvlist(sd->fss, guidstring, nvfs));
+ nvlist_free(nvfs);
+
+ /* iterate over children */
+ rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd);
+
+ sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
+
+ zfs_close(zhp);
+ return (rv);
+}
+
+static int
+gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
+ const char *tosnap, nvlist_t **nvlp, avl_tree_t **avlp)
+{
+ zfs_handle_t *zhp;
+ send_data_t sd = { 0 };
+ int error;
+
+ zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+ if (zhp == NULL)
+ return (EZFS_BADTYPE);
+
+ VERIFY(0 == nvlist_alloc(&sd.fss, NV_UNIQUE_NAME, 0));
+ sd.fromsnap = fromsnap;
+ sd.tosnap = tosnap;
+
+ if ((error = send_iterate_fs(zhp, &sd)) != 0) {
+ nvlist_free(sd.fss);
+ if (avlp != NULL)
+ *avlp = NULL;
+ *nvlp = NULL;
+ return (error);
+ }
+
+ if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) {
+ nvlist_free(sd.fss);
+ *nvlp = NULL;
+ return (EZFS_NOMEM);
+ }
+
+ *nvlp = sd.fss;
+ return (0);
+}
+
+/*
+ * Routines for dealing with the sorted snapshot functionality
+ */
+typedef struct zfs_node {
+ zfs_handle_t *zn_handle;
+ avl_node_t zn_avlnode;
+} zfs_node_t;
+
+static int
+zfs_sort_snaps(zfs_handle_t *zhp, void *data)
+{
+ avl_tree_t *avl = data;
+ zfs_node_t *node = zfs_alloc(zhp->zfs_hdl, sizeof (zfs_node_t));
+
+ node->zn_handle = zhp;
+ avl_add(avl, node);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+zfs_snapshot_compare(const void *larg, const void *rarg)
+{
+ zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle;
+ zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle;
+ uint64_t lcreate, rcreate;
+
+ /*
+ * Sort them according to creation time. We use the hidden
+ * CREATETXG property to get an absolute ordering of snapshots.
+ */
+ lcreate = zfs_prop_get_int(l, ZFS_PROP_CREATETXG);
+ rcreate = zfs_prop_get_int(r, ZFS_PROP_CREATETXG);
+
+ if (lcreate < rcreate)
+ return (-1);
+ else if (lcreate > rcreate)
+ return (+1);
+ else
+ return (0);
+}
+
+static int
+zfs_iter_snapshots_sorted(zfs_handle_t *zhp, zfs_iter_f callback, void *data)
+{
+ int ret = 0;
+ zfs_node_t *node;
+ avl_tree_t avl;
+ void *cookie = NULL;
+
+ avl_create(&avl, zfs_snapshot_compare,
+ sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode));
+
+ ret = zfs_iter_snapshots(zhp, zfs_sort_snaps, &avl);
+
+ for (node = avl_first(&avl); node != NULL; node = AVL_NEXT(&avl, node))
+ ret |= callback(node->zn_handle, data);
+
+ while ((node = avl_destroy_nodes(&avl, &cookie)) != NULL)
+ free(node);
+
+ avl_destroy(&avl);
+
+ return (ret);
+}
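+
+/*
+ * Illustrative sketch (not part of the original change): a callback for
+ * zfs_iter_snapshots_sorted() that prints snapshots in creation order, e.g.
+ * zfs_iter_snapshots_sorted(zhp, example_print_snap, NULL).  The callback
+ * closes the handle it is given, as dump_snapshot() does below.
+ */
+/* ARGSUSED */
+static int
+example_print_snap(zfs_handle_t *zhp, void *arg)
+{
+    (void) printf("%s\n", zfs_get_name(zhp));
+    zfs_close(zhp);
+    return (0);
+}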
+
+/*
+ * Routines specific to "zfs send"
+ */
+typedef struct send_dump_data {
+ /* these are all just the short snapname (the part after the @) */
+ const char *fromsnap;
+ const char *tosnap;
+ char lastsnap[ZFS_MAXNAMELEN];
+ boolean_t seenfrom, seento, replicate, doall, fromorigin;
+ boolean_t verbose;
+ int outfd;
+ boolean_t err;
+ nvlist_t *fss;
+ avl_tree_t *fsavl;
+} send_dump_data_t;
+
+/*
+ * Dumps a backup of the given snapshot (incremental from fromsnap if it's not
+ * NULL) to the file descriptor specified by outfd.
+ */
+static int
+dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, boolean_t fromorigin,
+ int outfd)
+{
+ zfs_cmd_t zc = { 0 };
+ libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+ assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+ assert(fromsnap == NULL || fromsnap[0] == '\0' || !fromorigin);
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ if (fromsnap)
+ (void) strlcpy(zc.zc_value, fromsnap, sizeof (zc.zc_value));
+ zc.zc_cookie = outfd;
+ zc.zc_obj = fromorigin;
+
+ if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SEND, &zc) != 0) {
+ char errbuf[1024];
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "warning: cannot send '%s'"), zhp->zfs_name);
+
+ switch (errno) {
+
+ case EXDEV:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "not an earlier snapshot from the same fs"));
+ return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
+
+ case ENOENT:
+ if (zfs_dataset_exists(hdl, zc.zc_name,
+ ZFS_TYPE_SNAPSHOT)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "incremental source (@%s) does not exist"),
+ zc.zc_value);
+ }
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+
+ case EDQUOT:
+ case EFBIG:
+ case EIO:
+ case ENOLINK:
+ case ENOSPC:
+ case ENXIO:
+ case EPIPE:
+ case ERANGE:
+ case EFAULT:
+ case EROFS:
+ zfs_error_aux(hdl, strerror(errno));
+ return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
+
+ default:
+ return (zfs_standard_error(hdl, errno, errbuf));
+ }
+ }
+
+ return (0);
+}
+
+static int
+dump_snapshot(zfs_handle_t *zhp, void *arg)
+{
+ send_dump_data_t *sdd = arg;
+ const char *thissnap;
+ int err;
+
+ thissnap = strchr(zhp->zfs_name, '@') + 1;
+
+ if (sdd->fromsnap && !sdd->seenfrom &&
+ strcmp(sdd->fromsnap, thissnap) == 0) {
+ sdd->seenfrom = B_TRUE;
+ (void) strcpy(sdd->lastsnap, thissnap);
+ zfs_close(zhp);
+ return (0);
+ }
+
+ if (sdd->seento || !sdd->seenfrom) {
+ zfs_close(zhp);
+ return (0);
+ }
+
+ /* send it */
+ if (sdd->verbose) {
+ (void) fprintf(stderr, "sending from @%s to %s\n",
+ sdd->lastsnap, zhp->zfs_name);
+ }
+
+ err = dump_ioctl(zhp, sdd->lastsnap,
+ sdd->lastsnap[0] == '\0' && (sdd->fromorigin || sdd->replicate),
+ sdd->outfd);
+
+ if (!sdd->seento && strcmp(sdd->tosnap, thissnap) == 0)
+ sdd->seento = B_TRUE;
+
+ (void) strcpy(sdd->lastsnap, thissnap);
+ zfs_close(zhp);
+ return (err);
+}
+
+static int
+dump_filesystem(zfs_handle_t *zhp, void *arg)
+{
+ int rv = 0;
+ send_dump_data_t *sdd = arg;
+ boolean_t missingfrom = B_FALSE;
+ zfs_cmd_t zc = { 0 };
+
+ (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
+ zhp->zfs_name, sdd->tosnap);
+ if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
+ (void) fprintf(stderr, "WARNING: "
+ "could not send %s@%s: does not exist\n",
+ zhp->zfs_name, sdd->tosnap);
+ sdd->err = B_TRUE;
+ return (0);
+ }
+
+ if (sdd->replicate && sdd->fromsnap) {
+ /*
+ * If this fs does not have fromsnap, and we're doing
+ * recursive, we need to send a full stream from the
+ * beginning (or an incremental from the origin if this
+ * is a clone). If we're doing non-recursive, then let
+ * them get the error.
+ */
+ (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
+ zhp->zfs_name, sdd->fromsnap);
+ if (ioctl(zhp->zfs_hdl->libzfs_fd,
+ ZFS_IOC_OBJSET_STATS, &zc) != 0) {
+ missingfrom = B_TRUE;
+ }
+ }
+
+ if (sdd->doall) {
+ sdd->seenfrom = sdd->seento = sdd->lastsnap[0] = 0;
+ if (sdd->fromsnap == NULL || missingfrom)
+ sdd->seenfrom = B_TRUE;
+
+ rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg);
+ if (!sdd->seenfrom) {
+ (void) fprintf(stderr,
+ "WARNING: could not send %s@%s:\n"
+ "incremental source (%s@%s) does not exist\n",
+ zhp->zfs_name, sdd->tosnap,
+ zhp->zfs_name, sdd->fromsnap);
+ sdd->err = B_TRUE;
+ } else if (!sdd->seento) {
+ (void) fprintf(stderr,
+ "WARNING: could not send %s@%s:\n"
+ "incremental source (%s@%s) "
+ "is not earlier than it\n",
+ zhp->zfs_name, sdd->tosnap,
+ zhp->zfs_name, sdd->fromsnap);
+ sdd->err = B_TRUE;
+ }
+ } else {
+ zfs_handle_t *snapzhp;
+ char snapname[ZFS_MAXNAMELEN];
+
+ (void) snprintf(snapname, sizeof (snapname), "%s@%s",
+ zfs_get_name(zhp), sdd->tosnap);
+ snapzhp = zfs_open(zhp->zfs_hdl, snapname, ZFS_TYPE_SNAPSHOT);
+ if (snapzhp == NULL) {
+ rv = -1;
+ } else {
+ rv = dump_ioctl(snapzhp,
+ missingfrom ? NULL : sdd->fromsnap,
+ sdd->fromorigin || missingfrom,
+ sdd->outfd);
+ sdd->seento = B_TRUE;
+ zfs_close(snapzhp);
+ }
+ }
+
+ return (rv);
+}
+
+static int
+dump_filesystems(zfs_handle_t *rzhp, void *arg)
+{
+ send_dump_data_t *sdd = arg;
+ nvpair_t *fspair;
+ boolean_t needagain, progress;
+
+ if (!sdd->replicate)
+ return (dump_filesystem(rzhp, sdd));
+
+again:
+ needagain = progress = B_FALSE;
+ for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
+ fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
+ nvlist_t *fslist;
+ char *fsname;
+ zfs_handle_t *zhp;
+ int err;
+ uint64_t origin_guid = 0;
+ nvlist_t *origin_nv;
+
+ VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0);
+ if (nvlist_lookup_boolean(fslist, "sent") == 0)
+ continue;
+
+ VERIFY(nvlist_lookup_string(fslist, "name", &fsname) == 0);
+ (void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
+
+ origin_nv = fsavl_find(sdd->fsavl, origin_guid, NULL);
+ if (origin_nv &&
+ nvlist_lookup_boolean(origin_nv, "sent") == ENOENT) {
+ /*
+ * origin has not been sent yet;
+ * skip this clone.
+ */
+ needagain = B_TRUE;
+ continue;
+ }
+
+ zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
+ if (zhp == NULL)
+ return (-1);
+ err = dump_filesystem(zhp, sdd);
+ VERIFY(nvlist_add_boolean(fslist, "sent") == 0);
+ progress = B_TRUE;
+ zfs_close(zhp);
+ if (err)
+ return (err);
+ }
+ if (needagain) {
+ assert(progress);
+ goto again;
+ }
+ return (0);
+}
+
+/*
+ * Dumps a backup of tosnap, incremental from fromsnap if it isn't NULL.
+ * If 'doall', dump all intermediate snaps.
+ * If 'replicate', dump a special header and recurse over descendant datasets.
+ */
+int
+zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
+ boolean_t replicate, boolean_t doall, boolean_t fromorigin,
+ boolean_t verbose, int outfd)
+{
+ char errbuf[1024];
+ send_dump_data_t sdd = { 0 };
+ int err;
+ nvlist_t *fss = NULL;
+ avl_tree_t *fsavl = NULL;
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot send '%s'"), zhp->zfs_name);
+
+ if (fromsnap && fromsnap[0] == '\0') {
+ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+ "zero-length incremental source"));
+ return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
+ }
+
+ if (replicate || doall) {
+ dmu_replay_record_t drr = { 0 };
+ char *packbuf = NULL;
+ size_t buflen = 0;
+ zio_cksum_t zc = { 0 };
+
+ assert(fromsnap || doall);
+
+ if (replicate) {
+ nvlist_t *hdrnv;
+
+ VERIFY(0 == nvlist_alloc(&hdrnv, NV_UNIQUE_NAME, 0));
+ if (fromsnap) {
+ VERIFY(0 == nvlist_add_string(hdrnv,
+ "fromsnap", fromsnap));
+ }
+ VERIFY(0 == nvlist_add_string(hdrnv, "tosnap", tosnap));
+
+ err = gather_nvlist(zhp->zfs_hdl, zhp->zfs_name,
+ fromsnap, tosnap, &fss, &fsavl);
+ if (err)
+ return (err);
+ VERIFY(0 == nvlist_add_nvlist(hdrnv, "fss", fss));
+ err = nvlist_pack(hdrnv, &packbuf, &buflen,
+ NV_ENCODE_XDR, 0);
+ nvlist_free(hdrnv);
+ if (err) {
+ fsavl_destroy(fsavl);
+ nvlist_free(fss);
+ return (zfs_standard_error(zhp->zfs_hdl,
+ err, errbuf));
+ }
+ }
+
+ /* write first begin record */
+ drr.drr_type = DRR_BEGIN;
+ drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
+ drr.drr_u.drr_begin.drr_version = DMU_BACKUP_HEADER_VERSION;
+ (void) snprintf(drr.drr_u.drr_begin.drr_toname,
+ sizeof (drr.drr_u.drr_begin.drr_toname),
+ "%s@%s", zhp->zfs_name, tosnap);
+ drr.drr_payloadlen = buflen;
+ fletcher_4_incremental_native(&drr, sizeof (drr), &zc);
+ err = write(outfd, &drr, sizeof (drr));
+
+ /* write header nvlist */
+ if (err != -1) {
+ fletcher_4_incremental_native(packbuf, buflen, &zc);
+ err = write(outfd, packbuf, buflen);
+ }
+ free(packbuf);
+ if (err == -1) {
+ fsavl_destroy(fsavl);
+ nvlist_free(fss);
+ return (zfs_standard_error(zhp->zfs_hdl,
+ errno, errbuf));
+ }
+
+ /* write end record */
+ if (err != -1) {
+ bzero(&drr, sizeof (drr));
+ drr.drr_type = DRR_END;
+ drr.drr_u.drr_end.drr_checksum = zc;
+ err = write(outfd, &drr, sizeof (drr));
+ if (err == -1) {
+ fsavl_destroy(fsavl);
+ nvlist_free(fss);
+ return (zfs_standard_error(zhp->zfs_hdl,
+ errno, errbuf));
+ }
+ }
+ }
+
+ /* dump each stream */
+ sdd.fromsnap = fromsnap;
+ sdd.tosnap = tosnap;
+ sdd.outfd = outfd;
+ sdd.replicate = replicate;
+ sdd.doall = doall;
+ sdd.fromorigin = fromorigin;
+ sdd.fss = fss;
+ sdd.fsavl = fsavl;
+ sdd.verbose = verbose;
+ err = dump_filesystems(zhp, &sdd);
+ fsavl_destroy(fsavl);
+ nvlist_free(fss);
+
+ if (replicate || doall) {
+ /*
+ * Write the final end record.  NB: we want to do this even if
+ * there was some error, because the stream might not have
+ * failed completely.
+ */
+ dmu_replay_record_t drr = { 0 };
+ drr.drr_type = DRR_END;
+ if (write(outfd, &drr, sizeof (drr)) == -1) {
+ return (zfs_standard_error(zhp->zfs_hdl,
+ errno, errbuf));
+ }
+ }
+
+ return (err || sdd.err);
+}
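+
+/*
+ * Illustrative sketch (not part of the original change): a replicated send
+ * of everything up to 'tosnap', written to stdout; roughly what a recursive
+ * "zfs send" of the dataset would request of libzfs.
+ */
+static int
+example_send_replicate(zfs_handle_t *zhp, const char *tosnap)
+{
+    /* replicate and doall; no incremental source, origin, or verbosity */
+    return (zfs_send(zhp, NULL, tosnap, B_TRUE, B_TRUE, B_FALSE,
+        B_FALSE, STDOUT_FILENO));
+}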
+
+/*
+ * Routines specific to "zfs recv"
+ */
+
+static int
+recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
+ boolean_t byteswap, zio_cksum_t *zc)
+{
+ char *cp = buf;
+ int rv;
+ int len = ilen;
+
+ do {
+ rv = read(fd, cp, len);
+ cp += rv;
+ len -= rv;
+ } while (rv > 0);
+
+ if (rv < 0 || len != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "failed to read from stream"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
+ "cannot receive")));
+ }
+
+ if (zc) {
+ if (byteswap)
+ fletcher_4_incremental_byteswap(buf, ilen, zc);
+ else
+ fletcher_4_incremental_native(buf, ilen, zc);
+ }
+ return (0);
+}
+
+static int
+recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
+ boolean_t byteswap, zio_cksum_t *zc)
+{
+ char *buf;
+ int err;
+
+ buf = zfs_alloc(hdl, len);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ err = recv_read(hdl, fd, buf, len, byteswap, zc);
+ if (err != 0) {
+ free(buf);
+ return (err);
+ }
+
+ err = nvlist_unpack(buf, len, nvp, 0);
+ free(buf);
+ if (err != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "stream (malformed nvlist)"));
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static int
+recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
+ int baselen, char *newname, recvflags_t flags)
+{
+ static int seq;
+ zfs_cmd_t zc = { 0 };
+ int err;
+ prop_changelist_t *clp;
+ zfs_handle_t *zhp;
+
+ zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
+ if (zhp == NULL)
+ return (-1);
+ clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
+ flags.force ? MS_FORCE : 0);
+ zfs_close(zhp);
+ if (clp == NULL)
+ return (-1);
+ err = changelist_prefix(clp);
+ if (err)
+ return (err);
+
+ if (tryname) {
+ (void) strcpy(newname, tryname);
+
+ zc.zc_objset_type = DMU_OST_ZFS;
+ (void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_value, tryname, sizeof (zc.zc_value));
+
+ if (flags.verbose) {
+ (void) printf("attempting rename %s to %s\n",
+ zc.zc_name, zc.zc_value);
+ }
+ err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc);
+ if (err == 0)
+ changelist_rename(clp, name, tryname);
+ } else {
+ err = ENOENT;
+ }
+
+ if (err != 0 && strncmp(name+baselen, "recv-", 5) != 0) {
+ seq++;
+
+ (void) strncpy(newname, name, baselen);
+ (void) snprintf(newname+baselen, ZFS_MAXNAMELEN-baselen,
+ "recv-%u-%u", getpid(), seq);
+ (void) strlcpy(zc.zc_value, newname, sizeof (zc.zc_value));
+
+ if (flags.verbose) {
+ (void) printf("failed - trying rename %s to %s\n",
+ zc.zc_name, zc.zc_value);
+ }
+ err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc);
+ if (err == 0)
+ changelist_rename(clp, name, newname);
+ if (err && flags.verbose) {
+ (void) printf("failed (%u) - "
+ "will try again on next pass\n", errno);
+ }
+ err = EAGAIN;
+ } else if (flags.verbose) {
+ if (err == 0)
+ (void) printf("success\n");
+ else
+ (void) printf("failed (%u)\n", errno);
+ }
+
+ (void) changelist_postfix(clp);
+ changelist_free(clp);
+
+ return (err);
+}
+
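+/*
+ * Destroy dataset 'name'; if the destroy fails (e.g. because it has
+ * dependents), fall back to renaming it out of the way via recv_rename().
+ */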
+static int
+recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
+ char *newname, recvflags_t flags)
+{
+ zfs_cmd_t zc = { 0 };
+ int err = 0;
+ prop_changelist_t *clp;
+ zfs_handle_t *zhp;
+
+ zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
+ if (zhp == NULL)
+ return (-1);
+ clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
+ flags.force ? MS_FORCE : 0);
+ zfs_close(zhp);
+ if (clp == NULL)
+ return (-1);
+ err = changelist_prefix(clp);
+ if (err)
+ return (err);
+
+ zc.zc_objset_type = DMU_OST_ZFS;
+ (void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
+
+ if (flags.verbose)
+ (void) printf("attempting destroy %s\n", zc.zc_name);
+ err = ioctl(hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc);
+
+ if (err == 0) {
+ if (flags.verbose)
+ (void) printf("success\n");
+ changelist_remove(clp, zc.zc_name);
+ }
+
+ (void) changelist_postfix(clp);
+ changelist_free(clp);
+
+ if (err != 0)
+ err = recv_rename(hdl, name, NULL, baselen, newname, flags);
+
+ return (err);
+}
+
+typedef struct guid_to_name_data {
+ uint64_t guid;
+ char *name;
+} guid_to_name_data_t;
+
+static int
+guid_to_name_cb(zfs_handle_t *zhp, void *arg)
+{
+ guid_to_name_data_t *gtnd = arg;
+ int err;
+
+ if (zhp->zfs_dmustats.dds_guid == gtnd->guid) {
+ (void) strcpy(gtnd->name, zhp->zfs_name);
+ return (EEXIST);
+ }
+ err = zfs_iter_children(zhp, guid_to_name_cb, gtnd);
+ zfs_close(zhp);
+ return (err);
+}
+
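+/*
+ * Find the name of the local dataset whose guid matches 'guid', searching
+ * beneath 'parent' and, failing that, beneath its pool.  On success the
+ * name is copied into 'name' and 0 is returned; otherwise ENOENT.
+ */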
+static int
+guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
+ char *name)
+{
+ /* exhaustively search all local snapshots */
+ guid_to_name_data_t gtnd;
+ int err = 0;
+ zfs_handle_t *zhp;
+ char *cp;
+
+ gtnd.guid = guid;
+ gtnd.name = name;
+
+ if (strchr(parent, '@') == NULL) {
+ zhp = make_dataset_handle(hdl, parent);
+ if (zhp != NULL) {
+ err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd);
+ zfs_close(zhp);
+ if (err == EEXIST)
+ return (0);
+ }
+ }
+
+ cp = strchr(parent, '/');
+ if (cp)
+ *cp = '\0';
+ zhp = make_dataset_handle(hdl, parent);
+ if (cp)
+ *cp = '/';
+
+ if (zhp) {
+ err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd);
+ zfs_close(zhp);
+ }
+
+ return (err == EEXIST ? 0 : ENOENT);
+
+}
+
+/*
+ * Return true if dataset guid1 is created before guid2.
+ */
+static int
+created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
+ uint64_t guid1, uint64_t guid2)
+{
+ nvlist_t *nvfs;
+ char *fsname, *snapname;
+ char buf[ZFS_MAXNAMELEN];
+ int rv;
+ zfs_node_t zn1, zn2;
+
+ if (guid2 == 0)
+ return (0);
+ if (guid1 == 0)
+ return (1);
+
+ nvfs = fsavl_find(avl, guid1, &snapname);
+ VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname));
+ (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
+ zn1.zn_handle = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
+ if (zn1.zn_handle == NULL)
+ return (-1);
+
+ nvfs = fsavl_find(avl, guid2, &snapname);
+ VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname));
+ (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
+ zn2.zn_handle = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
+ if (zn2.zn_handle == NULL) {
+ zfs_close(zn1.zn_handle);
+ return (-1);
+ }
+
+ rv = (zfs_snapshot_compare(&zn1, &zn2) == -1);
+
+ zfs_close(zn1.zn_handle);
+ zfs_close(zn2.zn_handle);
+
+ return (rv);
+}
+
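+/*
+ * Bring the local datasets under 'tofs' in line with the replication
+ * stream described by 'stream_nv'/'stream_avl': destroy snapshots and
+ * filesystems missing from the stream (only with -F), and rename/promote
+ * datasets to match, making additional passes when temporary names are
+ * needed.  Returns nonzero if some operations still could not be done.
+ */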
+static int
+recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
+ recvflags_t flags, nvlist_t *stream_nv, avl_tree_t *stream_avl)
+{
+ nvlist_t *local_nv;
+ avl_tree_t *local_avl;
+ nvpair_t *fselem, *nextfselem;
+ char *tosnap, *fromsnap;
+ char newname[ZFS_MAXNAMELEN];
+ int error;
+ boolean_t needagain, progress;
+
+ VERIFY(0 == nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap));
+ VERIFY(0 == nvlist_lookup_string(stream_nv, "tosnap", &tosnap));
+
+ if (flags.dryrun)
+ return (0);
+
+again:
+ needagain = progress = B_FALSE;
+
+ if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
+ &local_nv, &local_avl)) != 0)
+ return (error);
+
+ /*
+ * Process deletes and renames
+ */
+ for (fselem = nvlist_next_nvpair(local_nv, NULL);
+ fselem; fselem = nextfselem) {
+ nvlist_t *nvfs, *snaps;
+ nvlist_t *stream_nvfs = NULL;
+ nvpair_t *snapelem, *nextsnapelem;
+ uint64_t fromguid = 0;
+ uint64_t originguid = 0;
+ uint64_t stream_originguid = 0;
+ uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
+ char *fsname, *stream_fsname;
+
+ nextfselem = nvlist_next_nvpair(local_nv, fselem);
+
+ VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs));
+ VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps));
+ VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname));
+ VERIFY(0 == nvlist_lookup_uint64(nvfs, "parentfromsnap",
+ &parent_fromsnap_guid));
+ (void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
+
+ /*
+ * First find the stream's fs, so we can check for
+ * a different origin (due to "zfs promote")
+ */
+ for (snapelem = nvlist_next_nvpair(snaps, NULL);
+ snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
+ uint64_t thisguid;
+
+ VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid));
+ stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
+
+ if (stream_nvfs != NULL)
+ break;
+ }
+
+ /* check for promote */
+ (void) nvlist_lookup_uint64(stream_nvfs, "origin",
+ &stream_originguid);
+ if (stream_nvfs && originguid != stream_originguid) {
+ switch (created_before(hdl, local_avl,
+ stream_originguid, originguid)) {
+ case 1: {
+ /* promote it! */
+ zfs_cmd_t zc = { 0 };
+ nvlist_t *origin_nvfs;
+ char *origin_fsname;
+
+ if (flags.verbose)
+ (void) printf("promoting %s\n", fsname);
+
+ origin_nvfs = fsavl_find(local_avl, originguid,
+ NULL);
+ VERIFY(0 == nvlist_lookup_string(origin_nvfs,
+ "name", &origin_fsname));
+ (void) strlcpy(zc.zc_value, origin_fsname,
+ sizeof (zc.zc_value));
+ (void) strlcpy(zc.zc_name, fsname,
+ sizeof (zc.zc_name));
+ error = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
+ if (error == 0)
+ progress = B_TRUE;
+ break;
+ }
+ default:
+ break;
+ case -1:
+ fsavl_destroy(local_avl);
+ nvlist_free(local_nv);
+ return (-1);
+ }
+ /*
+ * We had/have the wrong origin, therefore our
+ * list of snapshots is wrong. Need to handle
+ * them on the next pass.
+ */
+ needagain = B_TRUE;
+ continue;
+ }
+
+ for (snapelem = nvlist_next_nvpair(snaps, NULL);
+ snapelem; snapelem = nextsnapelem) {
+ uint64_t thisguid;
+ char *stream_snapname;
+ nvlist_t *found, *props;
+
+ nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
+
+ VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid));
+ found = fsavl_find(stream_avl, thisguid,
+ &stream_snapname);
+
+ /* check for delete */
+ if (found == NULL) {
+ char name[ZFS_MAXNAMELEN];
+
+ if (!flags.force)
+ continue;
+
+ (void) snprintf(name, sizeof (name), "%s@%s",
+ fsname, nvpair_name(snapelem));
+
+ error = recv_destroy(hdl, name,
+ strlen(fsname)+1, newname, flags);
+ if (error)
+ needagain = B_TRUE;
+ else
+ progress = B_TRUE;
+ continue;
+ }
+
+ stream_nvfs = found;
+
+ if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
+ &props) && 0 == nvlist_lookup_nvlist(props,
+ stream_snapname, &props)) {
+ zfs_cmd_t zc = { 0 };
+
+ zc.zc_cookie = B_TRUE; /* clear current props */
+ (void) snprintf(zc.zc_name, sizeof (zc.zc_name),
+ "%s@%s", fsname, nvpair_name(snapelem));
+ if (zcmd_write_src_nvlist(hdl, &zc,
+ props) == 0) {
+ (void) zfs_ioctl(hdl,
+ ZFS_IOC_SET_PROP, &zc);
+ zcmd_free_nvlists(&zc);
+ }
+ }
+
+ /* check for different snapname */
+ if (strcmp(nvpair_name(snapelem),
+ stream_snapname) != 0) {
+ char name[ZFS_MAXNAMELEN];
+ char tryname[ZFS_MAXNAMELEN];
+
+ (void) snprintf(name, sizeof (name), "%s@%s",
+ fsname, nvpair_name(snapelem));
+ (void) snprintf(tryname, sizeof (tryname), "%s@%s",
+ fsname, stream_snapname);
+
+ error = recv_rename(hdl, name, tryname,
+ strlen(fsname)+1, newname, flags);
+ if (error)
+ needagain = B_TRUE;
+ else
+ progress = B_TRUE;
+ }
+
+ if (strcmp(stream_snapname, fromsnap) == 0)
+ fromguid = thisguid;
+ }
+
+ /* check for delete */
+ if (stream_nvfs == NULL) {
+ if (!flags.force)
+ continue;
+
+ error = recv_destroy(hdl, fsname, strlen(tofs)+1,
+ newname, flags);
+ if (error)
+ needagain = B_TRUE;
+ else
+ progress = B_TRUE;
+ continue;
+ }
+
+ if (fromguid == 0 && flags.verbose) {
+ (void) printf("local fs %s does not have fromsnap "
+ "(%s in stream); must have been deleted locally; "
+ "ignoring\n", fsname, fromsnap);
+ continue;
+ }
+
+ VERIFY(0 == nvlist_lookup_string(stream_nvfs,
+ "name", &stream_fsname));
+ VERIFY(0 == nvlist_lookup_uint64(stream_nvfs,
+ "parentfromsnap", &stream_parent_fromsnap_guid));
+
+ /* check for rename */
+ if ((stream_parent_fromsnap_guid != 0 &&
+ stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
+ strcmp(strrchr(fsname, '/'),
+ strrchr(stream_fsname, '/')) != 0) {
+ nvlist_t *parent;
+ char tryname[ZFS_MAXNAMELEN];
+
+ parent = fsavl_find(local_avl,
+ stream_parent_fromsnap_guid, NULL);
+ /*
+ * NB: parent might not be found if we used the
+ * tosnap for stream_parent_fromsnap_guid,
+ * because the parent is a newly-created fs;
+ * we'll be able to rename it after we recv the
+ * new fs.
+ */
+ if (parent != NULL) {
+ char *pname;
+
+ VERIFY(0 == nvlist_lookup_string(parent, "name",
+ &pname));
+ (void) snprintf(tryname, sizeof (tryname),
+ "%s%s", pname, strrchr(stream_fsname, '/'));
+ } else {
+ tryname[0] = '\0';
+ if (flags.verbose) {
+ (void) printf("local fs %s new parent "
+ "not found\n", fsname);
+ }
+ }
+
+ error = recv_rename(hdl, fsname, tryname,
+ strlen(tofs)+1, newname, flags);
+ if (error)
+ needagain = B_TRUE;
+ else
+ progress = B_TRUE;
+ }
+ }
+
+ fsavl_destroy(local_avl);
+ nvlist_free(local_nv);
+
+ if (needagain && progress) {
+ /* do another pass to fix up temporary names */
+ if (flags.verbose)
+ (void) printf("another pass:\n");
+ goto again;
+ }
+
+ return (needagain);
+}
+
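+/*
+ * Receive a replication (send -R) package: read and checksum the header
+ * nvlist, reconcile local datasets with the stream via
+ * recv_incremental_replication(), then receive each contained substream
+ * until the terminating END record is reached.
+ */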
+static int
+zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
+ recvflags_t flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
+ char **top_zfs)
+{
+ nvlist_t *stream_nv = NULL;
+ avl_tree_t *stream_avl = NULL;
+ char *fromsnap = NULL;
+ char tofs[ZFS_MAXNAMELEN];
+ char errbuf[1024];
+ dmu_replay_record_t drre;
+ int error;
+ boolean_t anyerr = B_FALSE;
+ boolean_t softerr = B_FALSE;
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot receive"));
+
+ if (strchr(destname, '@')) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "can not specify snapshot name for multi-snapshot stream"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+
+ assert(drr->drr_type == DRR_BEGIN);
+ assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
+ assert(drr->drr_u.drr_begin.drr_version == DMU_BACKUP_HEADER_VERSION);
+
+ /*
+ * Read in the nvlist from the stream.
+ */
+ if (drr->drr_payloadlen != 0) {
+ if (!flags.isprefix) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "must use -d to receive replication "
+ "(send -R) stream"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+
+ error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
+ &stream_nv, flags.byteswap, zc);
+ if (error) {
+ error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+ goto out;
+ }
+ }
+
+ /*
+ * Read in the end record and verify checksum.
+ */
+ if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
+ flags.byteswap, NULL)))
+ goto out;
+ if (flags.byteswap) {
+ drre.drr_type = BSWAP_32(drre.drr_type);
+ drre.drr_u.drr_end.drr_checksum.zc_word[0] =
+ BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
+ drre.drr_u.drr_end.drr_checksum.zc_word[1] =
+ BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
+ drre.drr_u.drr_end.drr_checksum.zc_word[2] =
+ BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
+ drre.drr_u.drr_end.drr_checksum.zc_word[3] =
+ BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
+ }
+ if (drre.drr_type != DRR_END) {
+ error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+ goto out;
+ }
+ if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "incorrect header checksum"));
+ error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+ goto out;
+ }
+
+ (void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
+
+ if (drr->drr_payloadlen != 0) {
+ nvlist_t *stream_fss;
+
+ VERIFY(0 == nvlist_lookup_nvlist(stream_nv, "fss",
+ &stream_fss));
+ if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "couldn't allocate avl tree"));
+ error = zfs_error(hdl, EZFS_NOMEM, errbuf);
+ goto out;
+ }
+
+ if (fromsnap != NULL) {
+ (void) strlcpy(tofs, destname, ZFS_MAXNAMELEN);
+ if (flags.isprefix) {
+ int i = strcspn(drr->drr_u.drr_begin.drr_toname,
+ "/@");
+ /* zfs_receive_one() will create_parents() */
+ (void) strlcat(tofs,
+ &drr->drr_u.drr_begin.drr_toname[i],
+ ZFS_MAXNAMELEN);
+ *strchr(tofs, '@') = '\0';
+ }
+ softerr = recv_incremental_replication(hdl, tofs,
+ flags, stream_nv, stream_avl);
+ }
+ }
+
+ /* Finally, receive each contained stream */
+ do {
+ /*
+ * we should figure out if it has a recoverable
+ * error, in which case do a recv_skip() and drive on.
+ * Note, if we fail due to already having this guid,
+ * zfs_receive_one() will take care of it (ie,
+ * recv_skip() and return 0).
+ */
+ error = zfs_receive_impl(hdl, destname, flags, fd,
+ stream_avl, top_zfs);
+ if (error == ENODATA) {
+ error = 0;
+ break;
+ }
+ anyerr |= error;
+ } while (error == 0);
+
+ if (drr->drr_payloadlen != 0 && fromsnap != NULL) {
+ /*
+ * Now that we have the fs's they sent us, try the
+ * renames again.
+ */
+ softerr = recv_incremental_replication(hdl, tofs, flags,
+ stream_nv, stream_avl);
+ }
+
+out:
+ fsavl_destroy(stream_avl);
+ if (stream_nv)
+ nvlist_free(stream_nv);
+ if (softerr)
+ error = -2;
+ if (anyerr)
+ error = -1;
+ return (error);
+}
+
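+/*
+ * Consume and discard the rest of a single substream, up to and including
+ * its END record.  Used for dry runs and for snapshots we already have.
+ */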
+static int
+recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
+{
+ dmu_replay_record_t *drr;
+ void *buf = malloc(1<<20);
+
+ /* XXX would be great to use lseek if possible... */
+ drr = buf;
+
+ while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
+ byteswap, NULL) == 0) {
+ if (byteswap)
+ drr->drr_type = BSWAP_32(drr->drr_type);
+
+ switch (drr->drr_type) {
+ case DRR_BEGIN:
+ /* NB: not to be used on v2 stream packages */
+ assert(drr->drr_payloadlen == 0);
+ break;
+
+ case DRR_END:
+ free(buf);
+ return (0);
+
+ case DRR_OBJECT:
+ if (byteswap) {
+ drr->drr_u.drr_object.drr_bonuslen =
+ BSWAP_32(drr->drr_u.drr_object.
+ drr_bonuslen);
+ }
+ (void) recv_read(hdl, fd, buf,
+ P2ROUNDUP(drr->drr_u.drr_object.drr_bonuslen, 8),
+ B_FALSE, NULL);
+ break;
+
+ case DRR_WRITE:
+ if (byteswap) {
+ drr->drr_u.drr_write.drr_length =
+ BSWAP_64(drr->drr_u.drr_write.drr_length);
+ }
+ (void) recv_read(hdl, fd, buf,
+ drr->drr_u.drr_write.drr_length, B_FALSE, NULL);
+ break;
+
+ case DRR_FREEOBJECTS:
+ case DRR_FREE:
+ break;
+
+ default:
+ assert(!"invalid record type");
+ }
+ }
+
+ free(buf);
+ return (-1);
+}
+
+/*
+ * Restores a backup of tosnap from the file descriptor specified by infd.
+ */
+static int
+zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
+ recvflags_t flags, dmu_replay_record_t *drr,
+ dmu_replay_record_t *drr_noswap, avl_tree_t *stream_avl,
+ char **top_zfs)
+{
+ zfs_cmd_t zc = { 0 };
+ time_t begin_time;
+ int ioctl_err, ioctl_errno, err, choplen;
+ char *cp;
+ struct drr_begin *drrb = &drr->drr_u.drr_begin;
+ char errbuf[1024];
+ char chopprefix[ZFS_MAXNAMELEN];
+ boolean_t newfs = B_FALSE;
+ boolean_t stream_wantsnewfs;
+ uint64_t parent_snapguid = 0;
+ prop_changelist_t *clp = NULL;
+ nvlist_t *snapprops_nvlist = NULL;
+
+ begin_time = time(NULL);
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot receive"));
+
+ if (stream_avl != NULL) {
+ char *snapname;
+ nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
+ &snapname);
+ nvlist_t *props;
+ int ret;
+
+ (void) nvlist_lookup_uint64(fs, "parentfromsnap",
+ &parent_snapguid);
+ err = nvlist_lookup_nvlist(fs, "props", &props);
+ if (err)
+ VERIFY(0 == nvlist_alloc(&props, NV_UNIQUE_NAME, 0));
+
+ if (flags.canmountoff) {
+ VERIFY(0 == nvlist_add_uint64(props,
+ zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0));
+ }
+ ret = zcmd_write_src_nvlist(hdl, &zc, props);
+ if (err)
+ nvlist_free(props);
+
+ if (0 == nvlist_lookup_nvlist(fs, "snapprops", &props)) {
+ VERIFY(0 == nvlist_lookup_nvlist(props,
+ snapname, &snapprops_nvlist));
+ }
+
+ if (ret != 0)
+ return (-1);
+ }
+
+ /*
+ * Determine how much of the snapshot name stored in the stream
+ * we are going to tack on to the name they specified on the
+ * command line, and how much we are going to chop off.
+ *
+ * If they specified a snapshot, chop the entire name stored in
+ * the stream.
+ */
+ (void) strcpy(chopprefix, drrb->drr_toname);
+ if (flags.isprefix) {
+ /*
+ * They specified a fs with -d, we want to tack on
+ * everything but the pool name stored in the stream
+ */
+ if (strchr(tosnap, '@')) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "argument - snapshot not allowed with -d"));
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+ }
+ cp = strchr(chopprefix, '/');
+ if (cp == NULL)
+ cp = strchr(chopprefix, '@');
+ *cp = '\0';
+ } else if (strchr(tosnap, '@') == NULL) {
+ /*
+ * If they specified a filesystem without -d, we want to
+ * tack on everything after the fs specified in the
+ * first name from the stream.
+ */
+ cp = strchr(chopprefix, '@');
+ *cp = '\0';
+ }
+ choplen = strlen(chopprefix);
+
+ /*
+ * Determine name of destination snapshot, store in zc_value.
+ */
+ (void) strcpy(zc.zc_value, tosnap);
+ (void) strncat(zc.zc_value, drrb->drr_toname+choplen,
+ sizeof (zc.zc_value));
+ if (!zfs_name_valid(zc.zc_value, ZFS_TYPE_SNAPSHOT)) {
+ zcmd_free_nvlists(&zc);
+ return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+ }
+
+ /*
+ * Determine the name of the origin snapshot, store in zc_string.
+ */
+ if (drrb->drr_flags & DRR_FLAG_CLONE) {
+ if (guid_to_name(hdl, tosnap,
+ drrb->drr_fromguid, zc.zc_string) != 0) {
+ zcmd_free_nvlists(&zc);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "local origin for clone %s does not exist"),
+ zc.zc_value);
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+ }
+ if (flags.verbose)
+ (void) printf("found clone origin %s\n", zc.zc_string);
+ }
+
+ stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
+ (drrb->drr_flags & DRR_FLAG_CLONE));
+
+ if (stream_wantsnewfs) {
+ /*
+ * if the parent fs does not exist, look for it based on
+ * the parent snap GUID
+ */
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot receive new filesystem stream"));
+
+ (void) strcpy(zc.zc_name, zc.zc_value);
+ cp = strrchr(zc.zc_name, '/');
+ if (cp)
+ *cp = '\0';
+ if (cp &&
+ !zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
+ char suffix[ZFS_MAXNAMELEN];
+ (void) strcpy(suffix, strrchr(zc.zc_value, '/'));
+ if (guid_to_name(hdl, tosnap, parent_snapguid,
+ zc.zc_value) == 0) {
+ *strchr(zc.zc_value, '@') = '\0';
+ (void) strcat(zc.zc_value, suffix);
+ }
+ }
+ } else {
+ /*
+ * if the fs does not exist, look for it based on the
+ * fromsnap GUID
+ */
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot receive incremental stream"));
+
+ (void) strcpy(zc.zc_name, zc.zc_value);
+ *strchr(zc.zc_name, '@') = '\0';
+
+ if (!zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
+ char snap[ZFS_MAXNAMELEN];
+ (void) strcpy(snap, strchr(zc.zc_value, '@'));
+ if (guid_to_name(hdl, tosnap, drrb->drr_fromguid,
+ zc.zc_value) == 0) {
+ *strchr(zc.zc_value, '@') = '\0';
+ (void) strcat(zc.zc_value, snap);
+ }
+ }
+ }
+
+ (void) strcpy(zc.zc_name, zc.zc_value);
+ *strchr(zc.zc_name, '@') = '\0';
+
+ if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
+ zfs_handle_t *zhp;
+ /*
+ * Destination fs exists. Therefore this should either
+ * be an incremental, or the stream specifies a new fs
+ * (full stream or clone) and they want us to blow it
+ * away (and have therefore specified -F and removed any
+ * snapshots).
+ */
+
+ if (stream_wantsnewfs) {
+ if (!flags.force) {
+ zcmd_free_nvlists(&zc);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination '%s' exists\n"
+ "must specify -F to overwrite it"),
+ zc.zc_name);
+ return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+ }
+ if (ioctl(hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT,
+ &zc) == 0) {
+ zcmd_free_nvlists(&zc);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination has snapshots (eg. %s)\n"
+ "must destroy them to overwrite it"),
+ zc.zc_name);
+ return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+ }
+ }
+
+ if ((zhp = zfs_open(hdl, zc.zc_name,
+ ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+
+ if (stream_wantsnewfs &&
+ zhp->zfs_dmustats.dds_origin[0]) {
+ zcmd_free_nvlists(&zc);
+ zfs_close(zhp);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination '%s' is a clone\n"
+ "must destroy it to overwrite it"),
+ zc.zc_name);
+ return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+ }
+
+ if (!flags.dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
+ stream_wantsnewfs) {
+ /* We can't do online recv in this case */
+ clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, 0);
+ if (clp == NULL) {
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ if (changelist_prefix(clp) != 0) {
+ changelist_free(clp);
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ }
+ if (!flags.dryrun && zhp->zfs_type == ZFS_TYPE_VOLUME &&
+ zvol_remove_link(hdl, zhp->zfs_name) != 0) {
+ zfs_close(zhp);
+ zcmd_free_nvlists(&zc);
+ return (-1);
+ }
+ zfs_close(zhp);
+ } else {
+ /*
+ * Destination filesystem does not exist. Therefore we better
+ * be creating a new filesystem (either from a full backup, or
+ * a clone). It would therefore be invalid if the user
+ * specified only the pool name (i.e. if the destination name
+ * contained no slash character).
+ */
+ if (!stream_wantsnewfs ||
+ (cp = strrchr(zc.zc_name, '/')) == NULL) {
+ zcmd_free_nvlists(&zc);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination '%s' does not exist"), zc.zc_name);
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+ }
+
+ /*
+ * Trim off the final dataset component so we perform the
+ * recvbackup ioctl to the filesystem's parent.
+ */
+ *cp = '\0';
+
+ if (flags.isprefix && !flags.dryrun &&
+ create_parents(hdl, zc.zc_value, strlen(tosnap)) != 0) {
+ zcmd_free_nvlists(&zc);
+ return (zfs_error(hdl, EZFS_BADRESTORE, errbuf));
+ }
+
+ newfs = B_TRUE;
+ }
+
+ zc.zc_begin_record = drr_noswap->drr_u.drr_begin;
+ zc.zc_cookie = infd;
+ zc.zc_guid = flags.force;
+ if (flags.verbose) {
+ (void) printf("%s %s stream of %s into %s\n",
+ flags.dryrun ? "would receive" : "receiving",
+ drrb->drr_fromguid ? "incremental" : "full",
+ drrb->drr_toname, zc.zc_value);
+ (void) fflush(stdout);
+ }
+
+ if (flags.dryrun) {
+ zcmd_free_nvlists(&zc);
+ return (recv_skip(hdl, infd, flags.byteswap));
+ }
+
+ err = ioctl_err = zfs_ioctl(hdl, ZFS_IOC_RECV, &zc);
+ ioctl_errno = errno;
+ zcmd_free_nvlists(&zc);
+
+ if (err == 0 && snapprops_nvlist) {
+ zfs_cmd_t zc2 = { 0 };
+
+ (void) strcpy(zc2.zc_name, zc.zc_value);
+ if (zcmd_write_src_nvlist(hdl, &zc2, snapprops_nvlist) == 0) {
+ (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc2);
+ zcmd_free_nvlists(&zc2);
+ }
+ }
+
+ if (err && (ioctl_errno == ENOENT || ioctl_errno == ENODEV)) {
+ /*
+ * It may be that this snapshot already exists,
+ * in which case we want to consume & ignore it
+ * rather than failing.
+ */
+ avl_tree_t *local_avl;
+ nvlist_t *local_nv, *fs;
+ char *cp = strchr(zc.zc_value, '@');
+
+ /*
+ * XXX Do this faster by just iterating over snaps in
+ * this fs. Also if zc_value does not exist, we will
+ * get a strange "does not exist" error message.
+ */
+ *cp = '\0';
+ if (gather_nvlist(hdl, zc.zc_value, NULL, NULL,
+ &local_nv, &local_avl) == 0) {
+ *cp = '@';
+ fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
+ fsavl_destroy(local_avl);
+ nvlist_free(local_nv);
+
+ if (fs != NULL) {
+ if (flags.verbose) {
+ (void) printf("snap %s already exists; "
+ "ignoring\n", zc.zc_value);
+ }
+ ioctl_err = recv_skip(hdl, infd,
+ flags.byteswap);
+ }
+ }
+ *cp = '@';
+ }
+
+ if (ioctl_err != 0) {
+ switch (ioctl_errno) {
+ case ENODEV:
+ cp = strchr(zc.zc_value, '@');
+ *cp = '\0';
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "most recent snapshot of %s does not\n"
+ "match incremental source"), zc.zc_value);
+ (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
+ *cp = '@';
+ break;
+ case ETXTBSY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination %s has been modified\n"
+ "since most recent snapshot"), zc.zc_name);
+ (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
+ break;
+ case EEXIST:
+ cp = strchr(zc.zc_value, '@');
+ if (newfs) {
+ /* it's the containing fs that exists */
+ *cp = '\0';
+ }
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "destination already exists"));
+ (void) zfs_error_fmt(hdl, EZFS_EXISTS,
+ dgettext(TEXT_DOMAIN, "cannot restore to %s"),
+ zc.zc_value);
+ *cp = '@';
+ break;
+ case EINVAL:
+ (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+ break;
+ case ECKSUM:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid stream (checksum mismatch)"));
+ (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+ break;
+ default:
+ (void) zfs_standard_error(hdl, ioctl_errno, errbuf);
+ }
+ }
+
+ /*
+ * Mount or recreate the /dev links for the target filesystem
+ * (if created, or if we tore them down to do an incremental
+ * restore), and the /dev links for the new snapshot (if
+ * created). Also mount any children of the target filesystem
+ * if we did an incremental receive.
+ */
+ cp = strchr(zc.zc_value, '@');
+ if (cp && (ioctl_err == 0 || !newfs)) {
+ zfs_handle_t *h;
+
+ *cp = '\0';
+ h = zfs_open(hdl, zc.zc_value,
+ ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+ if (h != NULL) {
+ if (h->zfs_type == ZFS_TYPE_VOLUME) {
+ *cp = '@';
+ err = zvol_create_link(hdl, h->zfs_name);
+ if (err == 0 && ioctl_err == 0)
+ err = zvol_create_link(hdl,
+ zc.zc_value);
+ } else if (newfs) {
+ /*
+ * Track the first/top of hierarchy fs,
+ * for mounting and sharing later.
+ */
+ if (top_zfs && *top_zfs == NULL)
+ *top_zfs = zfs_strdup(hdl, zc.zc_value);
+ }
+ zfs_close(h);
+ }
+ *cp = '@';
+ }
+
+ if (clp) {
+ err |= changelist_postfix(clp);
+ changelist_free(clp);
+ }
+
+ if (err || ioctl_err)
+ return (-1);
+
+ if (flags.verbose) {
+ char buf1[64];
+ char buf2[64];
+ uint64_t bytes = zc.zc_cookie;
+ time_t delta = time(NULL) - begin_time;
+ if (delta == 0)
+ delta = 1;
+ zfs_nicenum(bytes, buf1, sizeof (buf1));
+ zfs_nicenum(bytes/delta, buf2, sizeof (buf2));
+
+ (void) printf("received %sB stream in %lu seconds (%sB/sec)\n",
+ buf1, delta, buf2);
+ }
+
+ return (0);
+}
+
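+/*
+ * Read and validate the BEGIN record (handling byteswapped streams), then
+ * dispatch to zfs_receive_one() for a plain stream or zfs_receive_package()
+ * for a replication package.
+ */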
+static int
+zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t flags,
+ int infd, avl_tree_t *stream_avl, char **top_zfs)
+{
+ int err;
+ dmu_replay_record_t drr, drr_noswap;
+ struct drr_begin *drrb = &drr.drr_u.drr_begin;
+ char errbuf[1024];
+ zio_cksum_t zcksum = { 0 };
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot receive"));
+
+ if (flags.isprefix &&
+ !zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
+ "(%s) does not exist"), tosnap);
+ return (zfs_error(hdl, EZFS_NOENT, errbuf));
+ }
+
+ /* read in the BEGIN record */
+ if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
+ &zcksum)))
+ return (err);
+
+ if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
+ /* It's the double end record at the end of a package */
+ return (ENODATA);
+ }
+
+ /* the kernel needs the non-byteswapped begin record */
+ drr_noswap = drr;
+
+ flags.byteswap = B_FALSE;
+ if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
+ /*
+ * We computed the checksum in the wrong byteorder in
+ * recv_read() above; do it again correctly.
+ */
+ bzero(&zcksum, sizeof (zio_cksum_t));
+ fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
+ flags.byteswap = B_TRUE;
+
+ drr.drr_type = BSWAP_32(drr.drr_type);
+ drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
+ drrb->drr_magic = BSWAP_64(drrb->drr_magic);
+ drrb->drr_version = BSWAP_64(drrb->drr_version);
+ drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
+ drrb->drr_type = BSWAP_32(drrb->drr_type);
+ drrb->drr_flags = BSWAP_32(drrb->drr_flags);
+ drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
+ drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
+ }
+
+ if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "stream (bad magic number)"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+
+ if (strchr(drrb->drr_toname, '@') == NULL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+ "stream (bad snapshot name)"));
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+
+ if (drrb->drr_version == DMU_BACKUP_STREAM_VERSION) {
+ return (zfs_receive_one(hdl, infd, tosnap, flags,
+ &drr, &drr_noswap, stream_avl, top_zfs));
+ } else if (drrb->drr_version == DMU_BACKUP_HEADER_VERSION) {
+ return (zfs_receive_package(hdl, infd, tosnap, flags,
+ &drr, &zcksum, top_zfs));
+ } else {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "stream is unsupported version %llu"),
+ drrb->drr_version);
+ return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+ }
+}
+
+/*
+ * Restores a backup of tosnap from the file descriptor specified by infd.
+ * Return 0 on total success, -2 if some things couldn't be
+ * destroyed/renamed/promoted, -1 if some things couldn't be received.
+ * (-1 will override -2).
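+ *
+ * Illustrative usage (an assumption, not part of this change): a consumer
+ * such as "zfs receive -d tank/backups" might do roughly
+ *
+ *   recvflags_t flags = { 0 };
+ *   flags.isprefix = B_TRUE;
+ *   err = zfs_receive(hdl, "tank/backups", flags, STDIN_FILENO, NULL);
+ *
+ * with the stream on stdin having been produced by zfs_send().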
+ */
+int
+zfs_receive(libzfs_handle_t *hdl, const char *tosnap, recvflags_t flags,
+ int infd, avl_tree_t *stream_avl)
+{
+ char *top_zfs = NULL;
+ int err;
+
+ err = zfs_receive_impl(hdl, tosnap, flags, infd, stream_avl, &top_zfs);
+
+ if (err == 0 && top_zfs) {
+ zfs_handle_t *zhp;
+ prop_changelist_t *clp;
+
+ zhp = zfs_open(hdl, top_zfs, ZFS_TYPE_FILESYSTEM);
+ if (zhp != NULL) {
+ clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
+ CL_GATHER_MOUNT_ALWAYS, 0);
+ zfs_close(zhp);
+ if (clp != NULL) {
+ /* mount and share received datasets */
+ err = changelist_postfix(clp);
+ changelist_free(clp);
+ }
+ }
+ if (zhp == NULL || clp == NULL || err)
+ err = -1;
+ }
+ if (top_zfs)
+ free(top_zfs);
+
+ return (err);
+}
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c
index 3eba97a..c7eb04e 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c
@@ -19,18 +19,16 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* This file contains the functions which analyze the status of a pool. This
* includes both the status of an active pool, as well as the status of exported
* pools. Returns one of the ZPOOL_STATUS_* defines describing the status of
* the pool. This status is independent (to a certain degree) from the state of
- * the pool. A pool's state descsribes only whether or not it is capable of
+ * the pool. A pool's state describes only whether or not it is capable of
* providing the necessary fault tolerance for data. The status describes the
* overall status of devices. A pool that is online can still have a device
* that is experiencing errors.
@@ -47,7 +45,7 @@
#include "libzfs_impl.h"
/*
- * Message ID table. This must be kep in sync with the ZPOOL_STATUS_* defines
+ * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
* in libzfs.h. Note that there are some status results which go past the end
* of this table, and hence have no associated message ID.
*/
@@ -62,26 +60,10 @@ static char *zfs_msgid_table[] = {
"ZFS-8000-8A",
"ZFS-8000-9P",
"ZFS-8000-A5",
- "ZFS-8000-EY"
-};
-
-/*
- * If the pool is active, a certain class of static errors is overridden by the
- * faults as analayzed by FMA. These faults have separate knowledge articles,
- * and the article referred to by 'zpool status' must match that indicated by
- * the syslog error message. We override missing data as well as corrupt pool.
- */
-static char *zfs_msgid_table_active[] = {
- "ZFS-8000-14",
- "ZFS-8000-D3", /* overridden */
- "ZFS-8000-D3", /* overridden */
- "ZFS-8000-4J",
- "ZFS-8000-5E",
- "ZFS-8000-6X",
- "ZFS-8000-CS", /* overridden */
- "ZFS-8000-8A",
- "ZFS-8000-9P",
- "ZFS-8000-CS", /* overridden */
+ "ZFS-8000-EY",
+ "ZFS-8000-HC",
+ "ZFS-8000-JQ",
+ "ZFS-8000-K4",
};
#define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
@@ -96,9 +78,16 @@ vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
/* ARGSUSED */
static int
+vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (state == VDEV_STATE_FAULTED);
+}
+
+/* ARGSUSED */
+static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
- return (errs != 0);
+ return (state == VDEV_STATE_DEGRADED || errs != 0);
}
/* ARGSUSED */
@@ -163,9 +152,9 @@ find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
* following:
*
* - Check for a complete and valid configuration
- * - Look for any missing devices in a non-replicated config
+ * - Look for any faulted or missing devices in a non-replicated config
* - Check for any data errors
- * - Check for any missing devices in a replicated config
+ * - Check for any faulted or missing devices in a replicated config
* - Look for any devices showing errors
* - Check for any resilvering devices
*
@@ -181,6 +170,7 @@ check_status(nvlist_t *config, boolean_t isimport)
uint64_t nerr;
uint64_t version;
uint64_t stateval;
+ uint64_t suspended;
uint64_t hostid = 0;
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
@@ -215,9 +205,31 @@ check_status(nvlist_t *config, boolean_t isimport)
return (ZPOOL_STATUS_BAD_GUID_SUM);
/*
- * Missing devices in non-replicated config.
+ * Check whether the pool has suspended due to failed I/O.
+ */
+ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
+ &suspended) == 0) {
+ if (suspended == ZIO_FAILURE_MODE_CONTINUE)
+ return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
+ return (ZPOOL_STATUS_IO_FAILURE_WAIT);
+ }
+
+ /*
+ * Could not read a log.
+ */
+ if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+ vs->vs_aux == VDEV_AUX_BAD_LOG) {
+ return (ZPOOL_STATUS_BAD_LOG);
+ }
+
+ /*
+ * Bad devices in non-replicated config.
*/
if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+ find_vdev_problem(nvroot, vdev_faulted))
+ return (ZPOOL_STATUS_FAULTED_DEV_NR);
+
+ if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
find_vdev_problem(nvroot, vdev_missing))
return (ZPOOL_STATUS_MISSING_DEV_NR);
@@ -244,6 +256,8 @@ check_status(nvlist_t *config, boolean_t isimport)
/*
* Missing devices in a replicated config.
*/
+ if (find_vdev_problem(nvroot, vdev_faulted))
+ return (ZPOOL_STATUS_FAULTED_DEV_R);
if (find_vdev_problem(nvroot, vdev_missing))
return (ZPOOL_STATUS_MISSING_DEV_R);
if (find_vdev_problem(nvroot, vdev_broken))
@@ -270,7 +284,7 @@ check_status(nvlist_t *config, boolean_t isimport)
/*
* Outdated, but usable, version
*/
- if (version < ZFS_VERSION)
+ if (version < SPA_VERSION)
return (ZPOOL_STATUS_VERSION_OLDER);
return (ZPOOL_STATUS_OK);
@@ -284,7 +298,7 @@ zpool_get_status(zpool_handle_t *zhp, char **msgid)
if (ret >= NMSGID)
*msgid = NULL;
else
- *msgid = zfs_msgid_table_active[ret];
+ *msgid = zfs_msgid_table[ret];
return (ret);
}
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c
index c706126..9d60d60 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Internal utility routines for the ZFS library.
*/
@@ -37,6 +35,8 @@
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
+#include <ctype.h>
+#include <math.h>
#include <sys/mnttab.h>
#include <sys/mntent.h>
#include <sys/types.h>
@@ -44,6 +44,7 @@
#include <libzfs.h>
#include "libzfs_impl.h"
+#include "zfs_prop.h"
int
libzfs_errno(libzfs_handle_t *hdl)
@@ -133,6 +134,14 @@ libzfs_error_description(libzfs_handle_t *hdl)
return (dgettext(TEXT_DOMAIN, "unshare(1M) failed"));
case EZFS_SHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "share(1M) failed"));
+ case EZFS_UNSHARESMBFAILED:
+ return (dgettext(TEXT_DOMAIN, "smb remove share failed"));
+ case EZFS_SHARESMBFAILED:
+ return (dgettext(TEXT_DOMAIN, "smb add share failed"));
+ case EZFS_ISCSISVCUNAVAIL:
+ return (dgettext(TEXT_DOMAIN,
+ "iscsitgt service need to be enabled by "
+ "a privileged user"));
case EZFS_DEVLINKS:
return (dgettext(TEXT_DOMAIN, "failed to create /dev links"));
case EZFS_PERM:
@@ -169,6 +178,38 @@ libzfs_error_description(libzfs_handle_t *hdl)
"this pool operation"));
case EZFS_NAMETOOLONG:
return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
+ case EZFS_OPENFAILED:
+ return (dgettext(TEXT_DOMAIN, "open failed"));
+ case EZFS_NOCAP:
+ return (dgettext(TEXT_DOMAIN,
+ "disk capacity information could not be retrieved"));
+ case EZFS_LABELFAILED:
+ return (dgettext(TEXT_DOMAIN, "write of label failed"));
+ case EZFS_BADWHO:
+ return (dgettext(TEXT_DOMAIN, "invalid user/group"));
+ case EZFS_BADPERM:
+ return (dgettext(TEXT_DOMAIN, "invalid permission"));
+ case EZFS_BADPERMSET:
+ return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
+ case EZFS_NODELEGATION:
+ return (dgettext(TEXT_DOMAIN, "delegated administration is "
+ "disabled on pool"));
+ case EZFS_PERMRDONLY:
+ return (dgettext(TEXT_DOMAIN, "snapshot permissions cannot be"
+ " modified"));
+ case EZFS_BADCACHE:
+ return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
+ case EZFS_ISL2CACHE:
+ return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
+ case EZFS_VDEVNOTSUP:
+ return (dgettext(TEXT_DOMAIN, "vdev specification is not "
+ "supported"));
+ case EZFS_NOTSUP:
+ return (dgettext(TEXT_DOMAIN, "operation not supported "
+ "on this dataset"));
+ case EZFS_ACTIVE_SPARE:
+ return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
+ "device"));
case EZFS_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
@@ -249,6 +290,10 @@ zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
zfs_verror(hdl, EZFS_PERM, fmt, ap);
return (-1);
+ case ECANCELED:
+ zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
+ return (-1);
+
case EIO:
zfs_verror(hdl, EZFS_IO, fmt, ap);
return (-1);
@@ -280,9 +325,9 @@ zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
return (-1);
}
-
switch (error) {
case ENXIO:
+ case ENODEV:
zfs_verror(hdl, EZFS_IO, fmt, ap);
break;
@@ -308,11 +353,17 @@ zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
"dataset is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
-
+ case EROFS:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "snapshot permissions cannot be modified"));
+ zfs_verror(hdl, EZFS_PERMRDONLY, fmt, ap);
+ break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
break;
-
+ case ENOTSUP:
+ zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
+ break;
default:
zfs_error_aux(hdl, strerror(errno));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
@@ -361,7 +412,7 @@ zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
- zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
+ zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
case ENXIO:
@@ -382,6 +433,11 @@ zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
break;
+ case ENOSPC:
+ case EDQUOT:
+ zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
+ return (-1);
+
default:
zfs_error_aux(hdl, strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
@@ -483,9 +539,8 @@ zfs_nicenum(uint64_t num, char *buf, size_t buflen)
*/
int i;
for (i = 2; i >= 0; i--) {
- (void) snprintf(buf, buflen, "%.*f%c", i,
- (double)num / (1ULL << 10 * index), u);
- if (strlen(buf) <= 5)
+ if (snprintf(buf, buflen, "%.*f%c", i,
+ (double)num / (1ULL << 10 * index), u) <= 5)
break;
}
}
@@ -538,6 +593,9 @@ libzfs_init(void)
hdl->libzfs_sharetab = fopen(ZFS_EXPORTS_PATH, "r");
+ zfs_prop_init();
+ zpool_prop_init();
+
return (hdl);
}
@@ -549,6 +607,10 @@ libzfs_fini(libzfs_handle_t *hdl)
(void) fclose(hdl->libzfs_mnttab);
if (hdl->libzfs_sharetab)
(void) fclose(hdl->libzfs_sharetab);
+ zfs_uninit_libshare(hdl);
+ if (hdl->libzfs_log_str)
+ (void) free(hdl->libzfs_log_str);
+ zpool_free_handles(hdl);
namespace_clear(hdl);
free(hdl);
}
@@ -565,6 +627,12 @@ zfs_get_handle(zfs_handle_t *zhp)
return (zhp->zfs_hdl);
}
+zpool_handle_t *
+zfs_get_pool_handle(const zfs_handle_t *zhp)
+{
+ return (zhp->zpool_hdl);
+}
+
/*
* Given a name, determine whether or not it's a valid path
* (starts with '/' or "./"). If so, walk the mnttab trying
@@ -637,13 +705,14 @@ zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
void
zcmd_free_nvlists(zfs_cmd_t *zc)
{
+ free((void *)(uintptr_t)zc->zc_nvlist_conf);
free((void *)(uintptr_t)zc->zc_nvlist_src);
free((void *)(uintptr_t)zc->zc_nvlist_dst);
}
-int
-zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl,
- size_t *size)
+static int
+zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
+ nvlist_t *nvl)
{
char *packed;
size_t len;
@@ -655,14 +724,26 @@ zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl,
verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
- zc->zc_nvlist_src = (uint64_t)(uintptr_t)packed;
- zc->zc_nvlist_src_size = len;
+ *outnv = (uint64_t)(uintptr_t)packed;
+ *outlen = len;
- if (size)
- *size = len;
return (0);
}
+int
+zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
+{
+ return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
+ &zc->zc_nvlist_conf_size, nvl));
+}
+
+int
+zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
+{
+ return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
+ &zc->zc_nvlist_src_size, nvl));
+}
+
/*
* Unpacks an nvlist from the ZFS ioctl command structure.
*/
@@ -676,10 +757,32 @@ zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
return (0);
}
+int
+zfs_ioctl(libzfs_handle_t *hdl, int request, zfs_cmd_t *zc)
+{
+ int error;
+
+ zc->zc_history = (uint64_t)(uintptr_t)hdl->libzfs_log_str;
+ error = ioctl(hdl->libzfs_fd, request, zc);
+ if (hdl->libzfs_log_str) {
+ free(hdl->libzfs_log_str);
+ hdl->libzfs_log_str = NULL;
+ }
+ zc->zc_history = 0;
+
+ return (error);
+}
+
+/*
+ * ================================================================
+ * API shared by zfs and zpool property management
+ * ================================================================
+ */
+
static void
-zfs_print_prop_headers(libzfs_get_cbdata_t *cbp)
+zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
{
- zfs_proplist_t *pl = cbp->cb_proplist;
+ zprop_list_t *pl = cbp->cb_proplist;
int i;
char *title;
size_t len;
@@ -711,8 +814,12 @@ zfs_print_prop_headers(libzfs_get_cbdata_t *cbp)
/*
* 'PROPERTY' column
*/
- if (pl->pl_prop != ZFS_PROP_INVAL) {
- len = strlen(zfs_prop_to_name(pl->pl_prop));
+ if (pl->pl_prop != ZPROP_INVAL) {
+ const char *propname = (type == ZFS_TYPE_POOL) ?
+ zpool_prop_to_name(pl->pl_prop) :
+ zfs_prop_to_name(pl->pl_prop);
+
+ len = strlen(propname);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
} else {
@@ -731,7 +838,8 @@ zfs_print_prop_headers(libzfs_get_cbdata_t *cbp)
/*
* 'NAME' and 'SOURCE' columns
*/
- if (pl->pl_prop == ZFS_PROP_NAME &&
+ if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME :
+ ZFS_PROP_NAME) &&
pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) {
cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
@@ -777,8 +885,8 @@ zfs_print_prop_headers(libzfs_get_cbdata_t *cbp)
* structure.
*/
void
-libzfs_print_one_property(const char *name, libzfs_get_cbdata_t *cbp,
- const char *propname, const char *value, zfs_source_t sourcetype,
+zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
+ const char *propname, const char *value, zprop_source_t sourcetype,
const char *source)
{
int i;
@@ -792,7 +900,7 @@ libzfs_print_one_property(const char *name, libzfs_get_cbdata_t *cbp,
return;
if (cbp->cb_first)
- zfs_print_prop_headers(cbp);
+ zprop_print_headers(cbp, cbp->cb_type);
for (i = 0; i < 4; i++) {
switch (cbp->cb_columns[i]) {
@@ -810,23 +918,23 @@ libzfs_print_one_property(const char *name, libzfs_get_cbdata_t *cbp,
case GET_COL_SOURCE:
switch (sourcetype) {
- case ZFS_SRC_NONE:
+ case ZPROP_SRC_NONE:
str = "-";
break;
- case ZFS_SRC_DEFAULT:
+ case ZPROP_SRC_DEFAULT:
str = "default";
break;
- case ZFS_SRC_LOCAL:
+ case ZPROP_SRC_LOCAL:
str = "local";
break;
- case ZFS_SRC_TEMPORARY:
+ case ZPROP_SRC_TEMPORARY:
str = "temporary";
break;
- case ZFS_SRC_INHERITED:
+ case ZPROP_SRC_INHERITED:
(void) snprintf(buf, sizeof (buf),
"inherited from %s", source);
str = buf;
@@ -851,3 +959,451 @@ libzfs_print_one_property(const char *name, libzfs_get_cbdata_t *cbp,
(void) printf("\n");
}
+
+/*
+ * Given a numeric suffix, convert the value into a number of bits that the
+ * resulting value must be shifted.
+ */
+static int
+str2shift(libzfs_handle_t *hdl, const char *buf)
+{
+ const char *ends = "BKMGTPEZ";
+ int i;
+
+ if (buf[0] == '\0')
+ return (0);
+ for (i = 0; i < strlen(ends); i++) {
+ if (toupper(buf[0]) == ends[i])
+ break;
+ }
+ if (i == strlen(ends)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid numeric suffix '%s'"), buf);
+ return (-1);
+ }
+
+ /*
+ * We want to allow trailing 'b' characters for 'GB' or 'Mb'. But don't
+ * allow 'BB' - that's just weird.
+ */
+ if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0' &&
+ toupper(buf[0]) != 'B'))
+ return (10*i);
+
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid numeric suffix '%s'"), buf);
+ return (-1);
+}
+
+/*
+ * Convert a string of the form '100G' into a real number. Used when setting
+ * properties or creating a volume.  On failure, an extended error message
+ * is recorded against 'hdl' (when non-NULL) for the caller to report.
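+ * For example, "512" is returned as 512, and "100.5M" is parsed as
+ * 100.5 * 2^20 = 105381888.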
+ */
+int
+zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
+{
+ char *end;
+ int shift;
+
+ *num = 0;
+
+ /* Check to see if this looks like a number. */
+ if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "bad numeric value '%s'"), value);
+ return (-1);
+ }
+
+ /* Rely on strtoll() to process the numeric portion. */
+ errno = 0;
+ *num = strtoll(value, &end, 10);
+
+ /*
+ * Check for ERANGE, which indicates that the value is too large to fit
+ * in a 64-bit value.
+ */
+ if (errno == ERANGE) {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "numeric value is too large"));
+ return (-1);
+ }
+
+ /*
+ * If we have a decimal value, then do the computation with floating
+ * point arithmetic. Otherwise, use standard arithmetic.
+ */
+ if (*end == '.') {
+ double fval = strtod(value, &end);
+
+ if ((shift = str2shift(hdl, end)) == -1)
+ return (-1);
+
+ fval *= pow(2, shift);
+
+ if (fval > UINT64_MAX) {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "numeric value is too large"));
+ return (-1);
+ }
+
+ *num = (uint64_t)fval;
+ } else {
+ if ((shift = str2shift(hdl, end)) == -1)
+ return (-1);
+
+ /* Check for overflow */
+ if (shift >= 64 || (*num << shift) >> shift != *num) {
+ if (hdl)
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "numeric value is too large"));
+ return (-1);
+ }
+
+ *num <<= shift;
+ }
+
+ return (0);
+}
+
+/*
+ * Given a propname=value nvpair to set, parse any numeric properties
+ * (index, boolean, etc) if they are specified as strings and add the
+ * resulting nvpair to the returned nvlist.
+ *
+ * At the DSL layer, all properties are either 64-bit numbers or strings.
+ * We want the user to be able to ignore this fact and specify properties
+ * as native values (numbers, for example) or as strings (to simplify
+ * command line utilities). This also handles converting index types
+ * (compression, checksum, etc) from strings to their on-disk index.
+ */
+int
+zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
+ zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp,
+ const char *errbuf)
+{
+ data_type_t datatype = nvpair_type(elem);
+ zprop_type_t proptype;
+ const char *propname;
+ char *value;
+ boolean_t isnone = B_FALSE;
+
+ if (type == ZFS_TYPE_POOL) {
+ proptype = zpool_prop_get_type(prop);
+ propname = zpool_prop_to_name(prop);
+ } else {
+ proptype = zfs_prop_get_type(prop);
+ propname = zfs_prop_to_name(prop);
+ }
+
+ /*
+ * Convert any properties to the internal DSL value types.
+ */
+ *svalp = NULL;
+ *ivalp = 0;
+
+ switch (proptype) {
+ case PROP_TYPE_STRING:
+ if (datatype != DATA_TYPE_STRING) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a string"), nvpair_name(elem));
+ goto error;
+ }
+ (void) nvpair_value_string(elem, svalp);
+ if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' is too long"), nvpair_name(elem));
+ goto error;
+ }
+ break;
+
+ case PROP_TYPE_NUMBER:
+ if (datatype == DATA_TYPE_STRING) {
+ (void) nvpair_value_string(elem, &value);
+ if (strcmp(value, "none") == 0) {
+ isnone = B_TRUE;
+ } else if (zfs_nicestrtonum(hdl, value, ivalp)
+ != 0) {
+ goto error;
+ }
+ } else if (datatype == DATA_TYPE_UINT64) {
+ (void) nvpair_value_uint64(elem, ivalp);
+ } else {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a number"), nvpair_name(elem));
+ goto error;
+ }
+
+ /*
+ * Quota special: force 'none' and don't allow 0.
+ */
+ if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
+ (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "use 'none' to disable quota/refquota"));
+ goto error;
+ }
+ break;
+
+ case PROP_TYPE_INDEX:
+ if (datatype != DATA_TYPE_STRING) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be a string"), nvpair_name(elem));
+ goto error;
+ }
+
+ (void) nvpair_value_string(elem, &value);
+
+ if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "'%s' must be one of '%s'"), propname,
+ zprop_values(prop, type));
+ goto error;
+ }
+ break;
+
+ default:
+ abort();
+ }
+
+ /*
+ * Add the result to our return set of properties.
+ */
+ if (*svalp != NULL) {
+ if (nvlist_add_string(ret, propname, *svalp) != 0) {
+ (void) no_memory(hdl);
+ return (-1);
+ }
+ } else {
+ if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
+ (void) no_memory(hdl);
+ return (-1);
+ }
+ }
+
+ return (0);
+error:
+ (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+ return (-1);
+}
+
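+/*
+ * Validate a single property name and append a newly allocated
+ * zprop_list_t entry for it at '*listp'.  User-defined dataset properties
+ * are accepted even though they have no property table entry.
+ */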
+static int
+addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp,
+ zfs_type_t type)
+{
+ int prop;
+ zprop_list_t *entry;
+
+ prop = zprop_name_to_prop(propname, type);
+
+ if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type))
+ prop = ZPROP_INVAL;
+
+ /*
+ * When no property table entry can be found, return failure if
+ * this is a pool property or if this isn't a user-defined
+ * dataset property.
+ */
+ if (prop == ZPROP_INVAL && (type == ZFS_TYPE_POOL ||
+ !zfs_prop_user(propname))) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid property '%s'"), propname);
+ return (zfs_error(hdl, EZFS_BADPROP,
+ dgettext(TEXT_DOMAIN, "bad property list")));
+ }
+
+ if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
+ return (-1);
+
+ entry->pl_prop = prop;
+ if (prop == ZPROP_INVAL) {
+ if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) == NULL) {
+ free(entry);
+ return (-1);
+ }
+ entry->pl_width = strlen(propname);
+ } else {
+ entry->pl_width = zprop_width(prop, &entry->pl_fixed,
+ type);
+ }
+
+ *listp = entry;
+
+ return (0);
+}
+
+/*
+ * Given a comma-separated list of properties, construct a property list
+ * containing both user-defined and native properties. This function will
+ * return a NULL list if 'all' is specified, which can later be expanded
+ * by zprop_expand_list().
+ */
+int
+zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
+ zfs_type_t type)
+{
+ *listp = NULL;
+
+ /*
+ * If 'all' is specified, return a NULL list.
+ */
+ if (strcmp(props, "all") == 0)
+ return (0);
+
+ /*
+ * If no props were specified, return an error.
+ */
+ if (props[0] == '\0') {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no properties specified"));
+ return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
+ "bad property list")));
+ }
+
+ /*
+ * It would be nice to use getsubopt() here, but the inclusion of column
+ * aliases makes this more effort than it's worth.
+ */
+ while (*props != '\0') {
+ size_t len;
+ char *p;
+ char c;
+
+ if ((p = strchr(props, ',')) == NULL) {
+ len = strlen(props);
+ p = props + len;
+ } else {
+ len = p - props;
+ }
+
+ /*
+ * Check for empty options.
+ */
+ if (len == 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "empty property name"));
+ return (zfs_error(hdl, EZFS_BADPROP,
+ dgettext(TEXT_DOMAIN, "bad property list")));
+ }
+
+ /*
+ * Check all regular property names.
+ */
+ c = props[len];
+ props[len] = '\0';
+
+ if (strcmp(props, "space") == 0) {
+ static char *spaceprops[] = {
+ "name", "avail", "used", "usedbysnapshots",
+ "usedbydataset", "usedbyrefreservation",
+ "usedbychildren", NULL
+ };
+ int i;
+
+ for (i = 0; spaceprops[i]; i++) {
+ if (addlist(hdl, spaceprops[i], listp, type))
+ return (-1);
+ listp = &(*listp)->pl_next;
+ }
+ } else {
+ if (addlist(hdl, props, listp, type))
+ return (-1);
+ listp = &(*listp)->pl_next;
+ }
+
+ props = p;
+ if (c == ',')
+ props++;
+ }
+
+ return (0);
+}
+
+void
+zprop_free_list(zprop_list_t *pl)
+{
+ zprop_list_t *next;
+
+ while (pl != NULL) {
+ next = pl->pl_next;
+ free(pl->pl_user_prop);
+ free(pl);
+ pl = next;
+ }
+}
+
+typedef struct expand_data {
+ zprop_list_t **last;
+ libzfs_handle_t *hdl;
+ zfs_type_t type;
+} expand_data_t;
+
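+/*
+ * zprop_iter_common() callback: append one native property to the list
+ * being assembled in the expand_data_t.
+ */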
+int
+zprop_expand_list_cb(int prop, void *cb)
+{
+ zprop_list_t *entry;
+ expand_data_t *edp = cb;
+
+ if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL)
+ return (ZPROP_INVAL);
+
+ entry->pl_prop = prop;
+ entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
+ entry->pl_all = B_TRUE;
+
+ *(edp->last) = entry;
+ edp->last = &entry->pl_next;
+
+ return (ZPROP_CONT);
+}
+
+int
+zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
+{
+ zprop_list_t *entry;
+ zprop_list_t **last;
+ expand_data_t exp;
+
+ if (*plp == NULL) {
+ /*
+ * If this is the very first time we've been called for an 'all'
+ * specification, expand the list to include all native
+ * properties.
+ */
+ last = plp;
+
+ exp.last = last;
+ exp.hdl = hdl;
+ exp.type = type;
+
+ if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
+ B_FALSE, type) == ZPROP_INVAL)
+ return (-1);
+
+ /*
+ * Add 'name' to the beginning of the list, which is handled
+ * specially.
+ */
+ if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
+ return (-1);
+
+ entry->pl_prop = (type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
+ ZFS_PROP_NAME;
+ entry->pl_width = zprop_width(entry->pl_prop,
+ &entry->pl_fixed, type);
+ entry->pl_all = B_TRUE;
+ entry->pl_next = *plp;
+ *plp = entry;
+ }
+ return (0);
+}
+
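+/*
+ * Thin public wrapper around zprop_iter_common().
+ */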
+int
+zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
+ zfs_type_t type)
+{
+ return (zprop_iter_common(func, cb, show_all, ordered, type));
+}