summaryrefslogtreecommitdiffstats
path: root/sys/cddl
diff options
context:
space:
mode:
authordelphij <delphij@FreeBSD.org>2014-12-15 18:22:45 +0000
committerdelphij <delphij@FreeBSD.org>2014-12-15 18:22:45 +0000
commit55ed102bbcd7fb8a8c81c15f00712508cb9ea8b5 (patch)
tree0fac2602e32f8d87a21c62ec1f35a9d8b772aa96 /sys/cddl
parent50872743f93c1edaa918140eed44a7300d41beb8 (diff)
downloadFreeBSD-src-55ed102bbcd7fb8a8c81c15f00712508cb9ea8b5.zip
FreeBSD-src-55ed102bbcd7fb8a8c81c15f00712508cb9ea8b5.tar.gz
MFV r275783:
Convert ARC flags to use an enum. Previously, public flags were defined in arc.h and private flags were defined in arc.c, which could lead to confusion and programming errors. Consistently use 'hdr' (when referencing an arc_buf_hdr_t) instead of 'buf' or 'ab', because arc_buf_t variables are often named 'buf' as well. Illumos issues: 5369 (arc flags should be an enum); 5370 (consistent arc_buf_hdr_t naming scheme). MFC after: 2 weeks
Diffstat (limited to 'sys/cddl')
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c690
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c11
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c2
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c6
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c6
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c12
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c8
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h41
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c4
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c2
10 files changed, 394 insertions, 388 deletions
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
index 55cfa7c..975ab7a 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
@@ -618,7 +618,7 @@ struct arc_buf_hdr {
arc_buf_hdr_t *b_hash_next;
arc_buf_t *b_buf;
- uint32_t b_flags;
+ arc_flags_t b_flags;
uint32_t b_datacnt;
arc_callback_t *b_acb;
@@ -666,52 +666,26 @@ sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS)
static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
-static void arc_get_data_buf(arc_buf_t *buf);
-static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
-static int arc_evict_needed(arc_buf_contents_t type);
-static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
-#ifdef illumos
-static void arc_buf_watch(arc_buf_t *buf);
-#endif /* illumos */
-
-static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
-/*
- * Private ARC flags. These flags are private ARC only flags that will show up
- * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can
- * be passed in as arc_flags in things like arc_read. However, these flags
- * should never be passed and should only be set by ARC code. When adding new
- * public flags, make sure not to smash the private ones.
- */
-
-#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
-#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
-#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
-#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
-#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
-#define ARC_INDIRECT (1 << 14) /* this is an indirect block */
-#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
-#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
-#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
-#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
-
-#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
-#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
-#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
-#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
-#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
-#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
-#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
-#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
-#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
- (hdr)->b_l2hdr != NULL)
-#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
-#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
-#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
+#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
+#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
+#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
+#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
+#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FLAG_FREED_IN_READ)
+#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE)
+#define HDR_FREE_IN_PROGRESS(hdr) \
+ ((hdr)->b_flags & ARC_FLAG_FREE_IN_PROGRESS)
+#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
+#define HDR_L2_READING(hdr) \
+ ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS && \
+ (hdr)->b_l2hdr != NULL)
+#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
+#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
+#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
/*
* Other sizes
@@ -903,14 +877,20 @@ static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
-static void l2arc_read_done(zio_t *zio);
+static void arc_get_data_buf(arc_buf_t *);
+static void arc_access(arc_buf_hdr_t *, kmutex_t *);
+static int arc_evict_needed(arc_buf_contents_t);
+static void arc_evict_ghost(arc_state_t *, uint64_t, int64_t);
+static void arc_buf_watch(arc_buf_t *);
+
+static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
+static void l2arc_read_done(zio_t *);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);
-static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
-static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
- enum zio_compress c);
-static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
+static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *);
+static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress);
+static void l2arc_release_cdata_buf(arc_buf_hdr_t *);
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
@@ -955,14 +935,14 @@ buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
- arc_buf_hdr_t *buf;
+ arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
- for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
- buf = buf->b_hash_next) {
- if (BUF_EQUAL(spa, dva, birth, buf)) {
+ for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
+ hdr = hdr->b_hash_next) {
+ if (BUF_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
- return (buf);
+ return (hdr);
}
}
mutex_exit(hash_lock);
@@ -977,27 +957,27 @@ buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
* Otherwise returns NULL.
*/
static arc_buf_hdr_t *
-buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
+buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
- uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
+ uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
- arc_buf_hdr_t *fbuf;
+ arc_buf_hdr_t *fhdr;
uint32_t i;
- ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
- ASSERT(buf->b_birth != 0);
- ASSERT(!HDR_IN_HASH_TABLE(buf));
+ ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
+ ASSERT(hdr->b_birth != 0);
+ ASSERT(!HDR_IN_HASH_TABLE(hdr));
*lockp = hash_lock;
mutex_enter(hash_lock);
- for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
- fbuf = fbuf->b_hash_next, i++) {
- if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
- return (fbuf);
+ for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
+ fhdr = fhdr->b_hash_next, i++) {
+ if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
+ return (fhdr);
}
- buf->b_hash_next = buf_hash_table.ht_table[idx];
- buf_hash_table.ht_table[idx] = buf;
- buf->b_flags |= ARC_IN_HASH_TABLE;
+ hdr->b_hash_next = buf_hash_table.ht_table[idx];
+ buf_hash_table.ht_table[idx] = hdr;
+ hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
/* collect some hash table performance data */
if (i > 0) {
@@ -1015,22 +995,22 @@ buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
}
static void
-buf_hash_remove(arc_buf_hdr_t *buf)
+buf_hash_remove(arc_buf_hdr_t *hdr)
{
- arc_buf_hdr_t *fbuf, **bufp;
- uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
+ arc_buf_hdr_t *fhdr, **hdrp;
+ uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
- ASSERT(HDR_IN_HASH_TABLE(buf));
+ ASSERT(HDR_IN_HASH_TABLE(hdr));
- bufp = &buf_hash_table.ht_table[idx];
- while ((fbuf = *bufp) != buf) {
- ASSERT(fbuf != NULL);
- bufp = &fbuf->b_hash_next;
+ hdrp = &buf_hash_table.ht_table[idx];
+ while ((fhdr = *hdrp) != hdr) {
+ ASSERT(fhdr != NULL);
+ hdrp = &fhdr->b_hash_next;
}
- *bufp = buf->b_hash_next;
- buf->b_hash_next = NULL;
- buf->b_flags &= ~ARC_IN_HASH_TABLE;
+ *hdrp = hdr->b_hash_next;
+ hdr->b_hash_next = NULL;
+ hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE;
/* collect some hash table performance data */
ARCSTAT_BUMPDOWN(arcstat_hash_elements);
@@ -1067,12 +1047,12 @@ buf_fini(void)
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
- arc_buf_hdr_t *buf = vbuf;
+ arc_buf_hdr_t *hdr = vbuf;
- bzero(buf, sizeof (arc_buf_hdr_t));
- refcount_create(&buf->b_refcnt);
- cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
- mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
+ bzero(hdr, sizeof (arc_buf_hdr_t));
+ refcount_create(&hdr->b_refcnt);
+ cv_init(&hdr->b_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&hdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
return (0);
@@ -1099,12 +1079,12 @@ buf_cons(void *vbuf, void *unused, int kmflag)
static void
hdr_dest(void *vbuf, void *unused)
{
- arc_buf_hdr_t *buf = vbuf;
+ arc_buf_hdr_t *hdr = vbuf;
- ASSERT(BUF_EMPTY(buf));
- refcount_destroy(&buf->b_refcnt);
- cv_destroy(&buf->b_cv);
- mutex_destroy(&buf->b_freeze_lock);
+ ASSERT(BUF_EMPTY(hdr));
+ refcount_destroy(&hdr->b_refcnt);
+ cv_destroy(&hdr->b_cv);
+ mutex_destroy(&hdr->b_freeze_lock);
arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}
@@ -1186,7 +1166,7 @@ arc_cksum_verify(arc_buf_t *buf)
mutex_enter(&buf->b_hdr->b_freeze_lock);
if (buf->b_hdr->b_freeze_cksum == NULL ||
- (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
+ (buf->b_hdr->b_flags & ARC_FLAG_IO_ERROR)) {
mutex_exit(&buf->b_hdr->b_freeze_lock);
return;
}
@@ -1281,7 +1261,7 @@ arc_buf_thaw(arc_buf_t *buf)
if (zfs_flags & ZFS_DEBUG_MODIFY) {
if (buf->b_hdr->b_state != arc_anon)
panic("modifying non-anon buffer!");
- if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
+ if (buf->b_hdr->b_flags & ARC_FLAG_IO_IN_PROGRESS)
panic("modifying buffer while i/o in progress!");
arc_cksum_verify(buf);
}
@@ -1324,11 +1304,11 @@ arc_buf_freeze(arc_buf_t *buf)
}
static void
-get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
+get_buf_info(arc_buf_hdr_t *hdr, arc_state_t *state, list_t **list, kmutex_t **lock)
{
- uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
+ uint64_t buf_hashid = buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
- if (ab->b_type == ARC_BUFC_METADATA)
+ if (hdr->b_type == ARC_BUFC_METADATA)
buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
else {
buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
@@ -1341,59 +1321,59 @@ get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lo
static void
-add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
+add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
ASSERT(MUTEX_HELD(hash_lock));
- if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
- (ab->b_state != arc_anon)) {
- uint64_t delta = ab->b_size * ab->b_datacnt;
- uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
+ if ((refcount_add(&hdr->b_refcnt, tag) == 1) &&
+ (hdr->b_state != arc_anon)) {
+ uint64_t delta = hdr->b_size * hdr->b_datacnt;
+ uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
list_t *list;
kmutex_t *lock;
- get_buf_info(ab, ab->b_state, &list, &lock);
+ get_buf_info(hdr, hdr->b_state, &list, &lock);
ASSERT(!MUTEX_HELD(lock));
mutex_enter(lock);
- ASSERT(list_link_active(&ab->b_arc_node));
- list_remove(list, ab);
- if (GHOST_STATE(ab->b_state)) {
- ASSERT0(ab->b_datacnt);
- ASSERT3P(ab->b_buf, ==, NULL);
- delta = ab->b_size;
+ ASSERT(list_link_active(&hdr->b_arc_node));
+ list_remove(list, hdr);
+ if (GHOST_STATE(hdr->b_state)) {
+ ASSERT0(hdr->b_datacnt);
+ ASSERT3P(hdr->b_buf, ==, NULL);
+ delta = hdr->b_size;
}
ASSERT(delta > 0);
ASSERT3U(*size, >=, delta);
atomic_add_64(size, -delta);
mutex_exit(lock);
/* remove the prefetch flag if we get a reference */
- if (ab->b_flags & ARC_PREFETCH)
- ab->b_flags &= ~ARC_PREFETCH;
+ if (hdr->b_flags & ARC_FLAG_PREFETCH)
+ hdr->b_flags &= ~ARC_FLAG_PREFETCH;
}
}
static int
-remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
+remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
int cnt;
- arc_state_t *state = ab->b_state;
+ arc_state_t *state = hdr->b_state;
ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
ASSERT(!GHOST_STATE(state));
- if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
+ if (((cnt = refcount_remove(&hdr->b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
- uint64_t *size = &state->arcs_lsize[ab->b_type];
+ uint64_t *size = &state->arcs_lsize[hdr->b_type];
list_t *list;
kmutex_t *lock;
- get_buf_info(ab, state, &list, &lock);
+ get_buf_info(hdr, state, &list, &lock);
ASSERT(!MUTEX_HELD(lock));
mutex_enter(lock);
- ASSERT(!list_link_active(&ab->b_arc_node));
- list_insert_head(list, ab);
- ASSERT(ab->b_datacnt > 0);
- atomic_add_64(size, ab->b_size * ab->b_datacnt);
+ ASSERT(!list_link_active(&hdr->b_arc_node));
+ list_insert_head(list, hdr);
+ ASSERT(hdr->b_datacnt > 0);
+ atomic_add_64(size, hdr->b_size * hdr->b_datacnt);
mutex_exit(lock);
}
return (cnt);
@@ -1404,21 +1384,22 @@ remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
* for the buffer must be held by the caller.
*/
static void
-arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
+arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
+ kmutex_t *hash_lock)
{
- arc_state_t *old_state = ab->b_state;
- int64_t refcnt = refcount_count(&ab->b_refcnt);
+ arc_state_t *old_state = hdr->b_state;
+ int64_t refcnt = refcount_count(&hdr->b_refcnt);
uint64_t from_delta, to_delta;
list_t *list;
kmutex_t *lock;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT3P(new_state, !=, old_state);
- ASSERT(refcnt == 0 || ab->b_datacnt > 0);
- ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
- ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
+ ASSERT(refcnt == 0 || hdr->b_datacnt > 0);
+ ASSERT(hdr->b_datacnt == 0 || !GHOST_STATE(new_state));
+ ASSERT(hdr->b_datacnt <= 1 || old_state != arc_anon);
- from_delta = to_delta = ab->b_datacnt * ab->b_size;
+ from_delta = to_delta = hdr->b_datacnt * hdr->b_size;
/*
* If this buffer is evictable, transfer it from the
@@ -1427,24 +1408,24 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
if (refcnt == 0) {
if (old_state != arc_anon) {
int use_mutex;
- uint64_t *size = &old_state->arcs_lsize[ab->b_type];
+ uint64_t *size = &old_state->arcs_lsize[hdr->b_type];
- get_buf_info(ab, old_state, &list, &lock);
+ get_buf_info(hdr, old_state, &list, &lock);
use_mutex = !MUTEX_HELD(lock);
if (use_mutex)
mutex_enter(lock);
- ASSERT(list_link_active(&ab->b_arc_node));
- list_remove(list, ab);
+ ASSERT(list_link_active(&hdr->b_arc_node));
+ list_remove(list, hdr);
/*
* If prefetching out of the ghost cache,
* we will have a non-zero datacnt.
*/
- if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
+ if (GHOST_STATE(old_state) && hdr->b_datacnt == 0) {
/* ghost elements have a ghost size */
- ASSERT(ab->b_buf == NULL);
- from_delta = ab->b_size;
+ ASSERT(hdr->b_buf == NULL);
+ from_delta = hdr->b_size;
}
ASSERT3U(*size, >=, from_delta);
atomic_add_64(size, -from_delta);
@@ -1454,20 +1435,20 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
}
if (new_state != arc_anon) {
int use_mutex;
- uint64_t *size = &new_state->arcs_lsize[ab->b_type];
+ uint64_t *size = &new_state->arcs_lsize[hdr->b_type];
- get_buf_info(ab, new_state, &list, &lock);
+ get_buf_info(hdr, new_state, &list, &lock);
use_mutex = !MUTEX_HELD(lock);
if (use_mutex)
mutex_enter(lock);
- list_insert_head(list, ab);
+ list_insert_head(list, hdr);
/* ghost elements have a ghost size */
if (GHOST_STATE(new_state)) {
- ASSERT(ab->b_datacnt == 0);
- ASSERT(ab->b_buf == NULL);
- to_delta = ab->b_size;
+ ASSERT(hdr->b_datacnt == 0);
+ ASSERT(hdr->b_buf == NULL);
+ to_delta = hdr->b_size;
}
atomic_add_64(size, to_delta);
@@ -1476,9 +1457,9 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
}
}
- ASSERT(!BUF_EMPTY(ab));
- if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
- buf_hash_remove(ab);
+ ASSERT(!BUF_EMPTY(hdr));
+ if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
+ buf_hash_remove(hdr);
/* adjust state sizes */
if (to_delta)
@@ -1487,7 +1468,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
ASSERT3U(old_state->arcs_size, >=, from_delta);
atomic_add_64(&old_state->arcs_size, -from_delta);
}
- ab->b_state = new_state;
+ hdr->b_state = new_state;
/* adjust l2arc hdr stats */
if (new_state == arc_l2c_only)
@@ -1689,7 +1670,7 @@ arc_buf_add_ref(arc_buf_t *buf, void* tag)
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
- ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
+ ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH),
demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
data, metadata, hits);
}
@@ -1920,7 +1901,7 @@ arc_buf_free(arc_buf_t *buf, void *tag)
} else {
ASSERT(buf == hdr->b_buf);
ASSERT(buf->b_efunc == NULL);
- hdr->b_flags |= ARC_BUF_AVAILABLE;
+ hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
}
mutex_exit(hash_lock);
} else if (HDR_IO_IN_PROGRESS(hdr)) {
@@ -1971,7 +1952,7 @@ arc_buf_remove_ref(arc_buf_t *buf, void* tag)
} else if (no_callback) {
ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
ASSERT(buf->b_efunc == NULL);
- hdr->b_flags |= ARC_BUF_AVAILABLE;
+ hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
}
ASSERT(no_callback || hdr->b_datacnt > 1 ||
refcount_is_zero(&hdr->b_refcnt));
@@ -2047,7 +2028,7 @@ arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
arc_state_t *evicted_state;
uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
int64_t bytes_remaining;
- arc_buf_hdr_t *ab, *ab_prev = NULL;
+ arc_buf_hdr_t *hdr, *hdr_prev = NULL;
list_t *evicted_list, *list, *evicted_list_start, *list_start;
kmutex_t *lock, *evicted_lock;
kmutex_t *hash_lock;
@@ -2130,25 +2111,25 @@ evict_start:
mutex_enter(lock);
mutex_enter(evicted_lock);
- for (ab = list_tail(list); ab; ab = ab_prev) {
- ab_prev = list_prev(list, ab);
- bytes_remaining -= (ab->b_size * ab->b_datacnt);
+ for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
+ hdr_prev = list_prev(list, hdr);
+ bytes_remaining -= (hdr->b_size * hdr->b_datacnt);
/* prefetch buffers have a minimum lifespan */
- if (HDR_IO_IN_PROGRESS(ab) ||
- (spa && ab->b_spa != spa) ||
- (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
- ddi_get_lbolt() - ab->b_arc_access <
+ if (HDR_IO_IN_PROGRESS(hdr) ||
+ (spa && hdr->b_spa != spa) ||
+ (hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT) &&
+ ddi_get_lbolt() - hdr->b_arc_access <
arc_min_prefetch_lifespan)) {
skipped++;
continue;
}
/* "lookahead" for better eviction candidate */
- if (recycle && ab->b_size != bytes &&
- ab_prev && ab_prev->b_size == bytes)
+ if (recycle && hdr->b_size != bytes &&
+ hdr_prev && hdr_prev->b_size == bytes)
continue;
/* ignore markers */
- if (ab->b_spa == 0)
+ if (hdr->b_spa == 0)
continue;
/*
@@ -2161,34 +2142,34 @@ evict_start:
* the hot code path, so don't sleep.
*/
if (!recycle && count++ > arc_evict_iterations) {
- list_insert_after(list, ab, &marker);
+ list_insert_after(list, hdr, &marker);
mutex_exit(evicted_lock);
mutex_exit(lock);
kpreempt(KPREEMPT_SYNC);
mutex_enter(lock);
mutex_enter(evicted_lock);
- ab_prev = list_prev(list, &marker);
+ hdr_prev = list_prev(list, &marker);
list_remove(list, &marker);
count = 0;
continue;
}
- hash_lock = HDR_LOCK(ab);
+ hash_lock = HDR_LOCK(hdr);
have_lock = MUTEX_HELD(hash_lock);
if (have_lock || mutex_tryenter(hash_lock)) {
- ASSERT0(refcount_count(&ab->b_refcnt));
- ASSERT(ab->b_datacnt > 0);
- while (ab->b_buf) {
- arc_buf_t *buf = ab->b_buf;
+ ASSERT0(refcount_count(&hdr->b_refcnt));
+ ASSERT(hdr->b_datacnt > 0);
+ while (hdr->b_buf) {
+ arc_buf_t *buf = hdr->b_buf;
if (!mutex_tryenter(&buf->b_evict_lock)) {
missed += 1;
break;
}
if (buf->b_data) {
- bytes_evicted += ab->b_size;
- if (recycle && ab->b_type == type &&
- ab->b_size == bytes &&
- !HDR_L2_WRITING(ab)) {
+ bytes_evicted += hdr->b_size;
+ if (recycle && hdr->b_type == type &&
+ hdr->b_size == bytes &&
+ !HDR_L2_WRITING(hdr)) {
stolen = buf->b_data;
recycle = FALSE;
}
@@ -2197,7 +2178,7 @@ evict_start:
mutex_enter(&arc_eviction_mtx);
arc_buf_destroy(buf,
buf->b_data == stolen, FALSE);
- ab->b_buf = buf->b_next;
+ hdr->b_buf = buf->b_next;
buf->b_hdr = &arc_eviction_hdr;
buf->b_next = arc_eviction_list;
arc_eviction_list = buf;
@@ -2210,26 +2191,26 @@ evict_start:
}
}
- if (ab->b_l2hdr) {
+ if (hdr->b_l2hdr) {
ARCSTAT_INCR(arcstat_evict_l2_cached,
- ab->b_size);
+ hdr->b_size);
} else {
- if (l2arc_write_eligible(ab->b_spa, ab)) {
+ if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
- ab->b_size);
+ hdr->b_size);
} else {
ARCSTAT_INCR(
arcstat_evict_l2_ineligible,
- ab->b_size);
+ hdr->b_size);
}
}
- if (ab->b_datacnt == 0) {
- arc_change_state(evicted_state, ab, hash_lock);
- ASSERT(HDR_IN_HASH_TABLE(ab));
- ab->b_flags |= ARC_IN_HASH_TABLE;
- ab->b_flags &= ~ARC_BUF_AVAILABLE;
- DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
+ if (hdr->b_datacnt == 0) {
+ arc_change_state(evicted_state, hdr, hash_lock);
+ ASSERT(HDR_IN_HASH_TABLE(hdr));
+ hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
+ hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
+ DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
}
if (!have_lock)
mutex_exit(hash_lock);
@@ -2290,7 +2271,7 @@ evict_start:
static void
arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
{
- arc_buf_hdr_t *ab, *ab_prev;
+ arc_buf_hdr_t *hdr, *hdr_prev;
arc_buf_hdr_t marker = { 0 };
list_t *list, *list_start;
kmutex_t *hash_lock, *lock;
@@ -2315,18 +2296,18 @@ evict_start:
lock = ARCS_LOCK(state, idx + offset);
mutex_enter(lock);
- for (ab = list_tail(list); ab; ab = ab_prev) {
- ab_prev = list_prev(list, ab);
- if (ab->b_type > ARC_BUFC_NUMTYPES)
- panic("invalid ab=%p", (void *)ab);
- if (spa && ab->b_spa != spa)
+ for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
+ hdr_prev = list_prev(list, hdr);
+ if (hdr->b_type > ARC_BUFC_NUMTYPES)
+ panic("invalid hdr=%p", (void *)hdr);
+ if (spa && hdr->b_spa != spa)
continue;
/* ignore markers */
- if (ab->b_spa == 0)
+ if (hdr->b_spa == 0)
continue;
- hash_lock = HDR_LOCK(ab);
+ hash_lock = HDR_LOCK(hdr);
/* caller may be trying to modify this buffer, skip it */
if (MUTEX_HELD(hash_lock))
continue;
@@ -2338,35 +2319,35 @@ evict_start:
* before reacquiring the lock.
*/
if (count++ > arc_evict_iterations) {
- list_insert_after(list, ab, &marker);
+ list_insert_after(list, hdr, &marker);
mutex_exit(lock);
kpreempt(KPREEMPT_SYNC);
mutex_enter(lock);
- ab_prev = list_prev(list, &marker);
+ hdr_prev = list_prev(list, &marker);
list_remove(list, &marker);
count = 0;
continue;
}
if (mutex_tryenter(hash_lock)) {
- ASSERT(!HDR_IO_IN_PROGRESS(ab));
- ASSERT(ab->b_buf == NULL);
+ ASSERT(!HDR_IO_IN_PROGRESS(hdr));
+ ASSERT(hdr->b_buf == NULL);
ARCSTAT_BUMP(arcstat_deleted);
- bytes_deleted += ab->b_size;
+ bytes_deleted += hdr->b_size;
- if (ab->b_l2hdr != NULL) {
+ if (hdr->b_l2hdr != NULL) {
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
- arc_change_state(arc_l2c_only, ab, hash_lock);
+ arc_change_state(arc_l2c_only, hdr, hash_lock);
mutex_exit(hash_lock);
} else {
- arc_change_state(arc_anon, ab, hash_lock);
+ arc_change_state(arc_anon, hdr, hash_lock);
mutex_exit(hash_lock);
- arc_hdr_destroy(ab);
+ arc_hdr_destroy(hdr);
}
- DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
+ DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (bytes >= 0 && bytes_deleted >= bytes)
break;
} else if (bytes < 0) {
@@ -2375,12 +2356,12 @@ evict_start:
* hash lock to become available. Once its
* available, restart from where we left off.
*/
- list_insert_after(list, ab, &marker);
+ list_insert_after(list, hdr, &marker);
mutex_exit(lock);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
mutex_enter(lock);
- ab_prev = list_prev(list, &marker);
+ hdr_prev = list_prev(list, &marker);
list_remove(list, &marker);
} else {
bufs_skipped += 1;
@@ -2965,7 +2946,8 @@ arc_get_data_buf(arc_buf_t *buf)
* will end up on the mru list; so steal space from there.
*/
if (state == arc_mfu_ghost)
- state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
+ state = buf->b_hdr->b_flags & ARC_FLAG_PREFETCH ?
+ arc_mru : arc_mfu;
else if (state == arc_mru_ghost)
state = arc_mru;
@@ -3021,25 +3003,25 @@ out:
* NOTE: the hash lock is dropped in this function.
*/
static void
-arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
+arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
clock_t now;
ASSERT(MUTEX_HELD(hash_lock));
- if (buf->b_state == arc_anon) {
+ if (hdr->b_state == arc_anon) {
/*
* This buffer is not in the cache, and does not
* appear in our "ghost" list. Add the new buffer
* to the MRU state.
*/
- ASSERT(buf->b_arc_access == 0);
- buf->b_arc_access = ddi_get_lbolt();
- DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
- arc_change_state(arc_mru, buf, hash_lock);
+ ASSERT(hdr->b_arc_access == 0);
+ hdr->b_arc_access = ddi_get_lbolt();
+ DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
+ arc_change_state(arc_mru, hdr, hash_lock);
- } else if (buf->b_state == arc_mru) {
+ } else if (hdr->b_state == arc_mru) {
now = ddi_get_lbolt();
/*
@@ -3050,14 +3032,14 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
- if ((buf->b_flags & ARC_PREFETCH) != 0) {
- if (refcount_count(&buf->b_refcnt) == 0) {
- ASSERT(list_link_active(&buf->b_arc_node));
+ if ((hdr->b_flags & ARC_FLAG_PREFETCH) != 0) {
+ if (refcount_count(&hdr->b_refcnt) == 0) {
+ ASSERT(list_link_active(&hdr->b_arc_node));
} else {
- buf->b_flags &= ~ARC_PREFETCH;
+ hdr->b_flags &= ~ARC_FLAG_PREFETCH;
ARCSTAT_BUMP(arcstat_mru_hits);
}
- buf->b_arc_access = now;
+ hdr->b_arc_access = now;
return;
}
@@ -3066,18 +3048,18 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
* but it is still in the cache. Move it to the MFU
* state.
*/
- if (now > buf->b_arc_access + ARC_MINTIME) {
+ if (now > hdr->b_arc_access + ARC_MINTIME) {
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
- buf->b_arc_access = now;
- DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
- arc_change_state(arc_mfu, buf, hash_lock);
+ hdr->b_arc_access = now;
+ DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
+ arc_change_state(arc_mfu, hdr, hash_lock);
}
ARCSTAT_BUMP(arcstat_mru_hits);
- } else if (buf->b_state == arc_mru_ghost) {
+ } else if (hdr->b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been "accessed" recently, but
@@ -3085,21 +3067,21 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
* MFU state.
*/
- if (buf->b_flags & ARC_PREFETCH) {
+ if (hdr->b_flags & ARC_FLAG_PREFETCH) {
new_state = arc_mru;
- if (refcount_count(&buf->b_refcnt) > 0)
- buf->b_flags &= ~ARC_PREFETCH;
- DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
+ if (refcount_count(&hdr->b_refcnt) > 0)
+ hdr->b_flags &= ~ARC_FLAG_PREFETCH;
+ DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
- DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
+ DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
- buf->b_arc_access = ddi_get_lbolt();
- arc_change_state(new_state, buf, hash_lock);
+ hdr->b_arc_access = ddi_get_lbolt();
+ arc_change_state(new_state, hdr, hash_lock);
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
- } else if (buf->b_state == arc_mfu) {
+ } else if (hdr->b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is
* still in the cache. Keep it in the MFU state.
@@ -3109,13 +3091,13 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
- if ((buf->b_flags & ARC_PREFETCH) != 0) {
- ASSERT(refcount_count(&buf->b_refcnt) == 0);
- ASSERT(list_link_active(&buf->b_arc_node));
+ if ((hdr->b_flags & ARC_FLAG_PREFETCH) != 0) {
+ ASSERT(refcount_count(&hdr->b_refcnt) == 0);
+ ASSERT(list_link_active(&hdr->b_arc_node));
}
ARCSTAT_BUMP(arcstat_mfu_hits);
- buf->b_arc_access = ddi_get_lbolt();
- } else if (buf->b_state == arc_mfu_ghost) {
+ hdr->b_arc_access = ddi_get_lbolt();
+ } else if (hdr->b_state == arc_mfu_ghost) {
arc_state_t *new_state = arc_mfu;
/*
* This buffer has been accessed more than once but has
@@ -3123,28 +3105,28 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
* MFU state.
*/
- if (buf->b_flags & ARC_PREFETCH) {
+ if (hdr->b_flags & ARC_FLAG_PREFETCH) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
- ASSERT0(refcount_count(&buf->b_refcnt));
+ ASSERT0(refcount_count(&hdr->b_refcnt));
new_state = arc_mru;
}
- buf->b_arc_access = ddi_get_lbolt();
- DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
- arc_change_state(new_state, buf, hash_lock);
+ hdr->b_arc_access = ddi_get_lbolt();
+ DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
+ arc_change_state(new_state, hdr, hash_lock);
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
- } else if (buf->b_state == arc_l2c_only) {
+ } else if (hdr->b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC.
*/
- buf->b_arc_access = ddi_get_lbolt();
- DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
- arc_change_state(arc_mfu, buf, hash_lock);
+ hdr->b_arc_access = ddi_get_lbolt();
+ DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
+ arc_change_state(arc_mfu, hdr, hash_lock);
} else {
ASSERT(!"invalid arc state");
}
@@ -3212,9 +3194,9 @@ arc_read_done(zio_t *zio)
(found == hdr && HDR_L2_READING(hdr)));
}
- hdr->b_flags &= ~ARC_L2_EVICTED;
- if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
- hdr->b_flags &= ~ARC_L2CACHE;
+ hdr->b_flags &= ~ARC_FLAG_L2_EVICTED;
+ if (l2arc_noprefetch && (hdr->b_flags & ARC_FLAG_PREFETCH))
+ hdr->b_flags &= ~ARC_FLAG_L2CACHE;
/* byteswap if necessary */
callback_list = hdr->b_acb;
@@ -3256,18 +3238,18 @@ arc_read_done(zio_t *zio)
}
}
hdr->b_acb = NULL;
- hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
+ hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
ASSERT(!HDR_BUF_AVAILABLE(hdr));
if (abuf == buf) {
ASSERT(buf->b_efunc == NULL);
ASSERT(hdr->b_datacnt == 1);
- hdr->b_flags |= ARC_BUF_AVAILABLE;
+ hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
}
ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
if (zio->io_error != 0) {
- hdr->b_flags |= ARC_IO_ERROR;
+ hdr->b_flags |= ARC_FLAG_IO_ERROR;
if (hdr->b_state != arc_anon)
arc_change_state(arc_anon, hdr, hash_lock);
if (HDR_IN_HASH_TABLE(hdr))
@@ -3333,8 +3315,8 @@ arc_read_done(zio_t *zio)
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
- void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
- const zbookmark_phys_t *zb)
+ void *private, zio_priority_t priority, int zio_flags,
+ arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
arc_buf_t *buf = NULL;
@@ -3356,16 +3338,16 @@ top:
if (hdr != NULL && hdr->b_datacnt > 0) {
- *arc_flags |= ARC_CACHED;
+ *arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
- if (*arc_flags & ARC_WAIT) {
+ if (*arc_flags & ARC_FLAG_WAIT) {
cv_wait(&hdr->b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
- ASSERT(*arc_flags & ARC_NOWAIT);
+ ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
if (done) {
arc_callback_t *acb = NULL;
@@ -3403,24 +3385,24 @@ top:
ASSERT(buf->b_data);
if (HDR_BUF_AVAILABLE(hdr)) {
ASSERT(buf->b_efunc == NULL);
- hdr->b_flags &= ~ARC_BUF_AVAILABLE;
+ hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
} else {
buf = arc_buf_clone(buf);
}
- } else if (*arc_flags & ARC_PREFETCH &&
+ } else if (*arc_flags & ARC_FLAG_PREFETCH &&
refcount_count(&hdr->b_refcnt) == 0) {
- hdr->b_flags |= ARC_PREFETCH;
+ hdr->b_flags |= ARC_FLAG_PREFETCH;
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
- if (*arc_flags & ARC_L2CACHE)
- hdr->b_flags |= ARC_L2CACHE;
- if (*arc_flags & ARC_L2COMPRESS)
- hdr->b_flags |= ARC_L2COMPRESS;
+ if (*arc_flags & ARC_FLAG_L2CACHE)
+ hdr->b_flags |= ARC_FLAG_L2CACHE;
+ if (*arc_flags & ARC_FLAG_L2COMPRESS)
+ hdr->b_flags |= ARC_FLAG_L2COMPRESS;
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
- ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
+ ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH),
demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
data, metadata, hits);
@@ -3454,18 +3436,19 @@ top:
(void) arc_buf_remove_ref(buf, private);
goto top; /* restart the IO request */
}
+
/* if this is a prefetch, we don't have a reference */
- if (*arc_flags & ARC_PREFETCH) {
+ if (*arc_flags & ARC_FLAG_PREFETCH) {
(void) remove_reference(hdr, hash_lock,
private);
- hdr->b_flags |= ARC_PREFETCH;
+ hdr->b_flags |= ARC_FLAG_PREFETCH;
}
- if (*arc_flags & ARC_L2CACHE)
- hdr->b_flags |= ARC_L2CACHE;
- if (*arc_flags & ARC_L2COMPRESS)
- hdr->b_flags |= ARC_L2COMPRESS;
+ if (*arc_flags & ARC_FLAG_L2CACHE)
+ hdr->b_flags |= ARC_FLAG_L2CACHE;
+ if (*arc_flags & ARC_FLAG_L2COMPRESS)
+ hdr->b_flags |= ARC_FLAG_L2COMPRESS;
if (BP_GET_LEVEL(bp) > 0)
- hdr->b_flags |= ARC_INDIRECT;
+ hdr->b_flags |= ARC_FLAG_INDIRECT;
} else {
/* this block is in the ghost cache */
ASSERT(GHOST_STATE(hdr->b_state));
@@ -3474,14 +3457,14 @@ top:
ASSERT(hdr->b_buf == NULL);
/* if this is a prefetch, we don't have a reference */
- if (*arc_flags & ARC_PREFETCH)
- hdr->b_flags |= ARC_PREFETCH;
+ if (*arc_flags & ARC_FLAG_PREFETCH)
+ hdr->b_flags |= ARC_FLAG_PREFETCH;
else
add_reference(hdr, hash_lock, private);
- if (*arc_flags & ARC_L2CACHE)
- hdr->b_flags |= ARC_L2CACHE;
- if (*arc_flags & ARC_L2COMPRESS)
- hdr->b_flags |= ARC_L2COMPRESS;
+ if (*arc_flags & ARC_FLAG_L2CACHE)
+ hdr->b_flags |= ARC_FLAG_L2CACHE;
+ if (*arc_flags & ARC_FLAG_L2COMPRESS)
+ hdr->b_flags |= ARC_FLAG_L2COMPRESS;
buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
@@ -3503,7 +3486,7 @@ top:
ASSERT(hdr->b_acb == NULL);
hdr->b_acb = acb;
- hdr->b_flags |= ARC_IO_IN_PROGRESS;
+ hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
if (hdr->b_l2hdr != NULL &&
(vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
@@ -3530,7 +3513,7 @@ top:
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
uint64_t, size, zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
- ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
+ ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH),
demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
data, metadata, misses);
#ifdef _KERNEL
@@ -3595,12 +3578,12 @@ top:
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
- if (*arc_flags & ARC_NOWAIT) {
+ if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
return (0);
}
- ASSERT(*arc_flags & ARC_WAIT);
+ ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
return (0);
@@ -3626,10 +3609,10 @@ top:
rzio = zio_read(pio, spa, bp, buf->b_data, size,
arc_read_done, buf, priority, zio_flags, zb);
- if (*arc_flags & ARC_WAIT)
+ if (*arc_flags & ARC_FLAG_WAIT)
return (zio_wait(rzio));
- ASSERT(*arc_flags & ARC_NOWAIT);
+ ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
return (0);
@@ -3666,7 +3649,7 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
if (HDR_BUF_AVAILABLE(hdr)) {
arc_buf_t *buf = hdr->b_buf;
add_reference(hdr, hash_lock, FTAG);
- hdr->b_flags &= ~ARC_BUF_AVAILABLE;
+ hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
mutex_exit(hash_lock);
arc_release(buf, FTAG);
@@ -3735,7 +3718,7 @@ arc_clear_callback(arc_buf_t *buf)
arc_buf_destroy(buf, FALSE, TRUE);
} else {
ASSERT(buf == hdr->b_buf);
- hdr->b_flags |= ARC_BUF_AVAILABLE;
+ hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
mutex_exit(&buf->b_evict_lock);
}
@@ -3844,7 +3827,7 @@ arc_release(arc_buf_t *buf, void *tag)
nhdr->b_buf = buf;
nhdr->b_state = arc_anon;
nhdr->b_arc_access = 0;
- nhdr->b_flags = flags & ARC_L2_WRITING;
+ nhdr->b_flags = flags & ARC_FLAG_L2_WRITING;
nhdr->b_l2hdr = NULL;
nhdr->b_datacnt = 1;
nhdr->b_freeze_cksum = NULL;
@@ -3930,7 +3913,7 @@ arc_write_ready(zio_t *zio)
mutex_exit(&hdr->b_freeze_lock);
}
arc_cksum_compute(buf, B_FALSE);
- hdr->b_flags |= ARC_IO_IN_PROGRESS;
+ hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
}
/*
@@ -4011,13 +3994,13 @@ arc_write_done(zio_t *zio)
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
- hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
+ hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
/* if it's not anon, we are doing a scrub */
if (!exists && hdr->b_state == arc_anon)
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
} else {
- hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
+ hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
}
ASSERT(!refcount_is_zero(&hdr->b_refcnt));
@@ -4040,12 +4023,12 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
ASSERT(ready != NULL);
ASSERT(done != NULL);
ASSERT(!HDR_IO_ERROR(hdr));
- ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
+ ASSERT((hdr->b_flags & ARC_FLAG_IO_IN_PROGRESS) == 0);
ASSERT(hdr->b_acb == NULL);
if (l2arc)
- hdr->b_flags |= ARC_L2CACHE;
+ hdr->b_flags |= ARC_FLAG_L2CACHE;
if (l2arc_compress)
- hdr->b_flags |= ARC_L2COMPRESS;
+ hdr->b_flags |= ARC_FLAG_L2COMPRESS;
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_physdone = physdone;
@@ -4581,7 +4564,7 @@ arc_fini(void)
*/
static boolean_t
-l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
+l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
@@ -4590,19 +4573,19 @@ l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
- if (ab->b_spa != spa_guid) {
+ if (hdr->b_spa != spa_guid) {
ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
return (B_FALSE);
}
- if (ab->b_l2hdr != NULL) {
+ if (hdr->b_l2hdr != NULL) {
ARCSTAT_BUMP(arcstat_l2_write_in_l2);
return (B_FALSE);
}
- if (HDR_IO_IN_PROGRESS(ab)) {
+ if (HDR_IO_IN_PROGRESS(hdr)) {
ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
return (B_FALSE);
}
- if (!HDR_L2CACHE(ab)) {
+ if (!HDR_L2CACHE(hdr)) {
ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
return (B_FALSE);
}
@@ -4765,7 +4748,7 @@ l2arc_write_done(zio_t *zio)
l2arc_write_callback_t *cb;
l2arc_dev_t *dev;
list_t *buflist;
- arc_buf_hdr_t *head, *ab, *ab_prev;
+ arc_buf_hdr_t *head, *hdr, *hdr_prev;
l2arc_buf_hdr_t *abl2;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
@@ -4789,17 +4772,17 @@ l2arc_write_done(zio_t *zio)
/*
* All writes completed, or an error was hit.
*/
- for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
- ab_prev = list_prev(buflist, ab);
- abl2 = ab->b_l2hdr;
+ for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
+ hdr_prev = list_prev(buflist, hdr);
+ abl2 = hdr->b_l2hdr;
/*
* Release the temporary compressed buffer as soon as possible.
*/
if (abl2->b_compress != ZIO_COMPRESS_OFF)
- l2arc_release_cdata_buf(ab);
+ l2arc_release_cdata_buf(hdr);
- hash_lock = HDR_LOCK(ab);
+ hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* This buffer misses out. It may be in a stage
@@ -4814,20 +4797,20 @@ l2arc_write_done(zio_t *zio)
/*
* Error - drop L2ARC entry.
*/
- list_remove(buflist, ab);
+ list_remove(buflist, hdr);
ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
bytes_dropped += abl2->b_asize;
- ab->b_l2hdr = NULL;
+ hdr->b_l2hdr = NULL;
trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
- ab->b_size, 0);
+ hdr->b_size, 0);
kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
- ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
+ ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
}
/*
* Allow ARC to begin reads to this L2ARC entry.
*/
- ab->b_flags &= ~ARC_L2_WRITING;
+ hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
mutex_exit(hash_lock);
}
@@ -4975,7 +4958,7 @@ l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
l2arc_buf_hdr_t *abl2;
- arc_buf_hdr_t *ab, *ab_prev;
+ arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
int64_t bytes_evicted = 0;
@@ -5007,10 +4990,10 @@ l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
top:
mutex_enter(&l2arc_buflist_mtx);
- for (ab = list_tail(buflist); ab; ab = ab_prev) {
- ab_prev = list_prev(buflist, ab);
+ for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
+ hdr_prev = list_prev(buflist, hdr);
- hash_lock = HDR_LOCK(ab);
+ hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
@@ -5022,19 +5005,19 @@ top:
goto top;
}
- if (HDR_L2_WRITE_HEAD(ab)) {
+ if (HDR_L2_WRITE_HEAD(hdr)) {
/*
* We hit a write head node. Leave it for
* l2arc_write_done().
*/
- list_remove(buflist, ab);
+ list_remove(buflist, hdr);
mutex_exit(hash_lock);
continue;
}
- if (!all && ab->b_l2hdr != NULL &&
- (ab->b_l2hdr->b_daddr > taddr ||
- ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
+ if (!all && hdr->b_l2hdr != NULL &&
+ (hdr->b_l2hdr->b_daddr > taddr ||
+ hdr->b_l2hdr->b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
@@ -5043,7 +5026,7 @@ top:
break;
}
- if (HDR_FREE_IN_PROGRESS(ab)) {
+ if (HDR_FREE_IN_PROGRESS(hdr)) {
/*
* Already on the path to destruction.
*/
@@ -5051,49 +5034,49 @@ top:
continue;
}
- if (ab->b_state == arc_l2c_only) {
- ASSERT(!HDR_L2_READING(ab));
+ if (hdr->b_state == arc_l2c_only) {
+ ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_size.
*/
- arc_change_state(arc_anon, ab, hash_lock);
- arc_hdr_destroy(ab);
+ arc_change_state(arc_anon, hdr, hash_lock);
+ arc_hdr_destroy(hdr);
} else {
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
- if (HDR_L2_READING(ab)) {
+ if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
- ab->b_flags |= ARC_L2_EVICTED;
+ hdr->b_flags |= ARC_FLAG_L2_EVICTED;
}
/*
* Tell ARC this no longer exists in L2ARC.
*/
- if (ab->b_l2hdr != NULL) {
- abl2 = ab->b_l2hdr;
+ if (hdr->b_l2hdr != NULL) {
+ abl2 = hdr->b_l2hdr;
ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
bytes_evicted += abl2->b_asize;
- ab->b_l2hdr = NULL;
+ hdr->b_l2hdr = NULL;
/*
* We are destroying l2hdr, so ensure that
* its compressed buffer, if any, is not leaked.
*/
ASSERT(abl2->b_tmp_cdata == NULL);
kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
- ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
+ ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
}
- list_remove(buflist, ab);
+ list_remove(buflist, hdr);
/*
* This may have been leftover after a
* failed write.
*/
- ab->b_flags &= ~ARC_L2_WRITING;
+ hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
}
mutex_exit(hash_lock);
}
@@ -5106,7 +5089,7 @@ top:
/*
* Find and write ARC buffers to the L2ARC device.
*
- * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
+ * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The headroom_boost is an in-out parameter used to maintain headroom boost
* state between calls to this function.
@@ -5118,7 +5101,7 @@ static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
boolean_t *headroom_boost)
{
- arc_buf_hdr_t *ab, *ab_prev, *head;
+ arc_buf_hdr_t *hdr, *hdr_prev, *head;
list_t *list;
uint64_t write_asize, write_psize, write_sz, headroom,
buf_compress_minsz;
@@ -5140,7 +5123,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
write_sz = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
- head->b_flags |= ARC_L2_WRITE_HEAD;
+ head->b_flags |= ARC_FLAG_L2_WRITE_HEAD;
ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
/*
@@ -5166,28 +5149,28 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
- ab = list_head(list);
+ hdr = list_head(list);
else
- ab = list_tail(list);
- if (ab == NULL)
+ hdr = list_tail(list);
+ if (hdr == NULL)
ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
headroom = target_sz * l2arc_headroom * 2 / ARC_BUFC_NUMLISTS;
if (do_headroom_boost)
headroom = (headroom * l2arc_headroom_boost) / 100;
- for (; ab; ab = ab_prev) {
+ for (; hdr; hdr = hdr_prev) {
l2arc_buf_hdr_t *l2hdr;
kmutex_t *hash_lock;
uint64_t buf_sz;
if (arc_warm == B_FALSE)
- ab_prev = list_next(list, ab);
+ hdr_prev = list_next(list, hdr);
else
- ab_prev = list_prev(list, ab);
- ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
+ hdr_prev = list_prev(list, hdr);
+ ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, hdr->b_size);
- hash_lock = HDR_LOCK(ab);
+ hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
/*
@@ -5196,7 +5179,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
continue;
}
- passed_sz += ab->b_size;
+ passed_sz += hdr->b_size;
if (passed_sz > headroom) {
/*
* Searched too far.
@@ -5206,12 +5189,12 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
break;
}
- if (!l2arc_write_eligible(guid, ab)) {
+ if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
- if ((write_sz + ab->b_size) > target_sz) {
+ if ((write_sz + hdr->b_size) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_l2_write_full);
@@ -5240,31 +5223,31 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
*/
l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
l2hdr->b_dev = dev;
- ab->b_flags |= ARC_L2_WRITING;
+ hdr->b_flags |= ARC_FLAG_L2_WRITING;
/*
* Temporarily stash the data buffer in b_tmp_cdata.
* The subsequent write step will pick it up from
- * there. This is because can't access ab->b_buf
+ * there. This is because can't access hdr->b_buf
* without holding the hash_lock, which we in turn
* can't access without holding the ARC list locks
* (which we want to avoid during compression/writing).
*/
l2hdr->b_compress = ZIO_COMPRESS_OFF;
- l2hdr->b_asize = ab->b_size;
- l2hdr->b_tmp_cdata = ab->b_buf->b_data;
+ l2hdr->b_asize = hdr->b_size;
+ l2hdr->b_tmp_cdata = hdr->b_buf->b_data;
- buf_sz = ab->b_size;
- ab->b_l2hdr = l2hdr;
+ buf_sz = hdr->b_size;
+ hdr->b_l2hdr = l2hdr;
- list_insert_head(dev->l2ad_buflist, ab);
+ list_insert_head(dev->l2ad_buflist, hdr);
/*
* Compute and store the buffer cksum before
* writing. On debug the cksum is verified first.
*/
- arc_cksum_verify(ab->b_buf);
- arc_cksum_compute(ab->b_buf, B_TRUE);
+ arc_cksum_verify(hdr->b_buf);
+ arc_cksum_compute(hdr->b_buf, B_TRUE);
mutex_exit(hash_lock);
@@ -5290,21 +5273,22 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
* and work backwards, retracing the course of the buffer selector
* loop above.
*/
- for (ab = list_prev(dev->l2ad_buflist, head); ab;
- ab = list_prev(dev->l2ad_buflist, ab)) {
+ for (hdr = list_prev(dev->l2ad_buflist, head); hdr;
+ hdr = list_prev(dev->l2ad_buflist, hdr)) {
l2arc_buf_hdr_t *l2hdr;
uint64_t buf_sz;
/*
* We shouldn't need to lock the buffer here, since we flagged
- * it as ARC_L2_WRITING in the previous step, but we must take
- * care to only access its L2 cache parameters. In particular,
- * ab->b_buf may be invalid by now due to ARC eviction.
+ * it as ARC_FLAG_L2_WRITING in the previous step, but we must
+ * take care to only access its L2 cache parameters. In
+ * particular, hdr->b_buf may be invalid by now due to
+ * ARC eviction.
*/
- l2hdr = ab->b_l2hdr;
+ l2hdr = hdr->b_l2hdr;
l2hdr->b_daddr = dev->l2ad_hand;
- if ((ab->b_flags & ARC_L2COMPRESS) &&
+ if ((hdr->b_flags & ARC_FLAG_L2COMPRESS) &&
l2hdr->b_asize >= buf_compress_minsz) {
if (l2arc_compress_buf(l2hdr)) {
/*
@@ -5518,9 +5502,9 @@ l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
* done, we can dispose of it.
*/
static void
-l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
+l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
{
- l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
+ l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
ASSERT(L2ARC_IS_VALID_COMPRESS(l2hdr->b_compress));
if (l2hdr->b_compress != ZIO_COMPRESS_EMPTY) {
@@ -5529,7 +5513,7 @@ l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
* temporary buffer for it, so now we need to release it.
*/
ASSERT(l2hdr->b_tmp_cdata != NULL);
- zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
+ zio_data_buf_free(l2hdr->b_tmp_cdata, hdr->b_size);
l2hdr->b_tmp_cdata = NULL;
} else {
ASSERT(l2hdr->b_tmp_cdata == NULL);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
index 2cdc3d4..ea3c688 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
@@ -507,7 +507,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
dnode_t *dn;
zbookmark_phys_t zb;
- uint32_t aflags = ARC_NOWAIT;
+ arc_flags_t aflags = ARC_FLAG_NOWAIT;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
@@ -560,9 +560,9 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
mutex_exit(&db->db_mtx);
if (DBUF_IS_L2CACHEABLE(db))
- aflags |= ARC_L2CACHE;
+ aflags |= ARC_FLAG_L2CACHE;
if (DBUF_IS_L2COMPRESSIBLE(db))
- aflags |= ARC_L2COMPRESS;
+ aflags |= ARC_FLAG_L2COMPRESS;
SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
@@ -574,7 +574,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
(*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
&aflags, &zb);
- if (aflags & ARC_CACHED)
+ if (aflags & ARC_FLAG_CACHED)
*flags |= DB_RF_CACHED;
}
@@ -1863,7 +1863,8 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio)
if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
if (bp && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
- uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
+ arc_flags_t aflags =
+ ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c
index cc81dc6..bd9e894 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c
@@ -152,7 +152,7 @@ diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
} else if (zb->zb_level == 0) {
dnode_phys_t *blk;
arc_buf_t *abuf;
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
int blksz = BP_GET_LSIZE(bp);
int i;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c
index e39264c..36ac27a 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c
@@ -293,15 +293,15 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
os->os_spa = spa;
os->os_rootbp = bp;
if (!BP_IS_HOLE(os->os_rootbp)) {
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (DMU_OS_IS_L2CACHEABLE(os))
- aflags |= ARC_L2CACHE;
+ aflags |= ARC_FLAG_L2CACHE;
if (DMU_OS_IS_L2COMPRESSIBLE(os))
- aflags |= ARC_L2COMPRESS;
+ aflags |= ARC_FLAG_L2COMPRESS;
dprintf_bp(os->os_rootbp, "reading %s", "");
err = arc_read(NULL, spa, os->os_rootbp,
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
index 86aee07..d74c2cf 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
@@ -479,7 +479,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
dnode_phys_t *blk;
int i;
int blksz = BP_GET_LSIZE(bp);
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf;
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
@@ -497,7 +497,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
}
(void) arc_buf_remove_ref(abuf, &abuf);
} else if (type == DMU_OT_SA) {
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf;
int blksz = BP_GET_LSIZE(bp);
@@ -514,7 +514,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
err = dump_write_embedded(dsp, zb->zb_object,
zb->zb_blkid * blksz, blksz, bp);
} else { /* it's a level-0 block of a regular object */
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf;
int blksz = BP_GET_LSIZE(bp);
uint64_t offset;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c
index bffdff6..7b1d6be 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c
@@ -178,7 +178,7 @@ static void
traverse_prefetch_metadata(traverse_data_t *td,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
- uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;
+ arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
return;
@@ -275,7 +275,7 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
}
if (BP_GET_LEVEL(bp) > 0) {
- uint32_t flags = ARC_WAIT;
+ arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
@@ -303,7 +303,7 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
break;
}
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
- uint32_t flags = ARC_WAIT;
+ arc_flags_t flags = ARC_FLAG_WAIT;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
@@ -326,7 +326,7 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
break;
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
- uint32_t flags = ARC_WAIT;
+ arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
dnode_phys_t *dnp;
@@ -442,7 +442,7 @@ traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
prefetch_data_t *pfd = arg;
- uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
+ arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
ASSERT(pfd->pd_blks_fetched >= 0);
if (pfd->pd_cancel)
@@ -533,7 +533,7 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
/* See comment on ZIL traversal in dsl_scan_visitds. */
if (ds != NULL && !dsl_dataset_is_snapshot(ds) && !BP_IS_HOLE(rootbp)) {
- uint32_t flags = ARC_WAIT;
+ arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c
index 135397d..3b1d732 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c
@@ -571,7 +571,7 @@ dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
uint64_t objset, uint64_t object, uint64_t blkid)
{
zbookmark_phys_t czb;
- uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;
+ arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
if (zfs_no_scrub_prefetch)
return;
@@ -636,7 +636,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
int err;
if (BP_GET_LEVEL(bp) > 0) {
- uint32_t flags = ARC_WAIT;
+ arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
@@ -663,7 +663,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
}
(void) arc_buf_remove_ref(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
- uint32_t flags = ARC_WAIT;
+ arc_flags_t flags = ARC_FLAG_WAIT;
dnode_phys_t *cdnp;
int i, j;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
@@ -689,7 +689,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
(void) arc_buf_remove_ref(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
- uint32_t flags = ARC_WAIT;
+ arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
index 9065a22..eb21867 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
@@ -46,6 +46,36 @@ typedef int arc_evict_func_t(void *priv);
arc_done_func_t arc_bcopy_func;
arc_done_func_t arc_getbuf_func;
+typedef enum arc_flags
+{
+ /*
+ * Public flags that can be passed into the ARC by external consumers.
+ */
+ ARC_FLAG_NONE = 1 << 0, /* No flags set */
+ ARC_FLAG_WAIT = 1 << 1, /* perform sync I/O */
+ ARC_FLAG_NOWAIT = 1 << 2, /* perform async I/O */
+ ARC_FLAG_PREFETCH = 1 << 3, /* I/O is a prefetch */
+ ARC_FLAG_CACHED = 1 << 4, /* I/O was in cache */
+ ARC_FLAG_L2CACHE = 1 << 5, /* cache in L2ARC */
+ ARC_FLAG_L2COMPRESS = 1 << 6, /* compress in L2ARC */
+
+ /*
+ * Private ARC flags. These flags are private ARC only flags that
+ * will show up in b_flags in the arc_hdr_buf_t. These flags should
+ * only be set by ARC code.
+ */
+ ARC_FLAG_IN_HASH_TABLE = 1 << 7, /* buffer is hashed */
+ ARC_FLAG_IO_IN_PROGRESS = 1 << 8, /* I/O in progress */
+ ARC_FLAG_IO_ERROR = 1 << 9, /* I/O failed for buf */
+ ARC_FLAG_FREED_IN_READ = 1 << 10, /* freed during read */
+ ARC_FLAG_BUF_AVAILABLE = 1 << 11, /* block not in use */
+ ARC_FLAG_INDIRECT = 1 << 12, /* indirect block */
+ ARC_FLAG_FREE_IN_PROGRESS = 1 << 13, /* about to be freed */
+ ARC_FLAG_L2_WRITING = 1 << 14, /* write in progress */
+ ARC_FLAG_L2_EVICTED = 1 << 15, /* evicted during I/O */
+ ARC_FLAG_L2_WRITE_HEAD = 1 << 16, /* head of write list */
+} arc_flags_t;
+
struct arc_buf {
arc_buf_hdr_t *b_hdr;
arc_buf_t *b_next;
@@ -60,15 +90,6 @@ typedef enum arc_buf_contents {
ARC_BUFC_METADATA, /* buffer contains metadata */
ARC_BUFC_NUMTYPES
} arc_buf_contents_t;
-/*
- * These are the flags we pass into calls to the arc
- */
-#define ARC_WAIT (1 << 1) /* perform I/O synchronously */
-#define ARC_NOWAIT (1 << 2) /* perform I/O asynchronously */
-#define ARC_PREFETCH (1 << 3) /* I/O is a prefetch */
-#define ARC_CACHED (1 << 4) /* I/O was already in cache */
-#define ARC_L2CACHE (1 << 5) /* cache in L2ARC */
-#define ARC_L2COMPRESS (1 << 6) /* compress in L2ARC */
/*
* The following breakdows of arc_size exist for kstat only.
@@ -102,7 +123,7 @@ int arc_referenced(arc_buf_t *buf);
int arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_done_func_t *done, void *priv, zio_priority_t priority, int flags,
- uint32_t *arc_flags, const zbookmark_phys_t *zb);
+ arc_flags_t *arc_flags, const zbookmark_phys_t *zb);
zio_t *arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c
index 2084d88..2dcfe06 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c
@@ -190,7 +190,7 @@ zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
char **end)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
@@ -266,7 +266,7 @@ zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
index 8408afb..3925e90 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
@@ -2250,7 +2250,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
if (ddp->ddp_phys_birth != 0) {
arc_buf_t *abuf = NULL;
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
OpenPOWER on IntegriCloud