Diffstat (limited to 'sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c')
-rw-r--r-- | sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c | 148
1 file changed, 136 insertions, 12 deletions
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
index 2f26e9fb..9d5c398 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
@@ -481,13 +481,49 @@ dbuf_verify(dmu_buf_impl_t *db)
          * If the blkptr isn't set but they have nonzero data,
          * it had better be dirty, otherwise we'll lose that
          * data when we evict this buffer.
+         *
+         * There is an exception to this rule for indirect blocks; in
+         * this case, if the indirect block is a hole, we fill in a few
+         * fields on each of the child blocks (importantly, birth time)
+         * to prevent hole birth times from being lost when you
+         * partially fill in a hole.
          */
         if (db->db_dirtycnt == 0) {
-                uint64_t *buf = db->db.db_data;
-                int i;
+                if (db->db_level == 0) {
+                        uint64_t *buf = db->db.db_data;
+                        int i;
 
-                for (i = 0; i < db->db.db_size >> 3; i++) {
-                        ASSERT(buf[i] == 0);
+                        for (i = 0; i < db->db.db_size >> 3; i++) {
+                                ASSERT(buf[i] == 0);
+                        }
+                } else {
+                        blkptr_t *bps = db->db.db_data;
+                        ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
+                            db->db.db_size);
+                        /*
+                         * We want to verify that all the blkptrs in the
+                         * indirect block are holes, but we may have
+                         * automatically set up a few fields for them.
+                         * We iterate through each blkptr and verify
+                         * they only have those fields set.
+                         */
+                        for (int i = 0;
+                            i < db->db.db_size / sizeof (blkptr_t);
+                            i++) {
+                                blkptr_t *bp = &bps[i];
+                                ASSERT(ZIO_CHECKSUM_IS_ZERO(
+                                    &bp->blk_cksum));
+                                ASSERT(
+                                    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
+                                    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
+                                    DVA_IS_EMPTY(&bp->blk_dva[2]));
+                                ASSERT0(bp->blk_fill);
+                                ASSERT0(bp->blk_pad[0]);
+                                ASSERT0(bp->blk_pad[1]);
+                                ASSERT(!BP_IS_EMBEDDED(bp));
+                                ASSERT(BP_IS_HOLE(bp));
+                                ASSERT0(bp->blk_phys_birth);
+                            }
                 }
         }
 }
@@ -655,10 +691,31 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
             BP_IS_HOLE(db->db_blkptr)))) {
                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
 
-                DB_DNODE_EXIT(db);
                 dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
                     db->db.db_size, db, type));
                 bzero(db->db.db_data, db->db.db_size);
+
+                if (db->db_blkptr != NULL && db->db_level > 0 &&
+                    BP_IS_HOLE(db->db_blkptr) &&
+                    db->db_blkptr->blk_birth != 0) {
+                        blkptr_t *bps = db->db.db_data;
+                        for (int i = 0; i < ((1 <<
+                            DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
+                            i++) {
+                                blkptr_t *bp = &bps[i];
+                                ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
+                                    1 << dn->dn_indblkshift);
+                                BP_SET_LSIZE(bp,
+                                    BP_GET_LEVEL(db->db_blkptr) == 1 ?
+                                    dn->dn_datablksz :
+                                    BP_GET_LSIZE(db->db_blkptr));
+                                BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
+                                BP_SET_LEVEL(bp,
+                                    BP_GET_LEVEL(db->db_blkptr) - 1);
+                                BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
+                        }
+                }
+                DB_DNODE_EXIT(db);
                 db->db_state = DB_CACHED;
                 mutex_exit(&db->db_mtx);
                 return;
@@ -2876,7 +2933,8 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
         uint64_t fill = 0;
         int i;
 
-        ASSERT3P(db->db_blkptr, ==, bp);
+        ASSERT3P(db->db_blkptr, !=, NULL);
+        ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
 
         DB_DNODE_ENTER(db);
         dn = DB_DNODE(db);
@@ -2898,7 +2956,7 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
 #ifdef ZFS_DEBUG
         if (db->db_blkid == DMU_SPILL_BLKID) {
                 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
-                ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
+                ASSERT(!(BP_IS_HOLE(bp)) &&
                     db->db_blkptr == &dn->dn_phys->dn_spill);
         }
 #endif
@@ -2939,6 +2997,49 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
         bp->blk_fill = fill;
 
         mutex_exit(&db->db_mtx);
+
+        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
+        *db->db_blkptr = *bp;
+        rw_exit(&dn->dn_struct_rwlock);
+}
+
+/* ARGSUSED */
+/*
+ * This function gets called just prior to running through the compression
+ * stage of the zio pipeline. If we're an indirect block comprised of only
+ * holes, then we want this indirect to be compressed away to a hole. In
+ * order to do that we must zero out any information about the holes that
+ * this indirect points to before we try to compress it.
+ */
+static void
+dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
+{
+        dmu_buf_impl_t *db = vdb;
+        dnode_t *dn;
+        blkptr_t *bp;
+        uint64_t i;
+        int epbs;
+
+        ASSERT3U(db->db_level, >, 0);
+        DB_DNODE_ENTER(db);
+        dn = DB_DNODE(db);
+        epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
+
+        /* Determine if all our children are holes */
+        for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
+                if (!BP_IS_HOLE(bp))
+                        break;
+        }
+
+        /*
+         * If all the children are holes, then zero them all out so that
+         * we may get compressed away.
+         */
+        if (i == 1 << epbs) {
+                /* didn't find any non-holes */
+                bzero(db->db.db_data, db->db.db_size);
+        }
+        DB_DNODE_EXIT(db);
 }
 
 /*
@@ -3117,6 +3218,8 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
         zio_t *zio;
         int wp_flag = 0;
 
+        ASSERT(dmu_tx_is_syncing(tx));
+
         DB_DNODE_ENTER(db);
         dn = DB_DNODE(db);
         os = dn->dn_objset;
@@ -3175,6 +3278,14 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
         dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
         DB_DNODE_EXIT(db);
 
+        /*
+         * We copy the blkptr now (rather than when we instantiate the dirty
+         * record), because its value can change between open context and
+         * syncing context. We do not need to hold dn_struct_rwlock to read
+         * db_blkptr because we are in syncing context.
+         */
+        dr->dr_bp_copy = *db->db_blkptr;
+
         if (db->db_level == 0 &&
             dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
                 /*
@@ -3184,8 +3295,9 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
                 void *contents = (data != NULL) ? data->b_data : NULL;
 
                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
-                    db->db_blkptr, contents, db->db.db_size, &zp,
-                    dbuf_write_override_ready, NULL, dbuf_write_override_done,
+                    &dr->dr_bp_copy, contents, db->db.db_size, &zp,
+                    dbuf_write_override_ready, NULL, NULL,
+                    dbuf_write_override_done,
                     dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
                 mutex_enter(&db->db_mtx);
                 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
@@ -3196,15 +3308,27 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
                 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
                     zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
-                    db->db_blkptr, NULL, db->db.db_size, &zp,
-                    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
+                    &dr->dr_bp_copy, NULL, db->db.db_size, &zp,
+                    dbuf_write_nofill_ready, NULL, NULL,
+                    dbuf_write_nofill_done, db,
                     ZIO_PRIORITY_ASYNC_WRITE,
                     ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
         } else {
                 ASSERT(arc_released(data));
+
+                /*
+                 * For indirect blocks, we want to setup the children
+                 * ready callback so that we can properly handle an indirect
+                 * block that only contains holes.
+                 */
+                arc_done_func_t *children_ready_cb = NULL;
+                if (db->db_level != 0)
+                        children_ready_cb = dbuf_write_children_ready;
+
                 dr->dr_zio = arc_write(zio, os->os_spa, txg,
-                    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
+                    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
                     DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
+                    children_ready_cb,
                     dbuf_write_physdone, dbuf_write_done, db,
                     ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
         }
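
To make the read-side hunk easier to follow: when dbuf_read_impl() sees an indirect block pointer that is a hole with a nonzero birth time, it fills the zeroed buffer with child pointers that inherit the parent's type and birth time, drop one level of indirection, and take their logical size from either the data block size (parent at level 1) or the indirect block size. The stand-alone C sketch below models only that fill-in; fake_bp_t, fill_hole_children(), and the field layout are simplified stand-ins chosen for illustration, not ZFS's blkptr_t or its BP_SET_* macros.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Simplified stand-in for ZFS's blkptr_t; only the fields touched by the
 * hole-birth fill-in are modeled here.
 */
typedef struct fake_bp {
        uint64_t birth;         /* hole birth txg */
        uint64_t lsize;         /* logical size of the block pointed to */
        int level;              /* indirection level of the block pointed to */
        int type;               /* object type */
} fake_bp_t;

/*
 * Rough model of the dbuf_read_impl() hunk above: every child pointer in the
 * zero-filled indirect buffer inherits type and birth time from the hole
 * parent, drops one level of indirection, and gets an lsize of either the
 * data block size (parent at level 1) or the indirect block size.
 */
static void
fill_hole_children(fake_bp_t *children, int nchildren,
    const fake_bp_t *parent, uint64_t datablksz)
{
        for (int i = 0; i < nchildren; i++) {
                fake_bp_t *bp = &children[i];

                bp->lsize = (parent->level == 1) ?
                    datablksz : parent->lsize;
                bp->type = parent->type;
                bp->level = parent->level - 1;
                bp->birth = parent->birth;
        }
}

int
main(void)
{
        fake_bp_t parent = { .birth = 42, .lsize = 16384, .level = 1,
            .type = 19 };
        fake_bp_t children[4];

        memset(children, 0, sizeof (children));
        fill_hole_children(children, 4, &parent, 131072);
        printf("child 0: birth %ju lsize %ju level %d\n",
            (uintmax_t)children[0].birth, (uintmax_t)children[0].lsize,
            children[0].level);
        return (0);
}

This is why the new dbuf_verify() branch tolerates a hole blkptr with lsize, type, level, and birth set but still insists on empty DVAs, zero checksum, and zero fill count.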
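On the write side, dbuf_write_children_ready() runs just before the compression stage and zeroes an indirect buffer whose children are all holes, so the indirect block itself can compress away to a hole. A minimal stand-alone sketch of that check follows; toy_bp_t, toy_bp_is_hole(), and zero_if_all_holes() are illustrative names, not the BP_IS_HOLE()/bzero() machinery used in the diff.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-in for a block pointer: a hole is one with no DVAs set. */
typedef struct toy_bp {
        uint64_t dva[3];
        uint64_t birth;
} toy_bp_t;

static bool
toy_bp_is_hole(const toy_bp_t *bp)
{
        return (bp->dva[0] == 0 && bp->dva[1] == 0 && bp->dva[2] == 0);
}

/*
 * Same shape as dbuf_write_children_ready() above: scan the children, and if
 * none of them is allocated, zero the whole buffer (including any hole birth
 * times filled in at read time) so the compression stage can collapse the
 * indirect block itself into a hole.
 */
static void
zero_if_all_holes(toy_bp_t *children, size_t nchildren)
{
        size_t i;

        for (i = 0; i < nchildren; i++) {
                if (!toy_bp_is_hole(&children[i]))
                        break;
        }
        if (i == nchildren) {
                /* didn't find any non-holes */
                memset(children, 0, nchildren * sizeof (children[0]));
        }
}

int
main(void)
{
        toy_bp_t kids[4];

        memset(kids, 0, sizeof (kids));
        kids[1].birth = 7;      /* a hole with a birth time is still a hole */
        zero_if_all_holes(kids, 4);
        printf("kids[1].birth after callback: %ju\n",
            (uintmax_t)kids[1].birth);
        return (0);
}

Zeroing here (rather than leaving the synthesized fields in place) matters because a buffer of all-zero block pointers is what the later pipeline stage can recognize and collapse into a hole.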