CI tests for combined #16722 and #16040 #16744

Closed
18 changes: 10 additions & 8 deletions module/zfs/arc.c
@@ -9287,6 +9287,14 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR |
ARC_FLAG_L2_WRITING);

(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

mutex_enter(&dev->l2ad_mtx);
if (pio == NULL) {
/*
@@ -9298,12 +9306,6 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
}
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR |
ARC_FLAG_L2_WRITING);

(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
l2arc_hdr_arcstats_increment(hdr);

boolean_t commit = l2arc_log_blk_insert(dev, hdr);
mutex_exit(hash_lock);
@@ -9333,7 +9335,6 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

if (commit) {
/* l2ad_hand will be adjusted inside. */
@@ -10585,6 +10586,8 @@ l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
(void) zio_nowait(wzio);

dev->l2ad_hand += asize;
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
@@ -10598,7 +10601,6 @@ l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
28 changes: 26 additions & 2 deletions module/zfs/dsl_dataset.c
@@ -68,6 +68,7 @@
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zio_checksum.h>
#include <sys/brt.h>

/*
* The SPA supports block sizes up to 16MB. However, very large blocks
@@ -289,18 +290,41 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
if (BP_GET_LOGICAL_BIRTH(bp) > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
int64_t delta;

dprintf_bp(bp, "freeing ds=%llu", (u_longlong_t)ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
/*
* Put blocks that would create IO on the pool's deadlist for
* dsl_process_async_destroys() to find. This is to prevent
* zio_free() from creating a ZIO_TYPE_FREE IO for them, which
* are very heavy and can lead to out-of-memory conditions if
* something tries to free millions of blocks on the same txg.
*/
boolean_t defer = spa_version(spa) >= SPA_VERSION_DEADLISTS &&
(BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
brt_maybe_exists(spa, bp));

if (defer) {
dprintf_bp(bp, "putting on free list: %s", "");
bpobj_enqueue(&ds->ds_dir->dd_pool->dp_free_bpobj,
bp, B_FALSE, tx);
} else {
dprintf_bp(bp, "freeing ds=%llu",
(u_longlong_t)ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
}

mutex_enter(&ds->ds_lock);
ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
delta = parent_delta(ds, -used);
dsl_dataset_phys(ds)->ds_unique_bytes -= used;
mutex_exit(&ds->ds_lock);

dsl_dir_diduse_transfer_space(ds->ds_dir,
delta, -compressed, -uncompressed, -used,
DD_USED_REFRSRV, DD_USED_HEAD, tx);

if (defer)
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
DD_USED_HEAD, used, compressed, uncompressed, tx);
} else {
dprintf_bp(bp, "putting on dead list: %s", "");
if (async) {
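
The dsl_dataset.c hunk above routes gang, deduplicated, and possibly BRT-cloned blocks onto the pool's free bpobj instead of freeing them synchronously with dsl_free(); per the in-code comment, this keeps a single txg from issuing huge numbers of heavy ZIO_TYPE_FREE I/Os. The sketch below is a minimal standalone model of that decision, not ZFS code: toy_bp, should_defer_free(), and its boolean fields are hypothetical stand-ins for blkptr_t, the defer predicate, and the BP_IS_GANG()/BP_GET_DEDUP()/brt_maybe_exists() checks used in the diff.

```c
/*
 * Toy model (not ZFS code) of the defer decision added in
 * dsl_dataset_block_kill(): block pointers whose free would issue I/O
 * are queued for asynchronous destruction instead of being freed in
 * the current txg.  The struct and helper below are simplified
 * stand-ins for the real blkptr_t and SPA interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_bp {
	bool is_gang;       /* stand-in for BP_IS_GANG(bp) */
	bool is_dedup;      /* stand-in for BP_GET_DEDUP(bp) */
	bool maybe_cloned;  /* stand-in for brt_maybe_exists(spa, bp) */
};

/* Mirrors the predicate in the hunk: defer any block whose free may do I/O. */
static bool
should_defer_free(bool pool_has_deadlists, const struct toy_bp *bp)
{
	return (pool_has_deadlists &&
	    (bp->is_gang || bp->is_dedup || bp->maybe_cloned));
}

int
main(void)
{
	struct toy_bp plain = { false, false, false };
	struct toy_bp cloned = { false, false, true };

	/* Plain blocks take the immediate dsl_free() path. */
	printf("plain block deferred:  %d\n",
	    should_defer_free(true, &plain));
	/* Gang/dedup/cloned blocks take the bpobj_enqueue() path. */
	printf("cloned block deferred: %d\n",
	    should_defer_free(true, &cloned));
	return (0);
}
```

When a block is deferred, the hunk also charges its space to dp_free_dir via dsl_dir_diduse_space(), so the space stays accounted as pending-free until dsl_process_async_destroys() works through the free bpobj in later txgs.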