Linux 6.12 compat: Rename range_tree_* to zfs_range_tree_*
Linux 6.12 has conflicting range_tree_{find,destroy,clear} symbols.

Signed-off-by: Ivan Volosyuk <[email protected]>
IvanVolosyuk committed Jan 30, 2025
1 parent 12f0baf commit 95b8396
Showing 28 changed files with 639 additions and 639 deletions.
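
The rename is purely mechanical: every range_tree_* type and function name gains a zfs_ prefix, and helpers whose names merely contain range_tree, such as metaslab_calculate_range_tree_type(), are renamed the same way. As an illustrative sketch only (not an excerpt from any single hunk below; rt, offset, and size stand in for a hypothetical caller's variables, and the ZFS internal headers are assumed to be in scope), a typical call site changes like this:

	/* hypothetical caller; signatures match their use in the hunks below */
-	range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
-	range_tree_add(rt, offset, size);
-	if (range_tree_contains(rt, offset, size))
-		range_tree_remove(rt, offset, size);
-	range_tree_vacate(rt, NULL, NULL);
-	range_tree_destroy(rt);
+	zfs_range_tree_t *rt = zfs_range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+	zfs_range_tree_add(rt, offset, size);
+	if (zfs_range_tree_contains(rt, offset, size))
+		zfs_range_tree_remove(rt, offset, size);
+	zfs_range_tree_vacate(rt, NULL, NULL);
+	zfs_range_tree_destroy(rt);

The range_seg_type_t constants such as RANGE_SEG64 keep their existing names; only identifiers containing range_tree are prefixed.
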
96 changes: 48 additions & 48 deletions cmd/zdb/zdb.c
@@ -122,7 +122,7 @@ static int flagbits[256];

static uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;
static zfs_range_tree_t *mos_refd_objs;
static spa_t *spa;
static objset_t *os;
static boolean_t kernel_init_done;
@@ -325,7 +325,7 @@ typedef struct metaslab_verify {
/*
* What's currently allocated for this metaslab.
*/
range_tree_t *mv_allocated;
zfs_range_tree_t *mv_allocated;
} metaslab_verify_t;

typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
@@ -417,7 +417,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
uint64_t txg = sme->sme_txg;

if (sme->sme_type == SM_ALLOC) {
if (range_tree_contains(mv->mv_allocated,
if (zfs_range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
@@ -426,11 +426,11 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_add(mv->mv_allocated,
zfs_range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
if (!range_tree_contains(mv->mv_allocated,
if (!zfs_range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
@@ -439,7 +439,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_remove(mv->mv_allocated,
zfs_range_tree_remove(mv->mv_allocated,
offset, size);
}
}
@@ -615,10 +615,10 @@ livelist_metaslab_validate(spa_t *spa)

uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
metaslab_calculate_zfs_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
mv.mv_allocated = range_tree_create(NULL,
mv.mv_allocated = zfs_range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
@@ -633,8 +633,8 @@ livelist_metaslab_validate(spa_t *spa)
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);

range_tree_vacate(mv.mv_allocated, NULL, NULL);
range_tree_destroy(mv.mv_allocated);
zfs_range_tree_vacate(mv.mv_allocated, NULL, NULL);
zfs_range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
@@ -1633,9 +1633,9 @@ static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_allocatable;
zfs_range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
int free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size;

/* max sure nicenum has enough space */
_Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
@@ -1668,7 +1668,7 @@ dump_metaslab(metaslab_t *msp)
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_allocatable);
zfs_range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
@@ -2292,12 +2292,12 @@ dump_dtl(vdev_t *vd, int indent)
required ? "DTL-required" : "DTL-expendable");

for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
zfs_range_tree_t *rt = vd->vdev_dtl[t];
if (zfs_range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
range_tree_walk(rt, dump_dtl_seg, prefix);
zfs_range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
@@ -6258,9 +6258,9 @@ load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
return (0);

if (sme->sme_type == SM_ALLOC)
range_tree_add(svr->svr_allocd_segs, offset, size);
zfs_range_tree_add(svr->svr_allocd_segs, offset, size);
else
range_tree_remove(svr->svr_allocd_segs, offset, size);
zfs_range_tree_remove(svr->svr_allocd_segs, offset, size);

return (0);
}
@@ -6314,18 +6314,18 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

ASSERT0(range_tree_space(svr->svr_allocd_segs));
ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));

range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
zfs_range_tree_t *allocs = zfs_range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];

ASSERT0(range_tree_space(allocs));
ASSERT0(zfs_range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
zfs_range_tree_vacate(allocs, zfs_range_tree_add, svr->svr_allocd_segs);
}
range_tree_destroy(allocs);
zfs_range_tree_destroy(allocs);

iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);

@@ -6334,12 +6334,12 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
* because we have not allocated mappings for
* it yet.
*/
range_tree_clear(svr->svr_allocd_segs,
zfs_range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));

zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
zcb->zcb_removing_size += zfs_range_tree_space(svr->svr_allocd_segs);
zfs_range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);

spa_config_exit(spa, SCL_CONFIG, FTAG);
}
@@ -6442,7 +6442,7 @@ checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
zfs_range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);

cseea->cseea_checkpoint_size += sme->sme_run;
@@ -6573,9 +6573,9 @@ load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
return (0);

if (*uic_maptype == sme->sme_type)
range_tree_add(ms->ms_allocatable, offset, size);
zfs_range_tree_add(ms->ms_allocatable, offset, size);
else
range_tree_remove(ms->ms_allocatable, offset, size);
zfs_range_tree_remove(ms->ms_allocatable, offset, size);

return (0);
}
@@ -6609,7 +6609,7 @@ load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
(longlong_t)vd->vdev_ms_count);

mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);

/*
* We don't want to spend the CPU manipulating the
@@ -6642,7 +6642,7 @@ load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);

/*
* We don't want to spend the CPU manipulating the
@@ -6666,7 +6666,7 @@
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
zfs_range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}

if (!msp->ms_loaded)
@@ -6812,7 +6812,7 @@ zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1ULL << vd->vdev_ashift) {
if (range_tree_contains(msp->ms_allocatable,
if (zfs_range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1ULL << vd->vdev_ashift)) {
obsolete_bytes += 1ULL << vd->vdev_ashift;
}
@@ -6895,10 +6895,10 @@ zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
range_tree_vacate(msp->ms_allocatable,
zfs_range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
range_tree_vacate(msp->ms_allocatable,
zfs_range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
@@ -7796,7 +7796,7 @@ verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
* their respective ms_allocateable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
range_tree_verify_not_present(ms->ms_allocatable,
zfs_range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);

@@ -7947,8 +7947,8 @@ verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
range_tree_walk(ckpoint_msp->ms_allocatable,
(range_tree_func_t *)range_tree_verify_not_present,
zfs_range_tree_walk(ckpoint_msp->ms_allocatable,
(zfs_range_tree_func_t *)zfs_range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
@@ -8088,7 +8088,7 @@ static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
range_tree_add(mos_refd_objs, obj, 1);
zfs_range_tree_add(mos_refd_objs, obj, 1);
}

/*
@@ -8098,8 +8098,8 @@ static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
!range_tree_contains(mos_refd_objs, obj, 1))
range_tree_add(mos_refd_objs, obj, 1);
!zfs_range_tree_contains(mos_refd_objs, obj, 1))
zfs_range_tree_add(mos_refd_objs, obj, 1);
}

static void
@@ -8296,8 +8296,8 @@ dump_mos_leaks(spa_t *spa)
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
if (range_tree_contains(mos_refd_objs, object, 1)) {
range_tree_remove(mos_refd_objs, object, 1);
if (zfs_range_tree_contains(mos_refd_objs, object, 1)) {
zfs_range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
@@ -8315,11 +8315,11 @@
rv = 2;
}
}
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!range_tree_is_empty(mos_refd_objs))
(void) zfs_range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!zfs_range_tree_is_empty(mos_refd_objs))
rv = 2;
range_tree_vacate(mos_refd_objs, NULL, NULL);
range_tree_destroy(mos_refd_objs);
zfs_range_tree_vacate(mos_refd_objs, NULL, NULL);
zfs_range_tree_destroy(mos_refd_objs);
return (rv);
}

@@ -8441,7 +8441,7 @@ dump_zpool(spa_t *spa)

if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
mos_refd_objs = zfs_range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
dump_objset(dp->dp_meta_objset);

2 changes: 1 addition & 1 deletion include/sys/metaslab.h
@@ -139,7 +139,7 @@ void metaslab_set_selected_txg(metaslab_t *, uint64_t);

extern int metaslab_debug_load;

range_seg_type_t metaslab_calculate_range_tree_type(vdev_t *vdev,
range_seg_type_t metaslab_calculate_zfs_range_tree_type(vdev_t *vdev,
metaslab_t *msp, uint64_t *start, uint64_t *shift);

#ifdef __cplusplus
18 changes: 9 additions & 9 deletions include/sys/metaslab_impl.h
@@ -398,8 +398,8 @@ struct metaslab {
uint64_t ms_size;
uint64_t ms_fragmentation;

range_tree_t *ms_allocating[TXG_SIZE];
range_tree_t *ms_allocatable;
zfs_range_tree_t *ms_allocating[TXG_SIZE];
zfs_range_tree_t *ms_allocatable;
uint64_t ms_allocated_this_txg;
uint64_t ms_allocating_total;

@@ -408,10 +408,10 @@ struct metaslab {
* ms_free*tree only have entries while syncing, and are empty
* between syncs.
*/
range_tree_t *ms_freeing; /* to free this syncing txg */
range_tree_t *ms_freed; /* already freed this syncing txg */
range_tree_t *ms_defer[TXG_DEFER_SIZE];
range_tree_t *ms_checkpointing; /* to add to the checkpoint */
zfs_range_tree_t *ms_freeing; /* to free this syncing txg */
zfs_range_tree_t *ms_freed; /* already freed this syncing txg */
zfs_range_tree_t *ms_defer[TXG_DEFER_SIZE];
zfs_range_tree_t *ms_checkpointing; /* to add to the checkpoint */

/*
* The ms_trim tree is the set of allocatable segments which are
@@ -421,7 +421,7 @@ struct metaslab {
* is unloaded. Its purpose is to aggregate freed ranges to
* facilitate efficient trimming.
*/
range_tree_t *ms_trim;
zfs_range_tree_t *ms_trim;

boolean_t ms_condensing; /* condensing? */
boolean_t ms_condense_wanted;
@@ -542,8 +542,8 @@ struct metaslab {
* Allocs and frees that are committed to the vdev log spacemap but
* not yet to this metaslab's spacemap.
*/
range_tree_t *ms_unflushed_allocs;
range_tree_t *ms_unflushed_frees;
zfs_range_tree_t *ms_unflushed_allocs;
zfs_range_tree_t *ms_unflushed_frees;

/*
* We have flushed entries up to but not including this TXG. In
(Diffs for the remaining 25 changed files are not shown on this page.)
