Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Metabase migration for SearchV2 #3125

Merged
merged 1 commit into from
Feb 18, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions pkg/local_object_storage/metabase/VERSION.md
Original file line number Diff line number Diff line change
Expand Up @@ -99,9 +99,21 @@ The lowest not used bucket index: 20.
- Name: `19` + container ID
- Key: first object ID
- Value: objects for corresponding split chain
- Metadata bucket
- Name: `255` + container ID
- Keys without values
- `0` + object ID
- `1` + attribute + `0xFF` + `0|1` + fixed256(value) + object ID: integer attributes. \
The sign byte is 0 for negative values and 1 otherwise; for negative values, the bits of the fixed256 representation are inverted as well.
- `2` + attribute + `0xFF` + value + object ID: plain non-integer attributes
- `3` + object ID + attribute + `0xFF` + value

# History

## Version 3

The last version without the metadata bucket, which was introduced along with the `ObjectService.SearchV2` API.

## Version 2

- Container ID is encoded as 32-byte slice
Expand Down
21 changes: 21 additions & 0 deletions pkg/local_object_storage/metabase/metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,27 @@
return fmt.Errorf("invalid meta bucket key (prefix 0x%X): %w", key[0], cause)
}

func putMetadataForObject(tx *bbolt.Tx, hdr object.Object, root, phy bool) error {
owner := hdr.Owner()
if owner.IsZero() {
return fmt.Errorf("invalid owner: %w", user.ErrZeroID)
}

Check warning on line 55 in pkg/local_object_storage/metabase/metadata.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/metadata.go#L54-L55

Added lines #L54 - L55 were not covered by tests
pldHash, ok := hdr.PayloadChecksum()
if !ok {
return errors.New("missing payload checksum")
}

Check warning on line 59 in pkg/local_object_storage/metabase/metadata.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/metadata.go#L58-L59

Added lines #L58 - L59 were not covered by tests
var ver version.Version
if v := hdr.Version(); v != nil {
ver = *v
}
var pldHmmHash []byte
if h, ok := hdr.PayloadHomomorphicHash(); ok {
pldHmmHash = h.Value()
}
return putMetadata(tx, hdr.GetContainerID(), hdr.GetID(), ver, owner, hdr.Type(), hdr.CreationEpoch(), hdr.PayloadSize(), pldHash.Value(),
pldHmmHash, hdr.SplitID().ToV2(), hdr.GetParentID(), hdr.GetFirstID(), hdr.Attributes(), root, phy)
}

// NOTE: existing objects are backfilled into the metadata bucket on version
// migration, see migrateFrom3Version in version.go.
func putMetadata(tx *bbolt.Tx, cnr cid.ID, id oid.ID, ver version.Version, owner user.ID, typ object.Type, creationEpoch uint64,
payloadLen uint64, pldHash, pldHmmHash, splitID []byte, parentID, firstID oid.ID, attrs []object.Attribute,
Expand Down
54 changes: 53 additions & 1 deletion pkg/local_object_storage/metabase/version.go
Original file line number Diff line number Diff line change
@@ -1,17 +1,21 @@
package meta

import (
"bytes"
"encoding/binary"
"errors"
"fmt"

objectconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/object"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/util/logicerr"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
"go.etcd.io/bbolt"
)

// currentMetaVersion contains current metabase version.
const currentMetaVersion = 3
const currentMetaVersion = 4

var versionKey = []byte("version")

Expand Down Expand Up @@ -74,6 +78,7 @@

// migrateFrom maps a metabase version onto the routine that migrates the
// database from that version to the next one.
var migrateFrom = map[uint64]func(*DB, *bbolt.Tx) error{
	2: migrateFrom2Version,
	3: migrateFrom3Version,
}

func migrateFrom2Version(db *DB, tx *bbolt.Tx) error {
Expand Down Expand Up @@ -105,3 +110,50 @@

return updateVersion(tx, 3)
}

func migrateFrom3Version(_ *DB, tx *bbolt.Tx) error {
c := tx.Cursor()
pref := []byte{metadataPrefix}
if k, _ := c.Seek(pref); bytes.HasPrefix(k, pref) {
return fmt.Errorf("key with prefix 0x%X detected, metadata space is occupied by unexpected data or the version has not been updated to #%d", pref, currentMetaVersion)
}

Check warning on line 119 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L118-L119

Added lines #L118 - L119 were not covered by tests
err := tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
switch name[0] {
default:
return nil
case primaryPrefix, tombstonePrefix, storageGroupPrefix, lockersPrefix, linkObjectsPrefix:
}
if len(name[1:]) != cid.Size {
return fmt.Errorf("invalid container bucket with prefix 0x%X: wrong CID len %d", name[0], len(name[1:]))
}

Check warning on line 128 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L127-L128

Added lines #L127 - L128 were not covered by tests
cnr := cid.ID(name[1:])
err := b.ForEach(func(k, v []byte) error {
if len(k) != oid.Size {
return fmt.Errorf("wrong OID key len %d", len(k))
}

Check warning on line 133 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L132-L133

Added lines #L132 - L133 were not covered by tests
id := oid.ID(k)
var hdr object.Object
if err := hdr.Unmarshal(v); err != nil {
return fmt.Errorf("decode header of object %s from bucket value: %w", id, err)
}

Check warning on line 138 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L137-L138

Added lines #L137 - L138 were not covered by tests
par := hdr.Parent()
if err := putMetadataForObject(tx, hdr, par == nil, true); err != nil {
return fmt.Errorf("put metadata for object %s: %w", id, err)
}

Check warning on line 142 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L141-L142

Added lines #L141 - L142 were not covered by tests
if par != nil {
if err := putMetadataForObject(tx, *par, true, false); err != nil {
return fmt.Errorf("put metadata for parent of object %s: %w", id, err)
}

Check warning on line 146 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L145-L146

Added lines #L145 - L146 were not covered by tests
}
return nil
})
if err != nil {
return fmt.Errorf("process container 0x%X%s bucket: %w", name[0], cnr, err)
}

Check warning on line 152 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L151-L152

Added lines #L151 - L152 were not covered by tests
return nil
})
if err != nil {
return err
}

Check warning on line 157 in pkg/local_object_storage/metabase/version.go

View check run for this annotation

Codecov / codecov/patch

pkg/local_object_storage/metabase/version.go#L156-L157

Added lines #L156 - L157 were not covered by tests
return updateVersion(tx, 4)
}
237 changes: 237 additions & 0 deletions pkg/local_object_storage/metabase/version_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,31 @@ package meta
import (
"bytes"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math/rand"
"os"
"path"
"path/filepath"
"slices"
"strconv"
"testing"

objectconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/object"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard/mode"
"github.com/nspcc-dev/neofs-sdk-go/checksum"
"github.com/nspcc-dev/neofs-sdk-go/client"
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test"
objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test"
usertest "github.com/nspcc-dev/neofs-sdk-go/user/test"
"github.com/nspcc-dev/neofs-sdk-go/version"
"github.com/nspcc-dev/tzhash/tz"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
Expand Down Expand Up @@ -346,3 +358,228 @@ func TestMigrate2to3(t *testing.T) {
})
require.NoError(t, err)
}

// TestMigrate3to4 checks metabase migration from version 3 to version 4:
// it pre-fills the object buckets of every storable type with marshalled
// headers, forces version 3, runs Init (which triggers the migration), and
// verifies that the metadata (SearchV2) indexes were built for every object
// including the virtual parent.
func TestMigrate3to4(t *testing.T) {
	db := newDB(t)

	// One object per storable type, each in its own container.
	typs := []object.Type{object.TypeRegular, object.TypeTombstone, object.TypeStorageGroup, object.TypeLock, object.TypeLink}
	objs := make([]object.Object, len(typs))
	// css/hcss collect payload checksums and homomorphic hashes for later
	// search-by-hash assertions.
	var css, hcss [][]byte
	for i := range objs {
		objs[i].SetContainerID(cidtest.ID())
		id := oidtest.ID()
		objs[i].SetID(id)
		ver := version.New(uint32(100*i), uint32(100*i+1))
		objs[i].SetVersion(&ver)
		objs[i].SetOwner(usertest.ID())
		objs[i].SetType(typs[i])
		objs[i].SetCreationEpoch(rand.Uint64())
		objs[i].SetPayloadSize(rand.Uint64())
		// Payload checksum reuses the object ID bytes for a known value.
		objs[i].SetPayloadChecksum(checksum.NewSHA256(id))
		css = append(css, id[:])
		var tzh [tz.Size]byte
		rand.Read(tzh[:]) //nolint:staticcheck
		objs[i].SetPayloadHomomorphicHash(checksum.NewTillichZemor(tzh))
		hcss = append(hcss, tzh[:])
		sid := objecttest.SplitID()
		objs[i].SetSplitID(&sid)
		objs[i].SetParentID(oidtest.ID())
		objs[i].SetFirstID(oidtest.ID())
		objs[i].SetAttributes(*object.NewAttribute("Index", strconv.Itoa(i)))
	}

	// Virtual parent of objs[0], placed in the same container; migration must
	// index it as root-but-not-phy.
	var par object.Object
	par.SetContainerID(objs[0].GetContainerID())
	par.SetID(oidtest.ID())
	ver := version.New(1000, 1001)
	par.SetVersion(&ver)
	par.SetOwner(usertest.ID())
	par.SetType(typs[0])
	par.SetCreationEpoch(rand.Uint64())
	par.SetPayloadSize(rand.Uint64())
	pcs := oidtest.ID()
	par.SetPayloadChecksum(checksum.NewSHA256(pcs))
	var phcs [tz.Size]byte
	rand.Read(phcs[:]) //nolint:staticcheck
	par.SetPayloadHomomorphicHash(checksum.NewTillichZemor(phcs))
	sid := objecttest.SplitID()
	par.SetSplitID(&sid)
	par.SetParentID(oidtest.ID())
	par.SetFirstID(oidtest.ID())
	par.SetAttributes(*object.NewAttribute("Index", "9999"))

	objs[0].SetParent(&par)

	// Write each header into its typed container bucket (prefix + CID),
	// emulating a pre-migration (version 3) layout.
	for _, item := range []struct {
		pref byte
		hdr  *object.Object
	}{
		{pref: 0x06, hdr: &objs[0]},
		{pref: 0x06, hdr: &par},
		{pref: 0x09, hdr: &objs[1]},
		{pref: 0x08, hdr: &objs[2]},
		{pref: 0x07, hdr: &objs[3]},
		{pref: 0x12, hdr: &objs[4]},
	} {
		err := db.boltDB.Update(func(tx *bbolt.Tx) error {
			cnr := item.hdr.GetContainerID()
			bkt, err := tx.CreateBucketIfNotExists(slices.Concat([]byte{item.pref}, cnr[:]))
			require.NoError(t, err)
			id := item.hdr.GetID()
			return bkt.Put(id[:], item.hdr.Marshal())
		})
		require.NoError(t, err)
	}

	// force old version
	err := db.boltDB.Update(func(tx *bbolt.Tx) error {
		// Drop any metadata buckets (0xFF prefix) created by newDB.
		if err := tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
			if name[0] == 0xFF {
				return tx.DeleteBucket(name)
			}
			return nil
		}); err != nil {
			return err
		}

		bkt := tx.Bucket([]byte{0x05})
		require.NotNil(t, bkt)
		return bkt.Put([]byte("version"), []byte{0x03, 0, 0, 0, 0, 0, 0, 0})
	})
	require.NoError(t, err)
	// migrate
	require.NoError(t, db.Init())
	// check
	err = db.boltDB.View(func(tx *bbolt.Tx) error {
		bkt := tx.Bucket([]byte{0x05})
		require.NotNil(t, bkt)
		require.Equal(t, []byte{0x04, 0, 0, 0, 0, 0, 0, 0}, bkt.Get([]byte("version")))
		return nil
	})
	require.NoError(t, err)

	// Unfiltered search in objs[0]'s container must see both the stored
	// object and its virtual parent.
	res, _, err := db.Search(objs[0].GetContainerID(), nil, nil, nil, nil, 1000)
	require.NoError(t, err)
	require.Len(t, res, 2)
	require.True(t, slices.ContainsFunc(res, func(r client.SearchResultItem) bool { return r.ID == objs[0].GetID() }))
	require.True(t, slices.ContainsFunc(res, func(r client.SearchResultItem) bool { return r.ID == par.GetID() }))

	// Remaining containers hold exactly one object each.
	for i := range objs[1:] {
		res, _, err := db.Search(objs[1+i].GetContainerID(), nil, nil, nil, nil, 1000)
		require.NoError(t, err, i)
		require.Len(t, res, 1, i)
		require.Equal(t, objs[1+i].GetID(), res[0].ID, i)
	}

	// Every system and user attribute must be searchable by exact match.
	// tc.par marks the REGULAR-type case where both objs[0] and par match.
	for _, tc := range []struct {
		attr string
		val  string
		cnr  cid.ID
		exp  oid.ID
		par  bool
	}{
		{attr: "$Object:version", val: "v0.1", cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:version", val: "v100.101", cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:version", val: "v200.201", cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:version", val: "v300.301", cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:version", val: "v400.401", cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:version", val: "v1000.1001", cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:ownerID", val: objs[0].Owner().String(), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:ownerID", val: objs[1].Owner().String(), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:ownerID", val: objs[2].Owner().String(), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:ownerID", val: objs[3].Owner().String(), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:ownerID", val: objs[4].Owner().String(), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:ownerID", val: par.Owner().String(), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:objectType", val: "REGULAR", cnr: objs[0].GetContainerID(), par: true},
		{attr: "$Object:objectType", val: "TOMBSTONE", cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:objectType", val: "STORAGE_GROUP", cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:objectType", val: "LOCK", cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:objectType", val: "LINK", cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:creationEpoch", val: strconv.FormatUint(objs[0].CreationEpoch(), 10), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:creationEpoch", val: strconv.FormatUint(objs[1].CreationEpoch(), 10), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:creationEpoch", val: strconv.FormatUint(objs[2].CreationEpoch(), 10), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:creationEpoch", val: strconv.FormatUint(objs[3].CreationEpoch(), 10), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:creationEpoch", val: strconv.FormatUint(objs[4].CreationEpoch(), 10), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:creationEpoch", val: strconv.FormatUint(par.CreationEpoch(), 10), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:payloadLength", val: strconv.FormatUint(objs[0].PayloadSize(), 10), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:payloadLength", val: strconv.FormatUint(objs[1].PayloadSize(), 10), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:payloadLength", val: strconv.FormatUint(objs[2].PayloadSize(), 10), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:payloadLength", val: strconv.FormatUint(objs[3].PayloadSize(), 10), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:payloadLength", val: strconv.FormatUint(objs[4].PayloadSize(), 10), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:payloadLength", val: strconv.FormatUint(par.PayloadSize(), 10), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:payloadHash", val: hex.EncodeToString(css[0]), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:payloadHash", val: hex.EncodeToString(css[1]), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:payloadHash", val: hex.EncodeToString(css[2]), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:payloadHash", val: hex.EncodeToString(css[3]), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:payloadHash", val: hex.EncodeToString(css[4]), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:payloadHash", val: hex.EncodeToString(pcs[:]), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:homomorphicHash", val: hex.EncodeToString(hcss[0]), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:homomorphicHash", val: hex.EncodeToString(hcss[1]), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:homomorphicHash", val: hex.EncodeToString(hcss[2]), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:homomorphicHash", val: hex.EncodeToString(hcss[3]), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:homomorphicHash", val: hex.EncodeToString(hcss[4]), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:homomorphicHash", val: hex.EncodeToString(phcs[:]), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:split.splitID", val: objs[0].SplitID().String(), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:split.splitID", val: objs[1].SplitID().String(), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:split.splitID", val: objs[2].SplitID().String(), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:split.splitID", val: objs[3].SplitID().String(), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:split.splitID", val: objs[4].SplitID().String(), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:split.splitID", val: par.SplitID().String(), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:split.parent", val: objs[0].GetParentID().String(), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:split.parent", val: objs[1].GetParentID().String(), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:split.parent", val: objs[2].GetParentID().String(), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:split.parent", val: objs[3].GetParentID().String(), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:split.parent", val: objs[4].GetParentID().String(), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:split.parent", val: par.GetParentID().String(), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "$Object:split.first", val: objs[0].GetFirstID().String(), cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "$Object:split.first", val: objs[1].GetFirstID().String(), cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "$Object:split.first", val: objs[2].GetFirstID().String(), cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "$Object:split.first", val: objs[3].GetFirstID().String(), cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "$Object:split.first", val: objs[4].GetFirstID().String(), cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "$Object:split.first", val: par.GetFirstID().String(), cnr: par.GetContainerID(), exp: par.GetID()},
		{attr: "Index", val: "0", cnr: objs[0].GetContainerID(), exp: objs[0].GetID()},
		{attr: "Index", val: "1", cnr: objs[1].GetContainerID(), exp: objs[1].GetID()},
		{attr: "Index", val: "2", cnr: objs[2].GetContainerID(), exp: objs[2].GetID()},
		{attr: "Index", val: "3", cnr: objs[3].GetContainerID(), exp: objs[3].GetID()},
		{attr: "Index", val: "4", cnr: objs[4].GetContainerID(), exp: objs[4].GetID()},
		{attr: "Index", val: "9999", cnr: par.GetContainerID(), exp: par.GetID()},
	} {
		var fs object.SearchFilters
		fs.AddFilter(tc.attr, tc.val, object.MatchStringEqual)
		res, _, err := db.Search(tc.cnr, fs, nil, nil, nil, 1000)
		require.NoError(t, err, tc)
		if !tc.par {
			require.Len(t, res, 1, tc)
			require.Equal(t, tc.exp, res[0].ID, tc)
		} else {
			require.Len(t, res, 2, tc)
			require.True(t, slices.ContainsFunc(res, func(r client.SearchResultItem) bool { return r.ID == objs[0].GetID() }))
			require.True(t, slices.ContainsFunc(res, func(r client.SearchResultItem) bool { return r.ID == par.GetID() }))
		}
	}

	// ROOT filter must return only root objects (par in container 0);
	// PHY filter must return only stored ones (objs[0] and par are both
	// stored in container 0's bucket, others are single).
	for i := range objs {
		var fs object.SearchFilters
		fs.AddRootFilter()
		res, _, err = db.Search(objs[i].GetContainerID(), fs, nil, nil, nil, 1000)
		require.NoError(t, err, i)
		require.Len(t, res, 1, i)
		if i == 0 {
			require.Equal(t, par.GetID(), res[0].ID)
		} else {
			require.Equal(t, objs[i].GetID(), res[0].ID, i)
		}
		fs = fs[:0]
		fs.AddPhyFilter()
		res, _, err = db.Search(objs[i].GetContainerID(), fs, nil, nil, nil, 1000)
		require.NoError(t, err, i)
		if i == 0 {
			require.Len(t, res, 2)
			require.True(t, slices.ContainsFunc(res, func(r client.SearchResultItem) bool { return r.ID == objs[0].GetID() }))
			require.True(t, slices.ContainsFunc(res, func(r client.SearchResultItem) bool { return r.ID == par.GetID() }))
		} else {
			require.Len(t, res, 1)
			require.Equal(t, objs[i].GetID(), res[0].ID, i)
		}
	}
}
Loading