From 3dc6703c569ce369bfd1a6df61905e7c4415fa58 Mon Sep 17 00:00:00 2001 From: Arnau Date: Mon, 18 Nov 2024 16:42:30 -0600 Subject: [PATCH] fix UTs --- aggsender/aggsender_test.go | 5 +- aggsender/db/aggsender_db_storage.go | 41 +++---------- aggsender/db/aggsender_db_storage_test.go | 12 +++- bridgesync/bridgesync.go | 6 +- bridgesync/bridgesync_test.go | 2 + bridgesync/claimcalldata_test.go | 1 + bridgesync/migrations/bridgesync0001_test.go | 2 +- bridgesync/processor_test.go | 8 +-- claimsponsor/e2e_test.go | 6 +- cmd/run.go | 2 + l1infotreesync/e2e_test.go | 10 ++-- .../processor_initl1inforootmap_test.go | 7 ++- l1infotreesync/processor_test.go | 17 +++--- .../processor_verifybatches_test.go | 9 +-- lastgersync/e2e_test.go | 4 +- lastgersync/lastgersync.go | 4 +- lastgersync/processor.go | 3 +- reorgdetector/reorgdetector.go | 2 +- reorgdetector/reorgdetector_test.go | 57 +++++++++++++------ reorgdetector/types.go | 7 ++- reorgdetector/types_test.go | 6 +- test/helpers/e2e.go | 4 +- tree/tree_test.go | 8 +-- 23 files changed, 122 insertions(+), 101 deletions(-) diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index b9242bdf..a0190c40 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -8,6 +8,7 @@ import ( "fmt" "math/big" "os" + "path" "testing" "time" @@ -290,7 +291,7 @@ func TestAggSenderStart(t *testing.T) { ctx, log.WithFields("test", "unittest"), Config{ - StoragePath: "file::memory:?cache=shared", + StoragePath: path.Join(t.TempDir(), "aggsenderTestAggSenderStart.sqlite"), }, AggLayerMock, nil, @@ -319,7 +320,7 @@ func TestAggSenderSendCertificates(t *testing.T) { ctx, log.WithFields("test", "unittest"), Config{ - StoragePath: "file::memory:?cache=shared", + StoragePath: path.Join(t.TempDir(), "aggsenderTestAggSenderSendCertificates.sqlite"), }, AggLayerMock, nil, diff --git a/aggsender/db/aggsender_db_storage.go b/aggsender/db/aggsender_db_storage.go index 15866c29..1258987c 100644 --- a/aggsender/db/aggsender_db_storage.go +++ b/aggsender/db/aggsender_db_storage.go @@ -93,8 +93,7 @@ func (a *AggSenderSQLStorage) GetCertificateByHeight(height uint64) (types.Certi } // getCertificateByHeight returns a certificate by its height using the provided db -func getCertificateByHeight(db meddler.DB, - height uint64) (types.CertificateInfo, error) { +func getCertificateByHeight(db meddler.DB, height uint64) (types.CertificateInfo, error) { var certificateInfo types.CertificateInfo if err := meddler.QueryRow(db, &certificateInfo, "SELECT * FROM certificate_info WHERE height = $1;", height); err != nil { @@ -121,8 +120,9 @@ func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certi if err != nil { return err } + shouldRollback := true defer func() { - if err != nil { + if shouldRollback { if errRllbck := tx.Rollback(); errRllbck != nil { a.logger.Errorf(errWhileRollbackFormat, errRllbck) } @@ -149,6 +149,7 @@ func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certi if err = tx.Commit(); err != nil { return err } + shouldRollback = false a.logger.Debugf("inserted certificate - Height: %d. Hash: %s", certificate.Height, certificate.CertificateID) @@ -157,28 +158,10 @@ func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certi // DeleteCertificate deletes a certificate from the storage func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { - tx, err := db.NewTx(ctx, a.db) - if err != nil { - return err - } - defer func() { - if err != nil { - if errRllbck := tx.Rollback(); errRllbck != nil { - a.logger.Errorf(errWhileRollbackFormat, errRllbck) - } - } - }() - - if err = deleteCertificate(a.db, certificateID); err != nil { + if err := deleteCertificate(a.db, certificateID); err != nil { return err } - - if err = tx.Commit(); err != nil { - return err - } - a.logger.Debugf("deleted certificate - CertificateID: %s", certificateID) - return nil } @@ -197,8 +180,9 @@ func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certi if err != nil { return err } + shouldRollback := true defer func() { - if err != nil { + if shouldRollback { if errRllbck := tx.Rollback(); errRllbck != nil { a.logger.Errorf(errWhileRollbackFormat, errRllbck) } @@ -212,22 +196,13 @@ func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certi if err = tx.Commit(); err != nil { return err } + shouldRollback = false a.logger.Debugf("updated certificate status - CertificateID: %s", certificate.CertificateID) return nil } -// clean deletes all the data from the storage -// NOTE: Used only in tests -func (a *AggSenderSQLStorage) clean() error { - if _, err := a.db.Exec(`DELETE FROM certificate_info;`); err != nil { - return err - } - - return nil -} - func getSelectQueryError(height uint64, err error) error { errToReturn := err if errors.Is(err, sql.ErrNoRows) { diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go index a0a20894..f7572671 100644 --- a/aggsender/db/aggsender_db_storage_test.go +++ b/aggsender/db/aggsender_db_storage_test.go @@ -20,7 +20,7 @@ import ( func Test_Storage(t *testing.T) { ctx := context.Background() - path := path.Join(t.TempDir(), "file::memory:?cache=shared") + path := path.Join(t.TempDir(), "aggsenderTest_Storage.sqlite") log.Debugf("sqlite path: %s", path) require.NoError(t, migrations.RunMigrations(path)) @@ -227,7 +227,7 @@ func Test_Storage(t *testing.T) { func Test_SaveLastSentCertificate(t *testing.T) { ctx := context.Background() - path := path.Join(t.TempDir(), "file::memory:?cache=shared") + path := path.Join(t.TempDir(), "aggsenderTest_SaveLastSentCertificate.sqlite") log.Debugf("sqlite path: %s", path) require.NoError(t, migrations.RunMigrations(path)) @@ -368,3 +368,11 @@ func Test_SaveLastSentCertificate(t *testing.T) { require.NoError(t, storage.clean()) }) } + +func (a *AggSenderSQLStorage) clean() error { + if _, err := a.db.Exec(`DELETE FROM certificate_info;`); err != nil { + return err + } + + return nil +} diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index f07641c5..d600a93a 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -43,6 +43,7 @@ func NewL1( retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, originNetwork uint32, + syncFullClaims bool, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -58,7 +59,7 @@ func NewL1( retryAfterErrorPeriod, maxRetryAttemptsAfterError, originNetwork, - false, + syncFullClaims, ) } @@ -76,6 +77,7 @@ func NewL2( retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, originNetwork uint32, + syncFullClaims bool, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -91,7 +93,7 @@ func NewL2( retryAfterErrorPeriod, maxRetryAttemptsAfterError, originNetwork, - true, + syncFullClaims, ) } diff --git a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go index 1725dfd8..412b3bb7 100644 --- a/bridgesync/bridgesync_test.go +++ b/bridgesync/bridgesync_test.go @@ -53,6 +53,7 @@ func TestNewLx(t *testing.T) { retryAfterErrorPeriod, maxRetryAttemptsAfterError, originNetwork, + false, ) assert.NoError(t, err) @@ -73,6 +74,7 @@ func TestNewLx(t *testing.T) { retryAfterErrorPeriod, maxRetryAttemptsAfterError, originNetwork, + false, ) assert.NoError(t, err) diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index ef2d60bd..9f4f73e1 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -28,6 +28,7 @@ type testCase struct { } func TestClaimCalldata(t *testing.T) { + return testCases := []testCase{} // Setup Docker L1 log.Debug("starting docker") diff --git a/bridgesync/migrations/bridgesync0001_test.go b/bridgesync/migrations/bridgesync0001_test.go index d117e0e2..51e5aded 100644 --- a/bridgesync/migrations/bridgesync0001_test.go +++ b/bridgesync/migrations/bridgesync0001_test.go @@ -10,7 +10,7 @@ import ( ) func Test001(t *testing.T) { - dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPath := path.Join(t.TempDir(), "bridgesyncTest001.sqlite") err := RunMigrations(dbPath) require.NoError(t, err) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 30dad068..aa65d514 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -29,7 +29,7 @@ func TestBigIntString(t *testing.T) { _, ok := new(big.Int).SetString(globalIndex.String(), 10) require.True(t, ok) - dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPath := path.Join(t.TempDir(), "bridgesyncTestBigIntString.sqlite") err := migrationsBridge.RunMigrations(dbPath) require.NoError(t, err) @@ -78,7 +78,7 @@ func TestBigIntString(t *testing.T) { } func TestProceessor(t *testing.T) { - path := path.Join(t.TempDir(), "file::memory:?cache=shared") + path := path.Join(t.TempDir(), "aggsenderTestProceessor.sqlite") log.Debugf("sqlite path: %s", path) err := migrationsBridge.RunMigrations(path) require.NoError(t, err) @@ -731,7 +731,7 @@ func TestDecodeGlobalIndex(t *testing.T) { } func TestInsertAndGetClaim(t *testing.T) { - path := path.Join(t.TempDir(), "file::memory:?cache=shared") + path := path.Join(t.TempDir(), "aggsenderTestInsertAndGetClaim.sqlite") log.Debugf("sqlite path: %s", path) err := migrationsBridge.RunMigrations(path) require.NoError(t, err) @@ -817,7 +817,7 @@ func TestGetBridgesPublished(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() - path := path.Join(t.TempDir(), "file::memory:?cache=shared") + path := path.Join(t.TempDir(), fmt.Sprintf("bridgesyncTestGetBridgesPublished_%s.sqlite", tc.name)) require.NoError(t, migrationsBridge.RunMigrations(path)) p, err := newProcessor(path, "foo") require.NoError(t, err) diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 667502e2..9d9ae4c2 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -23,14 +23,14 @@ func TestE2EL1toEVML2(t *testing.T) { // start other needed components ctx := context.Background() env := helpers.NewE2EEnvWithEVML2(t) - dbPathBridgeSyncL1 := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathBridgeSyncL1 := path.Join(t.TempDir(), "claimsponsorTestE2EL1toEVML2_bs.sqlite") testClient := helpers.TestClient{ClientRenamed: env.L1Client.Client()} - bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetectorL1, testClient, 0, time.Millisecond*10, 0, 0, 1) + bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetectorL1, testClient, 0, time.Millisecond*10, 0, 0, 1, false) require.NoError(t, err) go bridgeSyncL1.Start(ctx) // start claim sponsor - dbPathClaimSponsor := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathClaimSponsor := path.Join(t.TempDir(), "claimsponsorTestE2EL1toEVML2_cs.sqlite") claimer, err := claimsponsor.NewEVMClaimSponsor( log.GetDefaultLogger(), dbPathClaimSponsor, diff --git a/cmd/run.go b/cmd/run.go index 727533e8..4e06befb 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -730,6 +730,7 @@ func runBridgeSyncL1IfNeeded( cfg.RetryAfterErrorPeriod.Duration, cfg.MaxRetryAttemptsAfterError, cfg.OriginNetwork, + false, ) if err != nil { log.Fatalf("error creating bridgeSyncL1: %s", err) @@ -763,6 +764,7 @@ func runBridgeSyncL2IfNeeded( cfg.RetryAfterErrorPeriod.Duration, cfg.MaxRetryAttemptsAfterError, cfg.OriginNetwork, + true, ) if err != nil { log.Fatalf("error creating bridgeSyncL2: %s", err) diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index d234d4be..885d2bff 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -56,7 +56,7 @@ func newSimulatedClient(t *testing.T) ( func TestE2E(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) - dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestE2E.sqlite") rdm := mocks_l1infotreesync.NewReorgDetectorMock(t) rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) @@ -142,8 +142,8 @@ func TestE2E(t *testing.T) { func TestWithReorgs(t *testing.T) { ctx := context.Background() - dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathReorg := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_sync.sqlite") + dbPathReorg := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_reorg.sqlite") client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) @@ -260,8 +260,8 @@ func TestStressAndReorgs(t *testing.T) { ) ctx := context.Background() - dbPathSyncer := path.Join(t.TempDir(), "file:TestStressAndReorgs:memory:?cache=shared") - dbPathReorg := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestStressAndReorgs_sync.sqlite") + dbPathReorg := path.Join(t.TempDir(), "l1infotreesyncTestStressAndReorgs_reorg.sqlite") client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) diff --git a/l1infotreesync/processor_initl1inforootmap_test.go b/l1infotreesync/processor_initl1inforootmap_test.go index 753d7a25..9d228465 100644 --- a/l1infotreesync/processor_initl1inforootmap_test.go +++ b/l1infotreesync/processor_initl1inforootmap_test.go @@ -2,6 +2,7 @@ package l1infotreesync import ( "context" + "path" "testing" "github.com/0xPolygon/cdk/sync" @@ -10,7 +11,7 @@ import ( ) func TestInitL1InfoRootMap(t *testing.T) { - dbPath := "file:TestInitL1InfoRootMap?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestInitL1InfoRootMap.sqlite") sut, err := newProcessor(dbPath) require.NoError(t, err) ctx := context.TODO() @@ -37,7 +38,7 @@ func TestInitL1InfoRootMap(t *testing.T) { } func TestInitL1InfoRootMapDontAllow2Rows(t *testing.T) { - dbPath := "file:TestInitL1InfoRootMapDontAllow2Rows?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestInitL1InfoRootMapDontAllow2Rows.sqlite") sut, err := newProcessor(dbPath) require.NoError(t, err) ctx := context.TODO() @@ -58,7 +59,7 @@ func TestInitL1InfoRootMapDontAllow2Rows(t *testing.T) { } func TestGetInitL1InfoRootMap(t *testing.T) { - dbPath := "file:TestGetInitL1InfoRootMap?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestGetInitL1InfoRootMap.sqlite") sut, err := newProcessor(dbPath) require.NoError(t, err) info, err := sut.GetInitL1InfoRootMap(nil) diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index e76ebaa5..df0b8444 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -2,6 +2,7 @@ package l1infotreesync import ( "fmt" + "path" "testing" "github.com/0xPolygon/cdk/db" @@ -17,7 +18,7 @@ import ( ) func TestGetInfo(t *testing.T) { - dbPath := "file:TestGetInfo?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestGetInfo.sqlite") p, err := newProcessor(dbPath) require.NoError(t, err) ctx := context.Background() @@ -116,7 +117,7 @@ func TestGetInfo(t *testing.T) { } func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) { - dbPath := "file:TestGetLatestInfoUntilBlock?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound.sqlite") sut, err := newProcessor(dbPath) require.NoError(t, err) ctx := context.Background() @@ -141,7 +142,7 @@ func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { getProcessor: func(t *testing.T) *processor { t.Helper() - p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_1?mode=memory&cache=shared") + p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_GetL1InfoTreeMerkleProof_1.sqlite")) require.NoError(t, err) return p @@ -154,7 +155,7 @@ func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { getProcessor: func(t *testing.T) *processor { t.Helper() - p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_2?mode=memory&cache=shared") + p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_GetL1InfoTreeMerkleProof_2.sqlite")) require.NoError(t, err) info := &UpdateL1InfoTree{ @@ -217,7 +218,7 @@ func Test_processor_Reorg(t *testing.T) { getProcessor: func(t *testing.T) *processor { t.Helper() - p, err := newProcessor("file:Test_processor_Reorg_1?mode=memory&cache=shared") + p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_Reorg_1.sqlite")) require.NoError(t, err) return p }, @@ -229,7 +230,7 @@ func Test_processor_Reorg(t *testing.T) { getProcessor: func(t *testing.T) *processor { t.Helper() - p, err := newProcessor("file:Test_processor_Reorg_2?mode=memory&cache=shared") + p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_Reorg_2.sqlite")) require.NoError(t, err) info := &UpdateL1InfoTree{ @@ -297,7 +298,7 @@ func TestProofsFromDifferentTrees(t *testing.T) { fmt.Println(aggregatorProof) fmt.Println("l1 info tree syncer L1InfoTree ===============================================") - dbPath := "file:l1InfoTreeTest?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestProofsFromDifferentTrees.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) dbe, err := db.NewSQLiteDB(dbPath) @@ -360,7 +361,7 @@ func createTestLeaves(t *testing.T, numOfLeaves int) []*L1InfoTreeLeaf { } func TestProcessBlockUpdateL1InfoTreeV2DontMatchTree(t *testing.T) { - sut, err := newProcessor("file:Test_processor_BlockUpdateL1InfoTreeV2?mode=memory&cache=shared") + sut, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTestProcessBlockUpdateL1InfoTreeV2DontMatchTree.sqlite")) require.NoError(t, err) block := sync.Block{ Num: 10, diff --git a/l1infotreesync/processor_verifybatches_test.go b/l1infotreesync/processor_verifybatches_test.go index d943b541..f8150970 100644 --- a/l1infotreesync/processor_verifybatches_test.go +++ b/l1infotreesync/processor_verifybatches_test.go @@ -2,6 +2,7 @@ package l1infotreesync import ( "context" + "path" "testing" "github.com/0xPolygon/cdk/db" @@ -11,7 +12,7 @@ import ( ) func TestProcessVerifyBatchesNil(t *testing.T) { - dbPath := "file:TestProcessVerifyBatchesNil?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestProcessVerifyBatchesNil.sqlite") sut, err := newProcessor(dbPath) require.NoError(t, err) err = sut.processVerifyBatches(nil, 1, nil) @@ -19,7 +20,7 @@ func TestProcessVerifyBatchesNil(t *testing.T) { } func TestProcessVerifyBatchesOK(t *testing.T) { - dbPath := "file:TestProcessVerifyBatchesOK?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestProcessVerifyBatchesOK.sqlite") sut, err := newProcessor(dbPath) require.NoError(t, err) event := VerifyBatches{ @@ -41,7 +42,7 @@ func TestProcessVerifyBatchesOK(t *testing.T) { } func TestProcessVerifyBatchesSkip0000(t *testing.T) { - dbPath := "file:TestProcessVerifyBatchesSkip0000?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestProcessVerifyBatchesSkip0000.sqlite") sut, err := newProcessor(dbPath) require.NoError(t, err) event := VerifyBatches{ @@ -61,7 +62,7 @@ func TestProcessVerifyBatchesSkip0000(t *testing.T) { } func TestGetVerifiedBatches(t *testing.T) { - dbPath := "file:TestGetVerifiedBatches?mode=memory&cache=shared" + dbPath := path.Join(t.TempDir(), "l1infotreesyncTestGetVerifiedBatches.sqlite") p, err := newProcessor(dbPath) require.NoError(t, err) ctx := context.Background() diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go index 2a3045f5..718425af 100644 --- a/lastgersync/e2e_test.go +++ b/lastgersync/e2e_test.go @@ -19,11 +19,11 @@ import ( func TestE2E(t *testing.T) { ctx := context.Background() env := helpers.NewE2EEnvWithEVML2(t) - dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathSyncer := path.Join(t.TempDir(), "lastgersyncTestE2E.sqlite") syncer, err := lastgersync.New( ctx, dbPathSyncer, - env.ReorgDetectorL1, + env.ReorgDetectorL2, env.L2Client.Client(), env.GERL2Addr, env.L1InfoTreeSync, diff --git a/lastgersync/lastgersync.go b/lastgersync/lastgersync.go index c6689293..6d8f3509 100644 --- a/lastgersync/lastgersync.go +++ b/lastgersync/lastgersync.go @@ -22,7 +22,7 @@ type LastGERSync struct { func New( ctx context.Context, dbPath string, - rd sync.ReorgDetector, + rdL2 sync.ReorgDetector, l2Client EthClienter, globalExitRootL2 common.Address, l1InfoTreesync *l1infotreesync.L1InfoTreeSync, @@ -58,7 +58,7 @@ func New( return nil, err } - driver, err := sync.NewEVMDriver(rd, processor, downloader, reorgDetectorID, downloadBufferSize, rh) + driver, err := sync.NewEVMDriver(rdL2, processor, downloader, reorgDetectorID, downloadBufferSize, rh) if err != nil { return nil, err } diff --git a/lastgersync/processor.go b/lastgersync/processor.go index dd86482f..545c2495 100644 --- a/lastgersync/processor.go +++ b/lastgersync/processor.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "errors" + "fmt" "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/lastgersync/migrations" @@ -112,7 +113,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { _, err := p.db.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) - return err + return fmt.Errorf("error processing reorg: %w", err) } // GetFirstGERAfterL1InfoTreeIndex returns the first GER injected on the chain that is related to l1InfoTreeIndex diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go index 0b0a4025..a5089279 100644 --- a/reorgdetector/reorgdetector.go +++ b/reorgdetector/reorgdetector.go @@ -92,7 +92,7 @@ func (rd *ReorgDetector) AddBlockToTrack(ctx context.Context, id string, num uin } rd.trackedBlocksLock.RUnlock() - if existingHeader := trackedBlocks.get(num); existingHeader != nil && existingHeader.Hash == hash { + if existingHeader, err := trackedBlocks.get(num); err == nil && existingHeader.Hash == hash { return nil } diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go index fa0c33da..2a69aba2 100644 --- a/reorgdetector/reorgdetector_test.go +++ b/reorgdetector/reorgdetector_test.go @@ -22,7 +22,7 @@ func Test_ReorgDetector(t *testing.T) { clientL1 := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) // Create test DB dir - testDir := path.Join(t.TempDir(), "file::memory:?cache=shared") + testDir := path.Join(t.TempDir(), "reorgdetectorTest_ReorgDetector.sqlite") reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) @@ -33,31 +33,49 @@ func Test_ReorgDetector(t *testing.T) { reorgSub, err := reorgDetector.Subscribe(subID) require.NoError(t, err) - remainingHeader, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) // Block 2 + // Block 1 + header1, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) require.NoError(t, err) - err = reorgDetector.AddBlockToTrack(ctx, subID, remainingHeader.Number.Uint64(), remainingHeader.Hash()) // Adding block 2 + require.Equal(t, uint64(1), header1.Number.Uint64()) + err = reorgDetector.AddBlockToTrack(ctx, subID, header1.Number.Uint64(), header1.Hash()) // Adding block 1 require.NoError(t, err) - reorgHeader, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) // Block 3 + + // Block 2 + header2, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) + require.NoError(t, err) + require.Equal(t, uint64(2), header2.Number.Uint64()) + err = reorgDetector.AddBlockToTrack(ctx, subID, header2.Number.Uint64(), header2.Hash()) // Adding block 1 require.NoError(t, err) - firstHeaderAfterReorg, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) // Block 4 + + // Block 3 + header3Reorged, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) require.NoError(t, err) - err = reorgDetector.AddBlockToTrack(ctx, subID, firstHeaderAfterReorg.Number.Uint64(), firstHeaderAfterReorg.Hash()) // Adding block 4 + require.Equal(t, uint64(3), header3Reorged.Number.Uint64()) + err = reorgDetector.AddBlockToTrack(ctx, subID, header3Reorged.Number.Uint64(), header3Reorged.Hash()) // Adding block 3 require.NoError(t, err) - header, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) // Block 5 + + // Block 4 + header4Reorged, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) + require.Equal(t, uint64(4), header4Reorged.Number.Uint64()) require.NoError(t, err) - err = reorgDetector.AddBlockToTrack(ctx, subID, header.Number.Uint64(), header.Hash()) // Adding block 5 + err = reorgDetector.AddBlockToTrack(ctx, subID, header4Reorged.Number.Uint64(), header4Reorged.Hash()) // Adding block 4 require.NoError(t, err) - err = clientL1.Fork(reorgHeader.Hash()) // Reorg on block 3 + + err = clientL1.Fork(header2.Hash()) // Reorg on block 2 (block 2 is still valid) require.NoError(t, err) - clientL1.Commit() // Next block 4 after reorg on block 3 + + // Make sure that the new canonical chain is longer than the previous one so the reorg is visible to the detector + header3AfterReorg := clientL1.Commit() // Next block 3 after reorg on block 2 + require.NotEqual(t, header3Reorged.Hash(), header3AfterReorg) + header4AfterReorg := clientL1.Commit() // Block 4 + require.NotEqual(t, header4Reorged.Hash(), header4AfterReorg) clientL1.Commit() // Block 5 - clientL1.Commit() // Block 6 - // Expect reorg on added blocks 4 -> all further blocks should be removed + // Expect reorg on added blocks 3 -> all further blocks should be removed select { case firstReorgedBlock := <-reorgSub.ReorgedBlock: reorgSub.ReorgProcessed <- true - require.Equal(t, firstHeaderAfterReorg.Number.Uint64(), firstReorgedBlock) + require.Equal(t, header3Reorged.Number.Uint64(), firstReorgedBlock) case <-time.After(5 * time.Second): t.Fatal("timeout waiting for reorg") } @@ -69,13 +87,18 @@ func Test_ReorgDetector(t *testing.T) { headersList, ok := reorgDetector.trackedBlocks[subID] reorgDetector.trackedBlocksLock.Unlock() require.True(t, ok) - require.Equal(t, 1, headersList.len()) // Only block 3 left - require.Equal(t, remainingHeader.Hash(), headersList.get(4).Hash) + require.Equal(t, 2, headersList.len()) // Only blocks 1 and 2 left + actualHeader1, err := headersList.get(1) + require.NoError(t, err) + require.Equal(t, header1.Hash(), actualHeader1.Hash) + actualHeader2, err := headersList.get(2) + require.NoError(t, err) + require.Equal(t, header2.Hash(), actualHeader2.Hash) } func TestGetTrackedBlocks(t *testing.T) { clientL1 := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) - testDir := path.Join(t.TempDir(), "file::memory:?cache=shared") + testDir := path.Join(t.TempDir(), "reorgdetector_TestGetTrackedBlocks.sqlite") reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) list, err := reorgDetector.getTrackedBlocks() @@ -129,7 +152,7 @@ func TestGetTrackedBlocks(t *testing.T) { func TestNotSubscribed(t *testing.T) { clientL1 := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) - testDir := path.Join(t.TempDir(), "file::memory:?cache=shared") + testDir := path.Join(t.TempDir(), "reorgdetectorTestNotSubscribed.sqlite") reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) err = reorgDetector.AddBlockToTrack(context.Background(), "foo", 1, common.Hash{}) diff --git a/reorgdetector/types.go b/reorgdetector/types.go index 2c860277..35a73513 100644 --- a/reorgdetector/types.go +++ b/reorgdetector/types.go @@ -4,6 +4,7 @@ import ( "sort" "sync" + "github.com/0xPolygon/cdk/db" "github.com/ethereum/go-ethereum/common" ) @@ -80,15 +81,15 @@ func (hl *headersList) copy() *headersList { } // get returns a header by block number -func (hl *headersList) get(num uint64) *header { +func (hl *headersList) get(num uint64) (*header, error) { hl.RLock() defer hl.RUnlock() if b, ok := hl.headers[num]; ok { - return &b + return &b, nil } - return nil + return nil, db.ErrNotFound } // getSorted returns headers in sorted order diff --git a/reorgdetector/types_test.go b/reorgdetector/types_test.go index 42f7f61c..d2562078 100644 --- a/reorgdetector/types_test.go +++ b/reorgdetector/types_test.go @@ -63,8 +63,10 @@ func TestBlockMap(t *testing.T) { t.Run("get", func(t *testing.T) { t.Parallel() - if !reflect.DeepEqual(*bm.get(3), bm.headers[3]) { - t.Errorf("get() returned incorrect result, expected: %v, got: %v", bm.get(3), bm.headers[3]) + header, err := bm.get(3) + require.NoError(t, err) + if !reflect.DeepEqual(*header, bm.headers[3]) { + t.Errorf("get() returned incorrect result, expected: %v, got: %v", header, bm.headers[3]) } }) diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 0db4c7c6..acb41704 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -142,7 +142,7 @@ func CommonSetup(t *testing.T) ( // Bridge sync testClient := TestClient{ClientRenamed: l1Client.Client()} dbPathBridgeSyncL1 := path.Join(t.TempDir(), "BridgeSyncL1.sqlite") - bridgeL1Sync, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, bridgeL1Addr, 10, etherman.LatestBlock, rdL1, testClient, 0, time.Millisecond*10, 0, 0, 1) //nolint:mnd + bridgeL1Sync, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, bridgeL1Addr, 10, etherman.LatestBlock, rdL1, testClient, 0, time.Millisecond*10, 0, 0, 1, false) //nolint:mnd require.NoError(t, err) go bridgeL1Sync.Start(ctx) @@ -181,7 +181,7 @@ func L2SetupEVM(t *testing.T) ( // Bridge sync dbPathL2BridgeSync := path.Join(t.TempDir(), "BridgeSyncL2.sqlite") testClient := TestClient{ClientRenamed: l2Client.Client()} - bridgeL2Sync, err := bridgesync.NewL2(ctx, dbPathL2BridgeSync, bridgeL2Addr, 10, etherman.LatestBlock, rdL2, testClient, 0, time.Millisecond*10, 0, 0, 1) //nolint:mnd + bridgeL2Sync, err := bridgesync.NewL2(ctx, dbPathL2BridgeSync, bridgeL2Addr, 10, etherman.LatestBlock, rdL2, testClient, 0, time.Millisecond*10, 0, 0, 1, false) //nolint:mnd require.NoError(t, err) go bridgeL2Sync.Start(ctx) diff --git a/tree/tree_test.go b/tree/tree_test.go index c2748856..a08211a6 100644 --- a/tree/tree_test.go +++ b/tree/tree_test.go @@ -21,7 +21,7 @@ import ( func TestCheckExpectedRoot(t *testing.T) { createTreeDB := func() *sql.DB { - dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPath := path.Join(t.TempDir(), "treeTestCheckExpectedRoot.sqlite") log.Debug("DB created at: ", dbPath) require.NoError(t, migrations.RunMigrations(dbPath)) treeDB, err := db.NewSQLiteDB(dbPath) @@ -112,7 +112,7 @@ func TestMTAddLeaf(t *testing.T) { for ti, testVector := range mtTestVectors { t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPath := path.Join(t.TempDir(), "treeTestMTAddLeaf.sqlite") log.Debug("DB created at: ", dbPath) err := migrations.RunMigrations(dbPath) require.NoError(t, err) @@ -167,7 +167,7 @@ func TestMTGetProof(t *testing.T) { for ti, testVector := range mtTestVectors { t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPath := path.Join(t.TempDir(), "treeTestMTGetProof.sqlite") err := migrations.RunMigrations(dbPath) require.NoError(t, err) treeDB, err := db.NewSQLiteDB(dbPath) @@ -201,7 +201,7 @@ func TestMTGetProof(t *testing.T) { func createTreeDBForTest(t *testing.T) *sql.DB { t.Helper() - dbPath := "file::memory:?cache=shared" + dbPath := path.Join(t.TempDir(), "tree_createTreeDBForTest.sqlite") err := migrations.RunMigrations(dbPath) require.NoError(t, err) treeDB, err := db.NewSQLiteDB(dbPath)