diff --git a/src/Makefile.am b/src/Makefile.am index a505f44d7e..906d097821 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -81,7 +81,8 @@ TEST_FILES = $(TESTDATA_DIR)/stellar-core_example.cfg $(TESTDATA_DIR)/stellar-co $(TESTDATA_DIR)/stellar-core_testnet.cfg $(TESTDATA_DIR)/stellar-core_testnet_legacy.cfg \ $(TESTDATA_DIR)/stellar-history.testnet.6714239.json $(TESTDATA_DIR)/stellar-history.livenet.15686975.json \ $(TESTDATA_DIR)/stellar-core_testnet_validator.cfg $(TESTDATA_DIR)/stellar-core_example_validators.cfg \ - $(TESTDATA_DIR)/stellar-history.testnet.6714239.networkPassphrase.json + $(TESTDATA_DIR)/stellar-history.testnet.6714239.networkPassphrase.json \ + $(TESTDATA_DIR)/stellar-history.testnet.6714239.networkPassphrase.v2.json BUILT_SOURCES = $(SRC_X_FILES:.x=.h) main/StellarCoreVersion.cpp main/XDRFilesSha256.cpp $(TEST_FILES) diff --git a/src/bucket/BucketBase.cpp b/src/bucket/BucketBase.cpp index 0917b20a82..1d0326cf47 100644 --- a/src/bucket/BucketBase.cpp +++ b/src/bucket/BucketBase.cpp @@ -385,7 +385,7 @@ BucketBase::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, } if (countMergeEvents) { - bucketManager.incrMergeCounters(mc); + bucketManager.incrMergeCounters(mc); } std::vector shadowHashes; diff --git a/src/bucket/BucketManager.cpp b/src/bucket/BucketManager.cpp index 358745e50c..6535453201 100644 --- a/src/bucket/BucketManager.cpp +++ b/src/bucket/BucketManager.cpp @@ -330,18 +330,36 @@ BucketManager::getMergeTimer() return mBucketSnapMerge; } +template <> +MergeCounters +BucketManager::readMergeCounters() +{ + std::lock_guard lock(mBucketMutex); + return mLiveMergeCounters; +} + +template <> MergeCounters -BucketManager::readMergeCounters() +BucketManager::readMergeCounters() +{ + std::lock_guard lock(mBucketMutex); + return mHotArchiveMergeCounters; +} + +template <> +void +BucketManager::incrMergeCounters(MergeCounters const& delta) { std::lock_guard lock(mBucketMutex); - return mMergeCounters; + mLiveMergeCounters += delta; } +template <> void -BucketManager::incrMergeCounters(MergeCounters const& delta) +BucketManager::incrMergeCounters(MergeCounters const& delta) { std::lock_guard lock(mBucketMutex); - mMergeCounters += delta; + mHotArchiveMergeCounters += delta; } bool @@ -623,7 +641,7 @@ BucketManager::getMergeFutureInternal(MergeKey const& key, auto future = promise.get_future().share(); promise.set_value(bucket); mc.mFinishedMergeReattachments++; - incrMergeCounters(mc); + incrMergeCounters(mc); return future; } } @@ -638,7 +656,7 @@ BucketManager::getMergeFutureInternal(MergeKey const& key, "BucketManager::getMergeFuture returning running future for merge {}", key); mc.mRunningMergeReattachments++; - incrMergeCounters(mc); + incrMergeCounters(mc); return i->second; } @@ -1013,10 +1031,10 @@ BucketManager::snapshotLedger(LedgerHeader& currentHeader) currentHeader.ledgerVersion, BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { - // TODO: Hash Archive Bucket - // Dependency: HAS supports Hot Archive BucketList - - hash = mLiveBucketList->getHash(); + SHA256 hsh; + hsh.add(mLiveBucketList->getHash()); + hsh.add(mHotArchiveBucketList->getHash()); + hash = hsh.finish(); } else { @@ -1061,15 +1079,17 @@ BucketManager::startBackgroundEvictionScan(uint32_t ledgerSeq, mSnapshotManager->copySearchableLiveBucketListSnapshot(); auto const& sas = cfg.stateArchivalSettings(); - using task_t = std::packaged_task; + using task_t = + std::packaged_task()>; // MSVC gotcha: searchableBL has to be shared_ptr because MSVC wants to // copy this 
lambda, otherwise we could use unique_ptr. auto task = std::make_shared( [bl = std::move(searchableBL), iter = cfg.evictionIterator(), ledgerSeq, ledgerVers, sas, &counters = mBucketListEvictionCounters, stats = mEvictionStatistics] { - return bl->scanForEviction(ledgerSeq, counters, iter, stats, sas, - ledgerVers); + return std::make_unique( + bl->scanForEviction(ledgerSeq, counters, iter, stats, sas, + ledgerVers)); }); mEvictionFuture = task->get_future(); @@ -1096,14 +1116,14 @@ BucketManager::resolveBackgroundEvictionScan( // If eviction related settings changed during the ledger, we have to // restart the scan - if (!evictionCandidates.isValid(ledgerSeq, - networkConfig.stateArchivalSettings())) + if (!evictionCandidates->isValid(ledgerSeq, ledgerVers, + networkConfig.stateArchivalSettings())) { startBackgroundEvictionScan(ledgerSeq, ledgerVers, networkConfig); evictionCandidates = mEvictionFuture.get(); } - auto& eligibleEntries = evictionCandidates.eligibleEntries; + auto& eligibleEntries = evictionCandidates->eligibleEntries; for (auto iter = eligibleEntries.begin(); iter != eligibleEntries.end();) { @@ -1121,7 +1141,7 @@ BucketManager::resolveBackgroundEvictionScan( auto remainingEntriesToEvict = networkConfig.stateArchivalSettings().maxEntriesToArchive; auto entryToEvictIter = eligibleEntries.begin(); - auto newEvictionIterator = evictionCandidates.endOfRegionIterator; + auto newEvictionIterator = evictionCandidates->endOfRegionIterator; // Return vectors include both evicted entry and associated TTL std::vector deletedKeys; @@ -1161,7 +1181,7 @@ BucketManager::resolveBackgroundEvictionScan( // region if (remainingEntriesToEvict != 0) { - newEvictionIterator = evictionCandidates.endOfRegionIterator; + newEvictionIterator = evictionCandidates->endOfRegionIterator; } networkConfig.updateEvictionIterator(ltx, newEvictionIterator); @@ -1229,51 +1249,71 @@ BucketManager::assumeState(HistoryArchiveState const& has, releaseAssert(threadIsMain()); releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); - // TODO: Assume archival bucket state // Dependency: HAS supports Hot Archive BucketList - for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) - { - auto curr = getBucketByHashInternal( - hexToBin256(has.currentBuckets.at(i).curr), mSharedLiveBuckets); - auto snap = getBucketByHashInternal( - hexToBin256(has.currentBuckets.at(i).snap), mSharedLiveBuckets); - if (!(curr && snap)) - { - throw std::runtime_error("Missing bucket files while assuming " - "saved live BucketList state"); - } - auto const& nextFuture = has.currentBuckets.at(i).next; - std::shared_ptr nextBucket = nullptr; - if (nextFuture.hasOutputHash()) + auto processBucketList = [&](auto& bl, auto const& hasBuckets) { + auto kNumLevels = std::remove_reference::type::kNumLevels; + using BucketT = + typename std::remove_reference::type::bucket_type; + for (uint32_t i = 0; i < kNumLevels; ++i) { - nextBucket = getBucketByHashInternal( - hexToBin256(nextFuture.getOutputHash()), mSharedLiveBuckets); - if (!nextBucket) + auto curr = + getBucketByHash(hexToBin256(hasBuckets.at(i).curr)); + auto snap = + getBucketByHash(hexToBin256(hasBuckets.at(i).snap)); + if (!(curr && snap)) { - throw std::runtime_error( - "Missing future bucket files while " - "assuming saved live BucketList state"); + throw std::runtime_error("Missing bucket files while assuming " + "saved live BucketList state"); } - } - // Buckets on the BucketList should always be indexed - releaseAssert(curr->isEmpty() || curr->isIndexed()); - 
releaseAssert(snap->isEmpty() || snap->isIndexed()); - if (nextBucket) - { - releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); + auto const& nextFuture = hasBuckets.at(i).next; + std::shared_ptr nextBucket = nullptr; + if (nextFuture.hasOutputHash()) + { + nextBucket = getBucketByHash( + hexToBin256(nextFuture.getOutputHash())); + if (!nextBucket) + { + throw std::runtime_error( + "Missing future bucket files while " + "assuming saved live BucketList state"); + } + } + + // Buckets on the BucketList should always be indexed + releaseAssert(curr->isEmpty() || curr->isIndexed()); + releaseAssert(snap->isEmpty() || snap->isIndexed()); + if (nextBucket) + { + releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); + } + + bl.getLevel(i).setCurr(curr); + bl.getLevel(i).setSnap(snap); + bl.getLevel(i).setNext(nextFuture); } + }; - mLiveBucketList->getLevel(i).setCurr(curr); - mLiveBucketList->getLevel(i).setSnap(snap); - mLiveBucketList->getLevel(i).setNext(nextFuture); + processBucketList(*mLiveBucketList, has.currentBuckets); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + if (has.hasHotArchiveBuckets()) + { + processBucketList(*mHotArchiveBucketList, has.hotArchiveBuckets); } +#endif if (restartMerges) { mLiveBucketList->restartMerges(mApp, maxProtocolVersion, has.currentLedger); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + if (has.hasHotArchiveBuckets()) + { + mHotArchiveBucketList->restartMerges(mApp, maxProtocolVersion, + has.currentLedger); + } +#endif } cleanupStaleFiles(has); } @@ -1349,7 +1389,8 @@ BucketManager::loadCompleteLedgerState(HistoryArchiveState const& has) std::vector> hashes; for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i) { - HistoryStateBucket const& hsb = has.currentBuckets.at(i - 1); + HistoryStateBucket const& hsb = + has.currentBuckets.at(i - 1); hashes.emplace_back(hexToBin256(hsb.snap), fmt::format(FMT_STRING("snap {:d}"), i - 1)); hashes.emplace_back(hexToBin256(hsb.curr), @@ -1526,7 +1567,7 @@ BucketManager::visitLedgerEntries( std::vector> hashes; for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - HistoryStateBucket const& hsb = has.currentBuckets.at(i); + HistoryStateBucket const& hsb = has.currentBuckets.at(i); hashes.emplace_back(hexToBin256(hsb.curr), fmt::format(FMT_STRING("curr {:d}"), i)); hashes.emplace_back(hexToBin256(hsb.snap), @@ -1579,16 +1620,35 @@ BucketManager::scheduleVerifyReferencedBucketsWork( continue; } - // TODO: Update verify to for ArchiveBucket - // Dependency: HAS supports Hot Archive BucketList - auto b = getBucketByHashInternal(h, mSharedLiveBuckets); - if (!b) - { - throw std::runtime_error(fmt::format( - FMT_STRING("Missing referenced bucket {}"), binToHex(h))); - } - seq.emplace_back(std::make_shared( - mApp, b->getFilename().string(), b->getHash(), nullptr)); + auto loadFilenameAndHash = [&]() -> std::pair { + auto live = getBucketByHashInternal(h, mSharedLiveBuckets); + if (!live) + { + auto hot = getBucketByHashInternal(h, mSharedHotArchiveBuckets); + + // Check both live and hot archive buckets for hash. If we don't + // find it in either, we're missing a bucket. Note that live and + // hot archive buckets are guaranteed to have no hash collisions + // due to type field in MetaEntry. 
+ if (!hot) + { + throw std::runtime_error( + fmt::format(FMT_STRING("Missing referenced bucket {}"), + binToHex(h))); + } + return std::make_pair(hot->getFilename().string(), + hot->getHash()); + } + else + { + return std::make_pair(live->getFilename().string(), + live->getHash()); + } + }; + + auto [filename, hash] = loadFilenameAndHash(); + seq.emplace_back( + std::make_shared(mApp, filename, hash, nullptr)); } return mApp.getWorkScheduler().scheduleWork( "verify-referenced-buckets", seq); diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index 38df819a81..f8a89e059d 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -103,14 +103,15 @@ class BucketManager : NonMovableOrCopyable medida::Counter& mLiveBucketListSizeCounter; medida::Counter& mArchiveBucketListSizeCounter; EvictionCounters mBucketListEvictionCounters; - MergeCounters mMergeCounters; + MergeCounters mLiveMergeCounters; + MergeCounters mHotArchiveMergeCounters; std::shared_ptr mEvictionStatistics{}; std::map mBucketListEntryCountCounters; std::map mBucketListEntrySizeCounters; - std::future mEvictionFuture{}; + std::future> mEvictionFuture{}; // Copy app's config for thread-safe access Config const mConfig; @@ -203,8 +204,8 @@ class BucketManager : NonMovableOrCopyable // Reading and writing the merge counters is done in bulk, and takes a lock // briefly; this can be done from any thread. - MergeCounters readMergeCounters(); - void incrMergeCounters(MergeCounters const& delta); + template MergeCounters readMergeCounters(); + template void incrMergeCounters(MergeCounters const& delta); // Get a reference to a persistent bucket (in the BucketManager's bucket // directory), from the BucketManager's shared bucket-set. diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp index aaa85a3e44..8eb7345b08 100644 --- a/src/bucket/BucketSnapshotManager.cpp +++ b/src/bucket/BucketSnapshotManager.cpp @@ -115,6 +115,17 @@ BucketSnapshotManager::recordBulkLoadMetrics(std::string const& label, return iter->second; } +namespace +{ +template +bool +needsUpdate(std::shared_ptr const& snapshot, + SnapshotPtrT const& curr) +{ + return !snapshot || snapshot->getLedgerSeq() < curr->getLedgerSeq(); +} +} + void BucketSnapshotManager::maybeCopySearchableBucketListSnapshot( SearchableSnapshotConstPtr& snapshot) @@ -123,9 +134,7 @@ BucketSnapshotManager::maybeCopySearchableBucketListSnapshot( // modified. Rather, a thread is checking it's copy against the canonical // snapshot, so use a shared lock. std::shared_lock lock(mSnapshotMutex); - - if (!snapshot || - snapshot->getLedgerSeq() < mCurrLiveSnapshot->getLedgerSeq()) + if (needsUpdate(snapshot, mCurrLiveSnapshot)) { snapshot = copySearchableLiveBucketListSnapshot(); } @@ -139,14 +148,33 @@ BucketSnapshotManager::maybeCopySearchableHotArchiveBucketListSnapshot( // modified. Rather, a thread is checking it's copy against the canonical // snapshot, so use a shared lock. std::shared_lock lock(mSnapshotMutex); - - if (!snapshot || - snapshot->getLedgerSeq() < mCurrHotArchiveSnapshot->getLedgerSeq()) + if (needsUpdate(snapshot, mCurrHotArchiveSnapshot)) { snapshot = copySearchableHotArchiveBucketListSnapshot(); } } +void +BucketSnapshotManager::maybeCopyLiveAndHotArchiveSnapshots( + SearchableSnapshotConstPtr& liveSnapshot, + SearchableHotArchiveSnapshotConstPtr& hotArchiveSnapshot) +{ + // The canonical snapshot held by the BucketSnapshotManager is not being + // modified. 
Rather, a thread is checking its copy against the canonical + // snapshot, so use a shared lock. For consistency we hold the lock while + // updating both snapshots. + std::shared_lock lock(mSnapshotMutex); + if (needsUpdate(liveSnapshot, mCurrLiveSnapshot)) + { + liveSnapshot = copySearchableLiveBucketListSnapshot(); + } + + if (needsUpdate(hotArchiveSnapshot, mCurrHotArchiveSnapshot)) + { + hotArchiveSnapshot = copySearchableHotArchiveBucketListSnapshot(); + } +} + void BucketSnapshotManager::updateCurrentSnapshot( SnapshotPtrT&& liveSnapshot, diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h index 948a7c0ee0..ddedc01d21 100644 --- a/src/bucket/BucketSnapshotManager.h +++ b/src/bucket/BucketSnapshotManager.h @@ -99,6 +99,13 @@ class BucketSnapshotManager : NonMovableOrCopyable void maybeCopySearchableHotArchiveBucketListSnapshot( SearchableHotArchiveSnapshotConstPtr& snapshot); + // This function is the same as the snapshot refreshers above, but guarantees + // that both snapshots are consistent with the same LCL. This is required + // when querying both snapshot types as part of the same query. + void maybeCopyLiveAndHotArchiveSnapshots( + SearchableSnapshotConstPtr& liveSnapshot, + SearchableHotArchiveSnapshotConstPtr& hotArchiveSnapshot); + // All metric recording functions must only be called by the main thread void startPointLoadTimer() const; void endPointLoadTimer(LedgerEntryType t, bool bloomMiss) const; diff --git a/src/bucket/BucketUtils.cpp b/src/bucket/BucketUtils.cpp index 0647f4064d..a7ac7d31a9 100644 --- a/src/bucket/BucketUtils.cpp +++ b/src/bucket/BucketUtils.cpp @@ -3,6 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketUtils.h" +#include "bucket/BucketBase.h" +#include "util/ProtocolVersion.h" #include namespace stellar @@ -94,10 +96,26 @@ MergeCounters::operator==(MergeCounters const& other) const // Check that eviction scan is based off of current ledger snapshot and that // archival settings have not changed bool -EvictionResultCandidates::isValid(uint32_t currLedger, +EvictionResultCandidates::isValid(uint32_t currLedgerSeq, + uint32_t currLedgerVers, StateArchivalSettings const& currSas) const { - return initialLedger == currLedger && + // If the eviction scan started before a protocol upgrade, and the protocol + // upgrade changes eviction scan behavior during the scan, we need + // to restart with the new protocol version. We only care about + // `FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION`, other upgrades don't + // affect eviction scans.
+ if (protocolVersionIsBefore( + initialLedgerVers, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION) && + protocolVersionStartsFrom( + currLedgerVers, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + return false; + } + + return initialLedgerSeq == currLedgerSeq && initialSas.maxEntriesToArchive == currSas.maxEntriesToArchive && initialSas.evictionScanSize == currSas.evictionScanSize && initialSas.startingEvictionScanLevel == diff --git a/src/bucket/BucketUtils.h b/src/bucket/BucketUtils.h index dad92ac008..f7fcccd1f2 100644 --- a/src/bucket/BucketUtils.h +++ b/src/bucket/BucketUtils.h @@ -95,24 +95,28 @@ struct EvictionResultCandidates // Eviction iterator at the end of the scan region EvictionIterator endOfRegionIterator; - // LedgerSeq which this scan is based on - uint32_t initialLedger{}; + // LedgerSeq and ledger version which this scan is based on + uint32_t const initialLedgerSeq{}; + uint32_t const initialLedgerVers{}; // State archival settings that this scan is based on - StateArchivalSettings initialSas; + StateArchivalSettings const initialSas; - EvictionResultCandidates(StateArchivalSettings const& sas) : initialSas(sas) + EvictionResultCandidates(StateArchivalSettings const& sas, + uint32_t initialLedger, uint32_t initialLedgerVers) + : initialLedgerSeq(initialLedger) + , initialLedgerVers(initialLedgerVers) + , initialSas(sas) { } // Returns true if this is a valid archival scan for the current ledger // and archival settings. This is necessary because we start the scan // for ledger N immediately after N - 1 closes. However, ledger N may - // contain a network upgrade changing eviction scan settings. Legacy SQL - // scans will run based on the changes that occurred during ledger N, - // meaning the scan we started at ledger N - 1 is invalid since it was based - // off of older settings. - bool isValid(uint32_t currLedger, + // contain a network upgrade changing eviction scan settings. Eviction scans + // must be based on the network settings after applying any upgrades, so if + // this occurs we must restart the scan. 
+ bool isValid(uint32_t currLedgerSeq, uint32_t currLedgerVers, StateArchivalSettings const& currSas) const; }; diff --git a/src/bucket/HotArchiveBucket.cpp b/src/bucket/HotArchiveBucket.cpp index 6ce3ed7041..0a8d53a71e 100644 --- a/src/bucket/HotArchiveBucket.cpp +++ b/src/bucket/HotArchiveBucket.cpp @@ -36,7 +36,7 @@ HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, if (countMergeEvents) { - bucketManager.incrMergeCounters(mc); + bucketManager.incrMergeCounters(mc); } return out.getBucket(bucketManager); diff --git a/src/bucket/HotArchiveBucket.h b/src/bucket/HotArchiveBucket.h index 772ec0c22d..02e965328f 100644 --- a/src/bucket/HotArchiveBucket.h +++ b/src/bucket/HotArchiveBucket.h @@ -25,11 +25,6 @@ typedef BucketOutputIterator HotArchiveBucketOutputIterator; class HotArchiveBucket : public BucketBase, public std::enable_shared_from_this { - static std::vector - convertToBucketEntry(std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries); - public: // Entry type that this bucket stores using EntryT = HotArchiveBucketEntry; @@ -91,6 +86,11 @@ class HotArchiveBucket : public BucketBase, static std::shared_ptr bucketEntryToLoadResult(std::shared_ptr const& be); + static std::vector + convertToBucketEntry(std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); + friend class HotArchiveBucketSnapshot; }; } \ No newline at end of file diff --git a/src/bucket/HotArchiveBucketList.h b/src/bucket/HotArchiveBucketList.h index 74a467435f..b9e187adbd 100644 --- a/src/bucket/HotArchiveBucketList.h +++ b/src/bucket/HotArchiveBucketList.h @@ -15,6 +15,8 @@ namespace stellar class HotArchiveBucketList : public BucketListBase { public: + using bucket_type = HotArchiveBucket; + void addBatch(Application& app, uint32_t currLedger, uint32_t currLedgerProtocol, std::vector const& archiveEntries, diff --git a/src/bucket/LiveBucket.cpp b/src/bucket/LiveBucket.cpp index 7001baa9cc..eb3222f8da 100644 --- a/src/bucket/LiveBucket.cpp +++ b/src/bucket/LiveBucket.cpp @@ -387,7 +387,7 @@ LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, if (countMergeEvents) { - bucketManager.incrMergeCounters(mc); + bucketManager.incrMergeCounters(mc); } return out.getBucket(bucketManager); diff --git a/src/bucket/LiveBucketList.h b/src/bucket/LiveBucketList.h index 0f2a6ac268..6069949033 100644 --- a/src/bucket/LiveBucketList.h +++ b/src/bucket/LiveBucketList.h @@ -17,6 +17,8 @@ namespace stellar class LiveBucketList : public BucketListBase { public: + using bucket_type = LiveBucket; + // Reset Eviction Iterator position if an incoming spill or upgrade has // invalidated the previous position static void updateStartingEvictionIterator(EvictionIterator& iter, diff --git a/src/bucket/SearchableBucketList.cpp b/src/bucket/SearchableBucketList.cpp index 47fac8e742..a08a4f957f 100644 --- a/src/bucket/SearchableBucketList.cpp +++ b/src/bucket/SearchableBucketList.cpp @@ -31,7 +31,7 @@ SearchableLiveBucketListSnapshot::scanForEviction( LiveBucketList::updateStartingEvictionIterator( evictionIter, sas.startingEvictionScanLevel, ledgerSeq); - EvictionResultCandidates result(sas); + EvictionResultCandidates result(sas, ledgerSeq, ledgerVers); auto startIter = evictionIter; auto scanSize = sas.evictionScanSize; @@ -59,7 +59,6 @@ SearchableLiveBucketListSnapshot::scanForEviction( } result.endOfRegionIterator = evictionIter; - result.initialLedger = ledgerSeq; return result; } 
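
Before the test changes below, a few of the mechanisms introduced above are worth illustrating outside the diff. First, the snapshot refresh: the new maybeCopyLiveAndHotArchiveSnapshots takes the shared lock once and refreshes the live and hot-archive snapshots together, so a caller that queries both sees them at the same last-closed ledger. The sketch below captures that idea with simplified stand-in types; Snapshot, SnapshotManagerSketch, and the hard-coded ledger number are invented for illustration and are not the stellar-core classes.

// Sketch only: simplified stand-ins for the snapshot machinery.
#include <cstdint>
#include <iostream>
#include <memory>
#include <shared_mutex>

struct Snapshot
{
    uint32_t ledgerSeq;
};
using SnapshotPtr = std::shared_ptr<Snapshot const>;

// Mirrors the new needsUpdate() helper: refresh if we have no snapshot yet
// or ours is older than the canonical one.
bool
needsUpdate(SnapshotPtr const& have, SnapshotPtr const& curr)
{
    return !have || have->ledgerSeq < curr->ledgerSeq;
}

class SnapshotManagerSketch
{
    mutable std::shared_mutex mSnapshotMutex;
    SnapshotPtr mCurrLive = std::make_shared<Snapshot const>(Snapshot{7});
    SnapshotPtr mCurrHotArchive = std::make_shared<Snapshot const>(Snapshot{7});

  public:
    // Refresh both under a single shared lock so the caller never observes a
    // live snapshot from one ledger and a hot-archive snapshot from another.
    void
    maybeCopyLiveAndHotArchiveSnapshots(SnapshotPtr& live,
                                        SnapshotPtr& hot) const
    {
        std::shared_lock<std::shared_mutex> lock(mSnapshotMutex);
        if (needsUpdate(live, mCurrLive))
        {
            live = mCurrLive; // the real code copies a searchable snapshot
        }
        if (needsUpdate(hot, mCurrHotArchive))
        {
            hot = mCurrHotArchive;
        }
    }
};

int
main()
{
    SnapshotManagerSketch mgr;
    SnapshotPtr live, hot;
    mgr.maybeCopyLiveAndHotArchiveSnapshots(live, hot);
    std::cout << live->ledgerSeq << " == " << hot->ledgerSeq << "\n"; // 7 == 7
    return 0;
}

Holding one lock across both refreshes is what guarantees the pair can never straddle a ledger boundary, which is the consistency the new header comment promises.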
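
Second, the eviction-scan bookkeeping: EvictionResultCandidates now records the ledger sequence and protocol version the scan started under, and isValid additionally rejects a result when the closing ledger crosses the persistent-eviction protocol boundary, in which case resolveBackgroundEvictionScan restarts the scan. A minimal sketch of that check follows; the struct and the protocol number 23 (suggested by the "before p23" comment in the tests) are simplified stand-ins, not the real types or constant.

// Sketch only: illustrates why the scan result carries its starting
// ledger sequence and protocol version.
#include <cstdint>
#include <iostream>

// Assumed value for illustration; the tests refer to persistent eviction as "p23".
constexpr uint32_t FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION = 23;

struct StateArchivalSettings
{
    uint32_t maxEntriesToArchive = 100;
    uint32_t evictionScanSize = 100000;
    uint32_t startingEvictionScanLevel = 6;
};

// Stand-in for EvictionResultCandidates: remembers what the scan was based on.
struct EvictionScanSketch
{
    uint32_t initialLedgerSeq;
    uint32_t initialLedgerVers;
    StateArchivalSettings initialSas;

    // The result may only be applied if nothing it depends on changed while
    // the scan ran: same ledger, same archival settings, and no upgrade
    // across the persistent-eviction protocol boundary.
    bool
    isValid(uint32_t currLedgerSeq, uint32_t currLedgerVers,
            StateArchivalSettings const& currSas) const
    {
        bool crossedEvictionUpgrade =
            initialLedgerVers < FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION &&
            currLedgerVers >= FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION;
        return !crossedEvictionUpgrade && initialLedgerSeq == currLedgerSeq &&
               initialSas.maxEntriesToArchive == currSas.maxEntriesToArchive &&
               initialSas.evictionScanSize == currSas.evictionScanSize &&
               initialSas.startingEvictionScanLevel ==
                   currSas.startingEvictionScanLevel;
    }
};

int
main()
{
    StateArchivalSettings sas;
    // Scan kicked off for ledger 100 (right after ledger 99 closed) under
    // protocol 22...
    EvictionScanSketch scan{100, 22, sas};
    // ...but ledger 100 contained an upgrade to protocol 23, so the result
    // must be discarded and the scan restarted.
    std::cout << scan.isValid(100, 23, sas) << "\n"; // prints 0
    std::cout << scan.isValid(100, 22, sas) << "\n"; // prints 1
    return 0;
}

In the real flow this predicate decides whether the result computed off ledger N-1's snapshot may be applied while closing ledger N, or whether startBackgroundEvictionScan must run again first.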
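
Finally, the merge counters: BucketManager now keeps separate mLiveMergeCounters and mHotArchiveMergeCounters, with readMergeCounters and incrMergeCounters turned into templates specialized per bucket type, and the tests below read the two sets independently. The sketch below shows the shape of that pattern; it uses tag types and if constexpr for brevity where the diff uses explicit specializations, and carries only one counter field, so it is an illustration rather than the real class.

// Sketch only: per-bucket-list merge counters selected by a template parameter.
#include <cstdint>
#include <iostream>
#include <mutex>
#include <type_traits>

struct MergeCounters
{
    uint64_t mFinishedMergeReattachments = 0;
    MergeCounters&
    operator+=(MergeCounters const& o)
    {
        mFinishedMergeReattachments += o.mFinishedMergeReattachments;
        return *this;
    }
};

// Tag types standing in for the real bucket classes.
struct LiveBucket
{
};
struct HotArchiveBucket
{
};

class BucketManagerSketch
{
    std::mutex mBucketMutex;
    MergeCounters mLiveMergeCounters;
    MergeCounters mHotArchiveMergeCounters;

    template <typename BucketT>
    MergeCounters&
    countersFor()
    {
        if constexpr (std::is_same_v<BucketT, LiveBucket>)
        {
            return mLiveMergeCounters;
        }
        else
        {
            return mHotArchiveMergeCounters;
        }
    }

  public:
    // Both accessors briefly take the lock, so any thread may call them.
    template <typename BucketT>
    MergeCounters
    readMergeCounters()
    {
        std::lock_guard<std::mutex> lock(mBucketMutex);
        return countersFor<BucketT>();
    }

    template <typename BucketT>
    void
    incrMergeCounters(MergeCounters const& delta)
    {
        std::lock_guard<std::mutex> lock(mBucketMutex);
        countersFor<BucketT>() += delta;
    }
};

int
main()
{
    BucketManagerSketch bm;
    MergeCounters delta;
    delta.mFinishedMergeReattachments = 1;
    bm.incrMergeCounters<LiveBucket>(delta);
    std::cout << bm.readMergeCounters<LiveBucket>().mFinishedMergeReattachments
              << " "
              << bm.readMergeCounters<HotArchiveBucket>()
                     .mFinishedMergeReattachments
              << "\n"; // prints "1 0"
    return 0;
}

Callers choose the counter set by instantiating with the bucket type, for example incrMergeCounters<LiveBucket>(mc) from a live-bucket merge and incrMergeCounters<HotArchiveBucket>(mc) from a hot-archive merge, while the shared mutex keeps the bulk reads and updates safe from any thread.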
diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index 0a5b545097..ed5771dd31 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -1139,9 +1139,10 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist][archival]") // Close ledgers until evicted DEADENTRYs merge with // original INITENTRYs. This checks that BucketList // invariants are respected - for (auto initialDeadMerges = - bm.readMergeCounters().mOldInitEntriesMergedWithNewDead; - bm.readMergeCounters().mOldInitEntriesMergedWithNewDead < + for (auto initialDeadMerges = bm.readMergeCounters() + .mOldInitEntriesMergedWithNewDead; + bm.readMergeCounters() + .mOldInitEntriesMergedWithNewDead < initialDeadMerges + tempEntries.size(); ++ledgerSeq) { diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 5c22b3b997..34a63a6ed8 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -27,7 +27,10 @@ #include "test/test.h" #include "util/GlobalChecks.h" #include "util/Math.h" +#include "util/ProtocolVersion.h" #include "util/Timer.h" +#include "util/UnorderedSet.h" +#include "xdr/Stellar-ledger-entries.h" #include #include @@ -329,11 +332,33 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", BucketManager& bm = app->getBucketManager(); LiveBucketList& bl = bm.getLiveBucketList(); + HotArchiveBucketList& hotArchive = bm.getHotArchiveBucketList(); auto vers = getAppLedgerVersion(app); - + bool hasHotArchive = protocolVersionStartsFrom( + vers, LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); // Add some entries to get to a nontrivial merge-state. uint32_t ledger = 0; - uint32_t level = 3; + uint32_t level = 4; + UnorderedSet addedHotArchiveKeys; + + // To prevent duplicate merges that can interfere with counters, seed + // the starting Bucket so that each merge is unique. Otherwise, the + // first call to addBatch will merge [{first_batch}, empty_bucket]. We + // will then see other instances of [{first_batch}, empty_bucket] merges + // later on as the Bucket moves its way down the bl. By providing a + // seeded bucket, the first addBatch is a [{first_batch}, seeded_bucket] + // merge, which will not be duplicated by empty bucket merges later. The + // live BL is automatically seeded with the genesis ledger. 
+ if (hasHotArchive) + { + auto initialHotArchiveBucket = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, addedHotArchiveKeys); + hotArchive.getLevel(0).setCurr(HotArchiveBucket::fresh( + bm, vers, {}, initialHotArchiveBucket, {}, {}, + clock.getIOContext(), /*doFsync=*/true)); + } + do { ++ledger; @@ -345,6 +370,15 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", LedgerTestUtils::generateValidLedgerEntriesWithExclusions( {CONFIG_SETTING}, 10), {}); + if (protocolVersionStartsFrom( + vers, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + addHotArchiveBatchAndUpdateSnapshot( + *app, lh, {}, {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, addedHotArchiveKeys)); + } bm.forgetUnreferencedBuckets( app->getLedgerManager().getLastClosedLedgerHAS()); } while (!LiveBucketList::levelShouldSpill(ledger, level - 1)); @@ -354,17 +388,42 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", // eagerly) REQUIRE(bl.getLevel(level).getNext().isMerging()); + HistoryArchiveState has; + if (hasHotArchive) + { + REQUIRE(hotArchive.getLevel(level).getNext().isMerging()); + has = HistoryArchiveState(ledger, bl, hotArchive, + app->getConfig().NETWORK_PASSPHRASE); + REQUIRE(has.hasHotArchiveBuckets()); + } + else + { + has = HistoryArchiveState(ledger, bl, + app->getConfig().NETWORK_PASSPHRASE); + REQUIRE(!has.hasHotArchiveBuckets()); + } + // Serialize HAS. - HistoryArchiveState has(ledger, bl, - app->getConfig().NETWORK_PASSPHRASE); std::string serialHas = has.toString(); // Simulate level committing (and the FutureBucket clearing), // followed by the typical ledger-close bucket GC event. bl.getLevel(level).commit(); REQUIRE(!bl.getLevel(level).getNext().isMerging()); - auto ra = bm.readMergeCounters().mFinishedMergeReattachments; - REQUIRE(ra == 0); + if (hasHotArchive) + { + hotArchive.getLevel(level).commit(); + REQUIRE(!hotArchive.getLevel(level).getNext().isMerging()); + } + + REQUIRE( + bm.readMergeCounters().mFinishedMergeReattachments == + 0); + if (hasHotArchive) + { + REQUIRE(bm.readMergeCounters() + .mFinishedMergeReattachments == 0); + } // Deserialize HAS. HistoryArchiveState has2; @@ -375,12 +434,29 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", *app, vers, LiveBucketList::keepTombstoneEntries(level)); REQUIRE(has2.currentBuckets[level].next.isMerging()); + if (hasHotArchive) + { + has2.hotArchiveBuckets[level].next.makeLive( + *app, vers, HotArchiveBucketList::keepTombstoneEntries(level)); + REQUIRE(has2.hotArchiveBuckets[level].next.isMerging()); + + // Resolve reattached future. + has2.hotArchiveBuckets[level].next.resolve(); + } + // Resolve reattached future. has2.currentBuckets[level].next.resolve(); - // Check that we reattached to a finished merge. - ra = bm.readMergeCounters().mFinishedMergeReattachments; - REQUIRE(ra != 0); + // Check that we reattached to one finished merge per bl. 
+ if (hasHotArchive) + { + REQUIRE(bm.readMergeCounters() + .mFinishedMergeReattachments == 1); + } + + REQUIRE( + bm.readMergeCounters().mFinishedMergeReattachments == + 1); }); } @@ -397,7 +473,10 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", BucketManager& bm = app->getBucketManager(); LiveBucketList& bl = bm.getLiveBucketList(); + HotArchiveBucketList& hotArchive = bm.getHotArchiveBucketList(); auto vers = getAppLedgerVersion(app); + bool hasHotArchive = protocolVersionStartsFrom( + vers, LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); // This test is a race that will (if all goes well) eventually be won: // we keep trying to do an immediate-reattach to a running merge and @@ -420,8 +499,28 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", // testsuite with no explanation. uint32_t ledger = 0; uint32_t limit = 10000; - while (ledger < limit && - bm.readMergeCounters().mRunningMergeReattachments == 0) + + // Iterate until we've reached the limit, or stop early if both the Hot + // Archive and live BucketList have seen a running merge reattachment. + auto cond = [&]() { + bool reattachmentsNotFinished; + if (hasHotArchive) + { + reattachmentsNotFinished = + bm.readMergeCounters() + .mRunningMergeReattachments < 1 || + bm.readMergeCounters() + .mRunningMergeReattachments < 1; + } + else + { + reattachmentsNotFinished = bm.readMergeCounters() + .mRunningMergeReattachments < 1; + } + return ledger < limit && reattachmentsNotFinished; + }; + + while (cond()) { ++ledger; // Merges will start on one or more levels here, starting a race @@ -435,12 +534,30 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 100), {}); + if (hasHotArchive) + { + addHotArchiveBatchAndUpdateSnapshot( + *app, lh, + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_CODE}, 100), + {}, {}); + } bm.forgetUnreferencedBuckets( app->getLedgerManager().getLastClosedLedgerHAS()); - HistoryArchiveState has(ledger, bl, - app->getConfig().NETWORK_PASSPHRASE); + HistoryArchiveState has; + if (hasHotArchive) + { + has = HistoryArchiveState(ledger, bl, hotArchive, + app->getConfig().NETWORK_PASSPHRASE); + } + else + { + has = HistoryArchiveState(ledger, bl, + app->getConfig().NETWORK_PASSPHRASE); + } + std::string serialHas = has.toString(); // Deserialize and reactivate levels of HAS. Races with the merge @@ -460,12 +577,32 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", LiveBucketList::keepTombstoneEntries(level)); } } + + for (uint32_t level = 0; level < has2.hotArchiveBuckets.size(); + ++level) + { + if (has2.hotArchiveBuckets[level].next.hasHashes()) + { + has2.hotArchiveBuckets[level].next.makeLive( + *app, vers, + HotArchiveBucketList::keepTombstoneEntries(level)); + } + } } CLOG_INFO(Bucket, "reattached to running merge at or around ledger {}", ledger); REQUIRE(ledger < limit); - auto ra = bm.readMergeCounters().mRunningMergeReattachments; - REQUIRE(ra != 0); + + // Because there is a race, we can't guarantee that we'll see exactly 1 + // reattachment, but we should see at least 1. 
+ if (hasHotArchive) + { + REQUIRE(bm.readMergeCounters() + .mRunningMergeReattachments >= 1); + } + + REQUIRE(bm.readMergeCounters().mRunningMergeReattachments >= + 1); }); } @@ -555,64 +692,101 @@ TEST_CASE_VERSIONS( for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { VirtualClock clock; - Application::pointer app = createTestApplication(clock, cfg); + auto app = createTestApplication(clock, cfg); auto vers = getAppLedgerVersion(app); auto& hm = app->getHistoryManager(); auto& bm = app->getBucketManager(); + auto& lm = app->getLedgerManager(); + bool hasHotArchive = protocolVersionStartsFrom( + vers, LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); hm.setPublicationEnabled(false); app->getHistoryArchiveManager().initializeHistoryArchive( tcfg.getArchiveDirName()); + UnorderedSet hotArchiveKeys{}; + auto lastLcl = lm.getLastClosedLedgerNum(); while (hm.getPublishQueueCount() < 5) { // Do not merge this line with the next line: CLOG and // readMergeCounters each acquire a mutex, and it's possible to // deadlock with one of the worker threads if you try to hold them // both at the same time. - auto ra = bm.readMergeCounters().mFinishedMergeReattachments; - CLOG_INFO(Bucket, "finished-merge reattachments while queueing: {}", - ra); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq++; - addLiveBatchAndUpdateSnapshot( - *app, lh, {}, - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 100), - {}); + auto ra = + bm.readMergeCounters().mFinishedMergeReattachments; + auto raHotArchive = bm.readMergeCounters() + .mFinishedMergeReattachments; + CLOG_INFO(Bucket, + "finished-merge reattachments while queueing: live " + "BucketList {}, Hot Archive BucketList {}", + ra, raHotArchive); + if (lm.getLastClosedLedgerNum() != lastLcl) + { + lastLcl = lm.getLastClosedLedgerNum(); + lm.setNextLedgerEntryBatchForBucketTesting( + {}, + LedgerTestUtils:: + generateValidUniqueLedgerEntriesWithExclusions( + {CONFIG_SETTING}, 100), + {}); + if (hasHotArchive) + { + lm.setNextArchiveBatchForBucketTesting( + {}, {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, hotArchiveKeys)); + } + } + clock.crank(false); bm.forgetUnreferencedBuckets( app->getLedgerManager().getLastClosedLedgerHAS()); } + // We should have published nothing and have the first // checkpoint still queued. 
REQUIRE(hm.getPublishSuccessCount() == 0); REQUIRE(HistoryManager::getMinLedgerQueuedToPublish(app->getConfig()) == 7); - auto oldReattachments = - bm.readMergeCounters().mFinishedMergeReattachments; + auto oldLiveReattachments = + bm.readMergeCounters().mFinishedMergeReattachments; + auto oldHotArchiveReattachments = + bm.readMergeCounters() + .mFinishedMergeReattachments; auto HASs = HistoryManager::getPublishQueueStates(app->getConfig()); REQUIRE(HASs.size() == 5); for (auto& has : HASs) { has.prepareForPublish(*app); + REQUIRE(has.hasHotArchiveBuckets() == hasHotArchive); } - auto ra = bm.readMergeCounters().mFinishedMergeReattachments; + auto liveRa = + bm.readMergeCounters().mFinishedMergeReattachments; + auto hotArchiveRa = bm.readMergeCounters() + .mFinishedMergeReattachments; if (protocolVersionIsBefore(vers, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { // Versions prior to FIRST_PROTOCOL_SHADOWS_REMOVED re-attach to // finished merges - REQUIRE(ra > oldReattachments); + REQUIRE(liveRa > oldLiveReattachments); CLOG_INFO(Bucket, - "finished-merge reattachments after making-live: {}", ra); + "finished-merge reattachments after making-live: {}", + liveRa); + + // Sanity check: Hot archive disabled in older protocols + releaseAssert(!hasHotArchive); } else { // Versions after FIRST_PROTOCOL_SHADOWS_REMOVED do not re-attach, // because merges are cleared - REQUIRE(ra == oldReattachments); + REQUIRE(liveRa == oldLiveReattachments); + + if (hasHotArchive) + { + REQUIRE(hotArchiveRa == oldHotArchiveReattachments); + } } // Un-cork the publication process, nothing should be broken. @@ -661,10 +835,11 @@ TEST_CASE_VERSIONS( // 2048). class StopAndRestartBucketMergesTest { + template static void - resolveAllMerges(LiveBucketList& bl) + resolveAllMerges(BucketListT& bl) { - for (uint32 i = 0; i < LiveBucketList::kNumLevels; ++i) + for (uint32 i = 0; i < BucketListT::kNumLevels; ++i) { auto& level = bl.getLevel(i); auto& next = level.getNext(); @@ -680,226 +855,332 @@ class StopAndRestartBucketMergesTest Hash mCurrBucketHash; Hash mSnapBucketHash; Hash mBucketListHash; + Hash mHotArchiveBucketListHash; Hash mLedgerHeaderHash; - MergeCounters mMergeCounters; + MergeCounters mLiveMergeCounters; + MergeCounters mHotArchiveMergeCounters; void - dumpMergeCounters(std::string const& label, uint32_t level) const + checkEmptyHotArchiveMetrics() const { - CLOG_INFO(Bucket, "MergeCounters: {} (designated level: {})", label, - level); - CLOG_INFO(Bucket, "PreInitEntryProtocolMerges: {}", - mMergeCounters.mPreInitEntryProtocolMerges); - CLOG_INFO(Bucket, "PostInitEntryProtocolMerges: {}", - mMergeCounters.mPostInitEntryProtocolMerges); - CLOG_INFO(Bucket, "mPreShadowRemovalProtocolMerges: {}", - mMergeCounters.mPreShadowRemovalProtocolMerges); - CLOG_INFO(Bucket, "mPostShadowRemovalProtocolMerges: {}", - mMergeCounters.mPostShadowRemovalProtocolMerges); - CLOG_INFO(Bucket, "RunningMergeReattachments: {}", - mMergeCounters.mRunningMergeReattachments); - CLOG_INFO(Bucket, "FinishedMergeReattachments: {}", - mMergeCounters.mFinishedMergeReattachments); - CLOG_INFO(Bucket, "NewMetaEntries: {}", - mMergeCounters.mNewMetaEntries); - CLOG_INFO(Bucket, "NewInitEntries: {}", - mMergeCounters.mNewInitEntries); - CLOG_INFO(Bucket, "NewLiveEntries: {}", - mMergeCounters.mNewLiveEntries); - CLOG_INFO(Bucket, "NewDeadEntries: {}", - mMergeCounters.mNewDeadEntries); - CLOG_INFO(Bucket, "OldMetaEntries: {}", - mMergeCounters.mOldMetaEntries); - CLOG_INFO(Bucket, "OldInitEntries: {}", - mMergeCounters.mOldInitEntries); 
- CLOG_INFO(Bucket, "OldLiveEntries: {}", - mMergeCounters.mOldLiveEntries); - CLOG_INFO(Bucket, "OldDeadEntries: {}", - mMergeCounters.mOldDeadEntries); - CLOG_INFO(Bucket, "OldEntriesDefaultAccepted: {}", - mMergeCounters.mOldEntriesDefaultAccepted); - CLOG_INFO(Bucket, "NewEntriesDefaultAccepted: {}", - mMergeCounters.mNewEntriesDefaultAccepted); - CLOG_INFO(Bucket, "NewInitEntriesMergedWithOldDead: {}", - mMergeCounters.mNewInitEntriesMergedWithOldDead); - CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewLive: {}", - mMergeCounters.mOldInitEntriesMergedWithNewLive); - CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewDead: {}", - mMergeCounters.mOldInitEntriesMergedWithNewDead); - CLOG_INFO(Bucket, "NewEntriesMergedWithOldNeitherInit: {}", - mMergeCounters.mNewEntriesMergedWithOldNeitherInit); - CLOG_INFO(Bucket, "ShadowScanSteps: {}", - mMergeCounters.mShadowScanSteps); - CLOG_INFO(Bucket, "MetaEntryShadowElisions: {}", - mMergeCounters.mMetaEntryShadowElisions); - CLOG_INFO(Bucket, "LiveEntryShadowElisions: {}", - mMergeCounters.mLiveEntryShadowElisions); - CLOG_INFO(Bucket, "InitEntryShadowElisions: {}", - mMergeCounters.mInitEntryShadowElisions); - CLOG_INFO(Bucket, "DeadEntryShadowElisions: {}", - mMergeCounters.mDeadEntryShadowElisions); - CLOG_INFO(Bucket, "OutputIteratorTombstoneElisions: {}", - mMergeCounters.mOutputIteratorTombstoneElisions); - CLOG_INFO(Bucket, "OutputIteratorBufferUpdates: {}", - mMergeCounters.mOutputIteratorBufferUpdates); - CLOG_INFO(Bucket, "OutputIteratorActualWrites: {}", - mMergeCounters.mOutputIteratorActualWrites); + // If before p23, check that all hot archive metrics are zero + CHECK(mHotArchiveMergeCounters.mPreInitEntryProtocolMerges == 0); + CHECK(mHotArchiveMergeCounters.mPostInitEntryProtocolMerges == 0); + CHECK(mHotArchiveMergeCounters.mPreShadowRemovalProtocolMerges == + 0); + CHECK(mHotArchiveMergeCounters.mPostShadowRemovalProtocolMerges == + 0); + CHECK(mHotArchiveMergeCounters.mNewMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewDeadEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldDeadEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldEntriesDefaultAccepted == 0); + CHECK(mHotArchiveMergeCounters.mNewEntriesDefaultAccepted == 0); + CHECK(mHotArchiveMergeCounters.mNewInitEntriesMergedWithOldDead == + 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewLive == + 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewDead == + 0); + CHECK( + mHotArchiveMergeCounters.mNewEntriesMergedWithOldNeitherInit == + 0); + CHECK(mHotArchiveMergeCounters.mShadowScanSteps == 0); + CHECK(mHotArchiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mLiveEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mDeadEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorBufferUpdates == 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorActualWrites == 0); + } + + void + dumpMergeCounters(std::string const& label, uint32_t level, + uint32_t protocol) const + { + auto dumpCounters = [&](std::string const& label, uint32_t level, + MergeCounters const& counters) { + CLOG_INFO(Bucket, "MergeCounters: {} (designated level: {})", + label, 
level); + CLOG_INFO(Bucket, "PreInitEntryProtocolMerges: {}", + counters.mPreInitEntryProtocolMerges); + CLOG_INFO(Bucket, "PostInitEntryProtocolMerges: {}", + counters.mPostInitEntryProtocolMerges); + CLOG_INFO(Bucket, "mPreShadowRemovalProtocolMerges: {}", + counters.mPreShadowRemovalProtocolMerges); + CLOG_INFO(Bucket, "mPostShadowRemovalProtocolMerges: {}", + counters.mPostShadowRemovalProtocolMerges); + CLOG_INFO(Bucket, "RunningMergeReattachments: {}", + counters.mRunningMergeReattachments); + CLOG_INFO(Bucket, "FinishedMergeReattachments: {}", + counters.mFinishedMergeReattachments); + CLOG_INFO(Bucket, "NewMetaEntries: {}", + counters.mNewMetaEntries); + CLOG_INFO(Bucket, "NewInitEntries: {}", + counters.mNewInitEntries); + CLOG_INFO(Bucket, "NewLiveEntries: {}", + counters.mNewLiveEntries); + CLOG_INFO(Bucket, "NewDeadEntries: {}", + counters.mNewDeadEntries); + CLOG_INFO(Bucket, "OldMetaEntries: {}", + counters.mOldMetaEntries); + CLOG_INFO(Bucket, "OldInitEntries: {}", + counters.mOldInitEntries); + CLOG_INFO(Bucket, "OldLiveEntries: {}", + counters.mOldLiveEntries); + CLOG_INFO(Bucket, "OldDeadEntries: {}", + counters.mOldDeadEntries); + CLOG_INFO(Bucket, "OldEntriesDefaultAccepted: {}", + counters.mOldEntriesDefaultAccepted); + CLOG_INFO(Bucket, "NewEntriesDefaultAccepted: {}", + counters.mNewEntriesDefaultAccepted); + CLOG_INFO(Bucket, "NewInitEntriesMergedWithOldDead: {}", + counters.mNewInitEntriesMergedWithOldDead); + CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewLive: {}", + counters.mOldInitEntriesMergedWithNewLive); + CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewDead: {}", + counters.mOldInitEntriesMergedWithNewDead); + CLOG_INFO(Bucket, "NewEntriesMergedWithOldNeitherInit: {}", + counters.mNewEntriesMergedWithOldNeitherInit); + CLOG_INFO(Bucket, "ShadowScanSteps: {}", + counters.mShadowScanSteps); + CLOG_INFO(Bucket, "MetaEntryShadowElisions: {}", + counters.mMetaEntryShadowElisions); + CLOG_INFO(Bucket, "LiveEntryShadowElisions: {}", + counters.mLiveEntryShadowElisions); + CLOG_INFO(Bucket, "InitEntryShadowElisions: {}", + counters.mInitEntryShadowElisions); + CLOG_INFO(Bucket, "DeadEntryShadowElisions: {}", + counters.mDeadEntryShadowElisions); + CLOG_INFO(Bucket, "OutputIteratorTombstoneElisions: {}", + counters.mOutputIteratorTombstoneElisions); + CLOG_INFO(Bucket, "OutputIteratorBufferUpdates: {}", + counters.mOutputIteratorBufferUpdates); + CLOG_INFO(Bucket, "OutputIteratorActualWrites: {}", + counters.mOutputIteratorActualWrites); + }; + + dumpCounters(label + " (live)", level, mLiveMergeCounters); + if (protocolVersionStartsFrom( + protocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + dumpCounters(label + " (hot)", level, mHotArchiveMergeCounters); + } } void checkSensiblePostInitEntryMergeCounters(uint32_t protocol) const { - CHECK(mMergeCounters.mPostInitEntryProtocolMerges != 0); + // Check live merge counters + CHECK(mLiveMergeCounters.mPostInitEntryProtocolMerges != 0); if (protocolVersionIsBefore( protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { - CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges == 0); + CHECK(mLiveMergeCounters.mPostShadowRemovalProtocolMerges == 0); } else { - CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges != 0); + CHECK(mLiveMergeCounters.mPostShadowRemovalProtocolMerges != 0); } - CHECK(mMergeCounters.mNewMetaEntries == 0); - CHECK(mMergeCounters.mNewInitEntries != 0); - CHECK(mMergeCounters.mNewLiveEntries != 0); - CHECK(mMergeCounters.mNewDeadEntries != 0); + 
CHECK(mLiveMergeCounters.mNewMetaEntries == 0); + CHECK(mLiveMergeCounters.mNewInitEntries != 0); + CHECK(mLiveMergeCounters.mNewLiveEntries != 0); + CHECK(mLiveMergeCounters.mNewDeadEntries != 0); - CHECK(mMergeCounters.mOldMetaEntries == 0); - CHECK(mMergeCounters.mOldInitEntries != 0); - CHECK(mMergeCounters.mOldLiveEntries != 0); - CHECK(mMergeCounters.mOldDeadEntries != 0); + CHECK(mLiveMergeCounters.mOldMetaEntries == 0); + CHECK(mLiveMergeCounters.mOldInitEntries != 0); + CHECK(mLiveMergeCounters.mOldLiveEntries != 0); + CHECK(mLiveMergeCounters.mOldDeadEntries != 0); - CHECK(mMergeCounters.mOldEntriesDefaultAccepted != 0); - CHECK(mMergeCounters.mNewEntriesDefaultAccepted != 0); - CHECK(mMergeCounters.mNewInitEntriesMergedWithOldDead != 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewLive != 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead != 0); - CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); + CHECK(mLiveMergeCounters.mOldEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewInitEntriesMergedWithOldDead != 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewLive != 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewDead != 0); + CHECK(mLiveMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); if (protocolVersionIsBefore( protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { - CHECK(mMergeCounters.mShadowScanSteps != 0); - CHECK(mMergeCounters.mLiveEntryShadowElisions != 0); + CHECK(mLiveMergeCounters.mShadowScanSteps != 0); + CHECK(mLiveMergeCounters.mLiveEntryShadowElisions != 0); } else { - CHECK(mMergeCounters.mShadowScanSteps == 0); - CHECK(mMergeCounters.mLiveEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mShadowScanSteps == 0); + CHECK(mLiveMergeCounters.mLiveEntryShadowElisions == 0); } - CHECK(mMergeCounters.mMetaEntryShadowElisions == 0); - CHECK(mMergeCounters.mInitEntryShadowElisions == 0); - CHECK(mMergeCounters.mDeadEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mDeadEntryShadowElisions == 0); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates != 0); - CHECK(mMergeCounters.mOutputIteratorActualWrites != 0); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates >= - mMergeCounters.mOutputIteratorActualWrites); + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates != 0); + CHECK(mLiveMergeCounters.mOutputIteratorActualWrites != 0); + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates >= + mLiveMergeCounters.mOutputIteratorActualWrites); + + // Check hot archive merge counters + if (protocolVersionStartsFrom( + protocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + CHECK(mHotArchiveMergeCounters.mPostInitEntryProtocolMerges == + 0); + CHECK( + mHotArchiveMergeCounters.mPostShadowRemovalProtocolMerges == + 0); + + CHECK(mHotArchiveMergeCounters.mNewMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewDeadEntries == 0); + + CHECK(mHotArchiveMergeCounters.mOldMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldDeadEntries == 0); + + CHECK(mHotArchiveMergeCounters.mOldEntriesDefaultAccepted != 0); + CHECK(mHotArchiveMergeCounters.mNewEntriesDefaultAccepted != 0); + 
CHECK( + mHotArchiveMergeCounters.mNewInitEntriesMergedWithOldDead == + 0); + CHECK( + mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewLive == + 0); + CHECK( + mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewDead == + 0); + CHECK(mHotArchiveMergeCounters + .mNewEntriesMergedWithOldNeitherInit == 0); + + CHECK(mHotArchiveMergeCounters.mShadowScanSteps == 0); + CHECK(mHotArchiveMergeCounters.mLiveEntryShadowElisions == 0); + + CHECK(mHotArchiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mDeadEntryShadowElisions == 0); + + CHECK(mHotArchiveMergeCounters.mOutputIteratorBufferUpdates != + 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorActualWrites != + 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorBufferUpdates >= + mHotArchiveMergeCounters.mOutputIteratorActualWrites); + } + else + { + checkEmptyHotArchiveMetrics(); + } } void - checkSensiblePreInitEntryMergeCounters() const + checkSensiblePreInitEntryMergeCounters(uint32_t protocol) const { - CHECK(mMergeCounters.mPreInitEntryProtocolMerges != 0); - CHECK(mMergeCounters.mPreShadowRemovalProtocolMerges != 0); - - CHECK(mMergeCounters.mNewMetaEntries == 0); - CHECK(mMergeCounters.mNewInitEntries == 0); - CHECK(mMergeCounters.mNewLiveEntries != 0); - CHECK(mMergeCounters.mNewDeadEntries != 0); - - CHECK(mMergeCounters.mOldMetaEntries == 0); - CHECK(mMergeCounters.mOldInitEntries == 0); - CHECK(mMergeCounters.mOldLiveEntries != 0); - CHECK(mMergeCounters.mOldDeadEntries != 0); - - CHECK(mMergeCounters.mOldEntriesDefaultAccepted != 0); - CHECK(mMergeCounters.mNewEntriesDefaultAccepted != 0); - CHECK(mMergeCounters.mNewInitEntriesMergedWithOldDead == 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewLive == 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead == 0); - CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); - - CHECK(mMergeCounters.mShadowScanSteps != 0); - CHECK(mMergeCounters.mMetaEntryShadowElisions == 0); - CHECK(mMergeCounters.mLiveEntryShadowElisions != 0); - CHECK(mMergeCounters.mInitEntryShadowElisions == 0); - CHECK(mMergeCounters.mDeadEntryShadowElisions != 0); - - CHECK(mMergeCounters.mOutputIteratorBufferUpdates != 0); - CHECK(mMergeCounters.mOutputIteratorActualWrites != 0); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates >= - mMergeCounters.mOutputIteratorActualWrites); + CHECK(mLiveMergeCounters.mPreInitEntryProtocolMerges != 0); + CHECK(mLiveMergeCounters.mPreShadowRemovalProtocolMerges != 0); + + CHECK(mLiveMergeCounters.mNewMetaEntries == 0); + CHECK(mLiveMergeCounters.mNewInitEntries == 0); + CHECK(mLiveMergeCounters.mNewLiveEntries != 0); + CHECK(mLiveMergeCounters.mNewDeadEntries != 0); + + CHECK(mLiveMergeCounters.mOldMetaEntries == 0); + CHECK(mLiveMergeCounters.mOldInitEntries == 0); + CHECK(mLiveMergeCounters.mOldLiveEntries != 0); + CHECK(mLiveMergeCounters.mOldDeadEntries != 0); + + CHECK(mLiveMergeCounters.mOldEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewInitEntriesMergedWithOldDead == 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewLive == 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewDead == 0); + CHECK(mLiveMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); + + CHECK(mLiveMergeCounters.mShadowScanSteps != 0); + CHECK(mLiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mLiveEntryShadowElisions != 0); + 
CHECK(mLiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mDeadEntryShadowElisions != 0); + + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates != 0); + CHECK(mLiveMergeCounters.mOutputIteratorActualWrites != 0); + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates >= + mLiveMergeCounters.mOutputIteratorActualWrites); } void checkEqualMergeCounters(Survey const& other) const { - CHECK(mMergeCounters.mPreInitEntryProtocolMerges == - other.mMergeCounters.mPreInitEntryProtocolMerges); - CHECK(mMergeCounters.mPostInitEntryProtocolMerges == - other.mMergeCounters.mPostInitEntryProtocolMerges); - - CHECK(mMergeCounters.mPreShadowRemovalProtocolMerges == - other.mMergeCounters.mPreShadowRemovalProtocolMerges); - CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges == - other.mMergeCounters.mPostShadowRemovalProtocolMerges); - - CHECK(mMergeCounters.mRunningMergeReattachments == - other.mMergeCounters.mRunningMergeReattachments); - CHECK(mMergeCounters.mFinishedMergeReattachments == - other.mMergeCounters.mFinishedMergeReattachments); - - CHECK(mMergeCounters.mNewMetaEntries == - other.mMergeCounters.mNewMetaEntries); - CHECK(mMergeCounters.mNewInitEntries == - other.mMergeCounters.mNewInitEntries); - CHECK(mMergeCounters.mNewLiveEntries == - other.mMergeCounters.mNewLiveEntries); - CHECK(mMergeCounters.mNewDeadEntries == - other.mMergeCounters.mNewDeadEntries); - CHECK(mMergeCounters.mOldMetaEntries == - other.mMergeCounters.mOldMetaEntries); - CHECK(mMergeCounters.mOldInitEntries == - other.mMergeCounters.mOldInitEntries); - CHECK(mMergeCounters.mOldLiveEntries == - other.mMergeCounters.mOldLiveEntries); - CHECK(mMergeCounters.mOldDeadEntries == - other.mMergeCounters.mOldDeadEntries); - - CHECK(mMergeCounters.mOldEntriesDefaultAccepted == - other.mMergeCounters.mOldEntriesDefaultAccepted); - CHECK(mMergeCounters.mNewEntriesDefaultAccepted == - other.mMergeCounters.mNewEntriesDefaultAccepted); - CHECK(mMergeCounters.mNewInitEntriesMergedWithOldDead == - other.mMergeCounters.mNewInitEntriesMergedWithOldDead); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewLive == - other.mMergeCounters.mOldInitEntriesMergedWithNewLive); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead == - other.mMergeCounters.mOldInitEntriesMergedWithNewDead); - CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit == - other.mMergeCounters.mNewEntriesMergedWithOldNeitherInit); - - CHECK(mMergeCounters.mShadowScanSteps == - other.mMergeCounters.mShadowScanSteps); - CHECK(mMergeCounters.mMetaEntryShadowElisions == - other.mMergeCounters.mMetaEntryShadowElisions); - CHECK(mMergeCounters.mLiveEntryShadowElisions == - other.mMergeCounters.mLiveEntryShadowElisions); - CHECK(mMergeCounters.mInitEntryShadowElisions == - other.mMergeCounters.mInitEntryShadowElisions); - CHECK(mMergeCounters.mDeadEntryShadowElisions == - other.mMergeCounters.mDeadEntryShadowElisions); - - CHECK(mMergeCounters.mOutputIteratorTombstoneElisions == - other.mMergeCounters.mOutputIteratorTombstoneElisions); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates == - other.mMergeCounters.mOutputIteratorBufferUpdates); - CHECK(mMergeCounters.mOutputIteratorActualWrites == - other.mMergeCounters.mOutputIteratorActualWrites); + auto checkCountersEqual = [](auto const& counters, + auto const& other) { + CHECK(counters.mPreInitEntryProtocolMerges == + other.mPreInitEntryProtocolMerges); + CHECK(counters.mPostInitEntryProtocolMerges == + other.mPostInitEntryProtocolMerges); + + 
CHECK(counters.mPreShadowRemovalProtocolMerges == + other.mPreShadowRemovalProtocolMerges); + CHECK(counters.mPostShadowRemovalProtocolMerges == + other.mPostShadowRemovalProtocolMerges); + + CHECK(counters.mRunningMergeReattachments == + other.mRunningMergeReattachments); + CHECK(counters.mFinishedMergeReattachments == + other.mFinishedMergeReattachments); + + CHECK(counters.mNewMetaEntries == other.mNewMetaEntries); + CHECK(counters.mNewInitEntries == other.mNewInitEntries); + CHECK(counters.mNewLiveEntries == other.mNewLiveEntries); + CHECK(counters.mNewDeadEntries == other.mNewDeadEntries); + CHECK(counters.mOldMetaEntries == other.mOldMetaEntries); + CHECK(counters.mOldInitEntries == other.mOldInitEntries); + CHECK(counters.mOldLiveEntries == other.mOldLiveEntries); + CHECK(counters.mOldDeadEntries == other.mOldDeadEntries); + + CHECK(counters.mOldEntriesDefaultAccepted == + other.mOldEntriesDefaultAccepted); + CHECK(counters.mNewEntriesDefaultAccepted == + other.mNewEntriesDefaultAccepted); + CHECK(counters.mNewInitEntriesMergedWithOldDead == + other.mNewInitEntriesMergedWithOldDead); + CHECK(counters.mOldInitEntriesMergedWithNewLive == + other.mOldInitEntriesMergedWithNewLive); + CHECK(counters.mOldInitEntriesMergedWithNewDead == + other.mOldInitEntriesMergedWithNewDead); + CHECK(counters.mNewEntriesMergedWithOldNeitherInit == + other.mNewEntriesMergedWithOldNeitherInit); + + CHECK(counters.mShadowScanSteps == other.mShadowScanSteps); + CHECK(counters.mMetaEntryShadowElisions == + other.mMetaEntryShadowElisions); + CHECK(counters.mLiveEntryShadowElisions == + other.mLiveEntryShadowElisions); + CHECK(counters.mInitEntryShadowElisions == + other.mInitEntryShadowElisions); + CHECK(counters.mDeadEntryShadowElisions == + other.mDeadEntryShadowElisions); + + CHECK(counters.mOutputIteratorTombstoneElisions == + other.mOutputIteratorTombstoneElisions); + CHECK(counters.mOutputIteratorBufferUpdates == + other.mOutputIteratorBufferUpdates); + CHECK(counters.mOutputIteratorActualWrites == + other.mOutputIteratorActualWrites); + }; + + checkCountersEqual(mLiveMergeCounters, other.mLiveMergeCounters); + checkCountersEqual(mHotArchiveMergeCounters, + other.mHotArchiveMergeCounters); } + void checkEqual(Survey const& other) const { @@ -907,17 +1188,28 @@ class StopAndRestartBucketMergesTest CHECK(mSnapBucketHash == other.mSnapBucketHash); CHECK(mBucketListHash == other.mBucketListHash); CHECK(mLedgerHeaderHash == other.mLedgerHeaderHash); + CHECK(mHotArchiveBucketListHash == other.mHotArchiveBucketListHash); checkEqualMergeCounters(other); } - Survey(Application& app, uint32_t level) + Survey(Application& app, uint32_t level, uint32_t protocol) { LedgerManager& lm = app.getLedgerManager(); BucketManager& bm = app.getBucketManager(); LiveBucketList& bl = bm.getLiveBucketList(); + HotArchiveBucketList& hotBl = bm.getHotArchiveBucketList(); // Complete those merges we're about to inspect. 
resolveAllMerges(bl); + if (protocolVersionStartsFrom( + protocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + resolveAllMerges(hotBl); + mHotArchiveBucketListHash = hotBl.getHash(); + mHotArchiveMergeCounters = + bm.readMergeCounters(); + } - mMergeCounters = bm.readMergeCounters(); + mLiveMergeCounters = bm.readMergeCounters(); mLedgerHeaderHash = lm.getLastClosedLedgerHeader().hash; mBucketListHash = bl.getHash(); BucketLevel& blv = bl.getLevel(level); @@ -931,13 +1223,20 @@ class StopAndRestartBucketMergesTest std::set mDesignatedLedgers; std::map mControlSurveys; std::map mFinalEntries; + std::map mFinalArchiveEntries; std::vector> mInitEntryBatches; std::vector> mLiveEntryBatches; std::vector> mDeadEntryBatches; + std::vector> mArchiveEntryBatches; + + // Initial entries in Hot Archive BucketList, a "genesis ledger" equivalent + // for Hot Archive + std::vector mHotArchiveInitialBatch; void collectLedgerEntries(Application& app, - std::map& entries) + std::map& liveEntries, + std::map& archiveEntries) { auto bl = app.getBucketManager().getLiveBucketList(); for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i) @@ -951,12 +1250,41 @@ class StopAndRestartBucketMergesTest if (e.type() == LIVEENTRY || e.type() == INITENTRY) { auto le = e.liveEntry(); - entries[LedgerEntryKey(le)] = le; + liveEntries[LedgerEntryKey(le)] = le; } else { assert(e.type() == DEADENTRY); - entries.erase(e.deadEntry()); + liveEntries.erase(e.deadEntry()); + } + } + } + } + + if (protocolVersionStartsFrom( + getAppLedgerVersion(app), + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + HotArchiveBucketList& hotBl = + app.getBucketManager().getHotArchiveBucketList(); + for (uint32_t i = HotArchiveBucketList::kNumLevels; i > 0; --i) + { + BucketLevel const& level = + hotBl.getLevel(i - 1); + for (auto bucket : {level.getSnap(), level.getCurr()}) + { + for (HotArchiveBucketInputIterator bi(bucket); bi; ++bi) + { + auto const& e = *bi; + if (e.type() == HOT_ARCHIVE_LIVE) + { + archiveEntries.erase(e.key()); + } + else + { + archiveEntries[LedgerEntryKey(e.archivedEntry())] = + e.archivedEntry(); + } } } } @@ -966,23 +1294,33 @@ class StopAndRestartBucketMergesTest void collectFinalLedgerEntries(Application& app) { - collectLedgerEntries(app, mFinalEntries); - CLOG_INFO(Bucket, "Collected final ledger state with {} entries.", - mFinalEntries.size()); + collectLedgerEntries(app, mFinalEntries, mFinalArchiveEntries); + CLOG_INFO(Bucket, + "Collected final ledger live state with {} entries, archived " + "state with {} entries", + mFinalEntries.size(), mFinalArchiveEntries.size()); } void checkAgainstFinalLedgerEntries(Application& app) { std::map testEntries; - collectLedgerEntries(app, testEntries); - CLOG_INFO(Bucket, "Collected test ledger state with {} entries.", - testEntries.size()); + std::map testArchiveEntries; + collectLedgerEntries(app, testEntries, testArchiveEntries); + CLOG_INFO(Bucket, + "Collected test ledger state with {} live entries, {} " + "archived entries", + testEntries.size(), testArchiveEntries.size()); CHECK(testEntries.size() == mFinalEntries.size()); + CHECK(testArchiveEntries.size() == mFinalArchiveEntries.size()); for (auto const& pair : testEntries) { CHECK(mFinalEntries[pair.first] == pair.second); } + for (auto const& pair : testArchiveEntries) + { + CHECK(mFinalArchiveEntries[pair.first] == pair.second); + } } void @@ -1066,10 +1404,37 @@ class StopAndRestartBucketMergesTest "Collecting control surveys in ledger range 2..{} = {:#x}", finalLedger,
finalLedger); auto app = createTestApplication(clock, cfg); + auto hasHotArchive = protocolVersionStartsFrom( + mProtocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); std::vector allKeys; std::map currLive; std::map currDead; + std::map currArchive; + + // To prevent duplicate merges that can interfere with counters, seed + // the starting Bucket so that each merge is unique. Otherwise, the + // first call to addBatch will merge [{first_batch}, empty_bucket]. We + // will then see other instances of [{first_batch}, empty_bucket] merges + // later on as the Bucket moves its way down the bl. By providing a + // seeded bucket, the first addBatch is a [{first_batch}, seeded_bucket] + // merge, which will not be duplicated by empty bucket merges later. The + // live BL is automatically seeded with the genesis ledger. + if (hasHotArchive) + { + UnorderedSet empty; + mHotArchiveInitialBatch = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, empty); + app->getBucketManager() + .getHotArchiveBucketList() + .getLevel(0) + .setCurr(HotArchiveBucket::fresh( + app->getBucketManager(), mProtocol, {}, + mHotArchiveInitialBatch, {}, {}, + app->getClock().getIOContext(), /*doFsync=*/true)); + } for (uint32_t i = 2; !app->getClock().getIOContext().stopped() && i < finalLedger; ++i) @@ -1078,6 +1443,7 @@ class StopAndRestartBucketMergesTest std::vector initEntries; std::vector liveEntries; std::vector deadEntries; + std::vector archiveEntries; if (mInitEntryBatches.size() > 2) { std::set changedEntries; @@ -1143,6 +1509,22 @@ class StopAndRestartBucketMergesTest allKeys.emplace_back(k); currLive.emplace(std::make_pair(k, e)); } + auto newRandomArchive = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_CODE}, nEntriesInBatch); + for (auto const& e : newRandomArchive) + { + auto k = LedgerEntryKey(e); + auto [iter, inserted] = + currArchive.emplace(std::make_pair(k, e)); + + // only insert new entries to Archive BucketList + if (inserted) + { + archiveEntries.emplace_back(e); + } + } + mInitEntryBatches.emplace_back(initEntries); mLiveEntryBatches.emplace_back(liveEntries); mDeadEntryBatches.emplace_back(deadEntries); @@ -1150,13 +1532,20 @@ class StopAndRestartBucketMergesTest lm.setNextLedgerEntryBatchForBucketTesting( mInitEntryBatches.back(), mLiveEntryBatches.back(), mDeadEntryBatches.back()); + if (hasHotArchive) + { + mArchiveEntryBatches.emplace_back(archiveEntries); + lm.setNextArchiveBatchForBucketTesting( + mArchiveEntryBatches.back(), {}, {}); + } + closeLedger(*app); assert(i == lm.getLastClosedLedgerHeader().header.ledgerSeq); if (shouldSurveyLedger(i)) { CLOG_INFO(Bucket, "Taking survey at {} = {:#x}", i, i); - mControlSurveys.insert( - std::make_pair(i, Survey(*app, mDesignatedLevel))); + mControlSurveys.insert(std::make_pair( + i, Survey(*app, mDesignatedLevel, mProtocol))); } } @@ -1189,6 +1578,20 @@ class StopAndRestartBucketMergesTest CLOG_INFO(Bucket, "Running stop/restart test in ledger range 2..{} = {:#x}", finalLedger, finalLedger2); + + if (protocolVersionStartsFrom( + firstProtocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + app->getBucketManager() + .getHotArchiveBucketList() + .getLevel(0) + .setCurr(HotArchiveBucket::fresh( + app->getBucketManager(), mProtocol, {}, + mHotArchiveInitialBatch, {}, {}, + app->getClock().getIOContext(), /*doFsync=*/true)); + } + for (uint32_t i = 2; !app->getClock().getIOContext().stopped() && i < finalLedger; ++i) { @@ -1197,9 +1600,21 @@ class 
StopAndRestartBucketMergesTest mInitEntryBatches[i - 2], mLiveEntryBatches[i - 2], mDeadEntryBatches[i - 2]); resolveAllMerges(app->getBucketManager().getLiveBucketList()); - auto countersBeforeClose = - app->getBucketManager().readMergeCounters(); + if (protocolVersionStartsFrom( + firstProtocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + lm.setNextArchiveBatchForBucketTesting( + mArchiveEntryBatches[i - 2], {}, {}); + resolveAllMerges( + app->getBucketManager().getHotArchiveBucketList()); + } + + auto liveCountersBeforeClose = + app->getBucketManager().readMergeCounters(); + auto archiveCountersBeforeClose = + app->getBucketManager().readMergeCounters(); if (firstProtocol != secondProtocol && i == protocolSwitchLedger) { CLOG_INFO(Bucket, @@ -1234,12 +1649,25 @@ class StopAndRestartBucketMergesTest BucketLevel& blv = bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); + if (protocolVersionStartsFrom( + currProtocol, + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + HotArchiveBucketList& hotBl = + app->getBucketManager().getHotArchiveBucketList(); + BucketLevel& hotBlv = + hotBl.getLevel(mDesignatedLevel); + REQUIRE(hotBlv.getNext().isMerging()); + } } if (currProtocol == firstProtocol) { + resolveAllMerges( + app->getBucketManager().getHotArchiveBucketList()); // Check that the survey matches expectations. - Survey s(*app, mDesignatedLevel); + Survey s(*app, mDesignatedLevel, currProtocol); s.checkEqual(j->second); } @@ -1268,17 +1696,31 @@ class StopAndRestartBucketMergesTest BucketLevel& blv = bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); + if (protocolVersionStartsFrom( + currProtocol, + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + HotArchiveBucketList& hotBl = + app->getBucketManager().getHotArchiveBucketList(); + BucketLevel& hotBlv = + hotBl.getLevel(mDesignatedLevel); + REQUIRE(hotBlv.getNext().isMerging()); + } } // If there are restarted merges, we need to reset the counters // to the values they had _before_ the ledger-close so the // restarted merges don't count twice. - app->getBucketManager().incrMergeCounters(countersBeforeClose); + app->getBucketManager().incrMergeCounters( + liveCountersBeforeClose); + app->getBucketManager().incrMergeCounters( + archiveCountersBeforeClose); if (currProtocol == firstProtocol) { // Re-check that the survey matches expectations. 
- Survey s2(*app, mDesignatedLevel); + Survey s2(*app, mDesignatedLevel, currProtocol); s2.checkEqual(j->second); } } @@ -1303,16 +1745,16 @@ class StopAndRestartBucketMergesTest LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { mControlSurveys.rbegin()->second.dumpMergeCounters( - "control, Post-INITENTRY", mDesignatedLevel); + "control, Post-INITENTRY", mDesignatedLevel, mProtocol); mControlSurveys.rbegin() ->second.checkSensiblePostInitEntryMergeCounters(mProtocol); } else { mControlSurveys.rbegin()->second.dumpMergeCounters( - "control, Pre-INITENTRY", mDesignatedLevel); + "control, Pre-INITENTRY", mDesignatedLevel, mProtocol); mControlSurveys.rbegin() - ->second.checkSensiblePreInitEntryMergeCounters(); + ->second.checkSensiblePreInitEntryMergeCounters(mProtocol); } runStopAndRestartTest(mProtocol, mProtocol); runStopAndRestartTest(mProtocol, mProtocol + 1); @@ -1328,7 +1770,13 @@ TEST_CASE("bucket persistence over app restart with initentry", 1, static_cast( LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + , + static_cast( + HotArchiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION) +#endif + }) { for (uint32_t level : {2, 3}) { @@ -1348,7 +1796,13 @@ TEST_CASE("bucket persistence over app restart with initentry - extended", 1, static_cast( LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + , + static_cast( + HotArchiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION) +#endif + }) { for (uint32_t level : {2, 3, 4, 5}) { diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp index c5b79d9f04..bf2d4e1304 100644 --- a/src/bucket/test/BucketTestUtils.cpp +++ b/src/bucket/test/BucketTestUtils.cpp @@ -84,7 +84,13 @@ for_versions_with_differing_bucket_logic( 1, static_cast( LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}, + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + , + static_cast( + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION) +#endif + }, cfg, f); } @@ -251,9 +257,16 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList( restoredKeys.emplace_back(key); } } + mTestRestoredEntries.insert(mTestRestoredEntries.end(), + restoredKeys.begin(), + restoredKeys.end()); + mTestArchiveEntries.insert( + mTestArchiveEntries.end(), + evictedState.archivedEntries.begin(), + evictedState.archivedEntries.end()); mApp.getBucketManager().addHotArchiveBatch( - mApp, lh, evictedState.archivedEntries, restoredKeys, - {}); + mApp, lh, mTestArchiveEntries, mTestRestoredEntries, + mTestDeletedEntries); } if (ledgerCloseMeta) @@ -284,7 +297,14 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList( // Use the testing values. 
mApp.getBucketManager().addLiveBatch( mApp, lh, mTestInitEntries, mTestLiveEntries, mTestDeadEntries); + mUseTestEntries = false; + mTestInitEntries.clear(); + mTestLiveEntries.clear(); + mTestDeadEntries.clear(); + mTestArchiveEntries.clear(); + mTestRestoredEntries.clear(); + mTestDeletedEntries.clear(); } else { diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h index 62aa7265b5..3839344a31 100644 --- a/src/bucket/test/BucketTestUtils.h +++ b/src/bucket/test/BucketTestUtils.h @@ -66,6 +66,10 @@ class LedgerManagerForBucketTests : public LedgerManagerImpl std::vector mTestLiveEntries; std::vector mTestDeadEntries; + std::vector mTestArchiveEntries; + std::vector mTestRestoredEntries; + std::vector mTestDeletedEntries; + protected: void transferLedgerEntriesToBucketList( AbstractLedgerTxn& ltx, @@ -85,6 +89,18 @@ class LedgerManagerForBucketTests : public LedgerManagerImpl mTestDeadEntries = deadEntries; } + void + setNextArchiveBatchForBucketTesting( + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) + { + mUseTestEntries = true; + mTestArchiveEntries = archiveEntries; + mTestRestoredEntries = restoredEntries; + mTestDeletedEntries = deletedEntries; + } + LedgerManagerForBucketTests(Application& app) : LedgerManagerImpl(app) { } diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index 30769476ca..97f447bab5 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -190,11 +190,15 @@ ApplyBucketsWork::doWork() { ZoneScoped; - // Step 1: index buckets. Step 2: apply buckets. Step 3: assume state + // Step 1: index live buckets. Step 2: apply buckets. Step 3: assume state if (!mIndexBucketsWork) { - // Spawn indexing work for the first time - mIndexBucketsWork = addWork(mBucketsToApply); + // Spawn indexing work for the first time. Hot Archive buckets aren't + // needed for apply (since we only store live state in SQL tables), so + // for now only index the live BL. AssumeStateWork will take care of + // indexing hot archive buckets later.
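Note: the template arguments are stripped in the rendering of the hunks above and below; the following is a hedged reconstruction of the indexing split, with the IndexBucketsWork parameters inferred from the forward declarations in ApplyBucketsWork.h and AssumeStateWork.h rather than taken verbatim from the patch.

// In ApplyBucketsWork::doWork(): only live buckets are indexed up front,
// because apply only writes live state to SQL (reconstruction, not verbatim).
mIndexBucketsWork = addWork<IndexBucketsWork<LiveBucket>>(mBucketsToApply);

// In AssumeStateWork::doWork(): both lists are indexed before state is
// assumed; the hot-archive pass is gated on the vnext build flag.
seq.push_back(
    std::make_shared<IndexBucketsWork<LiveBucket>>(mApp, mLiveBuckets));
#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
seq.push_back(std::make_shared<IndexBucketsWork<HotArchiveBucket>>(
    mApp, mHotArchiveBuckets));
#endif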
+ mIndexBucketsWork = + addWork>(mBucketsToApply); return State::WORK_RUNNING; } diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h index e2eab2c518..a5d210157c 100644 --- a/src/catchup/ApplyBucketsWork.h +++ b/src/catchup/ApplyBucketsWork.h @@ -14,7 +14,8 @@ namespace stellar class AssumeStateWork; class LiveBucketList; class Bucket; -class IndexBucketsWork; +template class IndexBucketsWork; +class LiveBucket; struct HistoryArchiveState; struct LedgerHeaderHistoryEntry; @@ -25,7 +26,7 @@ class ApplyBucketsWork : public Work bool mSpawnedAssumeStateWork{false}; std::shared_ptr mAssumeStateWork{}; - std::shared_ptr mIndexBucketsWork{}; + std::shared_ptr> mIndexBucketsWork{}; size_t mTotalBuckets{0}; size_t mAppliedBuckets{0}; size_t mAppliedEntries{0}; diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp index 9460d0fb03..2b63f8635e 100644 --- a/src/catchup/AssumeStateWork.cpp +++ b/src/catchup/AssumeStateWork.cpp @@ -26,34 +26,52 @@ AssumeStateWork::AssumeStateWork(Application& app, // Maintain reference to all Buckets in HAS to avoid garbage collection, // including future buckets that have already finished merging auto& bm = mApp.getBucketManager(); - for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) - { - auto curr = bm.getBucketByHash( - hexToBin256(mHas.currentBuckets.at(i).curr)); - auto snap = bm.getBucketByHash( - hexToBin256(mHas.currentBuckets.at(i).snap)); - if (!(curr && snap)) + auto processBuckets = [&](auto const& hasBuckets, size_t expectedLevels, + auto& workBuckets) { + releaseAssert(hasBuckets.size() == expectedLevels); + using BucketT = typename std::decay_t< + decltype(hasBuckets)>::value_type::bucket_type; + for (uint32_t i = 0; i < expectedLevels; ++i) { - throw std::runtime_error("Missing bucket files while " - "assuming saved BucketList state"); - } - - mBuckets.emplace_back(curr); - mBuckets.emplace_back(snap); - auto& nextFuture = mHas.currentBuckets.at(i).next; - if (nextFuture.hasOutputHash()) - { - auto nextBucket = bm.getBucketByHash( - hexToBin256(nextFuture.getOutputHash())); - if (!nextBucket) + auto curr = + bm.getBucketByHash(hexToBin256(hasBuckets.at(i).curr)); + auto snap = + bm.getBucketByHash(hexToBin256(hasBuckets.at(i).snap)); + if (!(curr && snap)) { - throw std::runtime_error("Missing future bucket files while " + throw std::runtime_error("Missing bucket files while " "assuming saved BucketList state"); } - mBuckets.emplace_back(nextBucket); + workBuckets.emplace_back(curr); + workBuckets.emplace_back(snap); + auto& nextFuture = hasBuckets.at(i).next; + if (nextFuture.hasOutputHash()) + { + auto nextBucket = bm.getBucketByHash( + hexToBin256(nextFuture.getOutputHash())); + if (!nextBucket) + { + throw std::runtime_error( + "Missing future bucket files while " + "assuming saved BucketList state"); + } + + workBuckets.emplace_back(nextBucket); + } } + }; + + processBuckets(mHas.currentBuckets, LiveBucketList::kNumLevels, + mLiveBuckets); + +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + if (has.hasHotArchiveBuckets()) + { + processBuckets(mHas.hotArchiveBuckets, HotArchiveBucketList::kNumLevels, + mHotArchiveBuckets); } +#endif } BasicWork::State @@ -64,19 +82,27 @@ AssumeStateWork::doWork() std::vector> seq; // Index Bucket files - seq.push_back(std::make_shared(mApp, mBuckets)); + seq.push_back( + std::make_shared>(mApp, mLiveBuckets)); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + seq.push_back(std::make_shared>( + mApp, mHotArchiveBuckets)); +#endif // Add 
bucket files to BucketList and restart merges auto assumeStateCB = [&has = mHas, maxProtocolVersion = mMaxProtocolVersion, restartMerges = mRestartMerges, - &buckets = mBuckets](Application& app) { + &liveBuckets = mLiveBuckets, + &hotArchiveBuckets = + mHotArchiveBuckets](Application& app) { app.getBucketManager().assumeState(has, maxProtocolVersion, restartMerges); // Drop bucket references once assume state complete since buckets // now referenced by BucketList - buckets.clear(); + liveBuckets.clear(); + hotArchiveBuckets.clear(); // Check invariants after state has been assumed app.getInvariantManager().checkAfterAssumeState(has.currentLedger); diff --git a/src/catchup/AssumeStateWork.h b/src/catchup/AssumeStateWork.h index 92dc4b903c..88cfcf9299 100644 --- a/src/catchup/AssumeStateWork.h +++ b/src/catchup/AssumeStateWork.h @@ -12,6 +12,7 @@ namespace stellar class Bucket; struct HistoryArchiveState; class LiveBucket; +class HotArchiveBucket; class AssumeStateWork : public Work { @@ -22,7 +23,8 @@ class AssumeStateWork : public Work // Keep strong reference to buckets in HAS so they are not garbage // collected during indexing - std::vector> mBuckets{}; + std::vector> mLiveBuckets{}; + std::vector> mHotArchiveBuckets{}; public: AssumeStateWork(Application& app, HistoryArchiveState const& has, diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp index 0902a044a9..e3acc0a94c 100644 --- a/src/catchup/CatchupWork.cpp +++ b/src/catchup/CatchupWork.cpp @@ -75,7 +75,6 @@ setHerderStateTo(FileTransferInfo const& ft, uint32_t ledger, Application& app) CatchupWork::CatchupWork(Application& app, CatchupConfiguration catchupConfiguration, - std::set> bucketsToRetain, std::shared_ptr archive) : Work(app, "catchup", BasicWork::RETRY_NEVER) , mLocalState{app.getLedgerManager().getLastClosedLedgerHAS()} @@ -83,7 +82,6 @@ CatchupWork::CatchupWork(Application& app, mApp.getTmpDirManager().tmpDir(getName()))} , mCatchupConfiguration{catchupConfiguration} , mArchive{archive} - , mRetainedBuckets{bucketsToRetain} { if (mArchive) { @@ -126,7 +124,8 @@ CatchupWork::doReset() ZoneScoped; mBucketsAppliedEmitted = false; mTransactionsVerifyEmitted = false; - mBuckets.clear(); + mLiveBuckets.clear(); + mHotBuckets.clear(); mDownloadVerifyLedgersSeq.reset(); mBucketVerifyApplySeq.reset(); mTransactionsVerifyApplySeq.reset(); @@ -143,7 +142,6 @@ CatchupWork::doReset() mCurrentWork.reset(); mHAS.reset(); mBucketHAS.reset(); - mRetainedBuckets.clear(); } void @@ -216,10 +214,10 @@ CatchupWork::downloadApplyBuckets() // Download buckets, or skip if catchup is local if (!mCatchupConfiguration.localBucketsOnly()) { - std::vector hashes = - mBucketHAS->differingBuckets(mLocalState); + auto hashes = mBucketHAS->differingBuckets(mLocalState); auto getBuckets = std::make_shared( - mApp, mBuckets, hashes, *mDownloadDir, mArchive); + mApp, mLiveBuckets, mHotBuckets, hashes.live, hashes.hot, + *mDownloadDir, mArchive); seq.push_back(getBuckets); auto verifyHASCallback = [has = *mBucketHAS](Application& app) { @@ -237,7 +235,7 @@ CatchupWork::downloadApplyBuckets() } auto applyBuckets = std::make_shared( - mApp, mBuckets, *mBucketHAS, version); + mApp, mLiveBuckets, *mBucketHAS, version); seq.push_back(applyBuckets); return std::make_shared(mApp, "download-verify-apply-buckets", seq, RETRY_NEVER); @@ -497,7 +495,8 @@ CatchupWork::runCatchupStep() mVerifiedLedgerRangeStart, !mCatchupConfiguration.localBucketsOnly()); mBucketsAppliedEmitted = true; - mBuckets.clear(); + mLiveBuckets.clear(); + 
mHotBuckets.clear(); mLastApplied = mApp.getLedgerManager().getLastClosedLedgerHeader(); diff --git a/src/catchup/CatchupWork.h b/src/catchup/CatchupWork.h index 45c68a62ca..34c4330da4 100644 --- a/src/catchup/CatchupWork.h +++ b/src/catchup/CatchupWork.h @@ -47,7 +47,8 @@ class CatchupWork : public Work protected: HistoryArchiveState mLocalState; std::unique_ptr mDownloadDir; - std::map> mBuckets; + std::map> mLiveBuckets; + std::map> mHotBuckets; void doReset() override; BasicWork::State doWork() override; @@ -65,7 +66,6 @@ class CatchupWork : public Work static uint32_t const PUBLISH_QUEUE_MAX_SIZE; CatchupWork(Application& app, CatchupConfiguration catchupConfiguration, - std::set> bucketsToRetain, std::shared_ptr archive = nullptr); virtual ~CatchupWork(); std::string getStatus() const override; @@ -128,6 +128,5 @@ class CatchupWork : public Work std::optional mHAS; std::optional mBucketHAS; - std::set> mRetainedBuckets; }; } diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index 49b7a29fe4..fde6955eb7 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -5,6 +5,7 @@ #include "IndexBucketsWork.h" #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" +#include "bucket/HotArchiveBucket.h" #include "bucket/LiveBucket.h" #include "util/Fs.h" #include "util/Logging.h" @@ -13,14 +14,16 @@ namespace stellar { -IndexBucketsWork::IndexWork::IndexWork(Application& app, - std::shared_ptr b) +template +IndexBucketsWork::IndexWork::IndexWork(Application& app, + std::shared_ptr b) : BasicWork(app, "index-work", BasicWork::RETRY_NEVER), mBucket(b) { } +template BasicWork::State -IndexBucketsWork::IndexWork::onRun() +IndexBucketsWork::IndexWork::onRun() { if (mState == State::WORK_WAITING) { @@ -30,20 +33,23 @@ IndexBucketsWork::IndexWork::onRun() return mState; } +template bool -IndexBucketsWork::IndexWork::onAbort() +IndexBucketsWork::IndexWork::onAbort() { return true; }; +template void -IndexBucketsWork::IndexWork::onReset() +IndexBucketsWork::IndexWork::onReset() { mState = BasicWork::State::WORK_WAITING; } +template void -IndexBucketsWork::IndexWork::postWork() +IndexBucketsWork::IndexWork::postWork() { Application& app = this->mApp; asio::io_context& ctx = app.getWorkerIOContext(); @@ -85,8 +91,7 @@ IndexBucketsWork::IndexWork::postWork() if (!self->mIndex) { - // TODO: Fix this when archive BucketLists assume state - self->mIndex = BucketIndex::createIndex( + self->mIndex = BucketIndex::createIndex( bm, self->mBucket->getFilename(), self->mBucket->getHash(), ctx); } @@ -117,14 +122,16 @@ IndexBucketsWork::IndexWork::postWork() "IndexWork: starting in background"); } -IndexBucketsWork::IndexBucketsWork( - Application& app, std::vector> const& buckets) +template +IndexBucketsWork::IndexBucketsWork( + Application& app, std::vector> const& buckets) : Work(app, "index-bucketList", BasicWork::RETRY_NEVER), mBuckets(buckets) { } +template BasicWork::State -IndexBucketsWork::doWork() +IndexBucketsWork::doWork() { if (!mWorkSpawned) { @@ -134,17 +141,19 @@ IndexBucketsWork::doWork() return checkChildrenStatus(); } +template void -IndexBucketsWork::doReset() +IndexBucketsWork::doReset() { mWorkSpawned = false; } +template void -IndexBucketsWork::spawnWork() +IndexBucketsWork::spawnWork() { UnorderedSet indexedBuckets; - auto spawnIndexWork = [&](std::shared_ptr const& b) { + auto spawnIndexWork = [&](auto const& b) { // Don't index empty bucket or buckets that are already being // indexed. 
Sometimes one level's snap bucket may be another // level's future bucket. The indexing job may have started but @@ -167,4 +176,7 @@ IndexBucketsWork::spawnWork() mWorkSpawned = true; } -} \ No newline at end of file + +template class IndexBucketsWork; +template class IndexBucketsWork; +} diff --git a/src/catchup/IndexBucketsWork.h b/src/catchup/IndexBucketsWork.h index 08415387ee..749fbf139f 100644 --- a/src/catchup/IndexBucketsWork.h +++ b/src/catchup/IndexBucketsWork.h @@ -14,20 +14,19 @@ namespace stellar class Bucket; class BucketIndex; class BucketManager; -class LiveBucket; -class IndexBucketsWork : public Work +template class IndexBucketsWork : public Work { class IndexWork : public BasicWork { - std::shared_ptr mBucket; + std::shared_ptr mBucket; std::unique_ptr mIndex; BasicWork::State mState{BasicWork::State::WORK_WAITING}; void postWork(); public: - IndexWork(Application& app, std::shared_ptr b); + IndexWork(Application& app, std::shared_ptr b); protected: State onRun() override; @@ -35,14 +34,14 @@ class IndexBucketsWork : public Work void onReset() override; }; - std::vector> const& mBuckets; + std::vector> const& mBuckets; bool mWorkSpawned{false}; void spawnWork(); public: IndexBucketsWork(Application& app, - std::vector> const& buckets); + std::vector> const& buckets); protected: State doWork() override; diff --git a/src/catchup/LedgerApplyManager.h b/src/catchup/LedgerApplyManager.h index 9a3987eee5..959cf2e07e 100644 --- a/src/catchup/LedgerApplyManager.h +++ b/src/catchup/LedgerApplyManager.h @@ -68,10 +68,8 @@ class LedgerApplyManager // LedgerManager detects it is desynchronized from SCP's consensus ledger. // This method is present in the public interface to permit testing and // offline catchups. - virtual void - startCatchup(CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) = 0; + virtual void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) = 0; // Return status of catchup for or empty string, if no catchup in progress virtual std::string getStatus() const = 0; diff --git a/src/catchup/LedgerApplyManagerImpl.cpp b/src/catchup/LedgerApplyManagerImpl.cpp index b24369a64e..98d142d313 100644 --- a/src/catchup/LedgerApplyManagerImpl.cpp +++ b/src/catchup/LedgerApplyManagerImpl.cpp @@ -266,9 +266,8 @@ LedgerApplyManagerImpl::processLedger(LedgerCloseData const& ledgerData, } void -LedgerApplyManagerImpl::startCatchup( - CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) +LedgerApplyManagerImpl::startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) { ZoneScoped; releaseAssert(threadIsMain()); @@ -293,7 +292,7 @@ LedgerApplyManagerImpl::startCatchup( // NB: if WorkScheduler is aborting this returns nullptr, // which means we don't "really" start catchup. 
mCatchupWork = mApp.getWorkScheduler().scheduleWork( - configuration, bucketsToRetain, archive); + configuration, archive); } std::string @@ -445,7 +444,7 @@ LedgerApplyManagerImpl::startOnlineCatchup() auto hash = std::make_optional(lcd.getTxSet()->previousLedgerHash()); startCatchup({LedgerNumHashPair(firstBufferedLedgerSeq - 1, hash), getCatchupCount(), CatchupConfiguration::Mode::ONLINE}, - nullptr, {}); + nullptr); } void diff --git a/src/catchup/LedgerApplyManagerImpl.h b/src/catchup/LedgerApplyManagerImpl.h index 8e140d27f4..6bc4bbb6f0 100644 --- a/src/catchup/LedgerApplyManagerImpl.h +++ b/src/catchup/LedgerApplyManagerImpl.h @@ -87,10 +87,8 @@ class LedgerApplyManagerImpl : public LedgerApplyManager ProcessLedgerResult processLedger(LedgerCloseData const& ledgerData, bool isLatestSlot) override; - void startCatchup( - CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) override; + void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) override; std::string getStatus() const override; diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp index c8f17f20b8..dd239bbd67 100644 --- a/src/herder/test/UpgradesTests.cpp +++ b/src/herder/test/UpgradesTests.cpp @@ -2070,7 +2070,7 @@ TEST_CASE("upgrade to version 11", "[upgrades]") std::this_thread::sleep_for(std::chrono::milliseconds(10)); bl.resolveAnyReadyFutures(); } - auto mc = bm.readMergeCounters(); + auto mc = bm.readMergeCounters(); CLOG_INFO(Bucket, "Ledger {} did {} old-protocol merges, {} new-protocol " @@ -2193,7 +2193,7 @@ TEST_CASE("upgrade to version 12", "[upgrades]") std::this_thread::sleep_for(std::chrono::milliseconds(10)); bl.resolveAnyReadyFutures(); } - auto mc = bm.readMergeCounters(); + auto mc = bm.readMergeCounters(); if (ledgerSeq < 5) { diff --git a/src/history/FileTransferInfo.h b/src/history/FileTransferInfo.h index 348d47de89..bc4dfaf482 100644 --- a/src/history/FileTransferInfo.h +++ b/src/history/FileTransferInfo.h @@ -37,11 +37,13 @@ class FileTransferInfo std::string getLocalDir(TmpDir const& localRoot) const; public: - FileTransferInfo(LiveBucket const& bucket) + template + FileTransferInfo(BucketT const& bucket) : mType(FileType::HISTORY_FILE_TYPE_BUCKET) , mHexDigits(binToHex(bucket.getHash())) , mLocalPath(bucket.getFilename().string()) { + BUCKET_TYPE_ASSERT(BucketT); } FileTransferInfo(TmpDir const& snapDir, FileType const& snapType, diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp index a5b345bf82..1cface7815 100644 --- a/src/history/HistoryArchive.cpp +++ b/src/history/HistoryArchive.cpp @@ -8,6 +8,7 @@ #include "util/asio.h" #include "history/HistoryArchive.h" #include "bucket/BucketManager.h" +#include "bucket/HotArchiveBucketList.h" #include "bucket/LiveBucket.h" #include "bucket/LiveBucketList.h" #include "crypto/Hex.h" @@ -35,8 +36,6 @@ namespace stellar { -unsigned const HistoryArchiveState::HISTORY_ARCHIVE_STATE_VERSION = 1; - template std::string formatString(std::string const& templateString, Tokens const&... 
tokens) @@ -65,15 +64,33 @@ HistoryArchiveState::futuresAllResolved() const return false; } } + + for (auto const& level : hotArchiveBuckets) + { + if (level.next.isMerging()) + { + return false; + } + } return true; } bool HistoryArchiveState::futuresAllClear() const { - return std::all_of( - currentBuckets.begin(), currentBuckets.end(), - [](HistoryStateBucket const& bl) { return bl.next.isClear(); }); + if (!std::all_of(currentBuckets.begin(), currentBuckets.end(), + [](auto const& bl) { return bl.next.isClear(); })) + { + return false; + } + + if (hasHotArchiveBuckets()) + { + return std::all_of(hotArchiveBuckets.begin(), hotArchiveBuckets.end(), + [](auto const& bl) { return bl.next.isClear(); }); + } + + return true; } void @@ -87,19 +104,32 @@ HistoryArchiveState::resolveAllFutures() level.next.resolve(); } } + + for (auto& level : hotArchiveBuckets) + { + if (level.next.isMerging()) + { + level.next.resolve(); + } + } } void HistoryArchiveState::resolveAnyReadyFutures() { ZoneScoped; - for (auto& level : currentBuckets) - { - if (level.next.isMerging() && level.next.mergeComplete()) + auto resolveMerged = [](auto& buckets) { + for (auto& level : buckets) { - level.next.resolve(); + if (level.next.isMerging() && level.next.mergeComplete()) + { + level.next.resolve(); + } } - } + }; + + resolveMerged(currentBuckets); + resolveMerged(hotArchiveBuckets); } void @@ -141,7 +171,8 @@ HistoryArchiveState::load(std::string const& inFile) in.exceptions(std::ios::badbit); cereal::JSONInputArchive ar(in); serialize(ar); - if (version != HISTORY_ARCHIVE_STATE_VERSION) + if (version != HISTORY_ARCHIVE_STATE_VERSION_PRE_PROTOCOL_22 && + version != HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22) { CLOG_ERROR(History, "Unexpected history archive state version: {}", version); @@ -210,18 +241,31 @@ HistoryArchiveState::getBucketListHash() const // relatively-different representations. Everything will explode if there is // any difference in these algorithms anyways, so.. 
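Note for readers skimming the hunk below: when hot-archive buckets are present, the HAS bucket-list hash nests per-list digests under one outer digest. A standalone sketch of that layout follows, with a placeholder hash function standing in for SHA256; it illustrates the scheme, not the exact implementation.

#include <functional>
#include <string>
#include <vector>

// Placeholder for SHA256, for illustration only.
static std::string
digest(std::string const& data)
{
    return std::to_string(std::hash<std::string>{}(data));
}

struct LevelHashes
{
    std::string curr;
    std::string snap;
};

// Per list: each level contributes digest(curr || snap), and all level digests
// are folded into one list-level digest.
static std::string
hashOneList(std::vector<LevelHashes> const& levels)
{
    std::string acc;
    for (auto const& l : levels)
    {
        acc += digest(l.curr + l.snap);
    }
    return digest(acc);
}

// v1 HAS: hash == hashOneList(currentBuckets)
// v2 HAS: hash == digest(hashOneList(currentBuckets) +
//                        hashOneList(hotArchiveBuckets))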
- SHA256 totalHash; - for (auto const& level : currentBuckets) + auto hashBuckets = [](auto const& buckets) { + SHA256 hash; + for (auto const& level : buckets) + { + SHA256 levelHash; + levelHash.add(hexToBin(level.curr)); + levelHash.add(hexToBin(level.snap)); + hash.add(levelHash.finish()); + } + + return hash.finish(); + }; + + if (hasHotArchiveBuckets()) { - SHA256 levelHash; - levelHash.add(hexToBin(level.curr)); - levelHash.add(hexToBin(level.snap)); - totalHash.add(levelHash.finish()); + SHA256 hash; + hash.add(hashBuckets(currentBuckets)); + hash.add(hashBuckets(hotArchiveBuckets)); + return hash.finish(); } - return totalHash.finish(); + + return hashBuckets(currentBuckets); } -std::vector +HistoryArchiveState::BucketHashReturnT HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const { ZoneScoped; @@ -229,40 +273,48 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const std::set inhibit; uint256 zero; inhibit.insert(binToHex(zero)); - for (auto b : other.currentBuckets) - { - inhibit.insert(b.curr); - if (b.next.isLive()) + auto processBuckets = [&inhibit](auto const& buckets, + auto const& otherBuckets) { + std::vector ret; + for (auto b : otherBuckets) { - b.next.resolve(); - } - if (b.next.hasOutputHash()) - { - inhibit.insert(b.next.getOutputHash()); - } - inhibit.insert(b.snap); - } - std::vector ret; - for (size_t i = LiveBucketList::kNumLevels; i != 0; --i) - { - auto s = currentBuckets[i - 1].snap; - auto n = s; - if (currentBuckets[i - 1].next.hasOutputHash()) - { - n = currentBuckets[i - 1].next.getOutputHash(); + inhibit.insert(b.curr); + if (b.next.isLive()) + { + b.next.resolve(); + } + if (b.next.hasOutputHash()) + { + inhibit.insert(b.next.getOutputHash()); + } + inhibit.insert(b.snap); } - auto c = currentBuckets[i - 1].curr; - auto bs = {s, n, c}; - for (auto const& j : bs) + + for (size_t i = buckets.size(); i != 0; --i) { - if (inhibit.find(j) == inhibit.end()) + auto s = buckets[i - 1].snap; + auto n = s; + if (buckets[i - 1].next.hasOutputHash()) { - ret.push_back(j); - inhibit.insert(j); + n = buckets[i - 1].next.getOutputHash(); + } + auto c = buckets[i - 1].curr; + auto bs = {s, n, c}; + for (auto const& j : bs) + { + if (inhibit.find(j) == inhibit.end()) + { + ret.push_back(j); + inhibit.insert(j); + } } } - } - return ret; + return ret; + }; + + auto liveHashes = processBuckets(currentBuckets, other.currentBuckets); + auto hotHashes = processBuckets(hotArchiveBuckets, other.hotArchiveBuckets); + return BucketHashReturnT(std::move(liveHashes), std::move(hotHashes)); } std::vector @@ -270,13 +322,18 @@ HistoryArchiveState::allBuckets() const { ZoneScoped; std::set buckets; - for (auto const& level : currentBuckets) - { - buckets.insert(level.curr); - buckets.insert(level.snap); - auto nh = level.next.getHashes(); - buckets.insert(nh.begin(), nh.end()); - } + auto processBuckets = [&buckets](auto const& bucketList) { + for (auto const& level : bucketList) + { + buckets.insert(level.curr); + buckets.insert(level.snap); + auto nh = level.next.getHashes(); + buckets.insert(nh.begin(), nh.end()); + } + }; + + processBuckets(currentBuckets); + processBuckets(hotArchiveBuckets); return std::vector(buckets.begin(), buckets.end()); } @@ -285,91 +342,123 @@ HistoryArchiveState::containsValidBuckets(Application& app) const { ZoneScoped; // This function assumes presence of required buckets to verify state - uint32_t minBucketVersion = 0; - bool nonEmptySeen = false; - auto validateBucketVersion = [&](uint32_t 
bucketVersion) { - if (bucketVersion < minBucketVersion) - { - CLOG_ERROR(History, - "Incompatible bucket versions: expected version " - "{} or higher, got {}", - minBucketVersion, bucketVersion); - return false; - } - minBucketVersion = bucketVersion; - return true; - }; + auto validateBucketList = [&](auto const& buckets, + uint32_t expectedLevels) { + // Get Bucket version and set nonEmptySeen + bool nonEmptySeen = false; + auto getVersionAndCheckEmpty = [&](auto const& bucket) { + int32_t version = 0; + releaseAssert(bucket); + if (!bucket->isEmpty()) + { + version = bucket->getBucketVersion(); + if (!nonEmptySeen) + { + nonEmptySeen = true; + } + } + return version; + }; - // Process bucket, return version - auto processBucket = [&](std::string const& bucketHash) { - auto bucket = app.getBucketManager().getBucketByHash( - hexToBin256(bucketHash)); - releaseAssert(bucket); - int32_t version = 0; - if (!bucket->isEmpty()) - { - version = bucket->getBucketVersion(); - if (!nonEmptySeen) + uint32_t minBucketVersion = 0; + auto validateBucketVersion = [&](uint32_t bucketVersion) { + if (bucketVersion < minBucketVersion) { - nonEmptySeen = true; + CLOG_ERROR(History, + "Incompatible bucket versions: expected version " + "{} or higher, got {}", + minBucketVersion, bucketVersion); + return false; } - } - return version; - }; + minBucketVersion = bucketVersion; + return true; + }; - // Iterate bottom-up, from oldest to newest buckets - for (uint32_t j = LiveBucketList::kNumLevels; j != 0; --j) - { - auto i = j - 1; - auto const& level = currentBuckets[i]; + using BucketT = + typename std::decay_t::value_type::bucket_type; - // Note: snap is always older than curr, and therefore must be processed - // first - if (!validateBucketVersion(processBucket(level.snap)) || - !validateBucketVersion(processBucket(level.curr))) + if (buckets.size() != expectedLevels) { + CLOG_ERROR(History, "Invalid HAS: bucket list size mismatch"); return false; } - // Level 0 future buckets are always clear - if (i == 0) + for (uint32_t j = expectedLevels; j != 0; --j) { - if (!level.next.isClear()) + + auto i = j - 1; + auto const& level = buckets[i]; + + // Note: snap is always older than curr, and therefore must be + // processed first + auto curr = app.getBucketManager().getBucketByHash( + hexToBin256(level.curr)); + auto snap = app.getBucketManager().getBucketByHash( + hexToBin256(level.snap)); + if (!validateBucketVersion(getVersionAndCheckEmpty(snap)) || + !validateBucketVersion(getVersionAndCheckEmpty(curr))) { - CLOG_ERROR(History, - "Invalid HAS: next must be clear at level 0"); return false; } - break; - } - // Validate "next" field - // Use previous level snap to determine "next" validity - auto const& prev = currentBuckets[i - 1]; - uint32_t prevSnapVersion = processBucket(prev.snap); + // Level 0 future buckets are always clear + if (i == 0) + { + if (!level.next.isClear()) + { + CLOG_ERROR(History, + "Invalid HAS: next must be clear at level 0"); + return false; + } + break; + } - if (!nonEmptySeen) - { - // No real buckets seen yet, move on - continue; - } - else if (protocolVersionStartsFrom( - prevSnapVersion, - LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) - { - if (!level.next.isClear()) + // Validate "next" field + // Use previous level snap to determine "next" validity + auto const& prev = buckets[i - 1]; + auto prevSnap = app.getBucketManager().getBucketByHash( + hexToBin256(prev.snap)); + uint32_t prevSnapVersion = getVersionAndCheckEmpty(prevSnap); + + if (!nonEmptySeen) + { + // We're iterating 
from the bottom up, so if we haven't seen a + // non-empty bucket yet, we can skip the check because the + // bucket is default initialized + continue; + } + else if (protocolVersionStartsFrom( + prevSnapVersion, + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + { + if (!level.next.isClear()) + { + CLOG_ERROR(History, "Invalid HAS: future must be cleared "); + return false; + } + } + else if (!level.next.hasOutputHash()) { - CLOG_ERROR(History, "Invalid HAS: future must be cleared "); + CLOG_ERROR(History, + "Invalid HAS: future must have resolved output"); return false; } } - else if (!level.next.hasOutputHash()) - { - CLOG_ERROR(History, - "Invalid HAS: future must have resolved output"); - return false; - } + + return true; + }; + + if (!validateBucketList(currentBuckets, LiveBucketList::kNumLevels)) + { + return false; + } + + if (hasHotArchiveBuckets() && + !validateBucketList(hotArchiveBuckets, + HotArchiveBucketList::kNumLevels)) + { + return false; } return true; @@ -379,39 +468,50 @@ void HistoryArchiveState::prepareForPublish(Application& app) { ZoneScoped; - // Level 0 future buckets are always clear - releaseAssert(currentBuckets[0].next.isClear()); + auto prepareBucketList = [&](auto& buckets, size_t numLevels) { + using BucketT = + typename std::decay_t::value_type::bucket_type; - for (uint32_t i = 1; i < LiveBucketList::kNumLevels; i++) - { - auto& level = currentBuckets[i]; - auto& prev = currentBuckets[i - 1]; - - auto snap = app.getBucketManager().getBucketByHash( - hexToBin256(prev.snap)); - if (!level.next.isClear() && - protocolVersionStartsFrom( - snap->getBucketVersion(), - LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) - { - level.next.clear(); - } - else if (level.next.hasHashes() && !level.next.isLive()) + // Level 0 future buckets are always clear + releaseAssert(buckets[0].next.isClear()); + for (uint32_t i = 1; i < numLevels; i++) { - // Note: this `maxProtocolVersion` is over-approximate. The actual - // max for the ledger being published might be lower, but if the - // "true" (lower) max-value were actually in conflict with the state - // we're about to publish it should have caused an error earlier - // anyways, back when the bucket list and HAS for this state was - // initially formed. Since we're just reconstituting a HAS here, we - // assume it was legit when formed. Given that getting the true - // value here therefore doesn't seem to add much checking, and given - // that it'd be somewhat convoluted _to_ materialize the true value - // here, we're going to live with the approximate value for now. - uint32_t maxProtocolVersion = - app.getConfig().LEDGER_PROTOCOL_VERSION; - level.next.makeLive(app, maxProtocolVersion, i); + auto& level = buckets[i]; + auto& prev = buckets[i - 1]; + + auto snap = app.getBucketManager().getBucketByHash( + hexToBin256(prev.snap)); + if (!level.next.isClear() && + protocolVersionStartsFrom( + snap->getBucketVersion(), + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + { + level.next.clear(); + } + else if (level.next.hasHashes() && !level.next.isLive()) + { + // Note: this `maxProtocolVersion` is over-approximate. The + // actual max for the ledger being published might be lower, but + // if the "true" (lower) max-value were actually in conflict + // with the state we're about to publish it should have caused + // an error earlier anyways, back when the bucket list and HAS + // for this state was initially formed. Since we're just + // reconstituting a HAS here, we assume it was legit when + // formed. 
Given that getting the true value here therefore + // doesn't seem to add much checking, and given that it'd be + // somewhat convoluted _to_ materialize the true value here, + // we're going to live with the approximate value for now. + uint32_t maxProtocolVersion = + app.getConfig().LEDGER_PROTOCOL_VERSION; + level.next.makeLive(app, maxProtocolVersion, i); + } } + }; + + prepareBucketList(currentBuckets, LiveBucketList::kNumLevels); + if (hasHotArchiveBuckets()) + { + prepareBucketList(hotArchiveBuckets, HotArchiveBucketList::kNumLevels); } } @@ -419,7 +519,7 @@ HistoryArchiveState::HistoryArchiveState() : server(STELLAR_CORE_VERSION) { uint256 u; std::string s = binToHex(u); - HistoryStateBucket b; + HistoryStateBucket b; b.curr = s; b.snap = s; while (currentBuckets.size() < LiveBucketList::kNumLevels) @@ -437,7 +537,7 @@ HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq, { for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - HistoryStateBucket b; + HistoryStateBucket b; auto& level = buckets.getLevel(i); b.curr = binToHex(level.getCurr()->getHash()); b.next = level.getNext(); @@ -446,6 +546,23 @@ HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq, } } +HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq, + LiveBucketList const& liveBuckets, + HotArchiveBucketList const& hotBuckets, + std::string const& passphrase) + : HistoryArchiveState(ledgerSeq, liveBuckets, passphrase) +{ + version = HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22; + for (uint32_t i = 0; i < HotArchiveBucketList::kNumLevels; ++i) + { + HistoryStateBucket b; + b.curr = binToHex(hotBuckets.getLevel(i).getCurr()->getHash()); + b.next = hotBuckets.getLevel(i).getNext(); + b.snap = binToHex(hotBuckets.getLevel(i).getSnap()->getHash()); + hotArchiveBuckets.push_back(b); + } +} + HistoryArchive::HistoryArchive(HistoryArchiveConfiguration const& config) : mConfig(config) { diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h index d6ca646b42..d604c31a62 100644 --- a/src/history/HistoryArchive.h +++ b/src/history/HistoryArchive.h @@ -4,8 +4,12 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/BucketUtils.h" #include "bucket/FutureBucket.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/HotArchiveBucketList.h" #include "main/Config.h" +#include "util/GlobalChecks.h" #include "xdr/Stellar-types.h" #include @@ -29,13 +33,16 @@ namespace stellar class Application; class LiveBucketList; class Bucket; +class LiveBucketList; +class HotArchiveBucketList; -struct HistoryStateBucket +template struct HistoryStateBucket { + BUCKET_TYPE_ASSERT(BucketT); + using bucket_type = BucketT; std::string curr; - // TODO: Add archival buckets to history - FutureBucket next; + FutureBucket next; std::string snap; template @@ -62,17 +69,37 @@ struct HistoryStateBucket */ struct HistoryArchiveState { - static unsigned const HISTORY_ARCHIVE_STATE_VERSION; + static inline unsigned const HISTORY_ARCHIVE_STATE_VERSION_PRE_PROTOCOL_22 = + 1; + static inline unsigned const + HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22 = 2; + + struct BucketHashReturnT + { + std::vector live; + std::vector hot; - unsigned version{HISTORY_ARCHIVE_STATE_VERSION}; + explicit BucketHashReturnT(std::vector&& live, + std::vector&& hot) + : live(live), hot(hot) + { + } + }; + + unsigned version{HISTORY_ARCHIVE_STATE_VERSION_PRE_PROTOCOL_22}; std::string server; std::string networkPassphrase; uint32_t currentLedger{0}; - std::vector currentBuckets; + std::vector> currentBuckets; + std::vector> hotArchiveBuckets; HistoryArchiveState(); - HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& buckets, + HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& liveBuckets, + HotArchiveBucketList const& hotBuckets, + std::string const& networkPassphrase); + + HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& liveBuckets, std::string const& networkPassphrase); static std::string baseName(); @@ -89,9 +116,8 @@ struct HistoryArchiveState // Return vector of buckets to fetch/apply to turn 'other' into 'this'. // Vector is sorted from largest/highest-numbered bucket to smallest/lowest, // and with snap buckets occurring before curr buckets. Zero-buckets are - // omitted. - std::vector - differingBuckets(HistoryArchiveState const& other) const; + // omitted. Hashes are distinguished by live and Hot Archive buckets. + BucketHashReturnT differingBuckets(HistoryArchiveState const& other) const; // Return vector of all buckets referenced by this state. std::vector allBuckets() const; @@ -105,12 +131,22 @@ struct HistoryArchiveState { ar(CEREAL_NVP(networkPassphrase)); } - catch (cereal::Exception&) + catch (cereal::Exception& e) { // networkPassphrase wasn't parsed. - // This is expected when the input file does not contain it. + // This is expected when the input file does not contain it, but + // should only ever happen for older versions of History Archive + // State. + if (version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22) + { + throw e; + } } ar(CEREAL_NVP(currentBuckets)); + if (version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22) + { + ar(CEREAL_NVP(hotArchiveBuckets)); + } } template @@ -122,7 +158,18 @@ struct HistoryArchiveState { ar(CEREAL_NVP(networkPassphrase)); } + else + { + // New versions of HistoryArchiveState should always have a + // networkPassphrase. 
+ releaseAssertOrThrow( + version < HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22); + } ar(CEREAL_NVP(currentBuckets)); + if (version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22) + { + ar(CEREAL_NVP(hotArchiveBuckets)); + } } // Return true if all futures are in FB_CLEAR state @@ -149,6 +196,12 @@ struct HistoryArchiveState void prepareForPublish(Application& app); bool containsValidBuckets(Application& app) const; + + bool + hasHotArchiveBuckets() const + { + return version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22; + } }; class HistoryArchive : public std::enable_shared_from_this diff --git a/src/history/HistoryManager.h b/src/history/HistoryManager.h index 4a17bb795d..e215c4538d 100644 --- a/src/history/HistoryManager.h +++ b/src/history/HistoryManager.h @@ -319,14 +319,17 @@ class HistoryManager // Calls queueCurrentHistory() if the current ledger is a multiple of // getCheckpointFrequency() -- equivalently, the LCL is one _less_ than // a multiple of getCheckpointFrequency(). Returns true if checkpoint - // publication of the LCL was queued, otherwise false. - virtual bool maybeQueueHistoryCheckpoint(uint32_t lcl) = 0; + // publication of the LCL was queued, otherwise false. ledgerVers must align + // with lcl. + virtual bool maybeQueueHistoryCheckpoint(uint32_t lcl, + uint32_t ledgerVers) = 0; // Checkpoint the LCL -- both the log of history from the previous // checkpoint to it, as well as the bucketlist of its state -- to a // publication-queue in the database. This should be followed shortly - // (typically after commit) with a call to publishQueuedHistory. - virtual void queueCurrentHistory(uint32_t lcl) = 0; + // (typically after commit) with a call to publishQueuedHistory. ledgerVers + // must align with lcl. + virtual void queueCurrentHistory(uint32_t lcl, uint32_t ledgerVers) = 0; // Return the youngest ledger still in the outgoing publish queue; // returns 0 if the publish queue has nothing in it. 
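The new ledgerVers parameter threads the closing ledger's protocol version through to HAS construction. The call site is not part of these hunks, so the following is only an assumed sketch of how a caller would use the revised interface.

// Hypothetical call site, e.g. from ledger close handling: pass the LCL's
// sequence number together with its protocol version so the queued HAS can
// include hot-archive buckets once the protocol supports persistent eviction.
auto const& lcl = mApp.getLedgerManager().getLastClosedLedgerHeader();
mApp.getHistoryManager().maybeQueueHistoryCheckpoint(
    lcl.header.ledgerSeq, lcl.header.ledgerVersion);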
diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp index a8d9e23c5c..c0b56dec8b 100644 --- a/src/history/HistoryManagerImpl.cpp +++ b/src/history/HistoryManagerImpl.cpp @@ -375,7 +375,8 @@ HistoryManager::getMaxLedgerQueuedToPublish(Config const& cfg) } bool -HistoryManagerImpl::maybeQueueHistoryCheckpoint(uint32_t lcl) +HistoryManagerImpl::maybeQueueHistoryCheckpoint(uint32_t lcl, + uint32_t ledgerVers) { if (!publishCheckpointOnLedgerClose(lcl, mApp.getConfig())) { @@ -389,12 +390,12 @@ HistoryManagerImpl::maybeQueueHistoryCheckpoint(uint32_t lcl) return false; } - queueCurrentHistory(lcl); + queueCurrentHistory(lcl, ledgerVers); return true; } void -HistoryManagerImpl::queueCurrentHistory(uint32_t ledger) +HistoryManagerImpl::queueCurrentHistory(uint32_t ledger, uint32_t ledgerVers) { ZoneScoped; @@ -406,7 +407,20 @@ HistoryManagerImpl::queueCurrentHistory(uint32_t ledger) bl = mApp.getBucketManager().getLiveBucketList(); } - HistoryArchiveState has(ledger, bl, mApp.getConfig().NETWORK_PASSPHRASE); + HistoryArchiveState has; + if (protocolVersionStartsFrom( + ledgerVers, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + auto hotBl = mApp.getBucketManager().getHotArchiveBucketList(); + has = HistoryArchiveState(ledger, bl, hotBl, + mApp.getConfig().NETWORK_PASSPHRASE); + } + else + { + has = HistoryArchiveState(ledger, bl, + mApp.getConfig().NETWORK_PASSPHRASE); + } CLOG_DEBUG(History, "Queueing publish state for ledger {}", ledger); mEnqueueTimes.emplace(ledger, std::chrono::steady_clock::now()); diff --git a/src/history/HistoryManagerImpl.h b/src/history/HistoryManagerImpl.h index 60c146fd26..36c4e23eed 100644 --- a/src/history/HistoryManagerImpl.h +++ b/src/history/HistoryManagerImpl.h @@ -46,9 +46,10 @@ class HistoryManagerImpl : public HistoryManager void logAndUpdatePublishStatus() override; - bool maybeQueueHistoryCheckpoint(uint32_t lcl) override; + bool maybeQueueHistoryCheckpoint(uint32_t lcl, + uint32_t ledgerVers) override; - void queueCurrentHistory(uint32_t lcl) override; + void queueCurrentHistory(uint32_t lcl, uint32_t ledgerVers) override; void takeSnapshotAndPublish(HistoryArchiveState const& has); diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp index 3ab2a9e66e..08ce68bc2c 100644 --- a/src/history/StateSnapshot.cpp +++ b/src/history/StateSnapshot.cpp @@ -120,7 +120,9 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other) addIfExists(mTransactionResultSnapFile); addIfExists(mSCPHistorySnapFile); - for (auto const& hash : mLocalState.differingBuckets(other)) + auto hashes = mLocalState.differingBuckets(other); + + for (auto const& hash : hashes.live) { auto b = mApp.getBucketManager().getBucketByHash( hexToBin256(hash)); @@ -128,6 +130,16 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other) addIfExists(std::make_shared(*b)); } +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + for (auto const& hash : hashes.hot) + { + auto b = mApp.getBucketManager().getBucketByHash( + hexToBin256(hash)); + releaseAssert(b); + addIfExists(std::make_shared(*b)); + } +#endif + return files; } } diff --git a/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json b/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json new file mode 100644 index 0000000000..18fb8d3b29 --- /dev/null +++ b/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json @@ -0,0 +1,184 @@ +{ + "version": 2, + 
"server": "v9.0.1-dirty", + "currentLedger": 6714239, + "networkPassphrase": "(V) (;,,;) (V)", + "currentBuckets": [ + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "c3131b946b5cadf713ca88d299505fe16572ffeefa083b2858a674452fd8ba76", + "next": { + "state": 1, + "output": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "snap": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905735" + }, + { + "curr": "b767206bf07e3dbbe14cff681234b7ccfd4dab5957ce6d440f692409498ff909", + "next": { + "state": 1, + "output": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905735" + }, + "snap": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d66e" + }, + { + "curr": "7a1132e7566dea51a35f6981181ad3f108256bb5f9470f0e9df3222c138c6446", + "next": { + "state": 1, + "output": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d66e" + }, + "snap": "1863067ae6d91218c589b2ccc40a983edc144196ca3a2cd43c7426275a8a3f40" + }, + { + "curr": "f4e99dd7c25206f6766911dc812502f0ec2cd5469f4742b7848523aa6e0da03e", + "next": { + "state": 1, + "output": "dd9bcfba61bf17be7093f56eb6e1392d5f25981282d4331cb51961852c11ee16" + }, + "snap": "04a5699bb688ef82e8a352b2ccfa134458c794a0365dddfac00f2e6fc7c159f9" + }, + { + "curr": "f9de28d23c53d1affe871a97a5c9747bbc9a208754388dc88cdea96852977471", + "next": { + "state": 1, + "output": "b6d012ce7af5624c24d4ff386ae172516ff0cd13f70cd030edbb503b87ad196b" + }, + "snap": "1fd4b80ec5278fc08269f96728206fcfbf5d3f5efe1bf7f93d4a3d79a75eeca8" + }, + { + "curr": "71f4453669ec84632afcdd1f2a97685121cef52a01db58c8d4c810310c07c0d8", + "next": { + "state": 1, + "output": "c0992883bd5f4631f736c5287538342c08e00f80be16b36a5a794772114a3db9" + }, + "snap": "b8913fa01d3b58b763fc04ee1528317c0ec71f250500758e09d0a839ca405be4" + }, + { + "curr": "a113930757a7ff48a8898dad74c1446a942b5e5b5f443626a8f943768432ec41", + "next": { + "state": 1, + "output": "9b6feec6e7e366b898a59ad562b31ce3305d7e1545f92bf5fda5c13e032bc0f9" + }, + "snap": "d3b1a36290f39d4cd09e7ef80b7cb871df9a3a5b1e40d8e5cfd26c754914ca84" + }, + { + "curr": "e57d1c6342f6e47c2ac0305cd5251bb0fb2cdd40923af87c4657e896e33acdc5", + "next": { + "state": 1, + "output": "de8805e4232fe81c04f5536487e586ab6d3ef38eff93bad5bf6872a3e53ced6b" + }, + "snap": "fcddef737957961d828023a081b84449dc0ab20524e5155837bae12a3b18ac64" + }, + { + "curr": "5c3387bcaad3139bb48ff2a99010d6f075cc9b20ba2f22c194fcda2a97926f55", + "next": { + "state": 1, + "output": "3373185b0eb537b909c56e6e16e76e33d966dc7ee1e7168123cfe1114d444e88" + }, + "snap": "2958d66f083ca13ca97a184a5be3a03b3c2e494f832b1ac1a3e16d7b02e9f50c" + }, + { + "curr": "ae7e4814b50e176d8e3532e462e2e9db02f218adebd74603d7e349cc19f489e1", + "next": { + "state": 1, + "output": "50abed8a9d86c072cfe8388246b7a378dc355fe996fd7384a5ee57e8da2ad51d" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "hotArchiveBuckets": [ + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "c3131b946b5cadf713ca88d299505fe16572ffeefa083b2858a674452fd8ba74", + "next": { + "state": 1, + "output": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "snap": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905732" + }, + { + 
"curr": "b767206bf07e3dbbe14cff681234b7ccfd4dab5957ce6d440f692409498ff901", + "next": { + "state": 1, + "output": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905732" + }, + "snap": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d661" + }, + { + "curr": "7a1132e7566dea51a35f6981181ad3f108256bb5f9470f0e9df3222c138c6442", + "next": { + "state": 1, + "output": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d661" + }, + "snap": "1863067ae6d91218c589b2ccc40a983edc144196ca3a2cd43c7426275a8a3f42" + }, + { + "curr": "f4e99dd7c25206f6766911dc812502f0ec2cd5469f4742b7848523aa6e0da031", + "next": { + "state": 1, + "output": "dd9bcfba61bf17be7093f56eb6e1392d5f25981282d4331cb51961852c11ee12" + }, + "snap": "04a5699bb688ef82e8a352b2ccfa134458c794a0365dddfac00f2e6fc7c159f1" + }, + { + "curr": "f9de28d23c53d1affe871a97a5c9747bbc9a208754388dc88cdea96852977472", + "next": { + "state": 1, + "output": "b6d012ce7af5624c24d4ff386ae172516ff0cd13f70cd030edbb503b87ad1961" + }, + "snap": "1fd4b80ec5278fc08269f96728206fcfbf5d3f5efe1bf7f93d4a3d79a75eeca2" + }, + { + "curr": "71f4453669ec84632afcdd1f2a97685121cef52a01db58c8d4c810310c07c0d1", + "next": { + "state": 1, + "output": "c0992883bd5f4631f736c5287538342c08e00f80be16b36a5a794772114a3db2" + }, + "snap": "b8913fa01d3b58b763fc04ee1528317c0ec71f250500758e09d0a839ca405be1" + }, + { + "curr": "a113930757a7ff48a8898dad74c1446a942b5e5b5f443626a8f943768432ec42", + "next": { + "state": 1, + "output": "9b6feec6e7e366b898a59ad562b31ce3305d7e1545f92bf5fda5c13e032bc0f1" + }, + "snap": "d3b1a36290f39d4cd09e7ef80b7cb871df9a3a5b1e40d8e5cfd26c754914ca24" + }, + { + "curr": "e57d1c6342f6e47c2ac0305cd5251bb0fb2cdd40923af87c4657e896e33acdc1", + "next": { + "state": 1, + "output": "de8805e4232fe81c04f5536487e586ab6d3ef38eff93bad5bf6872a3e53ced62" + }, + "snap": "fcddef737957961d828023a081b84449dc0ab20524e5155837bae12a3b18ac61" + }, + { + "curr": "5c3387bcaad3139bb48ff2a99010d6f075cc9b20ba2f22c194fcda2a97926f52", + "next": { + "state": 1, + "output": "3373185b0eb537b909c56e6e16e76e33d966dc7ee1e7168123cfe1114d444e81" + }, + "snap": "2958d66f083ca13ca97a184a5be3a03b3c2e494f832b1ac1a3e16d7b02e9f502" + }, + { + "curr": "ae7e4814b50e176d8e3532e462e2e9db02f218adebd74603d7e349cc19f489e2", + "next": { + "state": 1, + "output": "50abed8a9d86c072cfe8388246b7a378dc355fe996fd7384a5ee57e8da2ad52" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +} \ No newline at end of file diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index 817b55560e..772499cf12 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -172,57 +172,158 @@ TEST_CASE("History bucket verification", "[history][catchup]") auto bucketGenerator = TestBucketGenerator{ *app, app->getHistoryArchiveManager().getHistoryArchive( cg->getArchiveDirName())}; - std::vector hashes; + std::vector liveHashes; + std::vector hotHashes; auto& wm = app->getWorkScheduler(); - std::map> mBuckets; + + std::map> buckets; + std::map> hotBuckets; auto tmpDir = std::make_unique(app->getTmpDirManager().tmpDir("bucket-test")); - SECTION("successful download and verify") - { - hashes.push_back(bucketGenerator.generateBucket( + SECTION("successful download and verify"){SECTION("live buckets"){ + liveHashes.push_back(bucketGenerator.generateBucket( TestBucketState::CONTENTS_AND_HASH_OK)); - hashes.push_back(bucketGenerator.generateBucket( - TestBucketState::CONTENTS_AND_HASH_OK)); - auto verify = - 
wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); - } - SECTION("download fails file not found") - { - hashes.push_back( - bucketGenerator.generateBucket(TestBucketState::FILE_NOT_UPLOADED)); + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); +} - auto verify = - wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); - } - SECTION("download succeeds but unzip fails") - { - hashes.push_back(bucketGenerator.generateBucket( - TestBucketState::CORRUPTED_ZIPPED_FILE)); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION +SECTION("hot archive buckets") +{ + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); +} - auto verify = - wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); - } - SECTION("verify fails hash mismatch") - { - hashes.push_back( - bucketGenerator.generateBucket(TestBucketState::HASH_MISMATCH)); +SECTION("both live and hot archive buckets") +{ + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); +} +#endif +} +SECTION("download fails file not found"){SECTION("live buckets"){ + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); +auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); +REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} - auto verify = - wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION +SECTION("hot archive buckets") +{ + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} + +SECTION("both live and hot archive buckets") +{ + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} +#endif +} +SECTION("download succeeds but unzip fails"){SECTION("live buckets"){ + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); +auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); +REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION +SECTION("hot archive buckets") +{ + 
hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} + +SECTION("both live and hot archive buckets") +{ + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} +#endif +} +SECTION("verify fails hash mismatch"){SECTION("live buckets"){ + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); +auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); +REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} + +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION +SECTION("hot archive buckets") +{ + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} + +SECTION("both live and hot archive buckets") +{ + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); + hotHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +} +#endif +} +SECTION("no hashes to verify") +{ + // Ensure proper behavior when no hashes are passed in + SECTION("live buckets") + { + auto verify = wm.executeWork( + buckets, hotBuckets, std::vector(), + std::vector(), *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); } - SECTION("no hashes to verify") + + SECTION("hot archive buckets") { - // Ensure proper behavior when no hashes are passed in auto verify = wm.executeWork( - mBuckets, std::vector(), *tmpDir); + buckets, hotBuckets, std::vector(), + std::vector(), *tmpDir); REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); } } +} TEST_CASE("Ledger chain verification", "[ledgerheaderverification]") { @@ -1145,6 +1246,16 @@ TEST_CASE("Catchup with protocol upgrade", "[catchup][history]") testUpgrade(SOROBAN_PROTOCOL_VERSION); } } + SECTION("hot archive bucket upgrade") + { + if (protocolVersionEquals( + Config::CURRENT_LEDGER_PROTOCOL_VERSION, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + testUpgrade( + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); + } + } } TEST_CASE("Catchup fatal failure", "[catchup][history]") @@ -1203,7 +1314,7 @@ TEST_CASE("Catchup non-initentry buckets to initentry-supporting works", // Check that during catchup/replay, we did not use any INITENTRY code, // were still on the old protocol. - auto mc = a->getBucketManager().readMergeCounters(); + auto mc = a->getBucketManager().readMergeCounters(); REQUIRE(mc.mPostInitEntryProtocolMerges == 0); REQUIRE(mc.mNewInitEntries == 0); REQUIRE(mc.mOldInitEntries == 0); @@ -1245,7 +1356,7 @@ TEST_CASE("Catchup non-initentry buckets to initentry-supporting works", } // Check that we did in fact use INITENTRY code. 
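
The merge-counter assertions in this test now read per-bucket-list counters: the BucketManager keeps one MergeCounters aggregate for the live bucket list and a separate one for the hot archive. A minimal sketch of the intended usage, assuming the LiveBucket/HotArchiveBucket template parameters implied by the bucket headers included elsewhere in this patch (variable names are illustrative):

    // Counters accumulated by merges on the live bucket list.
    MergeCounters liveMc =
        app->getBucketManager().readMergeCounters<LiveBucket>();

    // The hot-archive bucket list tracks its own, independent counters, so
    // live-bucket INITENTRY checks like the ones in this test do not observe
    // hot-archive merge activity.
    MergeCounters hotMc =
        app->getBucketManager().readMergeCounters<HotArchiveBucket>();
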
- mc = a->getBucketManager().readMergeCounters(); + mc = a->getBucketManager().readMergeCounters(); REQUIRE(mc.mPostInitEntryProtocolMerges != 0); REQUIRE(mc.mNewInitEntries != 0); REQUIRE(mc.mOldInitEntries != 0); @@ -1346,7 +1457,9 @@ TEST_CASE_VERSIONS( BucketTestUtils::for_versions_with_differing_bucket_logic( cfg, [&](Config const& cfg) { - Application::pointer app = createTestApplication(clock, cfg); + auto app = + createTestApplication( + clock, cfg); auto& hm = app->getHistoryManager(); auto& lm = app->getLedgerManager(); auto& bl = app->getBucketManager().getLiveBucketList(); @@ -1355,9 +1468,11 @@ TEST_CASE_VERSIONS( { auto lcl = lm.getLastClosedLedgerHeader(); lcl.header.ledgerSeq += 1; - BucketTestUtils::addLiveBatchAndUpdateSnapshot( - *app, lcl.header, {}, - LedgerTestUtils::generateValidUniqueLedgerEntries(8), {}); + lm.setNextLedgerEntryBatchForBucketTesting( + {}, + LedgerTestUtils::generateValidLedgerEntriesWithExclusions( + {LedgerEntryType::CONFIG_SETTING}, 8), + {}); clock.crank(true); } diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp index 1632c38d0e..d9cb8f2b40 100644 --- a/src/history/test/HistoryTestsUtils.cpp +++ b/src/history/test/HistoryTestsUtils.cpp @@ -4,6 +4,9 @@ #include "history/test/HistoryTestsUtils.h" #include "bucket/BucketManager.h" +#include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/HotArchiveBucketList.h" #include "catchup/CatchupRange.h" #include "crypto/Hex.h" #include "crypto/Random.h" @@ -14,12 +17,14 @@ #include "ledger/LedgerRange.h" #include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnHeader.h" +#include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" #include "main/ApplicationUtils.h" #include "test/TestAccount.h" #include "test/TestUtils.h" #include "test/TxTests.h" #include "test/test.h" +#include "util/GlobalChecks.h" #include "util/Math.h" #include "util/XDROperators.h" #include "work/WorkScheduler.h" @@ -32,6 +37,21 @@ namespace stellar namespace historytestutils { +namespace +{ +void +setConfigForArchival(Config& cfg) +{ + // Evict very aggressively, but only 1 entry at a time so that Hot + // Archive Buckets churn + cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true; + cfg.TESTING_MINIMUM_PERSISTENT_ENTRY_LIFETIME = 10; + cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1; + cfg.TESTING_EVICTION_SCAN_SIZE = 100'000; + cfg.TESTING_MAX_ENTRIES_TO_ARCHIVE = 1; +} +} + std::string HistoryConfigurator::getArchiveDirName() const { @@ -124,36 +144,50 @@ RealGenesisTmpDirHistoryConfigurator::configure(Config& mCfg, return mCfg; } -BucketOutputIteratorForTesting::BucketOutputIteratorForTesting( +template +BucketOutputIteratorForTesting::BucketOutputIteratorForTesting( std::string const& tmpDir, uint32_t protocolVersion, MergeCounters& mc, asio::io_context& ctx) - : BucketOutputIterator{ + : BucketOutputIterator{ tmpDir, true, testutil::testBucketMetadata(protocolVersion), mc, ctx, /*doFsync=*/true} { } +template std::pair -BucketOutputIteratorForTesting::writeTmpTestBucket() +BucketOutputIteratorForTesting::writeTmpTestBucket() { - auto ledgerEntries = - LedgerTestUtils::generateValidUniqueLedgerEntries(NUM_ITEMS_PER_BUCKET); - auto bucketEntries = - LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {}); - for (auto const& bucketEntry : bucketEntries) + auto generateEntries = [this]() { + if constexpr (std::is_same_v) + { + auto le = LedgerTestUtils::generateValidUniqueLedgerEntries( + NUM_ITEMS_PER_BUCKET); + return 
BucketT::convertToBucketEntry(false, {}, le, {}); + } + else + { + UnorderedSet empty; + auto keys = LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, NUM_ITEMS_PER_BUCKET, empty); + return BucketT::convertToBucketEntry({}, {}, keys); + } + }; + + for (auto const& bucketEntry : generateEntries()) { - put(bucketEntry); + this->put(bucketEntry); } // Finish writing and close the bucket file - REQUIRE(mBuf); - mOut.writeOne(*mBuf, &mHasher, &mBytesPut); - mObjectsPut++; - mBuf.reset(); - mOut.close(); - - return std::pair(mFilename.string(), - mHasher.finish()); + REQUIRE(this->mBuf); + this->mOut.writeOne(*this->mBuf, &this->mHasher, &this->mBytesPut); + this->mObjectsPut++; + this->mBuf.reset(); + this->mOut.close(); + + return std::pair(this->mFilename.string(), + this->mHasher.finish()); }; TestBucketGenerator::TestBucketGenerator( @@ -164,9 +198,12 @@ TestBucketGenerator::TestBucketGenerator( mApp.getTmpDirManager().tmpDir("tmp-bucket-generator")); } +template std::string TestBucketGenerator::generateBucket(TestBucketState state) { + BUCKET_TYPE_ASSERT(BucketT); + uint256 hash = HashUtils::pseudoRandomForTesting(); if (state == TestBucketState::FILE_NOT_UPLOADED) { @@ -174,7 +211,7 @@ TestBucketGenerator::generateBucket(TestBucketState state) return binToHex(hash); } MergeCounters mc; - BucketOutputIteratorForTesting bucketOut{ + BucketOutputIteratorForTesting bucketOut{ mTmpDir->getName(), mApp.getConfig().LEDGER_PROTOCOL_VERSION, mc, mApp.getClock().getIOContext()}; std::string filename; @@ -381,7 +418,11 @@ CatchupSimulation::CatchupSimulation(VirtualClock::Mode mode, bool startApp, Config::TestDbMode dbMode) : mClock(std::make_unique(mode)) , mHistoryConfigurator(cg) - , mCfg(getTestConfig(0, dbMode)) + , mCfg([&] { + auto cfg = getTestConfig(0, dbMode); + setConfigForArchival(cfg); + return cfg; + }()) , mAppPtr(createTestApplication(*mClock, mHistoryConfigurator->configure(mCfg, true), /*newDB*/ true, /*startApp*/ false)) @@ -493,10 +534,13 @@ CatchupSimulation::generateRandomLedger(uint32_t version) 10; res.writeBytes = 100'000; uint32_t inclusion = 100; + sorobanTxs.push_back(createUploadWasmTx( - getApp(), stroopy, inclusion, DEFAULT_TEST_RESOURCE_FEE, res)); + getApp(), stroopy, inclusion, DEFAULT_TEST_RESOURCE_FEE * 5, + res, {}, 0, rand_uniform(101, 2'000))); sorobanTxs.push_back(createUploadWasmTx( - getApp(), eve, inclusion * 5, DEFAULT_TEST_RESOURCE_FEE, res)); + getApp(), eve, inclusion * 5, DEFAULT_TEST_RESOURCE_FEE * 5, + res, {}, 0, rand_uniform(101, 2'000))); check = true; } } @@ -617,6 +661,19 @@ CatchupSimulation::ensureLedgerAvailable(uint32_t targetLedger, getApp().getBucketManager().getLiveBucketList(); } } + + // Make sure the Hot Archive isn't empty + if (protocolVersionStartsFrom( + getApp() + .getLedgerManager() + .getLastClosedLedgerHeader() + .header.ledgerVersion, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + releaseAssert( + getApp().getBucketManager().getHotArchiveBucketList().getSize() >= + 1'000); + } } void @@ -740,10 +797,12 @@ CatchupSimulation::createCatchupApplication( mCfgs.back().CATCHUP_COMPLETE = count == std::numeric_limits::max(); mCfgs.back().CATCHUP_RECENT = count; + setConfigForArchival(mCfgs.back()); if (ledgerVersion) { mCfgs.back().TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = *ledgerVersion; } + mSpawnedAppsClocks.emplace_front(); auto newApp = createTestApplication( mSpawnedAppsClocks.front(), @@ -765,7 +824,7 @@ CatchupSimulation::catchupOffline(Application::pointer app, uint32_t toLedger, 
: CatchupConfiguration::Mode::OFFLINE_BASIC; auto catchupConfiguration = CatchupConfiguration{toLedger, app->getConfig().CATCHUP_RECENT, mode}; - lm.startCatchup(catchupConfiguration, nullptr, {}); + lm.startCatchup(catchupConfiguration, nullptr); REQUIRE(!app->getClock().getIOContext().stopped()); auto& lam = app->getLedgerApplyManager(); @@ -1119,5 +1178,10 @@ CatchupSimulation::restartApp() mClock = std::make_unique(mClock->getMode()); mAppPtr = createTestApplication(*mClock, mCfg, /*newDB*/ false); } + +template std::string + TestBucketGenerator::generateBucket(TestBucketState); +template std::string + TestBucketGenerator::generateBucket(TestBucketState); } } diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h index 29a5564cc7..cfa2f4692d 100644 --- a/src/history/test/HistoryTestsUtils.h +++ b/src/history/test/HistoryTestsUtils.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/BucketUtils.h" #include "bucket/HotArchiveBucketList.h" #include "bucket/LiveBucketList.h" #include "catchup/VerifyLedgerChainWork.h" @@ -47,7 +48,7 @@ enum class TestBucketState class HistoryConfigurator; class TestBucketGenerator; -class BucketOutputIteratorForTesting; +template class BucketOutputIteratorForTesting; struct CatchupPerformedWork; class HistoryConfigurator : NonCopyable @@ -99,8 +100,11 @@ class RealGenesisTmpDirHistoryConfigurator : public TmpDirHistoryConfigurator Config& configure(Config& cfg, bool writable) const override; }; -class BucketOutputIteratorForTesting : public LiveBucketOutputIterator +template +class BucketOutputIteratorForTesting : public BucketOutputIterator { + BUCKET_TYPE_ASSERT(BucketT); + const size_t NUM_ITEMS_PER_BUCKET = 5; public: @@ -121,6 +125,7 @@ class TestBucketGenerator TestBucketGenerator(Application& app, std::shared_ptr archive); + template std::string generateBucket( TestBucketState desiredState = TestBucketState::CONTENTS_AND_HASH_OK); }; diff --git a/src/history/test/SerializeTests.cpp b/src/history/test/SerializeTests.cpp index cb03465b10..d9df52caf3 100644 --- a/src/history/test/SerializeTests.cpp +++ b/src/history/test/SerializeTests.cpp @@ -15,7 +15,8 @@ TEST_CASE("Serialization round trip", "[history]") std::vector testFiles = { "stellar-history.testnet.6714239.json", "stellar-history.livenet.15686975.json", - "stellar-history.testnet.6714239.networkPassphrase.json"}; + "stellar-history.testnet.6714239.networkPassphrase.json", + "stellar-history.testnet.6714239.networkPassphrase.v2.json"}; for (size_t i = 0; i < testFiles.size(); i++) { std::string fnPath = "testdata/"; diff --git a/src/historywork/DownloadBucketsWork.cpp b/src/historywork/DownloadBucketsWork.cpp index a4b0d01106..0c185e3b8e 100644 --- a/src/historywork/DownloadBucketsWork.cpp +++ b/src/historywork/DownloadBucketsWork.cpp @@ -18,13 +18,17 @@ namespace stellar DownloadBucketsWork::DownloadBucketsWork( Application& app, - std::map>& buckets, - std::vector hashes, TmpDir const& downloadDir, - std::shared_ptr archive) + std::map>& liveBuckets, + std::map>& hotBuckets, + std::vector liveHashes, std::vector hotHashes, + TmpDir const& downloadDir, std::shared_ptr archive) : BatchWork{app, "download-verify-buckets"} - , mBuckets{buckets} - , mHashes{hashes} - , mNextBucketIter{mHashes.begin()} + , mLiveBuckets{liveBuckets} + , mHotBuckets{hotBuckets} + , mLiveHashes{liveHashes} + , mHotHashes{hotHashes} + , 
mNextLiveBucketIter{mLiveHashes.begin()} + , mNextHotBucketIter{mHotHashes.begin()} , mDownloadDir{downloadDir} , mArchive{archive} { @@ -35,11 +39,14 @@ DownloadBucketsWork::getStatus() const { if (!isDone() && !isAborting()) { - if (!mHashes.empty()) + if (!mLiveHashes.empty()) { - auto numStarted = std::distance(mHashes.begin(), mNextBucketIter); + auto numStarted = + std::distance(mLiveHashes.begin(), mNextLiveBucketIter) + + std::distance(mHotHashes.begin(), mNextHotBucketIter); auto numDone = numStarted - getNumWorksInBatch(); - auto total = static_cast(mHashes.size()); + auto total = + static_cast(mLiveHashes.size() + mHotHashes.size()); auto pct = (100 * numDone) / total; return fmt::format( FMT_STRING( @@ -53,13 +60,15 @@ DownloadBucketsWork::getStatus() const bool DownloadBucketsWork::hasNext() const { - return mNextBucketIter != mHashes.end(); + return mNextLiveBucketIter != mLiveHashes.end() || + mNextHotBucketIter != mHotHashes.end(); } void DownloadBucketsWork::resetIter() { - mNextBucketIter = mHashes.begin(); + mNextLiveBucketIter = mLiveHashes.begin(); + mNextHotBucketIter = mHotHashes.begin(); } std::shared_ptr @@ -71,7 +80,10 @@ DownloadBucketsWork::yieldMoreWork() throw std::runtime_error("Nothing to iterate over!"); } - auto hash = *mNextBucketIter; + // Iterate through live hashes then Hot Archive hashes + auto isHotHash = mNextLiveBucketIter == mLiveHashes.end(); + auto hash = isHotHash ? *mNextHotBucketIter : *mNextLiveBucketIter; + FileTransferInfo ft(mDownloadDir, FileType::HISTORY_FILE_TYPE_BUCKET, hash); auto w1 = std::make_shared(mApp, ft, mArchive); @@ -90,16 +102,29 @@ DownloadBucketsWork::yieldMoreWork() }; std::weak_ptr weak( std::static_pointer_cast(shared_from_this())); - auto successCb = [weak, ft, hash](Application& app) -> bool { + auto successCb = [weak, ft, hash, isHotHash](Application& app) -> bool { auto self = weak.lock(); if (self) { auto bucketPath = ft.localPath_nogz(); - auto b = app.getBucketManager().adoptFileAsBucket( - bucketPath, hexToBin256(hash), - /*mergeKey=*/nullptr, - /*index=*/nullptr); - self->mBuckets[hash] = b; + + if (isHotHash) + { + auto b = + app.getBucketManager().adoptFileAsBucket( + bucketPath, hexToBin256(hash), + /*mergeKey=*/nullptr, + /*index=*/nullptr); + self->mHotBuckets[hash] = b; + } + else + { + auto b = app.getBucketManager().adoptFileAsBucket( + bucketPath, hexToBin256(hash), + /*mergeKey=*/nullptr, + /*index=*/nullptr); + self->mLiveBuckets[hash] = b; + } } return true; }; @@ -111,7 +136,14 @@ DownloadBucketsWork::yieldMoreWork() auto w4 = std::make_shared( mApp, "download-verify-sequence-" + hash, seq); - ++mNextBucketIter; + if (isHotHash) + { + ++mNextHotBucketIter; + } + else + { + ++mNextLiveBucketIter; + } return w4; } } diff --git a/src/historywork/DownloadBucketsWork.h b/src/historywork/DownloadBucketsWork.h index 573f5d8a82..83c5c736cb 100644 --- a/src/historywork/DownloadBucketsWork.h +++ b/src/historywork/DownloadBucketsWork.h @@ -17,17 +17,23 @@ class HistoryArchive; class DownloadBucketsWork : public BatchWork { - std::map>& mBuckets; - std::vector mHashes; - std::vector::const_iterator mNextBucketIter; + std::map>& mLiveBuckets; + std::map>& mHotBuckets; + std::vector mLiveHashes; + std::vector mHotHashes; + std::vector::const_iterator mNextLiveBucketIter; + std::vector::const_iterator mNextHotBucketIter; TmpDir const& mDownloadDir; std::shared_ptr mArchive; public: + // Note: hashes must contain both live and hot archive bucket hashes DownloadBucketsWork( Application& app, - std::map>& buckets, 
- std::vector hashes, TmpDir const& downloadDir, + std::map>& liveBuckets, + std::map>& hotBuckets, + std::vector liveHashes, std::vector hotHashes, + TmpDir const& downloadDir, std::shared_ptr archive = nullptr); ~DownloadBucketsWork() = default; std::string getStatus() const override; diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h index e10f11ab3d..b5754e470f 100644 --- a/src/ledger/LedgerManager.h +++ b/src/ledger/LedgerManager.h @@ -170,10 +170,8 @@ class LedgerManager // LedgerManager detects it is desynchronized from SCP's consensus ledger. // This method is present in the public interface to permit testing and // offline catchups. - virtual void - startCatchup(CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) = 0; + virtual void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) = 0; // Forcibly close the current ledger, applying `ledgerData` as the consensus // changes. This is normally done automatically as part of diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index affd40e066..dcdbff9d54 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -656,14 +656,12 @@ LedgerManagerImpl::valueExternalized(LedgerCloseData const& ledgerData, } void -LedgerManagerImpl::startCatchup( - CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) +LedgerManagerImpl::startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) { ZoneScoped; setState(LM_CATCHING_UP_STATE); - mApp.getLedgerApplyManager().startCatchup(configuration, archive, - bucketsToRetain); + mApp.getLedgerApplyManager().startCatchup(configuration, archive); } uint64_t @@ -1056,9 +1054,15 @@ LedgerManagerImpl::closeLedger(LedgerCloseData const& ledgerData, // Step 1. Maybe queue the current checkpoint file for publishing; this // should not race with main, since publish on main begins strictly _after_ - // this call. + // this call. There is a bug in the upgrade path where the initial + // ledgerVers is used in some places during ledgerClose, and the upgraded + // ledgerVers is used in other places (see comment in ledgerClosed). + // On the ledger when an upgrade occurs, the ledger header will contain the + // newly incremented ledgerVers. Because the history checkpoint must be + // consistent with the ledger header, we must base checkpoints off the new + // ledgerVers here and not the initial ledgerVers. auto& hm = mApp.getHistoryManager(); - hm.maybeQueueHistoryCheckpoint(ledgerSeq); + hm.maybeQueueHistoryCheckpoint(ledgerSeq, maybeNewVersion); // step 2 ltx.commit(); @@ -1740,8 +1744,20 @@ LedgerManagerImpl::storeCurrentLedger(LedgerHeader const& header, // Store the current HAS in the database; this is really just to // checkpoint the bucketlist so we can survive a restart and re-attach // to the buckets. 
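
To make the checkpoint comment above concrete: on the ledger where a protocol upgrade runs, the header written for that ledger already carries the upgraded version, so the checkpoint queued for it must be formed with the new version rather than the one the ledger started closing under. A rough sketch of the call pattern, assuming the post-upgrade version is read back from the ltx header (names are illustrative, not part of the patch):

    // Ledger N applies an upgrade from vN to vN+1; after the upgrade step the
    // ltx header (and therefore the stored ledger header) reports vN+1.
    uint32_t maybeNewVersion = ltx.loadHeader().current().ledgerVersion;

    // Queue the checkpoint with the upgraded version so published history
    // stays consistent with the ledger header it describes.
    hm.maybeQueueHistoryCheckpoint(ledgerSeq, maybeNewVersion);
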
- HistoryArchiveState has(header.ledgerSeq, bl, - mApp.getConfig().NETWORK_PASSPHRASE); + HistoryArchiveState has; + if (protocolVersionStartsFrom( + header.ledgerVersion, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + auto hotBl = mApp.getBucketManager().getHotArchiveBucketList(); + has = HistoryArchiveState(header.ledgerSeq, bl, hotBl, + mApp.getConfig().NETWORK_PASSPHRASE); + } + else + { + has = HistoryArchiveState(header.ledgerSeq, bl, + mApp.getConfig().NETWORK_PASSPHRASE); + } mApp.getPersistentState().setState(PersistentState::kHistoryArchiveState, has.toString(), sess); @@ -1846,12 +1862,10 @@ LedgerManagerImpl::ledgerClosed( // there are two different assumptions in different parts of the // ledger-close path: // - In closeLedger we mostly treat the ledger as being on vN, eg. - // during - // tx apply and LCM construction. + // during tx apply and LCM construction. // - In the final stage, when we call ledgerClosed, we pass vN+1 - // because - // the upgrade completed and modified the ltx header, and we fish - // the protocol out of the ltx header + // because the upgrade completed and modified the ltx header, and we + // fish the protocol out of the ltx header // Before LedgerCloseMetaV1, this inconsistency was mostly harmless // since LedgerCloseMeta was not modified after the LTX header was // modified. However, starting with protocol 20, LedgerCloseMeta is diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h index 3538021a04..8afcfffc8a 100644 --- a/src/ledger/LedgerManagerImpl.h +++ b/src/ledger/LedgerManagerImpl.h @@ -224,10 +224,8 @@ class LedgerManagerImpl : public LedgerManager Database& getDatabase() override; - void startCatchup( - CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) override; + void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) override; void closeLedger(LedgerCloseData const& ledgerData, bool calledViaExternalize) override; diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index 9d3b4565c0..4a893beb5e 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -489,7 +489,7 @@ dumpStateArchivalStatistics(Config cfg) std::vector hashes; for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - HistoryStateBucket const& hsb = has.currentBuckets.at(i); + HistoryStateBucket const& hsb = has.currentBuckets.at(i); hashes.emplace_back(hexToBin256(hsb.curr)); hashes.emplace_back(hexToBin256(hsb.snap)); } @@ -892,7 +892,7 @@ catchup(Application::pointer app, CatchupConfiguration cc, try { - app->getLedgerManager().startCatchup(cc, archive, {}); + app->getLedgerManager().startCatchup(cc, archive); } catch (std::invalid_argument const&) { diff --git a/src/main/QueryServer.cpp b/src/main/QueryServer.cpp index 13feb0a77b..0a246826aa 100644 --- a/src/main/QueryServer.cpp +++ b/src/main/QueryServer.cpp @@ -6,8 +6,10 @@ #include "bucket/BucketSnapshotManager.h" #include "bucket/SearchableBucketList.h" #include "ledger/LedgerTxnImpl.h" +#include "ledger/LedgerTypeUtils.h" #include "util/Logging.h" #include "util/XDRStream.h" // IWYU pragma: keep +#include "util/types.h" #include #include @@ -54,7 +56,12 @@ namespace stellar { QueryServer::QueryServer(const std::string& address, unsigned short port, int maxClient, size_t threadPoolSize, - BucketSnapshotManager& bucketSnapshotManager) + BucketSnapshotManager& bucketSnapshotManager +#ifdef BUILD_TESTS + , + bool useMainThreadForTesting +#endif + ) : 
mServer(address, port, maxClient, threadPoolSize) , mBucketSnapshotManager(bucketSnapshotManager) { @@ -63,12 +70,28 @@ QueryServer::QueryServer(const std::string& address, unsigned short port, mServer.add404(std::bind(&QueryServer::notFound, this, _1, _2, _3)); addRoute("getledgerentryraw", &QueryServer::getLedgerEntryRaw); + addRoute("getledgerentry", &QueryServer::getLedgerEntry); - auto workerPids = mServer.start(); - for (auto pid : workerPids) +#ifdef BUILD_TESTS + if (useMainThreadForTesting) { - mBucketListSnapshots[pid] = std::move( - bucketSnapshotManager.copySearchableLiveBucketListSnapshot()); + mBucketListSnapshots[std::this_thread::get_id()] = + bucketSnapshotManager.copySearchableLiveBucketListSnapshot(); + mHotArchiveBucketListSnapshots[std::this_thread::get_id()] = + bucketSnapshotManager.copySearchableHotArchiveBucketListSnapshot(); + } + else +#endif + { + auto workerPids = mServer.start(); + for (auto pid : workerPids) + { + mBucketListSnapshots[pid] = + bucketSnapshotManager.copySearchableLiveBucketListSnapshot(); + mHotArchiveBucketListSnapshots[pid] = + bucketSnapshotManager + .copySearchableHotArchiveBucketListSnapshot(); + } } } @@ -190,4 +213,196 @@ QueryServer::getLedgerEntryRaw(std::string const& params, retStr = Json::FastWriter().write(root); return true; } + +// This query needs to load all the given ledger entries and their "state" +// (live, archived, evicted, new). This requires a loading entry and TTL from +// the live BucketList and then checking the Hot Archive for any keys we didn't +// find. We do three passes: +// 1. Load all keys from the live BucketList +// 2. For any Soroban keys not in the live BucketList, load them from the Hot +// Archive +// 3. Load TTL keys for any live Soroban entries found in 1. +bool +QueryServer::getLedgerEntry(std::string const& params, std::string const& body, + std::string& retStr) +{ + ZoneScoped; + Json::Value root; + + std::map> paramMap; + httpThreaded::server::server::parsePostParams(body, paramMap); + + auto keys = paramMap["key"]; + auto snapshotLedger = parseOptionalParam(paramMap, "ledgerSeq"); + + if (keys.empty()) + { + throw std::invalid_argument( + "Must specify ledger key in POST body: key="); + } + + // Get snapshots for both live and hot archive bucket lists + auto& liveBl = mBucketListSnapshots.at(std::this_thread::get_id()); + auto& hotArchiveBl = + mHotArchiveBucketListSnapshots.at(std::this_thread::get_id()); + + // orderedNotFoundKeys is a set of keys we have not yet found (not in live + // BucketList or in an archived state in the Hot Archive) + LedgerKeySet orderedNotFoundKeys; + for (auto const& key : keys) + { + LedgerKey k; + fromOpaqueBase64(k, key); + + // Check for TTL keys which are not allowed + if (k.type() == TTL) + { + retStr = "TTL keys are not allowed"; + return false; + } + + orderedNotFoundKeys.emplace(k); + } + + mBucketSnapshotManager.maybeCopyLiveAndHotArchiveSnapshots(liveBl, + hotArchiveBl); + + std::vector liveEntries; + std::vector archivedEntries; + uint32_t ledgerSeq = + snapshotLedger ? 
*snapshotLedger : liveBl->getLedgerSeq(); + root["ledgerSeq"] = ledgerSeq; + + auto liveEntriesOp = + liveBl->loadKeysFromLedger(orderedNotFoundKeys, ledgerSeq); + + // Return 404 if ledgerSeq not found + if (!liveEntriesOp) + { + retStr = "LedgerSeq not found"; + return false; + } + + liveEntries = std::move(*liveEntriesOp); + + // Remove keys found in live bucketList + for (auto const& le : liveEntries) + { + orderedNotFoundKeys.erase(LedgerEntryKey(le)); + } + + LedgerKeySet hotArchiveKeysToSearch; + for (auto const& lk : orderedNotFoundKeys) + { + if (isSorobanEntry(lk)) + { + hotArchiveKeysToSearch.emplace(lk); + } + } + + // Only query archive for remaining keys + if (!hotArchiveKeysToSearch.empty()) + { + auto archivedEntriesOp = + hotArchiveBl->loadKeysFromLedger(hotArchiveKeysToSearch, ledgerSeq); + if (!archivedEntriesOp) + { + retStr = "LedgerSeq not found"; + return false; + } + archivedEntries = std::move(*archivedEntriesOp); + } + + // Collect TTL keys for Soroban entries in the live BucketList + LedgerKeySet ttlKeys; + for (auto const& le : liveEntries) + { + if (isSorobanEntry(le.data)) + { + ttlKeys.emplace(getTTLKey(le)); + } + } + + std::vector ttlEntries; + if (!ttlKeys.empty()) + { + // We haven't updated the live snapshot so we will never not have the + // requested ledgerSeq and return nullopt. + ttlEntries = + std::move(liveBl->loadKeysFromLedger(ttlKeys, ledgerSeq).value()); + } + + std::unordered_map ttlMap; + for (auto const& ttlEntry : ttlEntries) + { + ttlMap.emplace(LedgerEntryKey(ttlEntry), ttlEntry); + } + + // Process live entries + for (auto const& le : liveEntries) + { + Json::Value entry; + entry["e"] = toOpaqueBase64(le); + + // Check TTL for Soroban entries + if (isSorobanEntry(le.data)) + { + auto ttlIter = ttlMap.find(getTTLKey(le)); + releaseAssertOrThrow(ttlIter != ttlMap.end()); + if (isLive(ttlIter->second, ledgerSeq)) + { + entry["state"] = "live"; + entry["ttl"] = ttlIter->second.data.ttl().liveUntilLedgerSeq; + } + else + { + entry["state"] = "archived"; + } + } + else + { + entry["state"] = "live"; + } + + root["entries"].append(entry); + } + + // Process archived entries - all are evicted since they come from hot + // archive + for (auto const& be : archivedEntries) + { + // If we get to this point, we know the key is not in the live + // BucketList, so if we get a DELETED or RESTORED entry, the entry is + // new wrt ledger state. + if (be.type() != HOT_ARCHIVE_ARCHIVED) + { + continue; + } + + auto const& le = be.archivedEntry(); + + // At this point we've "found" the key and know it's archived, so remove + // it from our search set + orderedNotFoundKeys.erase(LedgerEntryKey(le)); + + Json::Value entry; + entry["e"] = toOpaqueBase64(le); + entry["state"] = "evicted"; + root["entries"].append(entry); + } + + // Since we removed entries found in the live BucketList and archived + // entries found in the Hot Archive, any remaining keys must be new. 
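
The three passes above boil down to a small per-key classification. A purely illustrative helper (hypothetical, not part of the patch) that captures the state getLedgerEntry reports for each key:

    #include <string>

    // Hypothetical summary of getLedgerEntry's per-key state decision.
    std::string
    classifyEntryState(bool inLiveBucketList, bool isSorobanKey,
                       bool ttlIsLive, bool archivedInHotArchive)
    {
        if (inLiveBucketList)
        {
            // Non-Soroban entries carry no TTL and are always reported live.
            if (!isSorobanKey || ttlIsLive)
            {
                return "live";
            }
            return "archived"; // still in the live BucketList, TTL expired
        }
        if (archivedInHotArchive)
        {
            return "evicted"; // only reachable through the Hot Archive
        }
        // Not found anywhere (or present in the Hot Archive only as
        // DELETED/RESTORED): the key is new with respect to ledger state.
        return "new";
    }
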
+ for (auto const& key : orderedNotFoundKeys) + { + Json::Value entry; + entry["e"] = toOpaqueBase64(key); + entry["state"] = "new"; + root["entries"].append(entry); + } + + retStr = Json::FastWriter().write(root); + return true; +} } \ No newline at end of file diff --git a/src/main/QueryServer.h b/src/main/QueryServer.h index 10d88f6401..b45ba81713 100644 --- a/src/main/QueryServer.h +++ b/src/main/QueryServer.h @@ -29,6 +29,9 @@ class QueryServer std::unordered_map mBucketListSnapshots; + std::unordered_map + mHotArchiveBucketListSnapshots; + BucketSnapshotManager& mBucketSnapshotManager; bool safeRouter(HandlerRoute route, std::string const& params, @@ -39,14 +42,25 @@ class QueryServer void addRoute(std::string const& name, HandlerRoute route); +#ifdef BUILD_TESTS + public: +#endif // Returns raw LedgerKeys for the given keys from the Live BucketList. Does // not query other BucketLists or reason about archival. bool getLedgerEntryRaw(std::string const& params, std::string const& body, std::string& retStr); + bool getLedgerEntry(std::string const& params, std::string const& body, + std::string& retStr); + public: QueryServer(const std::string& address, unsigned short port, int maxClient, size_t threadPoolSize, - BucketSnapshotManager& bucketSnapshotManager); + BucketSnapshotManager& bucketSnapshotManager +#ifdef BUILD_TESTS + , + bool useMainThreadForTesting = false +#endif + ); }; -} \ No newline at end of file +} diff --git a/src/main/test/QueryServerTests.cpp b/src/main/test/QueryServerTests.cpp new file mode 100644 index 0000000000..ec0d26669d --- /dev/null +++ b/src/main/test/QueryServerTests.cpp @@ -0,0 +1,250 @@ +// Copyright 2025 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/BucketManager.h" +#include "bucket/test/BucketTestUtils.h" +#include "ledger/LedgerTxnImpl.h" +#include "ledger/LedgerTypeUtils.h" +#include "ledger/test/LedgerTestUtils.h" +#include "lib/catch.hpp" +#include "main/Application.h" +#include "main/Config.h" +#include "main/QueryServer.h" +#include "test/test.h" +#include "util/Math.h" +#include + +using namespace stellar; + +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION +// TODO: Better testing of errors, edge cases like an entry being in both live +// and archive bl, etc. +TEST_CASE("getledgerentry", "[queryserver]") +{ + VirtualClock clock; + auto cfg = getTestConfig(); + cfg.QUERY_SNAPSHOT_LEDGERS = 5; + + auto app = createTestApplication( + clock, cfg); + auto& lm = app->getLedgerManager(); + + // Query Server is disabled by default in cfg. Instead of enabling it, we're + // going to manage a version here manually so we can directly call functions + // and avoid sending network requests.
+ auto qServer = std::make_unique( + "127.0.0.1", // Address + 0, // port (0 = random) + 1, // maxClient + 1, // threadPoolSize + app->getBucketManager().getBucketSnapshotManager(), true); + + std::unordered_map liveEntryMap; + + // Map code/data lk -> ttl value + std::unordered_map liveTTLEntryMap; + std::unordered_map archivedEntryMap; + std::unordered_map evictedEntryMap; + + // Create some test entries + for (auto i = 0; i < 15; ++i) + { + auto lcl = app->getLedgerManager().getLastClosedLedgerNum(); + auto liveEntries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_DATA, CONTRACT_CODE, ACCOUNT}, 5); + + std::vector liveEntriesToInsert; + for (auto const& le : liveEntries) + { + if (isSorobanEntry(le.data)) + { + LedgerEntry ttl; + ttl.data.type(TTL); + ttl.data.ttl().keyHash = getTTLKey(le).ttl().keyHash; + + // Make half of the entries archived on the live BL + if (rand_flip()) + { + ttl.data.ttl().liveUntilLedgerSeq = lcl + 100; + } + else + { + ttl.data.ttl().liveUntilLedgerSeq = 0; + } + liveTTLEntryMap[LedgerEntryKey(le)] = + ttl.data.ttl().liveUntilLedgerSeq; + liveEntriesToInsert.push_back(ttl); + } + + liveEntriesToInsert.push_back(le); + liveEntryMap[LedgerEntryKey(le)] = le; + } + + auto archivedEntries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_DATA, CONTRACT_CODE}, 5); + for (auto const& le : archivedEntries) + { + archivedEntryMap[LedgerEntryKey(le)] = le; + } + + lm.setNextLedgerEntryBatchForBucketTesting({}, liveEntriesToInsert, {}); + lm.setNextArchiveBatchForBucketTesting(archivedEntries, {}, {}); + closeLedger(*app); + } + + // Lambda to build request body + auto buildRequestBody = + [](std::optional ledgerSeq, + std::vector const& keys) -> std::string { + std::string body; + if (ledgerSeq) + { + body = "ledgerSeq=" + std::to_string(*ledgerSeq); + } + + for (auto const& key : keys) + { + body += (body.empty() ? 
"" : "&") + std::string("key=") + + toOpaqueBase64(key); + } + return body; + }; + + // Lambda to check entry details in response + auto checkEntry = [](std::string const& retStr, LedgerEntry const& le, + std::optional expectedTTL, + uint32_t ledgerSeq) -> bool { + Json::Value root; + Json::Reader reader; + REQUIRE(reader.parse(retStr, root)); + REQUIRE(root.isMember("entries")); + REQUIRE(root.isMember("ledgerSeq")); + REQUIRE(root["ledgerSeq"].asUInt() == ledgerSeq); + + auto const& entries = root["entries"]; + for (auto const& entry : entries) + { + REQUIRE(entry.isMember("e")); + REQUIRE(entry.isMember("state")); + + LedgerEntry responseLE; + fromOpaqueBase64(responseLE, entry["e"].asString()); + if (responseLE == le) + { + std::string expectedState; + if (!isSorobanEntry(le.data)) + { + expectedState = "live"; + } + else + { + if (expectedTTL) + { + if (ledgerSeq >= *expectedTTL) + { + expectedState = "archived"; + } + else + { + expectedState = "live"; + } + } + else + { + expectedState = "evicted"; + } + } + + REQUIRE(entry["state"].asString() == expectedState); + if (isSorobanEntry(le.data) && expectedState == "live") + { + REQUIRE(entry.isMember("ttl")); + REQUIRE(entry["ttl"].asUInt() == *expectedTTL); + } + else + { + REQUIRE(!entry.isMember("ttl")); + } + + return true; + } + } + return false; + }; + + // Lambda to check new entry response + auto checkNewEntry = [](std::string const& retStr, LedgerKey const& key, + uint32_t ledgerSeq) -> bool { + Json::Value root; + Json::Reader reader; + REQUIRE(reader.parse(retStr, root)); + REQUIRE(root.isMember("entries")); + REQUIRE(root.isMember("ledgerSeq")); + REQUIRE(root["ledgerSeq"].asUInt() == ledgerSeq); + + auto const& entries = root["entries"]; + for (auto const& entry : entries) + { + REQUIRE(entry.isMember("e")); + REQUIRE(entry.isMember("state")); + REQUIRE(entry["state"].asString() == "new"); + + LedgerKey responseKey; + fromOpaqueBase64(responseKey, entry["e"].asString()); + if (responseKey == key) + { + REQUIRE(!entry.isMember("ttl")); + return true; + } + } + return false; + }; + + UnorderedSet seenKeys; + for (auto const& [lk, le] : liveEntryMap) + { + auto body = buildRequestBody(std::nullopt, {lk}); + std::string retStr; + std::string empty; + REQUIRE(qServer->getLedgerEntry(empty, body, retStr)); + + auto ttlIter = liveTTLEntryMap.find(lk); + std::optional expectedTTL = + ttlIter != liveTTLEntryMap.end() + ? 
std::make_optional(ttlIter->second) + : std::nullopt; + REQUIRE( + checkEntry(retStr, le, expectedTTL, lm.getLastClosedLedgerNum())); + + // Remove any duplicates we've already found + archivedEntryMap.erase(lk); + seenKeys.insert(lk); + } + + for (auto const& [lk, le] : archivedEntryMap) + { + auto body = buildRequestBody(std::nullopt, {lk}); + std::string retStr; + std::string empty; + REQUIRE(qServer->getLedgerEntry(empty, body, retStr)); + REQUIRE( + checkEntry(retStr, le, std::nullopt, lm.getLastClosedLedgerNum())); + seenKeys.insert(lk); + } + + // Now check for new entries + auto newKeys = LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {TRUSTLINE, CONTRACT_DATA, CONTRACT_CODE}, 5, seenKeys); + for (auto const& key : newKeys) + { + auto body = buildRequestBody(std::nullopt, {key}); + std::string retStr; + std::string empty; + REQUIRE(qServer->getLedgerEntry(empty, body, retStr)); + REQUIRE(checkNewEntry(retStr, key, lm.getLastClosedLedgerNum())); + } +} +#endif \ No newline at end of file diff --git a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json index c3c55557a0..4e995116f6 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "d4ac85b1db60e58b070999d97cc3e60881db5da799ff2a71e87c2a6db3978ad4", + "hash": "c4e89b27d71a10046b3993b1692d3941ed0139b190cfeaf42fc7ca1575de2726", "header": { "ledgerVersion": 23, - "previousLedgerHash": "114439ca9d61a89a909b9a5d2aa726e9be3ecd4a5b0f0cab36de604a5f643cc6", + "previousLedgerHash": "cd0e8831e112ba4d7ba52b8a295f2f5d9c922a4f32408a0bd902bf31988aa06c", "scpValue": { - "txSetHash": "31a57ddc07402ae0623b06b041a6a4196a19caac487491e0a5205c8a4f76e732", + "txSetHash": "1f7a881c1be1201af678467343ba1a2801c15183e4140c4b16ff6277cc7c26a2", "closeTime": 1451692800, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "ba83d21fab1c12875efbe5fe7bd9882247a5ca115a81896834d0a161df85c5258b1950831c3dc9da3241deee47b94594b21d8ef315b2f060dc66917f9fd1e609" + "signature": "5869d4582ee630594be8e7f1a2bbd20192dbaf635f945872d7fa02fb8b9d33e73d387860a12a628e3c98238e03551e15951b9e4990482c236a22c8c836eef50a" } } }, "txSetResultHash": "65b6fe91abfe43ed98fa2163f08fdf3f2f3231101bba05102521186c25a1cc4b", - "bucketListHash": "685227c142ea174d494b5efe8dd9c01aee683dddcbd75076b5acc8729a56e883", + "bucketListHash": "2c092b2b7db88c611a3a10b308ac925fb82eb1331204aaee84e247485b486e3b", "ledgerSeq": 28, "totalCoins": 1000000000000000000, "feePool": 804520, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "114439ca9d61a89a909b9a5d2aa726e9be3ecd4a5b0f0cab36de604a5f643cc6", + "previousLedgerHash": "cd0e8831e112ba4d7ba52b8a295f2f5d9c922a4f32408a0bd902bf31988aa06c", "phases": [ { "v": 0, diff --git a/src/testdata/ledger-close-meta-v1-protocol-23.json b/src/testdata/ledger-close-meta-v1-protocol-23.json index 5e8ede27a8..6fe4d2791f 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "ede8d13efde47058a47388532e2a6ce544f6744423a268ecf513aff86916283a", + "hash": "1a1722f149f5348813f73ff4c0cb45245224d43ebcb69b44e8290e7697b90793", "header": { "ledgerVersion": 23, - "previousLedgerHash": 
"87fdf0a3595bf4021274bb88fac521cf02060dd961b9fda38879b287a5418cb6", + "previousLedgerHash": "5263ba08cd1c7ea229e165999b8aaf27c9c99a2f25bed68deedf9fd729fb614b", "scpValue": { - "txSetHash": "df738c70c1acc6753d41ceea35716fd95888db1f36558af535bb2b8a78c856e3", + "txSetHash": "60b3ac298285f6dbe97be76298af5c11d10c1be4df942e719c12ce8b27213fc9", "closeTime": 0, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "95c4d776f015ad5e69167c6e47be677311b30efffab6617b7dd8f94de86b2f219011cd9bb908ec08d39227594fa4b2580cb73f65fa583ac06f03d0e7dae5dd08" + "signature": "2f16c8287011bb137d7f3d10758a418c1d7f25ce963d5fea61d4b57de6a9455653d890c4eb236273208daf8700b3bb5d8c7e76dc58f8cf0190b3e132f409030a" } } }, - "txSetResultHash": "f66233c106977a4cc148e019411ff6ddfaf76c337d004ed9a304a70407b161d0", - "bucketListHash": "bf090cb59a5f1fd97d083af9e132128ba69a84998e804d6f02fc34e82c9e4b9e", + "txSetResultHash": "249b974bacf8b5c4a8f0b5598194c1b9eca64af0b5c1506daa871c1533b6baac", + "bucketListHash": "f54692ac02d6c9d4715dc33ee72dd50277c43957c60106f4773e926a572cb20e", "ledgerSeq": 7, "totalCoins": 1000000000000000000, "feePool": 800, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "87fdf0a3595bf4021274bb88fac521cf02060dd961b9fda38879b287a5418cb6", + "previousLedgerHash": "5263ba08cd1c7ea229e165999b8aaf27c9c99a2f25bed68deedf9fd729fb614b", "phases": [ { "v": 0, @@ -188,43 +188,22 @@ "txProcessing": [ { "result": { - "transactionHash": "324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", + "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", "result": { - "feeCharged": 300, + "feeCharged": 100, "result": { - "code": "txFEE_BUMP_INNER_SUCCESS", - "innerResultPair": { - "transactionHash": "b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", - "result": { - "feeCharged": 200, - "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - }, - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - } - ] - }, - "ext": { - "v": 0 + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } } } - } + ] }, "ext": { "v": 0 @@ -235,13 +214,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 4, + "lastModifiedLedgerSeq": 5, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 400000000, - "seqNum": 17179869184, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989700, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -249,7 +228,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -265,9 +268,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, + "accountID": 
"GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -275,7 +278,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -299,61 +326,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], - "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 7, - "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], - "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 5, - "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836480, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -361,7 +336,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -377,9 +376,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836481, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -429,18 +428,43 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 0, - "limit": 100, - "flags": 1, + "balance": 399999900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -454,18 +478,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": 
"GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + "balance": 400000900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -473,28 +522,49 @@ "v": 0 } } - } - ] - }, - { - "changes": [ + }, { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + "type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, @@ -508,18 +578,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 100, - "limit": 100, - "flags": 1, + "type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999988600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, @@ -538,22 +633,43 @@ }, { "result": { - "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", + "transactionHash": "324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", "result": { - "feeCharged": 100, + "feeCharged": 300, "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } + "code": "txFEE_BUMP_INNER_SUCCESS", + "innerResultPair": { + "transactionHash": "b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", + "result": { + "feeCharged": 200, + "result": { + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } + } + }, + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": 
"PAYMENT_SUCCESS" + } + } + } + ] + }, + "ext": { + "v": 0 } } - ] + } }, "ext": { "v": 0 @@ -564,13 +680,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 5, + "lastModifiedLedgerSeq": 4, "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989700, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 400000000, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -578,31 +694,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -618,9 +710,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -628,57 +720,85 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + } + ], + "txApplyProcessing": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "txChangesBefore": [ + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 7, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 7, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], + "ext": { + "v": 0 } } + }, + "ext": { + "v": 0 } } }, - "ext": { - "v": 0 - } - } - } - ], - "txApplyProcessing": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "txChangesBefore": [ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 7, + "lastModifiedLedgerSeq": 5, "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836480, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -686,31 +806,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - 
"seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -726,9 +822,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836481, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -778,43 +874,18 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 399999900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 0, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -828,43 +899,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 400000900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -872,49 +918,28 @@ "v": 0 } } - }, + } + ] + }, + { + "changes": [ { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "type": "TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -928,43 +953,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999988600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "type": 
"TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 100, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } },