Z/testing suite #1442

Draft
wants to merge 15 commits into main
9 changes: 6 additions & 3 deletions cmd/config.go
@@ -1,13 +1,14 @@
package cmd

import (
"github.com/deso-protocol/core/lib"
"github.com/golang/glog"
"github.com/spf13/viper"
"net/url"
"os"
"path/filepath"
"strings"

"github.com/deso-protocol/core/lib"
"github.com/golang/glog"
"github.com/spf13/viper"
)

type Config struct {
@@ -79,6 +80,7 @@ type Config struct {
LogDBSummarySnapshots bool
DatadogProfiler bool
TimeEvents bool
NoLogToStdErr bool

// State Syncer
StateChangeDir string
@@ -192,6 +194,7 @@ func LoadConfig() *Config {
config.LogDBSummarySnapshots = viper.GetBool("log-db-summary-snapshots")
config.DatadogProfiler = viper.GetBool("datadog-profiler")
config.TimeEvents = viper.GetBool("time-events")
config.NoLogToStdErr = false

// State Syncer
config.StateChangeDir = viper.GetString("state-change-dir")
5 changes: 3 additions & 2 deletions cmd/node.go
@@ -4,14 +4,15 @@ import (
"encoding/hex"
"flag"
"fmt"
"github.com/deso-protocol/go-deadlock"
"net"
"os"
"os/signal"
"sync"
"syscall"
"time"

"github.com/deso-protocol/go-deadlock"

"github.com/DataDog/datadog-go/v5/statsd"
"github.com/btcsuite/btcd/addrmgr"
"github.com/btcsuite/btcd/wire"
@@ -66,7 +67,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) {
flag.Set("log_dir", node.Config.LogDirectory)
flag.Set("v", fmt.Sprintf("%d", node.Config.GlogV))
flag.Set("vmodule", node.Config.GlogVmodule)
flag.Set("alsologtostderr", "true")
flag.Set("alsologtostderr", fmt.Sprintf("%t", !node.Config.NoLogToStdErr))
flag.Parse()
glog.CopyStandardLogTo("INFO")
node.runningMutex.Lock()
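Note on the new logging knob: LoadConfig currently hard-codes config.NoLogToStdErr = false, so glog's alsologtostderr still defaults to true and behavior is unchanged for normal nodes; only code that constructs a Config directly (such as the tests) can silence stderr logging. If the knob is meant to be operator-settable, the natural wiring would be a viper-backed option. A minimal sketch, assuming a hypothetical --no-log-to-stderr flag name that this PR does not register:

// LoadConfig (sketch): "no-log-to-stderr" is a hypothetical flag name, not part of this PR.
config.NoLogToStdErr = viper.GetBool("no-log-to-stderr")

// node.Start then inverts the setting when configuring glog (this line is taken from the diff above):
flag.Set("alsologtostderr", fmt.Sprintf("%t", !node.Config.NoLogToStdErr))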
1 change: 1 addition & 0 deletions integration_testing/tools.go
@@ -82,6 +82,7 @@ func _generateConfig(t *testing.T, config *cmd.Config, port uint32, dataDir stri
config.ConnectIPs = []string{}
config.PrivateMode = true
config.GlogV = 0
config.NoLogToStdErr = false
config.GlogVmodule = "*bitcoin_manager*=0,*balance*=0,*view*=0,*frontend*=0,*peer*=0,*addr*=0,*network*=0,*utils*=0,*connection*=0,*main*=0,*server*=0,*mempool*=0,*miner*=0,*blockchain*=0"
config.MaxInboundPeers = maxPeers
config.TargetOutboundPeers = maxPeers
1 change: 1 addition & 0 deletions lib/block_view_association.go
@@ -3,6 +3,7 @@ package lib
import (
"bytes"
"fmt"

"github.com/golang/glog"
"github.com/pkg/errors"
)
6 changes: 5 additions & 1 deletion lib/block_view_flush.go
@@ -2,6 +2,7 @@ package lib

import (
"fmt"
"github.com/deso-protocol/uint256"
"reflect"

"github.com/btcsuite/btcd/btcec/v2"
@@ -948,9 +949,12 @@ func (bav *UtxoView) _flushDAOCoinBalanceEntriesToDbWithTxn(txn *badger.Txn, blo
balanceKey, computedBalanceKey)
}

// Either a zero entry or an isDeleted annotation results in a deletion.
isDeleted := balanceEntry.BalanceNanos.Eq(uint256.NewInt(0)) || balanceEntry.isDeleted

// Delete the existing mappings in the db for this balance key. They will be re-added
// if the corresponding entry in memory has isDeleted=false.
if err := DBDeleteBalanceEntryMappingsWithTxn(txn, bav.Snapshot, &(balanceKey.HODLerPKID), &(balanceKey.CreatorPKID), true, bav.EventManager, balanceEntry.isDeleted); err != nil {
if err := DBDeleteBalanceEntryMappingsWithTxn(txn, bav.Snapshot, &(balanceKey.HODLerPKID), &(balanceKey.CreatorPKID), true, bav.EventManager, isDeleted); err != nil {

return errors.Wrapf(
err, "_flushDAOCoinBalanceEntriesToDbWithTxn: Problem deleting mappings "+
4 changes: 2 additions & 2 deletions lib/block_view_types.go
@@ -5525,7 +5525,7 @@ func DecodeExtraData(rr io.Reader) (map[string][]byte, error) {
}
_, err = io.ReadFull(rr, keyBytes)
if err != nil {
return nil, fmt.Errorf("DecodeExtraData: Problem reading key #{ii}")
return nil, fmt.Errorf("DecodeExtraData: Problem reading key: %v", err)
}

// Convert the key to a string and check if it already exists in the map.
@@ -5623,7 +5623,7 @@ func DecodeMapStringUint64(rr *bytes.Reader) (map[string]uint64, error) {
}
_, err = io.ReadFull(rr, keyBytes)
if err != nil {
return nil, fmt.Errorf("DecodeExtraData: Problem reading key #{ii}")
return nil, fmt.Errorf("DecodeExtraData: Problem reading key %v", err)
}

// Convert the key to a string and check if it already exists in the map.
3 changes: 2 additions & 1 deletion lib/constants.go
@@ -928,9 +928,10 @@ func (params *DeSoParams) EnableRegtest(isAcceleratedRegtest bool) {
params.DefaultEpochDurationNumBlocks = uint64(5)
params.DefaultBlockProductionIntervalMillisecondsPoS = 1000 // 1s
params.DefaultTimeoutIntervalMillisecondsPoS = 2000 // 2s
fmt.Println("Regtest mode enabled with accelerated regtest")
newSeedBalance := &DeSoOutput{
PublicKey: MustBase58CheckDecode("tBCKVERmG9nZpHTk2AVPqknWc1Mw9HHAnqrTpW1RnXpXMQ4PsQgnmV"),
AmountNanos: 1e14,
AmountNanos: 1e18,
}

params.SeedBalances = append(params.SeedBalances, newSeedBalance)
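For scale: assuming the usual 1e9 nanos per DESO unit, the regtest seed balance jumps from 1e14 nanos (100,000 DESO) to 1e18 nanos (1,000,000,000 DESO), which still fits comfortably within a uint64 AmountNanos (max roughly 1.8e19).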
50 changes: 37 additions & 13 deletions lib/db_utils.go
@@ -1108,12 +1108,23 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve
var ancestralValue []byte
var getError error

isCoreState := isCoreStateKey(key)

// If snapshot was provided, we will need to load the current value of the record
// so that we can later write it in the ancestral record. We first lookup cache.
if isState {
// We check if we've already read this key and stored it in the cache.
// Otherwise, we fetch the current value of this record from the DB.
ancestralValue, getError = DBGetWithTxn(txn, snap, key)
if isState || (isCoreState && eventManager != nil && eventManager.isMempoolManager) {

// When we are syncing state from the mempool, we need to read the last committed view txn.
// This is because we will be querying the badger DB, and during the flush loop, every entry that is
// updated will first be deleted. In order to counteract this, we reference a badger transaction that was
// initiated before the flush loop started.
if eventManager != nil && eventManager.isMempoolManager && eventManager.lastCommittedViewTxn != nil {
ancestralValue, getError = DBGetWithTxn(eventManager.lastCommittedViewTxn, nil, key)
} else {
// We check if we've already read this key and stored it in the cache.
// Otherwise, we fetch the current value of this record from the DB.
ancestralValue, getError = DBGetWithTxn(txn, snap, key)
}

// If there is some error with the DB read, other than non-existent key, we return.
if getError != nil && getError != badger.ErrKeyNotFound {
@@ -1204,26 +1215,39 @@ func DBDeleteWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, eventManager *
var getError error
isState := snap != nil && snap.isState(key)

isCoreState := isCoreStateKey(key)

// If snapshot was provided, we will need to load the current value of the record
// so that we can later write it in the ancestral record. We first lookup cache.
if isState {
// We check if we've already read this key and stored it in the cache.
// Otherwise, we fetch the current value of this record from the DB.
ancestralValue, getError = DBGetWithTxn(txn, snap, key)
// If the key doesn't exist then there is no point in deleting this entry.
if getError == badger.ErrKeyNotFound {
return nil
if isState || (isCoreState && eventManager != nil && eventManager.isMempoolManager) {
// When we are syncing state from the mempool, we need to read the last committed view txn.
// This is because we will be querying the badger DB, and during the flush loop, every entry that is
// updated will first be deleted. In order to counteract this, we reference a badger transaction that was
// initiated before the flush loop started.
if eventManager != nil && eventManager.isMempoolManager && eventManager.lastCommittedViewTxn != nil {
ancestralValue, getError = DBGetWithTxn(eventManager.lastCommittedViewTxn, snap, key)
} else {
// We check if we've already read this key and stored it in the cache.
// Otherwise, we fetch the current value of this record from the DB.
ancestralValue, getError = DBGetWithTxn(txn, snap, key)
// If the key doesn't exist then there is no point in deleting this entry.
if getError == badger.ErrKeyNotFound {
return nil
}
}

// If there is some error with the DB read, other than non-existent key, we return.
if getError != nil {
if getError != nil && getError != badger.ErrKeyNotFound {
return errors.Wrapf(getError, "DBDeleteWithTxn: problem checking for DB record "+
"with key: %v", key)
}
}

err := txn.Delete(key)
if err != nil {
if err != nil && err == badger.ErrKeyNotFound && eventManager != nil && eventManager.isMempoolManager {
// If the key doesn't exist then there is no point in deleting this entry.
return nil
} else if err != nil {
return errors.Wrapf(err, "DBDeleteWithTxn: Problem deleting record "+
"from DB with key: %v", key)
}
10 changes: 8 additions & 2 deletions lib/event_manager.go
@@ -1,6 +1,9 @@
package lib

import "github.com/google/uuid"
import (
"github.com/dgraph-io/badger/v3"
"github.com/google/uuid"
)

type TransactionEventFunc func(event *TransactionEvent)
type StateSyncerOperationEventFunc func(event *StateSyncerOperationEvent)
@@ -59,7 +62,10 @@ type EventManager struct {
blockCommittedHandlers []BlockEventFunc
blockAcceptedHandlers []BlockEventFunc
snapshotCompletedHandlers []SnapshotCompletedEventFunc
isMempoolManager bool
// A transaction used by the state syncer mempool routine to reference the state of the badger db
// prior to flushing mempool transactions. This represents the last committed view of the db.
lastCommittedViewTxn *badger.Txn
isMempoolManager bool
}

func NewEventManager() *EventManager {
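The new lastCommittedViewTxn field only helps if something opens the read-only transaction before the mempool flush loop starts and discards it afterwards; that wiring lives in the state change syncer and is not part of this diff. A minimal sketch of the expected lifecycle, with illustrative function names (beginMempoolFlush and endMempoolFlush are assumptions, not APIs added by this PR):

// Sketch only, assumed to live in package lib next to EventManager.
import "github.com/dgraph-io/badger/v3"

// Open a read-only badger transaction pinned to the last committed state before the
// mempool flush loop starts. Reads routed through this txn in DBSetWithTxn and
// DBDeleteWithTxn are unaffected by the delete-then-rewrite churn of the flush loop.
func beginMempoolFlush(db *badger.DB, em *EventManager) {
	em.isMempoolManager = true
	em.lastCommittedViewTxn = db.NewTransaction(false) // false = read-only
}

// Discard the transaction once the flush loop finishes so badger can release the
// version it was pinning, and clear the reference so later flushes fall back to
// the normal read path.
func endMempoolFlush(em *EventManager) {
	if em.lastCommittedViewTxn != nil {
		em.lastCommittedViewTxn.Discard()
		em.lastCommittedViewTxn = nil
	}
}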
12 changes: 6 additions & 6 deletions lib/server.go
@@ -165,7 +165,7 @@ type Server struct {
// It can be used to find computational bottlenecks.
timer *Timer

stateChangeSyncer *StateChangeSyncer
StateChangeSyncer *StateChangeSyncer
// DbMutex protects the badger database from concurrent access when it's being closed & re-opened.
// This is necessary because the database is closed & re-opened when the node finishes hypersyncing in order
// to change the database options from Default options to Performance options.
@@ -494,7 +494,7 @@ func NewServer(
}

if stateChangeSyncer != nil {
srv.stateChangeSyncer = stateChangeSyncer
srv.StateChangeSyncer = stateChangeSyncer
}

// The same timesource is used in the chain data structure and in the connection
@@ -566,8 +566,8 @@
_connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP,
_peerConnectionRefreshIntervalMillis, _minFeeRateNanosPerKB, nodeServices)

if srv.stateChangeSyncer != nil {
srv.stateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height)
if srv.StateChangeSyncer != nil {
srv.StateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height)
}

// Create a mempool to store transactions until they're ready to be mined into
@@ -3330,8 +3330,8 @@ func (srv *Server) Start() {
}

// Initialize state syncer mempool job, if needed.
if srv.stateChangeSyncer != nil {
srv.stateChangeSyncer.StartMempoolSyncRoutine(srv)
if srv.StateChangeSyncer != nil {
srv.StateChangeSyncer.StartMempoolSyncRoutine(srv)
}

// Start the network manager's internal event loop to open and close connections to peers.
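Exporting stateChangeSyncer as StateChangeSyncer lets code outside package lib, such as the integration testing suite, reach the syncer on a running server. A minimal usage sketch, assuming the test holds a reference to the node's Server instance (identifiers outside this diff are assumptions):

// Sketch only: read the syncer's current block height from a test that owns the running node.
height := node.Server.StateChangeSyncer.BlockHeight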