package backfill

import (
	"context"
	"fmt"
	"math/big"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common"
	ethTypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/jpillora/backoff"
	"github.com/synapsecns/sanguine/ethergo/util"
	indexerconfig "github.com/synapsecns/sanguine/services/explorer/config/indexer"
	"github.com/synapsecns/sanguine/services/explorer/consumer/fetcher"
	"github.com/synapsecns/sanguine/services/explorer/consumer/parser"
	"github.com/synapsecns/sanguine/services/explorer/db"
	"golang.org/x/sync/errgroup"
)

// ChainBackfiller is an explorer backfiller for a chain.
type ChainBackfiller struct {
// consumerDB is the database that the backfiller will use to store the events.
consumerDB db.ConsumerDB
// bridgeParser is the parser to use to parse bridge events.
bridgeParser *parser.BridgeParser
// swapParsers is a map from contract address -> parser.
swapParsers map[common.Address]*parser.SwapParser
// messageBusParser is the parser to use to parse message bus events.
messageBusParser *parser.MessageBusParser
// cctpParser is the parser to use to parse cctp events.
cctpParser *parser.CCTPParser
// rfqParser is the parser to use to parse rfq events.
rfqParser *parser.RFQParser
// Fetcher is the Fetcher to use to fetch logs.
Fetcher fetcher.ScribeFetcher
// chainConfig is the chain config for the chain.
chainConfig indexerconfig.ChainConfig
}

type contextKey string

const (
	chainKey contextKey = "chainID"
)

// NewChainBackfiller creates a new backfiller for a chain.
func NewChainBackfiller(consumerDB db.ConsumerDB, bridgeParser *parser.BridgeParser, swapParsers map[common.Address]*parser.SwapParser, messageBusParser *parser.MessageBusParser, cctpParser *parser.CCTPParser, rfqParser *parser.RFQParser, fetcher fetcher.ScribeFetcher, chainConfig indexerconfig.ChainConfig) *ChainBackfiller {
return &ChainBackfiller{
consumerDB: consumerDB,
bridgeParser: bridgeParser,
swapParsers: swapParsers,
messageBusParser: messageBusParser,
cctpParser: cctpParser,
rfqParser: rfqParser,
Fetcher: fetcher,
chainConfig: chainConfig,
}
}
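
// A minimal usage sketch (illustrative only: consumerDB, the parsers,
// scribeFetcher, and chainCfg are assumed to have been constructed elsewhere):
//
//	backfiller := NewChainBackfiller(consumerDB, bridgeParser, swapParsers,
//		messageBusParser, cctpParser, rfqParser, scribeFetcher, chainCfg)
//	// One-shot backfill; pass livefill=true and a refresh rate to poll continuously.
//	if err := backfiller.Backfill(ctx, false, 30); err != nil {
//		// handle the error
//	}
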
// Backfill fetches logs from the GraphQL database, parses them, and stores them in the consumer database.
// When livefill is true, each contract is re-backfilled continuously, waiting refreshRate seconds between passes.
//
//nolint:cyclop,gocognit
func (c *ChainBackfiller) Backfill(ctx context.Context, livefill bool, refreshRate int) (err error) {
chainCtx := context.WithValue(ctx, chainKey, fmt.Sprintf("%d", c.chainConfig.ChainID))
contractsGroup, contractCtx := errgroup.WithContext(chainCtx)
if !livefill {
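		// One-shot backfill: process every configured contract concurrently and return once each has caught up to the last indexed block.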
for i := range c.chainConfig.Contracts {
contract := c.chainConfig.Contracts[i]
contractsGroup.Go(func() error {
err := c.backfillContractLogs(contractCtx, contract)
if err != nil {
return fmt.Errorf("could not backfill contract logs: %w", err)
}
return nil
})
}
} else {
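		// Livefill: keep re-backfilling each contract indefinitely, backing off on errors.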
for i := range c.chainConfig.Contracts {
contract := c.chainConfig.Contracts[i]
contractsGroup.Go(func() error {
b := &backoff.Backoff{
Factor: 2,
Jitter: true,
Min: 1 * time.Second,
Max: 3 * time.Second,
}
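				// A zero timeout fires the first tick immediately; after each successful pass it is reset to the refresh rate below.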
timeout := time.Duration(0)
for {
select {
case <-chainCtx.Done():
logger.Errorf("livefill of contract %s on chain %d failed: %v", contract.Address, c.chainConfig.ChainID, chainCtx.Err())
return fmt.Errorf("livefill of contract %s on chain %d failed: %w", contract.Address, c.chainConfig.ChainID, chainCtx.Err())
case <-time.After(timeout):
err := c.backfillContractLogs(contractCtx, contract)
if err != nil {
timeout = b.Duration()
logger.Warnf("could not livefill contract %s on chain %d, retrying %v", contract.Address, c.chainConfig.ChainID, err)
continue
}
b.Reset()
timeout = time.Duration(refreshRate) * time.Second
logger.Infof("processed range for contract %s on chain %d, continuing to livefill in %d seconds - refresh rate %d ", contract.Address, c.chainConfig.ChainID, timeout, refreshRate)
}
}
})
}
}
if err := contractsGroup.Wait(); err != nil {
logger.Errorf("error backfilling chain %d completed %v", c.chainConfig.ChainID, err)
return fmt.Errorf("error while backfilling chain %d: %w", c.chainConfig.ChainID, err)
}
return nil
}

// makeEventParser returns a parser for a contract using its config.
// In the event one is not present, this function will return an error.
func (c *ChainBackfiller) makeEventParser(contract indexerconfig.ContractConfig) (eventParser parser.Parser, err error) {
contractType, err := indexerconfig.ContractTypeFromString(contract.ContractType)
if err != nil {
return nil, fmt.Errorf("could not create event parser for unknown contract type: %s", contract.ContractType)
}
	switch contractType {
	case indexerconfig.BridgeContractType:
		eventParser = c.bridgeParser
	case indexerconfig.SwapContractType, indexerconfig.MetaSwapContractType:
		eventParser = c.swapParsers[common.HexToAddress(contract.Address)]
	case indexerconfig.MessageBusContractType:
		eventParser = c.messageBusParser
	case indexerconfig.CCTPContractType:
		eventParser = c.cctpParser
	case indexerconfig.RFQContractType:
		eventParser = c.rfqParser
	default:
		return nil, fmt.Errorf("no parser configured for contract type: %s", contract.ContractType)
	}
return eventParser, nil
}

// backfillContractLogs creates a backfiller for a given contract with an independent context.
//
//nolint:cyclop,gocognit
func (c *ChainBackfiller) backfillContractLogs(parentCtx context.Context, contract indexerconfig.ContractConfig) (err error) {
// make the event parser
eventParser, err := c.makeEventParser(contract)
if err != nil {
return err
}
startHeight := uint64(contract.StartBlock)
// Set start block to -1 to trigger backfill from last block stored by explorer,
// otherwise backfilling will begin at the block number specified in the config file.
if contract.StartBlock < 0 {
startHeight, err = c.consumerDB.GetLastStoredBlock(parentCtx, c.chainConfig.ChainID, contract.Address)
if err != nil {
return fmt.Errorf("could not get last block number: %w, %s", err, contract.ContractType)
}
}
var endHeight uint64
err = c.retryWithBackoff(parentCtx, func(ctx context.Context) error {
// TODO change to get last unconfirmed block
		endHeight, err = c.Fetcher.FetchLastIndexed(ctx, c.chainConfig.ChainID, contract.Address)
if err != nil {
return fmt.Errorf("could not get last indexed height, %w", err)
}
return nil
})
if err != nil {
return fmt.Errorf("could not get last indexed for contract %s: %w, %v", contract.Address, err, c.chainConfig)
}
currentHeight := startHeight
// Iterate over all blocks and fetch logs with the current contract address.
for currentHeight <= endHeight {
// Create context for backfilling chunks
g, groupCtx := errgroup.WithContext(parentCtx)
chunkStart := currentHeight
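		// Each outer chunk covers MaxGoroutines sub-chunks of FetchBlockIncrement blocks, fetched concurrently below.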
chunkEnd := currentHeight + (c.chainConfig.FetchBlockIncrement-1)*uint64(c.chainConfig.MaxGoroutines)
if chunkEnd > endHeight {
chunkEnd = endHeight
}
iterator := util.NewChunkIterator(big.NewInt(int64(chunkStart)), big.NewInt(int64(chunkEnd)), int(c.chainConfig.FetchBlockIncrement)-1, true)
for subChunk := iterator.NextChunk(); subChunk != nil; subChunk = iterator.NextChunk() {
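			// Capture the loop variable so each goroutine reads its own sub-chunk (required for Go versions before 1.22).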
chunkVar := subChunk
g.Go(func() error {
b := &backoff.Backoff{
Factor: 2,
Jitter: true,
Min: 1 * time.Second,
Max: 3 * time.Second,
}
timeout := time.Duration(0)
for {
select {
case <-groupCtx.Done():
return fmt.Errorf("context canceled: %w", groupCtx.Err())
case <-time.After(timeout):
rangeEnd := chunkVar.EndBlock.Uint64()
// Fetch the logs from Scribe.
logs, err := c.Fetcher.FetchLogsInRange(groupCtx, c.chainConfig.ChainID, chunkVar.StartBlock.Uint64(), rangeEnd, common.HexToAddress(contract.Address))
if err != nil {
timeout = b.Duration()
logger.Warnf("could not fetch logs for chain %d: %v. Retrying in %s", c.chainConfig.ChainID, err, timeout)
continue
}
parsedLogs, err := ProcessLogs(groupCtx, logs, c.chainConfig.ChainID, eventParser)
if err != nil {
timeout = b.Duration()
logger.Warnf("could not process logs for chain %d: %s", c.chainConfig.ChainID, err)
continue
}
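						// Hand storage off to the same errgroup so a store failure propagates to g.Wait().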
if len(parsedLogs) > 0 {
g.Go(func() error {
return c.storeParsedLogs(groupCtx, parsedLogs)
})
}
return nil
}
}
})
}
if err := g.Wait(); err != nil {
return fmt.Errorf("error while backfilling chain %d: %w", c.chainConfig.ChainID, err)
}
logger.Infof("backfilling contract %s chunk completed, %d to %d", contract.Address, chunkStart, chunkEnd)
// Store the last block in clickhouse
err = c.retryWithBackoff(parentCtx, func(ctx context.Context) error {
			err = c.consumerDB.StoreLastBlock(ctx, c.chainConfig.ChainID, chunkEnd, contract.Address)
if err != nil {
return fmt.Errorf("error storing last block, %w", err)
}
return nil
})
if err != nil {
logger.Errorf("could not store last block for chain %d: %s %d, %s, %s", c.chainConfig.ChainID, err, chunkEnd, contract.Address, contract.ContractType)
return fmt.Errorf("could not store last block for chain %d: %w", c.chainConfig.ChainID, err)
}
currentHeight = chunkEnd + 1
}
return nil
}

// ProcessLogs parses the given logs with the supplied event parser, retrying with backoff when parsing fails, and returns the parsed events.
//
//nolint:gocognit,cyclop
func ProcessLogs(ctx context.Context, logs []ethTypes.Log, chainID uint32, eventParser parser.Parser) (parsedLogs []interface{}, _ error) {
b := &backoff.Backoff{
Factor: 2,
Jitter: true,
Min: 1 * time.Second,
Max: 10 * time.Second,
}
timeout := time.Duration(0)
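	// logIdx is a cursor into logs; it only advances after the current log is parsed or deliberately skipped.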
logIdx := 0
for {
select {
case <-ctx.Done():
return parsedLogs, fmt.Errorf("context canceled: %w", ctx.Err())
case <-time.After(timeout):
if logIdx >= len(logs) {
return parsedLogs, nil
}
parsedLog, err := eventParser.Parse(ctx, logs[logIdx], chainID)
			if err != nil || parsedLog == nil {
				// TODO: this should really, REALLY use errors.Is and wrap the underlying error
				switch {
				case err == nil:
					// The parser returned neither an event nor an error; skip the log rather than appending a nil event.
					logIdx++
					continue
				case strings.Contains(err.Error(), parser.ErrUnknownTopic):
					// Unknown topics are expected for events this parser does not handle; warn and skip the log.
					logger.Warnf("could not parse log (ErrUnknownTopic) %d, %s %s blocknumber: %d, %s", chainID, logs[logIdx].TxHash, logs[logIdx].Address, logs[logIdx].BlockNumber, err)
					logIdx++
					continue
				default: // retry
					logger.Errorf("could not parse log %d, %s blocknumber: %d, %s", chainID, logs[logIdx].Address, logs[logIdx].BlockNumber, err)
					timeout = b.Duration()
					continue
				}
			}
parsedLogs = append(parsedLogs, parsedLog)
logIdx++
			// Reset the backoff after each successfully parsed log to prevent bloated backoffs.
b.Reset()
timeout = time.Duration(0)
}
}
}

// storeParsedLogs stores parsed events in the consumer database, retrying with backoff until the write succeeds or the context is canceled.
func (c *ChainBackfiller) storeParsedLogs(ctx context.Context, parsedEvents []interface{}) error {
b := &backoff.Backoff{
Factor: 2,
Jitter: true,
Min: 3 * time.Millisecond,
Max: 2 * time.Second,
}
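	// Retry the store with backoff until it succeeds or the context is canceled.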
timeout := time.Duration(0)
for {
select {
case <-ctx.Done():
return fmt.Errorf("context canceled while storing events: %w", ctx.Err())
case <-time.After(timeout):
err := c.consumerDB.StoreEvents(ctx, parsedEvents)
if err != nil {
logger.Errorf("Error storing events: %v", err)
timeout = b.Duration()
continue
}
return nil
}
}
}

// maxAttempt is the maximum number of attempts retryWithBackoff makes before giving up.
const maxAttempt = 20

// retryableFunc is a function that retryWithBackoff can retry.
type retryableFunc func(ctx context.Context) error

// retryWithBackoff retries doFunc with exponential backoff until it succeeds,
// the context is canceled, or maxAttempt attempts have been made.
func (c *ChainBackfiller) retryWithBackoff(ctx context.Context, doFunc retryableFunc) error {
b := &backoff.Backoff{
Factor: 2,
Jitter: true,
Min: 1 * time.Second,
Max: 3 * time.Second,
}
timeout := time.Duration(0)
attempts := 0
for attempts < maxAttempt {
select {
case <-ctx.Done():
return fmt.Errorf("%w while retrying", ctx.Err())
case <-time.After(timeout):
err := doFunc(ctx)
if err != nil {
timeout = b.Duration()
attempts++
} else {
return nil
}
}
}
return fmt.Errorf("max attempts reached while retrying")
}