diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3e405bf..d63e920 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,6 +16,8 @@ jobs: style: name: Check Style runs-on: ubuntu-latest + env: + SQLX_OFFLINE: true steps: - uses: actions/checkout@v3 - uses: actions/cache@v2 @@ -31,6 +33,7 @@ jobs: components: rustfmt # - run: cargo fmt -- --check + - run: cargo check --all-targets - run: cargo clippy -- --deny warnings tests: @@ -74,9 +77,19 @@ jobs: brew install docker-buildx - name: Test docker run: docker run hello-world + - name: Install sqlx-cli + run: cargo install --version='~0.7' sqlx-cli --no-default-features --features rustls,mysql + - name: Migrate database + # linux: mysql-client, macos: brew install mysql + run: | + sudo apt-get install mysql-client -y + ./scripts/init_db.sh # - name: Check run: cargo check + # To re-sync run `cargo sqlx prepare` ref: https://github.com/launchbadge/sqlx/blob/main/sqlx-cli/README.md#enable-building-in-offline-mode-with-query + - name: Ensure sqlx-offline data is synced + run: cargo sqlx prepare --check - name: Test run: cargo test -- --test-threads=1 --nocapture @@ -119,6 +132,8 @@ jobs: docs: name: Docs runs-on: ubuntu-latest + env: + SQLX_OFFLINE: true steps: - uses: actions/checkout@v3 - uses: actions/cache@v2 diff --git a/.sqlx/query-1c988e58e1a28c631098f070e75230d0b595dafc7fac2ea2f16144749220539f.json b/.sqlx/query-1c988e58e1a28c631098f070e75230d0b595dafc7fac2ea2f16144749220539f.json new file mode 100644 index 0000000..9dd5a5d --- /dev/null +++ b/.sqlx/query-1c988e58e1a28c631098f070e75230d0b595dafc7fac2ea2f16144749220539f.json @@ -0,0 +1,12 @@ +{ + "db_name": "MySQL", + "query": "INSERT INTO anchor_block (anchor_block_json, block_number) VALUES (?, ?)", + "describe": { + "columns": [], + "parameters": { + "Right": 2 + }, + "nullable": [] + }, + "hash": "1c988e58e1a28c631098f070e75230d0b595dafc7fac2ea2f16144749220539f" +} diff --git a/.sqlx/query-1f880a26afcee6a90f9320f58fd91f1663c23cfd81d1942caa53365b4c7e2859.json b/.sqlx/query-1f880a26afcee6a90f9320f58fd91f1663c23cfd81d1942caa53365b4c7e2859.json new file mode 100644 index 0000000..4d692ec --- /dev/null +++ b/.sqlx/query-1f880a26afcee6a90f9320f58fd91f1663c23cfd81d1942caa53365b4c7e2859.json @@ -0,0 +1,25 @@ +{ + "db_name": "MySQL", + "query": "SELECT post_data_nonce FROM users WHERE eth_address = ? 
FOR UPDATE", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "post_data_nonce", + "type_info": { + "type": "LongLong", + "flags": "", + "char_set": 63, + "max_size": 20 + } + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true + ] + }, + "hash": "1f880a26afcee6a90f9320f58fd91f1663c23cfd81d1942caa53365b4c7e2859" +} diff --git a/.sqlx/query-518dfce8e564b0cbe8a1bcff9fc8ffd5137de07da32a969f5a89bc64b305c8e6.json b/.sqlx/query-518dfce8e564b0cbe8a1bcff9fc8ffd5137de07da32a969f5a89bc64b305c8e6.json new file mode 100644 index 0000000..bb48f85 --- /dev/null +++ b/.sqlx/query-518dfce8e564b0cbe8a1bcff9fc8ffd5137de07da32a969f5a89bc64b305c8e6.json @@ -0,0 +1,25 @@ +{ + "db_name": "MySQL", + "query": "SELECT anchor_block_json FROM anchor_block ORDER BY block_number DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "anchor_block_json", + "type_info": { + "type": "Blob", + "flags": "NOT_NULL | BLOB | NO_DEFAULT_VALUE", + "char_set": 224, + "max_size": 4294967295 + } + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false + ] + }, + "hash": "518dfce8e564b0cbe8a1bcff9fc8ffd5137de07da32a969f5a89bc64b305c8e6" +} diff --git a/.sqlx/query-df9e59f4d0e9924c0168a6e5b1511e2ceb01ca939e89015da0a721904550494a.json b/.sqlx/query-df9e59f4d0e9924c0168a6e5b1511e2ceb01ca939e89015da0a721904550494a.json new file mode 100644 index 0000000..0bc7ec3 --- /dev/null +++ b/.sqlx/query-df9e59f4d0e9924c0168a6e5b1511e2ceb01ca939e89015da0a721904550494a.json @@ -0,0 +1,12 @@ +{ + "db_name": "MySQL", + "query": "UPDATE users SET post_data_nonce = ? WHERE eth_address = ?", + "describe": { + "columns": [], + "parameters": { + "Right": 2 + }, + "nullable": [] + }, + "hash": "df9e59f4d0e9924c0168a6e5b1511e2ceb01ca939e89015da0a721904550494a" +} diff --git a/.sqlx/query-f526aedee4ce66498740ef5a8429e1b330698b160fed08ac58b83646a8edd02b.json b/.sqlx/query-f526aedee4ce66498740ef5a8429e1b330698b160fed08ac58b83646a8edd02b.json new file mode 100644 index 0000000..bc3d553 --- /dev/null +++ b/.sqlx/query-f526aedee4ce66498740ef5a8429e1b330698b160fed08ac58b83646a8edd02b.json @@ -0,0 +1,12 @@ +{ + "db_name": "MySQL", + "query": "\nINSERT INTO data_intents (id, eth_address, data, data_len, data_hash, max_blob_gas_price, data_hash_signature)\nVALUES (?, ?, ?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 7 + }, + "nullable": [] + }, + "hash": "f526aedee4ce66498740ef5a8429e1b330698b160fed08ac58b83646a8edd02b" +} diff --git a/Cargo.lock b/Cargo.lock index e22de0d..2ceed1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -540,6 +540,25 @@ dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "atomic-write-file" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" +dependencies = [ + "nix", + "rand", +] + [[package]] name = "aurora-engine-modexp" version = "1.0.0" @@ -622,6 +641,17 @@ dependencies = [ "serde", ] +[[package]] +name = "bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "bindgen" version = "0.66.1" @@ 
-698,6 +728,7 @@ dependencies = [ "async-trait", "bytes", "c-kzg", + "chrono", "clap", "criterion", "dotenv", @@ -709,6 +740,7 @@ dependencies = [ "hex", "lazy_static", "log", + "num-traits", "once_cell", "prometheus", "proptest", @@ -719,12 +751,14 @@ dependencies = [ "serde", "serde_json", "sha2", + "sqlx", "tempfile", "tokio", "tracing", "tracing-log", "tracing-subscriber", "url", + "uuid 1.6.1", ] [[package]] @@ -1246,11 +1280,21 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" dependencies = [ "cfg-if", ] @@ -1340,6 +1384,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", + "pem-rfc7468", "zeroize", ] @@ -1452,6 +1497,12 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "dunce" version = "1.0.4" @@ -1477,6 +1528,9 @@ name = "either" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" @@ -1573,6 +1627,17 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys", +] + [[package]] name = "eth-keystore" version = "0.5.0" @@ -1592,7 +1657,7 @@ dependencies = [ "sha2", "sha3", "thiserror", - "uuid", + "uuid 0.8.2", ] [[package]] @@ -1905,6 +1970,12 @@ dependencies = [ "yansi", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "eyre" version = "0.6.9" @@ -1942,6 +2013,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + [[package]] name = "fixed-hash" version = "0.8.0" @@ -1970,6 +2047,17 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2058,6 +2146,17 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + [[package]] name = "futures-io" version = "0.3.29" @@ -2242,11 +2341,23 @@ dependencies = [ "fxhash", ] +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.2", +] + [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +dependencies = [ + "unicode-segmentation", +] [[package]] name = "hermit-abi" @@ -2269,6 +2380,15 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + [[package]] name = "hmac" version = "0.12.1" @@ -2536,6 +2656,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -2693,6 +2822,17 @@ dependencies = [ "redox_syscall", ] +[[package]] +name = "libsqlite3-sys" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.4.11" @@ -2844,6 +2984,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -2889,6 +3040,23 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + [[package]] name = "num-complex" version = "0.4.4" @@ -3184,6 +3352,15 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -3304,6 +3481,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -3864,6 +4052,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "rsa" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "spki", + "subtle", + "zeroize", +] + [[package]] name = "ruint" version = "1.11.1" @@ -4424,6 +4632,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -4435,6 +4646,229 @@ dependencies = [ "der", ] +[[package]] +name = "sqlformat" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" +dependencies = [ + "itertools 0.12.0", + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dba03c279da73694ef99763320dea58b51095dfe87d001b1d4b5fe78ba8763cf" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd" +dependencies = [ + "ahash", + "atoi", + "bigdecimal", + "byteorder", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "dotenvy", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap 2.1.0", + "log", + "memchr", + "once_cell", + "paste", + "percent-encoding", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", + "uuid 1.6.1", + "webpki-roots", +] + +[[package]] +name = "sqlx-macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0bd4519486723648186a08785143599760f7cc81c52334a55d6a83ea1e20841" +dependencies = [ + "atomic-write-file", + "dotenvy", + "either", + "heck", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 1.0.109", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4" +dependencies = [ + "atoi", + "base64 0.21.5", + "bigdecimal", + "bitflags 2.4.1", + "byteorder", + "bytes", + "chrono", + "crc", + "digest 0.10.7", + "dotenvy", + "either", + 
"futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "uuid 1.6.1", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24" +dependencies = [ + "atoi", + "base64 0.21.5", + "bigdecimal", + "bitflags 2.4.1", + "byteorder", + "chrono", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "num-bigint", + "once_cell", + "rand", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "uuid 1.6.1", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "210976b7d948c7ba9fced8ca835b11cbb2d677c59c79de41ac0d397e14547490" +dependencies = [ + "atoi", + "chrono", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "tracing", + "url", + "urlencoding", + "uuid 1.6.1", +] + [[package]] name = "static_assertions" version = "1.1.0" @@ -4454,6 +4888,17 @@ dependencies = [ "precomputed-hash", ] +[[package]] +name = "stringprep" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +dependencies = [ + "finl_unicode", + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.10.0" @@ -4764,6 +5209,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-tungstenite" version = "0.20.1" @@ -5016,6 +5472,12 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + [[package]] name = "untrusted" version = "0.7.1" @@ -5039,6 +5501,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf-8" version = "0.7.6" @@ -5061,6 +5529,16 @@ dependencies = [ "serde", ] +[[package]] +name = "uuid" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +dependencies = [ + "getrandom", + "serde", +] + [[package]] name = "valuable" version = "0.1.0" @@ -5207,6 +5685,12 @@ dependencies = [ "rustix", ] +[[package]] +name = "whoami" +version = 
"1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 0897ad2..7135ce8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,10 @@ prometheus = { version = "0.13.3", features = ["push"] } lazy_static = "1.4.0" dotenv = "0.15.0" tracing-log = "0.2.0" +sqlx = { version = "0.7", default-features = false, features = ["runtime-tokio-rustls", "macros", "mysql", "uuid", "chrono", "migrate", "bigdecimal"] } +num-traits = "0.2.17" +uuid = { version = "1.6.1", features = ["v4", "serde"] } +chrono = { version = "0.4.31", default-features = false, features = ["clock", "serde"] } [dev-dependencies] hex = "0.4.3" diff --git a/Dockerfile b/Dockerfile index 155a02c..7ac744f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM rust:1.73.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev WORKDIR /app COPY . . -RUN cargo build --release +RUN SQLX_OFFLINE=1 cargo build --release # Final layer to minimize size FROM gcr.io/distroless/cc-debian11 diff --git a/README.md b/README.md index 54877ae..21fab64 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Implementation of trusted blob sharing protocol. Supports submissions via a perm ``` -Usage: blobshare [OPTIONS] +Usage: blobshare [OPTIONS] --database-url Options: -p, --port @@ -29,6 +29,8 @@ Options: Consider blocks `finalize_depth` behind current head final. If there's a re-org deeper than this depth, the app will crash and expect to re-sync on restart [env: FINALIZE_DEPTH=] [default: 64] --max-pending-transactions Max count of pending transactions that will be sent before waiting for inclusion of the previously sent transactions. A number higher than the max count of blobs per block should not result better UX. 
However, a higher number risks creating transactions that can become underpriced in volatile network conditions [env: MAX_PENDING_TRANSACTIONS=] [default: 6] + --database-url + Database URL to a MySQL DB in the format `mysql://user:password@localhost/test` [env: DATABASE_URL=] --metrics Enable serving metrics [env: METRICS=] --metrics-port diff --git a/benches/packing.rs b/benches/packing.rs index 3af7ee2..cc1b0ef 100644 --- a/benches/packing.rs +++ b/benches/packing.rs @@ -1,4 +1,6 @@ -use blob_share::packing::{pack_items_brute_force, pack_items_greedy_sorted, pack_items_knapsack}; +use blob_share::packing::{ + self, pack_items_brute_force, pack_items_greedy_sorted, pack_items_knapsack, +}; use criterion::{criterion_group, criterion_main, Criterion}; use rand::{rngs::StdRng, Rng, SeedableRng}; @@ -12,14 +14,14 @@ fn brute_force_benchmark(c: &mut Criterion) { let range_cost_per_len = cost_per_len..2 * cost_per_len; let mut items = (0..n) .map(|_| { - ( + packing::Item::new( rng.gen_range(range_len.clone()), rng.gen_range(range_cost_per_len.clone()), ) }) - .collect::>(); + .collect::>(); - items.sort_by(|a, b| a.0.cmp(&b.0)); + items.sort_by(|a, b| a.len.cmp(&b.len)); c.bench_function(&format!("greedy sorted n={n}"), |b| { b.iter(|| pack_items_greedy_sorted(&items, max_len, cost_per_len).unwrap()); @@ -34,7 +36,7 @@ (1000, 4096), (10000, 4096), ] { - let items = vec![(1, 10); n]; + let items = vec![packing::Item::new(1, 10); n]; let cost_per_len = 1; c.bench_function(&format!("knapsack n={n} max_len=129,800"), |b| { @@ -43,7 +45,7 @@ } for n in [8, 16, 31] { - let items = vec![(1, 10); n]; + let items = vec![packing::Item::new(1, 10); n]; // performance depends only on n, the item values are irrelevant let max_len = 2 * n; let cost_per_len = 1; diff --git a/migrations/20231223063356_create_data_intent_table.sql b/migrations/20231223063356_create_data_intent_table.sql new file mode 100644 index 0000000..7f1a1e4 --- /dev/null +++ b/migrations/20231223063356_create_data_intent_table.sql @@ -0,0 +1,27 @@ +-- Stores individual data intents from users. +-- - A user may submit the same data twice +-- - Economically bounded, only accepts data intents that the user can afford +-- - Never pruned, data intents remain in the DB forever after finalization +CREATE TABLE data_intents ( + id BINARY(16) PRIMARY KEY, -- UUID as binary + -- sender address + eth_address BINARY(20), + -- binary data to publish, MEDIUMBLOB = binary large object with max length of 2^24-1 bytes (16MB) + data MEDIUMBLOB NOT NULL, + -- byte length of data, max possible size is 131,072 < INT::MAX = 2,147,483,647 + data_len INT UNSIGNED NOT NULL, + -- hash of data (keccak256) + data_hash BINARY(32) NOT NULL, + -- Max BIGINT UNSIGNED = 2^64-1. Max gas price possible to represent is ~18.4 ETH / byte, or ~2.4e6 ETH per blob + max_blob_gas_price BIGINT UNSIGNED NOT NULL, + -- Optional ECDSA signature over data_hash, serialized + data_hash_signature BINARY(65) DEFAULT NULL, + -- Transaction hash of the blob transaction that included this intent, if any + inclusion_tx_hash BINARY(32) DEFAULT NULL, + -- Timestamp with millisecond level precision, automatically populated + updated_at TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + -- Index updated_at to query recently added intents efficiently. The query: + -- `EXPLAIN ANALYZE SELECT * FROM data_intents WHERE updated_at > '2024-01-01 00:00:00';` + -- against PlanetScale consumes 0 row read credits if there are no matches.
+ INDEX(updated_at) +); diff --git a/migrations/20231223124040_create_users_table.sql b/migrations/20231223124040_create_users_table.sql new file mode 100644 index 0000000..6c7b6df --- /dev/null +++ b/migrations/20231223124040_create_users_table.sql @@ -0,0 +1,7 @@ +-- Catch-all table for user-related data +CREATE TABLE users ( + -- 0x prefixed hex encoded lowercase address + eth_address CHAR(42) PRIMARY KEY, + -- BIGINT = 64 bits signed; optional, only required for replay protection when posting through an unsafe channel + post_data_nonce BIGINT +); diff --git a/migrations/20231224015302_create_anchor_block_table.sql b/migrations/20231224015302_create_anchor_block_table.sql new file mode 100644 index 0000000..7c2e666 --- /dev/null +++ b/migrations/20231224015302_create_anchor_block_table.sql @@ -0,0 +1,7 @@ +CREATE TABLE anchor_block ( + -- AnchorBlock serialized as JSON + -- TODO: Serialize as compact binary format + anchor_block_json LONGTEXT NOT NULL, + -- Block number of the AnchorBlock serialized value + block_number INT UNSIGNED NOT NULL +); diff --git a/scripts/init_db.sh b/scripts/init_db.sh new file mode 100755 index 0000000..029e1ab --- /dev/null +++ b/scripts/init_db.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +set -x +set -eo pipefail + +if ! [ -x "$(command -v mysql)" ]; then + echo >&2 "Error: mysql is not installed." + exit 1 +fi + +if ! [ -x "$(command -v sqlx)" ]; then + echo >&2 "Error: sqlx is not installed." + echo >&2 "Use:" + echo >&2 " cargo install --version='~0.7' sqlx-cli --no-default-features --features rustls,mysql" + echo >&2 "to install it." + exit 1 +fi + +DB_USER="${MYSQL_USER:=mysql}" +DB_PASSWORD="${MYSQL_PASSWORD:=password}" +DB_NAME="${MYSQL_DB:=blob_share}" +DB_PORT="${MYSQL_PORT:=3306}" +DB_HOST="${MYSQL_HOST:=localhost}" + +# To connect and debug the DB: +# `mysql -h localhost --protocol=TCP -u"mysql" -p"password"` + +# Allow skipping Docker if a dockerized MySQL database is already running +if [[ -z "${SKIP_DOCKER}" ]] +then + # if a test mysql container is already running, kill it before launching a fresh one + RUNNING_MYSQL_CONTAINER=$(docker ps --filter 'name=test_mysql' --format '{{.ID}}') + if [[ -n $RUNNING_MYSQL_CONTAINER ]]; then + echo "there is a mysql container already running, killing it" + docker kill ${RUNNING_MYSQL_CONTAINER} + fi + # Launch mysql using Docker + docker run \ -e MYSQL_USER=${DB_USER} \ -e MYSQL_PASSWORD=${DB_PASSWORD} \ -e MYSQL_ROOT_PASSWORD=${DB_PASSWORD} \ -e MYSQL_DATABASE=${DB_NAME} \ -p "${DB_PORT}":3306 \ -d \ --name "test_mysql_$(date '+%s')" \ mysql + # Note: Adjust MySQL Docker settings as needed +fi + +# Keep pinging MySQL until it's ready to accept commands +until mysql -h "${DB_HOST}" -P "${DB_PORT}" --protocol=TCP -u"${DB_USER}" -p"${DB_PASSWORD}" -e "SELECT 1" ${DB_NAME}; do + >&2 echo "MySQL is still unavailable - sleeping" + sleep 1 +done + +>&2 echo "MySQL is up and running on port ${DB_PORT} - running migrations now!" + +export DATABASE_URL=mysql://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME} +sqlx database create +sqlx migrate run + +>&2 echo "MySQL has been migrated, ready to go!"
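+# Illustrative usage sketch (assumes a MySQL server is already reachable at the configured host/port):
+#   SKIP_DOCKER=1 MYSQL_HOST=127.0.0.1 MYSQL_PORT=3306 ./scripts/init_db.sh
+# skips the Docker launch above and only waits for the server before running the sqlx migrations.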
+ diff --git a/scripts/run_tests_macos.sh b/scripts/run_tests_macos.sh new file mode 100755 index 0000000..b0822bc --- /dev/null +++ b/scripts/run_tests_macos.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -x +set -eo pipefail + +# increase the open file descriptor limit for macOS +ulimit -n 4096 + +cargo test -- --test-threads=1 --nocapture "$@" diff --git a/scripts/sync_readme.py b/scripts/sync_readme.py index 7d5aaef..75756ad 100755 --- a/scripts/sync_readme.py +++ b/scripts/sync_readme.py @@ -1,12 +1,31 @@ #!/usr/bin/python import subprocess +import os tag_start = "" tag_end = "" readme_file = "README.md" -help_text = subprocess.check_output(['cargo', 'run', '--bin', 'blobshare', '--', '--help']).decode('utf-8') + +# Set up a clean environment with only essential variables, +# to prevent leaking defaults or sensitive data from a local .env +def load_env_file(file_path): + env_vars = {} + if os.path.exists(file_path): + with open(file_path, 'r') as file: + for line in file: + line = line.strip() + if line and not line.startswith('#'): + key, _, _ = line.partition('=') + env_vars[key] = '' + return env_vars +clean_env = load_env_file('.env') + +help_text = subprocess.check_output( + ['cargo', 'run', '--bin', 'blobshare', '--', '--help'], + env={ **clean_env, 'PATH': os.environ['PATH'] } +).decode('utf-8') with open(readme_file, 'r') as file: data = file.read() @@ -21,3 +40,5 @@ with open(readme_file, 'w') as file: file.write(out) + + diff --git a/src/anchor_block.rs b/src/anchor_block.rs new file mode 100644 index 0000000..b439161 --- /dev/null +++ b/src/anchor_block.rs @@ -0,0 +1,100 @@ +use std::path::PathBuf; + +use eyre::{bail, eyre, Context, Result}; +use sqlx::MySqlPool; +use tokio::{fs, io}; + +use crate::{eth_provider::EthProvider, sync::AnchorBlock, BlockGasSummary, StartingPoint}; + +pub(crate) async fn get_anchor_block( + anchor_block_filepath: &PathBuf, + db_pool: &MySqlPool, + provider: &EthProvider, + starting_point: StartingPoint, +) -> Result { + // TODO: choose starting point that's not genesis + // First, attempt to read the persisted file if it exists + match fs::read_to_string(&anchor_block_filepath).await { + Ok(str) => return serde_json::from_str(&str).wrap_err_with(|| "parsing anchor block file"), + Err(e) => match e.kind() { + io::ErrorKind::NotFound => {} // Ok continue + _ => bail!( + "error opening anchor_block file {}: {e:?}", + anchor_block_filepath.to_string_lossy() + ), + }, + } + + // Second, fetch from DB + if let Some(anchor_block) = fetch_anchor_block_from_db(db_pool).await? { + return Ok(anchor_block); + } + + // Lastly, initialize from the network at the starting point + match starting_point { + StartingPoint::StartingBlock(starting_block) => { + anchor_block_from_starting_block(provider, starting_block).await + } + } } + /// Fetch AnchorBlock from DB pub async fn fetch_anchor_block_from_db(db_pool: &MySqlPool) -> Result> { let row = sqlx::query!( "SELECT anchor_block_json FROM anchor_block ORDER BY block_number DESC LIMIT 1", ) .fetch_optional(db_pool) .await?; Ok(match row { Some(row) => Some(serde_json::from_str(&row.anchor_block_json)?), None => None, }) } /// Persist AnchorBlock to DB row.
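+/// A minimal usage sketch (illustrative; the wiring is assumed, but both helpers are defined in this module):
+/// ```ignore
+/// let anchor = anchor_block_from_starting_block(&provider, starting_block).await?;
+/// persist_anchor_block_to_db(&db_pool, anchor).await?;
+/// ```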
+/// TODO: Keep a single row with latest block +pub async fn persist_anchor_block_to_db( + db_pool: &MySqlPool, + anchor_block: AnchorBlock, +) -> Result<()> { + // Serialize the AnchorBlock (except the block_number field) to a JSON string + let anchor_block_json = serde_json::to_string(&anchor_block)?; + let block_number = anchor_block.number; + + // Insert the data into the database + sqlx::query!( + "INSERT INTO anchor_block (anchor_block_json, block_number) VALUES (?, ?)", + anchor_block_json, + block_number + ) + .execute(db_pool) + .await?; + + Ok(()) +} + +/// Initialize empty anchor block state from a network block +pub async fn anchor_block_from_starting_block( + provider: &EthProvider, + starting_block: u64, +) -> Result { + let anchor_block = provider + .get_block(starting_block) + .await? + .ok_or_else(|| eyre!("genesis block not available"))?; + let hash = anchor_block + .hash + .ok_or_else(|| eyre!("block has no hash property"))?; + let number = anchor_block + .number + .ok_or_else(|| eyre!("block has no number property"))? + .as_u64(); + Ok(AnchorBlock { + hash, + number, + gas: BlockGasSummary::from_block(&anchor_block)?, + // At genesis all balances are zero + finalized_balances: <_>::default(), + }) +} diff --git a/src/app.rs b/src/app.rs new file mode 100644 index 0000000..35231a4 --- /dev/null +++ b/src/app.rs @@ -0,0 +1,270 @@ +use ethers::{signers::LocalWallet, types::Address}; +use eyre::{bail, Context, Result}; +use sqlx::MySqlPool; +use tokio::sync::{Notify, RwLock}; + +use crate::{ + client::{DataIntentDbRowFull, DataIntentId, DataIntentStatus, DataIntentSummary}, + data_intent_tracker::{ + fetch_data_intent_db_full, fetch_data_intent_db_summary, fetch_many_data_intent_db_full, + store_data_intent, update_inclusion_tx_hashes, DataIntentTracker, + }, + eth_provider::EthProvider, + routes::SyncStatusBlock, + sync::{BlockSync, BlockWithTxs, SyncBlockError, SyncBlockOutcome, TxInclusion}, + utils::{address_to_hex_lowercase, txhash_from_vec}, + AppConfig, BlobTxSummary, DataIntent, +}; + +pub(crate) struct AppData { + pub config: AppConfig, + pub kzg_settings: c_kzg::KzgSettings, + pub provider: EthProvider, + pub sender_wallet: LocalWallet, + pub notify: Notify, + pub chain_id: u64, + // Private members, to ensure consistent manipulation + data_intent_tracker: RwLock, + sync: RwLock, + db_pool: MySqlPool, +} + +impl AppData { + #[allow(clippy::too_many_arguments)] + pub fn new( + config: AppConfig, + kzg_settings: c_kzg::KzgSettings, + db_pool: MySqlPool, + provider: EthProvider, + sender_wallet: LocalWallet, + chain_id: u64, + data_intent_tracker: DataIntentTracker, + sync: BlockSync, + ) -> Self { + AppData { + config, + kzg_settings, + db_pool, + provider, + sender_wallet, + notify: <_>::default(), + chain_id, + data_intent_tracker: data_intent_tracker.into(), + sync: sync.into(), + } + } + + #[tracing::instrument(skip(self, data_intent))] + pub async fn atomic_update_post_data_on_unsafe_channel( + &self, + data_intent: DataIntent, + nonce: u64, + ) -> Result { + let eth_address = address_to_hex_lowercase(*data_intent.from()); + + let mut tx = self.db_pool.begin().await?; + + // Fetch user row, may not have any records yet + let user_row = sqlx::query!( + "SELECT post_data_nonce FROM users WHERE eth_address = ? 
FOR UPDATE", + eth_address, + ) + .fetch_optional(&mut *tx) + .await?; + + // Check user balance + let last_nonce = user_row.and_then(|row| row.post_data_nonce); + + // Check nonce is higher + if let Some(last_nonce) = last_nonce { + if nonce <= last_nonce.try_into()? { + bail!("Nonce not new, replay protection"); + } + } + + // Update balance and nonce + // TODO: Should assert that 1 row was affected? + sqlx::query!( + "UPDATE users SET post_data_nonce = ? WHERE eth_address = ?", + Some(nonce), + eth_address, + ) + .execute(&mut *tx) + .await?; + + let id = store_data_intent(&mut tx, data_intent).await?; + + // Commit transaction + tx.commit().await?; + + Ok(id) + } + + pub async fn evict_underpriced_pending_txs(&self) -> Result { + let underpriced_txs = { self.sync.write().await.evict_underpriced_pending_txs() }; + + if !underpriced_txs.is_empty() { + let mut data_intent_tracker = self.data_intent_tracker.write().await; + for tx in &underpriced_txs { + // TODO: should handle each individual error or abort iteration? + data_intent_tracker.revert_item_to_pending(tx.tx_hash)?; + } + } + + Ok(underpriced_txs.len()) + } + + pub async fn maybe_advance_anchor_block(&self) -> Result, u64)>> { + if let Some((finalized_txs, new_anchor_block_number)) = + self.sync.write().await.maybe_advance_anchor_block()? + { + let mut data_intent_tracker = self.data_intent_tracker.write().await; + for tx in &finalized_txs { + data_intent_tracker.finalize_tx(tx.tx_hash); + } + + Ok(Some((finalized_txs, new_anchor_block_number))) + } else { + Ok(None) + } + } + + pub async fn blob_gas_price_next_head_block(&self) -> u128 { + self.sync + .read() + .await + .get_head_gas() + .blob_gas_price_next_block() + } + + pub async fn register_sent_blob_tx( + &self, + data_intent_ids: &[DataIntentId], + blob_tx: BlobTxSummary, + ) -> Result<()> { + update_inclusion_tx_hashes(&self.db_pool, data_intent_ids, blob_tx.tx_hash).await?; + + self.sync + .write() + .await + .register_pending_blob_tx(blob_tx) + .wrap_err("consistency error with blob_tx")?; + + Ok(()) + } + + pub async fn sync_data_intents(&self) -> Result<()> { + self.data_intent_tracker + .write() + .await + .sync_with_db(&self.db_pool) + .await + } + + pub async fn sync_next_head( + &self, + block: BlockWithTxs, + ) -> Result { + BlockSync::sync_next_head(&self.sync, &self.provider, block).await + } + + pub async fn reserve_next_available_nonce( + &self, + sender_address: Address, + ) -> Result> { + self.sync + .write() + .await + .reserve_next_available_nonce(&self.provider, sender_address) + .await + } + + pub async fn unreserve_nonce(&self, sender_address: Address, nonce: u64) { + self.sync + .write() + .await + .unreserve_nonce(sender_address, nonce); + } + + #[tracing::instrument(skip(self))] + pub async fn pending_total_data_len(&self, address: &Address) -> usize { + self.data_intent_tracker + .read() + .await + .pending_intents_total_data_len(address) + + self.sync.read().await.pending_txs_data_len(address) + } + + #[tracing::instrument(skip(self))] + pub async fn balance_of_user(&self, from: &Address) -> i128 { + self.sync.read().await.balance_with_pending(from) + - self + .data_intent_tracker + .read() + .await + .pending_intents_total_cost(from) as i128 + } + + #[tracing::instrument(skip(self))] + pub async fn status_by_id(&self, id: &DataIntentId) -> Result { + Ok( + match fetch_data_intent_db_summary(&self.db_pool, id).await? 
{ + None => DataIntentStatus::Unknown, + Some(data_intent) => { + match data_intent.inclusion_tx_hash { + None => DataIntentStatus::Pending, + Some(tx_hash) => { + let tx_hash = txhash_from_vec(tx_hash)?; + match self.sync.read().await.get_tx_status(tx_hash) { + Some(TxInclusion::Pending) => { + DataIntentStatus::InPendingTx { tx_hash } + } + Some(TxInclusion::Included(block_hash)) => { + DataIntentStatus::InConfirmedTx { + tx_hash, + block_hash, + } + } + None => { + // Should never happen, review this case + DataIntentStatus::Unknown + } + } + } + } + } + }, + ) + } + + pub async fn data_intent_by_id(&self, id: &DataIntentId) -> Result { + fetch_data_intent_db_full(&self.db_pool, id).await + } + + pub async fn data_intents_by_id( + &self, + ids: &[DataIntentId], + ) -> Result> { + fetch_many_data_intent_db_full(&self.db_pool, ids).await + } + + pub async fn get_all_pending(&self) -> Vec { + self.data_intent_tracker.read().await.get_all_pending() + } + + pub async fn get_sync(&self) -> (SyncStatusBlock, SyncStatusBlock) { + ( + self.sync.read().await.get_anchor().into(), + self.sync.read().await.get_head(), + ) + } + + pub async fn serialize_anchor_block(&self) -> Result { + serde_json::to_string(self.sync.read().await.get_anchor()) + } + + pub async fn collect_metrics(&self) { + self.sync.read().await.collect_metrics(); + self.data_intent_tracker.read().await.collect_metrics(); + } +} diff --git a/src/blob_sender_task.rs b/src/blob_sender_task.rs index d9f6358..c9df2be 100644 --- a/src/blob_sender_task.rs +++ b/src/blob_sender_task.rs @@ -4,12 +4,17 @@ use ethers::signers::Signer; use eyre::{Context, Result}; use crate::{ + blob_tx_data::BlobTxParticipant, + client::DataIntentSummary, + data_intent::BlobGasPrice, + data_intent_tracker::DataIntentDbRowFull, debug, gas::GasConfig, kzg::{construct_blob_tx, BlobTx, TxParams}, metrics, packing::{pack_items, Item}, - warn, AppData, DataIntent, MAX_USABLE_BLOB_DATA_LEN, + utils::address_from_vec, + warn, AppData, MAX_USABLE_BLOB_DATA_LEN, }; pub(crate) async fn blob_sender_task(app_data: Arc) -> Result<()> { @@ -60,15 +65,13 @@ pub(crate) enum SendResult { pub(crate) async fn maybe_send_blob_tx(app_data: Arc, _id: u64) -> Result { let _timer = metrics::BLOB_SENDER_TASK_TIMES.start_timer(); - let max_fee_per_blob_gas = app_data - .sync - .read() - .await - .get_head_gas() - .blob_gas_price_next_block(); + // Sync available intents + app_data.sync_data_intents().await?; + + let max_fee_per_blob_gas = app_data.blob_gas_price_next_head_block().await; - let next_blob_items = { - let items = app_data.data_intent_tracker.read().await.get_all_pending(); + let data_intent_summaries = { + let items = app_data.get_all_pending().await; debug!( "attempting to pack valid blob, max_fee_per_blob_gas {} items {}", max_fee_per_blob_gas, @@ -76,24 +79,29 @@ pub(crate) async fn maybe_send_blob_tx(app_data: Arc, _id: u64) -> Resu ); let _timer_pck = metrics::PACKING_TIMES.start_timer(); - if let Some(next_blob_items) = select_next_blob_items(&items, max_fee_per_blob_gas) { + if let Some(next_blob_items) = + select_next_blob_items(&items, max_fee_per_blob_gas.try_into()?) 
+ { next_blob_items } else { return Ok(SendResult::NoViableSet); } }; - let data_intent_ids = next_blob_items + let data_intent_ids = data_intent_summaries .iter() - .map(|item| item.id()) + .map(|item| item.id) .collect::>(); debug!( "selected {} items for blob tx: {:?}", - next_blob_items.len(), + data_intent_summaries.len(), data_intent_ids ); + // TODO: Make this sequence atomic; lock the data intent rows here + let data_intents = app_data.data_intents_by_id(&data_intent_ids).await?; + // TODO: Check if it's necessary to do a round-trip to the EL to estimate gas let (max_fee_per_gas, max_priority_fee_per_gas) = app_data.provider.estimate_eip1559_fees().await?; @@ -102,15 +110,13 @@ max_priority_fee_per_gas: max_priority_fee_per_gas.as_u128(), max_fee_per_blob_gas, }; + debug!("gas_config {:?}", gas_config); let sender_address = app_data.sender_wallet.address(); // Make getting the nonce reliable + being able to send multiple txs at once let nonce = if let Some(nonce) = app_data - .sync - .write() - .await - .reserve_next_available_nonce(&app_data.provider, sender_address) + .reserve_next_available_nonce(sender_address) .await? { nonce @@ -119,14 +125,10 @@ }; let blob_tx = - match construct_and_send_tx(app_data.clone(), nonce, &gas_config, next_blob_items).await { + match construct_and_send_tx(app_data.clone(), nonce, &gas_config, data_intents).await { Ok(blob_tx) => blob_tx, Err(e) => { - app_data - .sync - .write() - .await - .unreserve_nonce(sender_address, nonce); + app_data.unreserve_nonce(sender_address, nonce).await; return Err(e); } }; @@ -138,15 +140,12 @@ // // Declare items as pending on the computed tx_hash { - // Grab the lock of both data_intent_tracker and sync at once to ensure data intent status - // is consistent in both structs - let mut data_intent_tracker = app_data.data_intent_tracker.write().await; - let mut sync = app_data.sync.write().await; - data_intent_tracker - .include_in_blob_tx(&data_intent_ids, blob_tx.tx_hash) - .wrap_err("consistency error with blob_tx intents")?; - sync.register_pending_blob_tx(blob_tx.tx_summary) - .wrap_err("consistency error with blob_tx")?; + app_data + .register_sent_blob_tx(&data_intent_ids, blob_tx.tx_summary) + .await?; + + // TODO: Review when it's best to re-sync the data_intent_tracker + app_data.sync_data_intents().await?; } Ok(SendResult::SentBlobTx) } @@ -157,22 +156,37 @@ async fn construct_and_send_tx( app_data: Arc, nonce: u64, gas_config: &GasConfig, - next_blob_items: Vec, + next_blob_items: Vec, ) -> Result { - let intent_ids = next_blob_items.iter().map(|i| i.id()).collect::>(); + let intent_ids = next_blob_items.iter().map(|i| i.id).collect::>(); let tx_params = TxParams { chain_id: app_data.chain_id, nonce, }; + let participants = next_blob_items + .iter() + .map(|item| { + Ok(BlobTxParticipant { + address: address_from_vec(item.eth_address.clone())?, + data_len: item.data.len(), + }) + }) + .collect::>>()?; + let datas = next_blob_items + .into_iter() + .map(|item| item.data) + .collect::>(); + let blob_tx = construct_blob_tx( + &app_data.kzg_settings, - &app_data.publish_config, + app_data.config.l1_inbox_address, gas_config, &tx_params, &app_data.sender_wallet, - next_blob_items, + participants, + datas, )?; metrics::PACKED_BLOB_USED_LEN.observe(blob_tx.tx_summary.used_bytes as f64); @@ -225,12 +239,12 @@
async fn construct_and_send_tx( // TODO: is ok to represent wei units as usize? #[tracing::instrument(skip(data_intents))] fn select_next_blob_items( - data_intents: &[DataIntent], - blob_gas_price: u128, -) -> Option> { + data_intents: &[DataIntentSummary], + blob_gas_price: BlobGasPrice, +) -> Option> { let items: Vec = data_intents .iter() - .map(|e| (e.data().len(), e.max_blob_gas_price())) + .map(|e| Item::new(e.data_len, e.max_blob_gas_price)) .collect::>(); pack_items(&items, MAX_USABLE_BLOB_DATA_LEN, blob_gas_price).map(|selected_indexes| { @@ -241,104 +255,3 @@ fn select_next_blob_items( .collect::>() }) } - -#[cfg(test)] -mod tests { - use ethers::types::H160; - - use crate::{data_intent::DataIntentNoSignature, DataIntent, MAX_USABLE_BLOB_DATA_LEN}; - - use super::select_next_blob_items; - - #[test] - fn select_next_blob_items_case_no_items() { - run_select_next_blob_items_test(&[], 1, None); - } - - #[test] - fn select_next_blob_items_case_one_small() { - run_select_next_blob_items_test(&[(MAX_USABLE_BLOB_DATA_LEN / 4, 1)], 1, None); - } - - #[test] - fn select_next_blob_items_case_one_big() { - run_select_next_blob_items_test( - &[(MAX_USABLE_BLOB_DATA_LEN, 1)], - 1, - Some(&[(MAX_USABLE_BLOB_DATA_LEN, 1)]), - ); - } - - #[test] - fn select_next_blob_items_case_multiple_small() { - run_select_next_blob_items_test( - &[ - (MAX_USABLE_BLOB_DATA_LEN / 4, 1), - (MAX_USABLE_BLOB_DATA_LEN / 4, 2), - (MAX_USABLE_BLOB_DATA_LEN / 2, 3), - (MAX_USABLE_BLOB_DATA_LEN / 2, 4), - ], - 1, - Some(&[ - (MAX_USABLE_BLOB_DATA_LEN / 4, 2), - (MAX_USABLE_BLOB_DATA_LEN / 4, 1), - (MAX_USABLE_BLOB_DATA_LEN / 2, 3), - ]), - ); - } - - fn run_select_next_blob_items_test( - all_items: &[(usize, u128)], - blob_gas_price: u128, - expected_selected_items: Option<&[(usize, u128)]>, - ) { - let mut all_items = generate_data_intents(all_items); - let expected_selected_items = - expected_selected_items.map(|items| generate_data_intents(items)); - - let selected_items = select_next_blob_items(all_items.as_mut_slice(), blob_gas_price); - - assert_eq!( - items_to_summary(selected_items), - items_to_summary(expected_selected_items) - ) - } - - fn items_to_summary(items: Option>) -> Option> { - items.map(|mut items| { - // Sort for stable comparision - items.sort_by(|a, b| { - a.data_len() - .cmp(&b.data_len()) - .then_with(|| b.max_blob_gas_price().cmp(&a.max_blob_gas_price())) - }); - - items - .iter() - .map(|d| { - format!( - "(MAX / {}, {})", - MAX_USABLE_BLOB_DATA_LEN / d.data_len(), - d.max_blob_gas_price() - ) - }) - .collect() - }) - } - - fn generate_data_intents(items: &[(usize, u128)]) -> Vec { - items - .iter() - .map(|(data_len, max_cost_wei)| generate_data_intent(*data_len, *max_cost_wei)) - .collect() - } - - fn generate_data_intent(data_len: usize, max_blob_gas_price: u128) -> DataIntent { - DataIntent::NoSignature(DataIntentNoSignature { - from: H160([0xff; 20]), - data: vec![0xbb; data_len], - data_hash: [0xaa; 32].into(), - max_blob_gas_price, - }) - } -} diff --git a/src/block_subscriber_task.rs b/src/block_subscriber_task.rs index a526e72..1e11412 100644 --- a/src/block_subscriber_task.rs +++ b/src/block_subscriber_task.rs @@ -6,7 +6,7 @@ use tokio::fs; use crate::{ debug, error, info, metrics, - sync::{BlockSync, BlockWithTxs, SyncBlockError, SyncBlockOutcome}, + sync::{BlockWithTxs, SyncBlockError, SyncBlockOutcome}, AppData, }; @@ -69,12 +69,9 @@ async fn sync_block(app_data: Arc, block_hash: TxHash) -> Result<(), Sy .ok_or_else(|| eyre!("block with txs not available {}", block_hash))?; let 
block_number = block_with_txs.number; - let outcome = BlockSync::sync_next_head( - &app_data.sync, - &app_data.provider, - BlockWithTxs::from_ethers_block(block_with_txs)?, - ) - .await?; + let outcome = app_data + .sync_next_head(BlockWithTxs::from_ethers_block(block_with_txs)?) + .await?; match &outcome { SyncBlockOutcome::BlockKnown => metrics::SYNC_BLOCK_KNOWN.inc(), @@ -102,46 +99,16 @@ async fn sync_block(app_data: Arc, block_hash: TxHash) -> Result<(), Sy ); // Check if any pending transactions need re-pricing - let underpriced_txs = { app_data.sync.write().await.evict_underpriced_pending_txs() }; - - if !underpriced_txs.is_empty() { - { - let mut data_intent_tracker = app_data.data_intent_tracker.write().await; - for tx in &underpriced_txs { - // TODO: should handle each individual error or abort iteration? - data_intent_tracker.revert_item_to_pending(tx.tx_hash)?; - } - metrics::UNDERPRICED_TXS_EVICTED.inc_by(underpriced_txs.len() as f64); - } - + let underpriced_txs = app_data.evict_underpriced_pending_txs().await?; + if underpriced_txs > 0 { + metrics::UNDERPRICED_TXS_EVICTED.inc_by(underpriced_txs as f64); // Potentially prepare new blob transactions with correct pricing app_data.notify.notify_one(); } - // Check if any intents are underpriced - { - let blob_gas_price_next_block = { - app_data - .sync - .read() - .await - .get_head_gas() - .blob_gas_price_next_block() - }; - let mut data_intent_tracker = app_data.data_intent_tracker.write().await; - let items = data_intent_tracker.get_all_pending(); - for item in items { - if item.max_blob_gas_price() < blob_gas_price_next_block { - // Underpriced transaction, evict - data_intent_tracker.evict_underpriced_intent(&item.id())?; - metrics::UNDERPRICED_INTENTS_EVICTED.inc(); - } - } - } - // Finalize transactions - let new_anchor_block_number = if let Some((finalized_txs, new_anchor_block_number)) = - app_data.sync.write().await.maybe_advance_anchor_block()? + if let Some((finalized_txs, new_anchor_block_number)) = + app_data.maybe_advance_anchor_block().await? { let finalized_tx_hashes = finalized_txs .iter() @@ -154,21 +121,12 @@ async fn sync_block(app_data: Arc, block_hash: TxHash) -> Result<(), Sy metrics::SYNC_ANCHOR_NUMBER.set(new_anchor_block_number as f64); metrics::FINALIZED_TXS.inc_by(finalized_tx_hashes.len() as f64); - let mut data_intent_tracker = app_data.data_intent_tracker.write().await; - for tx in finalized_txs { - data_intent_tracker.finalize_tx(tx.tx_hash); - } - - Some(new_anchor_block_number) - } else { - None - }; - - // Persist anchor block - // TODO: Throttle to not persist every block, not necessary - if new_anchor_block_number.is_some() { + // Persist anchor block + // TODO: Throttle to not persist every block, not necessary let anchor_block_str = { - serde_json::to_string(app_data.sync.read().await.get_anchor()) + app_data + .serialize_anchor_block() + .await .wrap_err("serializing AnchorBlock")? 
}; fs::write(&app_data.config.anchor_block_filepath, anchor_block_str) diff --git a/src/client.rs b/src/client.rs index b40cd62..e806d67 100644 --- a/src/client.rs +++ b/src/client.rs @@ -5,11 +5,16 @@ use ethers::{ use eyre::{eyre, Result}; use url::Url; +pub use crate::data_intent_tracker::{ + DataIntentDbRowFull, DataIntentDbRowSummary, DataIntentSummary, +}; pub use crate::eth_provider::EthProvider; -pub use crate::routes::{DataIntentStatus, PostDataIntentV1, PostDataResponse, SenderDetails}; +pub use crate::routes::{ + DataIntentStatus, PostDataIntentV1, PostDataIntentV1Signed, PostDataResponse, SenderDetails, +}; +use crate::{data_intent::BlobGasPrice, utils::address_to_hex_lowercase}; pub use crate::{data_intent::DataIntentId, DataIntent}; -use crate::{data_intent::DataIntentSummary, routes::SyncStatus, utils::unix_timestamps_millis}; -use crate::{routes::PostDataIntentV1Signed, utils::address_to_hex}; +use crate::{routes::SyncStatus, utils::unix_timestamps_millis}; use crate::{utils::is_ok_response, BlockGasSummary}; pub struct Client { @@ -90,7 +95,7 @@ impl Client { Ok(is_ok_response(response).await?.json().await?) } - pub async fn get_data_by_id(&self, id: &DataIntentId) -> Result { + pub async fn get_data_by_id(&self, id: &DataIntentId) -> Result { let response = self .client .get(&self.url(&format!("v1/data/{}", id))) @@ -111,16 +116,7 @@ impl Client { pub async fn get_balance_by_address(&self, address: Address) -> Result { let response = self .client - .get(&self.url(&format!("v1/balance/{}", address_to_hex(address)))) - .send() - .await?; - Ok(is_ok_response(response).await?.json().await?) - } - - pub async fn get_last_seen_nonce_by_address(&self, address: Address) -> Result> { - let response = self - .client - .get(&self.url(&format!("v1/last_seen_nonce/{}", address_to_hex(address)))) + .get(&self.url(&format!("v1/balance/{}", address_to_hex_lowercase(address)))) .send() .await?; Ok(is_ok_response(response).await?.json().await?) 
@@ -134,12 +130,13 @@ impl Client { pub enum GasPreference { RelativeToHead(EthProvider, f64), + Value(BlobGasPrice), } const FACTOR_RESOLUTION: u128 = 1000; impl GasPreference { - pub async fn max_blob_gas_price(&self) -> Result { + pub async fn max_blob_gas_price(&self) -> Result { match self { GasPreference::RelativeToHead(provider, factor_to_next_block) => { // Choose data pricing correctly @@ -152,18 +149,19 @@ impl GasPreference { BlockGasSummary::from_block(&head_block)?.blob_gas_price_next_block(); Ok(if *factor_to_next_block == 1.0 { - blob_gas_price_next_block + blob_gas_price_next_block as BlobGasPrice } else { - ((FACTOR_RESOLUTION as f64 * factor_to_next_block) as u128 + (((FACTOR_RESOLUTION as f64 * factor_to_next_block) as u128 * blob_gas_price_next_block) - / FACTOR_RESOLUTION + / FACTOR_RESOLUTION) as BlobGasPrice }) } + GasPreference::Value(blob_gas_price) => Ok(*blob_gas_price), } } } pub enum NoncePreference { Timebased, - Value(u128), + Value(u64), } diff --git a/src/data_intent.rs b/src/data_intent.rs index ae243f1..0aa463a 100644 --- a/src/data_intent.rs +++ b/src/data_intent.rs @@ -8,9 +8,12 @@ use ethers::{ types::{Address, Signature}, utils::keccak256, }; -use eyre::{bail, Result}; +use eyre::Result; use serde::{Deserialize, Serialize}; -use serde_utils::hex_vec; +use uuid::Uuid; + +// Max gas price possible to represent is ~18 ETH / byte, or ~2.4e6 ETH per blob +pub type BlobGasPrice = u64; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum DataIntent { @@ -23,7 +26,7 @@ pub struct DataIntentNoSignature { pub from: Address, pub data: Vec, pub data_hash: DataHash, - pub max_blob_gas_price: u128, + pub max_blob_gas_price: BlobGasPrice, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] @@ -31,29 +34,22 @@ pub struct DataIntentWithSignature { pub from: Address, pub data: Vec, pub data_hash: DataHash, - pub max_blob_gas_price: u128, + pub max_blob_gas_price: BlobGasPrice, pub signature: Signature, } impl DataIntent { pub fn max_cost(&self) -> u128 { - self.data().len() as u128 * self.max_blob_gas_price() + data_intent_max_cost(self.data_len(), self.max_blob_gas_price()) } - pub fn max_blob_gas_price(&self) -> u128 { + pub fn max_blob_gas_price(&self) -> BlobGasPrice { match self { DataIntent::NoSignature(d) => d.max_blob_gas_price, DataIntent::WithSignature(d) => d.max_blob_gas_price, } } - pub fn id(&self) -> DataIntentId { - match self { - DataIntent::NoSignature(d) => DataIntentId::new(d.from, d.data_hash), - DataIntent::WithSignature(d) => DataIntentId::new(d.from, d.data_hash), - } - } - pub fn from(&self) -> &Address { match self { DataIntent::NoSignature(d) => &d.from, @@ -62,6 +58,8 @@ impl DataIntent { } pub fn data_len(&self) -> usize { + // TODO: charge and coerce data to at least 31 bytes to prevent too expensive packing + // rounds. 
Add consistency tests for signed data of coerced inputs self.data().len() } @@ -72,10 +70,24 @@ impl DataIntent { } } + pub fn data_hash(&self) -> &DataHash { + match self { + DataIntent::NoSignature(d) => &d.data_hash, + DataIntent::WithSignature(d) => &d.data_hash, + } + } + + pub fn data_hash_signature(&self) -> Option<&Signature> { + match self { + DataIntent::NoSignature(_) => None, + DataIntent::WithSignature(d) => Some(&d.signature), + } + } + pub async fn with_signature( wallet: &LocalWallet, data: Vec, - max_blob_gas_price: u128, + max_blob_gas_price: BlobGasPrice, ) -> Result { let data_hash = DataHash::from_data(&data); let signature: Signature = wallet.sign_message(data_hash.0).await?; @@ -90,82 +102,12 @@ impl DataIntent { } } -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct DataIntentSummary { - pub id: String, - pub from: Address, - #[serde(with = "hex_vec")] - pub data_hash: Vec, - pub data_len: usize, - pub max_blob_gas_price: u128, +/// Max possible cost of data intent, billed cost prior to inclusion +pub(crate) fn data_intent_max_cost(data_len: usize, max_blob_gas_price: BlobGasPrice) -> u128 { + data_len as u128 * max_blob_gas_price as u128 } -impl From<&DataIntent> for DataIntentSummary { - fn from(value: &DataIntent) -> Self { - let id = value.id().to_string(); - match value { - DataIntent::WithSignature(d) => Self { - id, - from: d.from, - data_hash: d.data_hash.to_vec(), - data_len: d.data.len(), - max_blob_gas_price: d.max_blob_gas_price, - }, - DataIntent::NoSignature(d) => Self { - id, - from: d.from, - data_hash: d.data_hash.to_vec(), - data_len: d.data.len(), - max_blob_gas_price: d.max_blob_gas_price, - }, - } - } -} - -#[derive(Clone, Copy, Serialize, Deserialize, Hash, Eq, PartialEq)] -pub struct DataIntentId(Address, DataHash); - -impl DataIntentId { - fn new(from: Address, data_hash: DataHash) -> Self { - Self(from, data_hash) - } -} - -impl std::fmt::Debug for DataIntentId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "v1-{}-{}", hex::encode(self.0), hex::encode(self.1 .0)) - } -} - -impl Display for DataIntentId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "v1-{}-{}", hex::encode(self.0), hex::encode(self.1 .0)) - } -} - -impl FromStr for DataIntentId { - type Err = eyre::Report; - - fn from_str(s: &str) -> Result { - let parts: Vec<&str> = s.split('-').collect(); - - if parts.len() != 3 { - bail!("Invalid id format format".to_string()); - } - let version = parts[0]; - let address = parts[1]; - let data_hash = parts[2]; - - if version != "v1" { - bail!("Unsupported version {}", version); - } - - let address = Address::from_str(address)?; - let data_hash = DataHash::from_str(data_hash)?; - - Ok(DataIntentId::new(address, data_hash)) - } -} +pub type DataIntentId = Uuid; #[derive(Clone, Copy, Serialize, Deserialize, Hash, Eq, PartialEq)] pub struct DataHash([u8; 32]); @@ -205,21 +147,28 @@ impl DataHash { pub fn to_vec(self) -> Vec { self.0.to_vec() } + + pub fn to_fixed_bytes(self) -> [u8; 32] { + self.0 + } } #[cfg(test)] mod tests { use std::str::FromStr; - use ethers::types::H160; - use super::*; #[test] fn data_intent_id_str_serde() { - let id = DataIntentId::new(H160([0xab; 20]), [0xfe; 32].into()); - let id_str = id.to_string(); - assert_eq!(id_str, "v1-abababababababababababababababababababab-fefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe"); - assert_eq!(DataIntentId::from_str(&id_str).unwrap(), id); + let id_str = "c4f1bdd0-3331-4470-b427-28a2c514f483"; + let 
id = DataIntentId::from_str(id_str).unwrap();
+        assert_eq!(format!("{}", id), id_str);
+        assert_eq!(format!("{:?}", id), id_str);
+
+        let id_as_json = format!("\"{}\"", id_str);
+        let id_from_json = serde_json::from_str(&id_as_json).unwrap();
+        assert_eq!(id, id_from_json);
+        assert_eq!(serde_json::to_string(&id).unwrap(), id_as_json);
     }
 }
diff --git a/src/data_intent_tracker.rs b/src/data_intent_tracker.rs
index 74bb25d..17218d9 100644
--- a/src/data_intent_tracker.rs
+++ b/src/data_intent_tracker.rs
@@ -1,22 +1,64 @@
-use std::collections::HashMap;
+//! Data intent is a request from the user to get data bundled on a blob.
+//!
+//! # Cost
+//!
+//! Data intents have a max specified cost by the user. Data intents not included in blocks are
+//! charged at their max cost. After block inclusion, they are charged at the cost of that block's
+//! gas parameters.
+//!
+//! # Canonical storage
+//!
+//! There are two canonical sources of data for a blob share instance:
+//! - Blockchain state of the target network (e.g. Ethereum mainnet)
+//! - MySQL DB holding pending data (non-included data intents, non-included blob transactions)
+//!
+//! Data intents are stored in the data_intents table. A blob share instance may pack a number of
+//! data intent items not yet included into a blob transaction. If the blob transaction is later
+//! dropped due to underpriced gas, those data intents may be reverted to pending.
+//!
+//! Underpriced data intents will remain in the internal blob share pool consuming user balance. A
+//! user can cancel data intents by ID at any point.
+//!
+//! TODO: consider evicting underpriced data intents after some time.
+//!
+//! # Cache
+//!
+//! A blob share instance must run the packing algorithm against all pending data intent summaries.
+//! Querying the DB on each packing iteration may become a bottleneck. Thus keep a local in-memory
+//! cache of data intent summaries. Before each packing run, check if there are new records after
+//! the last fetched timestamp.
+//!
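A quick worked instance of the cost rule this module doc describes, using the `data_intent_max_cost` helper added to src/data_intent.rs in this same diff; the concrete numbers are illustrative, not from the source:

    // Before inclusion an intent is billed at its worst case:
    //   max_cost = data_len * max_blob_gas_price
    let data_len: usize = 10_000; // 10 KB of blob data
    let max_blob_gas_price: u64 = 50_000_000_000; // 50 gwei per byte cap
    let max_cost = data_len as u128 * max_blob_gas_price as u128;
    assert_eq!(max_cost, 500_000_000_000_000); // 5e14 wei reserved against the user balance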
+use chrono::{DateTime, Utc};
 use ethers::types::{Address, TxHash};
-use eyre::{bail, eyre, Result};
+use eyre::{bail, eyre, Context, Result};
+use futures::StreamExt;
+use num_traits::cast::FromPrimitive;
+use serde::{Deserialize, Serialize};
+use serde_utils::hex_vec;
+use sqlx::{types::BigDecimal, FromRow, MySql, MySqlPool, QueryBuilder};
+use std::{cmp, collections::HashMap};
+use uuid::Uuid;
 
-use crate::{data_intent::DataIntentId, metrics, DataIntent};
+use crate::{
+    data_intent::{data_intent_max_cost, BlobGasPrice, DataIntentId},
+    metrics,
+    utils::{address_from_vec, option_hex_vec, txhash_from_vec},
+    DataIntent,
+};
 
 #[derive(Default)]
 pub struct DataIntentTracker {
+    // DateTime default = NaiveDateTime default = timestamp(0)
+    last_sync_table_data_intents: DateTime<Utc>,
     pending_intents: HashMap<DataIntentId, DataIntentItem>,
     included_intents: HashMap<TxHash, Vec<DataIntentId>>,
 }
 
 #[derive(Clone)]
 pub enum DataIntentItem {
-    // TODO: Evicted items are never pruned
-    Evicted,
-    Pending(DataIntent),
-    Included(DataIntent, TxHash),
+    Pending(DataIntentSummary),
+    Included(DataIntentSummary, TxHash),
 }
 
 // TODO: Need to prune all items once included for long enough
@@ -26,89 +68,80 @@ impl DataIntentTracker {
         metrics::INCLUDED_INTENTS_CACHE.set(self.included_intents.len() as f64);
     }
 
+    pub async fn sync_with_db(&mut self, db_pool: &MySqlPool) -> Result<()> {
+        let from = self.last_sync_table_data_intents;
+        let to: DateTime<Utc> = Utc::now();
+
+        let mut stream = sqlx::query(
+            r#"
+SELECT id, eth_address, data_len, data_hash, max_blob_gas_price, data_hash_signature, inclusion_tx_hash, updated_at
+FROM data_intents
+WHERE updated_at BETWEEN ? AND ?
+ORDER BY updated_at ASC
+        "#,
+        )
+        .bind(from)
+        .bind(to)
+        .fetch(db_pool);
+
+        while let Some(row) = stream.next().await {
+            let data_intent = DataIntentDbRowSummary::from_row(&row?)?;
+
+            let updated_at = data_intent.updated_at;
+            self.pending_intents
+                .insert(data_intent.id, data_intent.try_into()?);
+            self.last_sync_table_data_intents =
+                cmp::max(self.last_sync_table_data_intents, updated_at);
+        }
+
+        Ok(())
+    }
+
     /// Returns the total sum of pending intents cost from `from`.
     pub fn pending_intents_total_cost(&self, from: &Address) -> u128 {
         self.pending_intents
             .values()
             .map(|item| match item {
                 DataIntentItem::Pending(data_intent) => {
-                    if data_intent.from() == from {
-                        data_intent.max_cost()
+                    if &data_intent.from == from {
+                        data_intent_max_cost(data_intent.data_len, data_intent.max_blob_gas_price)
                     } else {
                         0
                     }
                 }
-                DataIntentItem::Evicted | DataIntentItem::Included(_, _) => 0,
+                DataIntentItem::Included(_, _) => 0,
             })
             .sum()
     }
 
-    pub fn get_all_pending(&self) -> Vec<DataIntent> {
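The sync loop above implements a simple watermark pattern: fetch rows whose `updated_at` falls between the last watermark and now, fold them into the cache, and advance the watermark to the newest `updated_at` seen. A minimal standalone sketch of the same idea, with a hypothetical `Row` in place of `DataIntentDbRowSummary`:

    use std::collections::HashMap;

    struct Row {
        id: u64,
        updated_at: i64,
    }

    fn apply_sync(cache: &mut HashMap<u64, Row>, watermark: &mut i64, rows: Vec<Row>) {
        for row in rows {
            // Advance the watermark so the next query only scans newer records
            *watermark = (*watermark).max(row.updated_at);
            cache.insert(row.id, row);
        }
    }

One caveat of `BETWEEN` with an inclusive lower bound: a row whose `updated_at` equals the watermark can be fetched twice, which is harmless here because the insert overwrites by id.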
+    /// Returns the total data length of pending intents from `from`.
+    pub fn pending_intents_total_data_len(&self, from: &Address) -> usize {
+        self.pending_intents
+            .values()
+            .map(|item| match item {
+                DataIntentItem::Pending(data_intent) => {
+                    if &data_intent.from == from {
+                        data_intent.data_len
+                    } else {
+                        0
+                    }
+                }
+                DataIntentItem::Included(_, _) => 0,
+            })
+            .sum()
+    }
+
+    pub fn get_all_pending(&self) -> Vec<DataIntentSummary> {
         self.pending_intents
             .values()
             // TODO: Do not clone here, the sum of all DataIntents can be big
             .filter_map(|item| match item {
-                DataIntentItem::Evicted => None,
                 DataIntentItem::Pending(data_intent) => Some(data_intent.clone()),
                 DataIntentItem::Included(_, _) => None,
             })
             .collect()
     }
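The item lifecycle enforced by the methods around here is small: a summary enters as `Pending`, becomes `Included(tx_hash)` when packed into a blob transaction, and reverts to `Pending` if that transaction is later dropped or re-orged. A compact sketch of the transition table (the real tracker carries summaries and bails on invalid transitions instead of ignoring them):

    enum Item { Pending, Included(u64 /* stand-in for TxHash */) }
    enum Event { PackedIntoTx(u64), TxReverted }

    fn step(item: Item, event: Event) -> Item {
        match (item, event) {
            (Item::Pending, Event::PackedIntoTx(tx)) => Item::Included(tx),
            (Item::Included(_), Event::TxReverted) => Item::Pending,
            (other, _) => other, // the tracker treats these as errors
        }
    }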
 
-    pub fn add(&mut self, data_intent: DataIntent) -> Result<()> {
-        let id = data_intent.id();
-
-        match self.pending_intents.get(&id) {
-            None => {} // Ok insert
-            Some(DataIntentItem::Evicted) => {} // Allow to re-insert evicted intents
-            // TODO: Handle bumping the registered max price
-            Some(DataIntentItem::Pending(_)) | Some(DataIntentItem::Included(_, _)) => {
-                bail!("data intent {id} already known")
-            }
-        };
-
-        self.pending_intents
-            .insert(id, DataIntentItem::Pending(data_intent));
-        Ok(())
-    }
-
-    pub fn evict_underpriced_intent(&mut self, id: &DataIntentId) -> Result<()> {
-        match self.pending_intents.get(id) {
-            None => bail!("unknown intent {}", id),
-            Some(DataIntentItem::Evicted) => bail!("intent already evicted {}", id),
-            Some(DataIntentItem::Included(_, prev_tx_hash)) => {
-                bail!("attempting to evict intent included in transaction {prev_tx_hash:?} {id}")
-            }
-            Some(DataIntentItem::Pending(_)) => {
-                self.pending_intents.insert(*id, DataIntentItem::Evicted);
-                Ok(())
-            }
-        }
-    }
-
-    pub fn include_in_blob_tx(&mut self, ids: &[DataIntentId], tx_hash: TxHash) -> Result<()> {
-        for id in ids {
-            match self.pending_intents.remove(id) {
-                None => bail!("pending intent removed while moving into pending {}", id),
-                Some(DataIntentItem::Evicted) => bail!("intent has been evicted {}", id),
-                Some(DataIntentItem::Included(data_intent, prev_tx_hash)) => {
-                    self.pending_intents
-                        .insert(*id, DataIntentItem::Included(data_intent, prev_tx_hash));
-                    bail!("pending item already included in transaction {:?} while moving into pending {}", prev_tx_hash, id)
-                }
-                Some(DataIntentItem::Pending(data_intent)) => {
-                    self.pending_intents
-                        .insert(*id, DataIntentItem::Included(data_intent, tx_hash));
-                }
-            }
-        }
-
-        // TODO: should handle double inclusion for same transaction hash
-        self.included_intents.insert(tx_hash, ids.to_vec());
-
-        Ok(())
-    }
-
     pub fn revert_item_to_pending(&mut self, tx_hash: TxHash) -> Result<()> {
         let ids = self
             .included_intents
@@ -118,7 +151,6 @@ impl DataIntentTracker {
         for id in ids {
             match self.pending_intents.remove(&id) {
                 None => bail!("pending intent removed while moving into pending {}", id),
-                Some(DataIntentItem::Evicted) => bail!("item evicted {}", id),
                 // TODO: Should check that the transaction is consistent?
                 Some(DataIntentItem::Included(data_intent, _))
                 | Some(DataIntentItem::Pending(data_intent)) => self
@@ -137,29 +169,264 @@ impl DataIntentTracker {
             }
         }
     }
+}
 
-    pub fn data_by_id(&self, id: &DataIntentId) -> Option<DataIntent> {
-        match self.pending_intents.get(id) {
-            None => None,
-            Some(DataIntentItem::Evicted) => None,
-            Some(DataIntentItem::Pending(data_intent)) => Some(data_intent.clone()),
-            Some(DataIntentItem::Included(data_intent, _)) => Some(data_intent.clone()),
-        }
+#[derive(Debug, FromRow, Serialize, Deserialize)]
+pub struct DataIntentDbRowFull {
+    pub id: Uuid,
+    #[serde(with = "hex_vec")]
+    pub eth_address: Vec<u8>, // BINARY(20)
+    #[serde(with = "hex_vec")]
+    pub data: Vec<u8>, // MEDIUMBLOB
+    pub data_len: u32, // INT
+    #[serde(with = "hex_vec")]
+    pub data_hash: Vec<u8>, // BINARY(32)
+    pub max_blob_gas_price: u64, // BIGINT
+    #[serde(with = "option_hex_vec")]
+    pub data_hash_signature: Option<Vec<u8>>, // BINARY(65), Optional
+    #[serde(with = "option_hex_vec")]
+    pub inclusion_tx_hash: Option<Vec<u8>>, // BINARY(32), Optional
+    pub updated_at: DateTime<Utc>, // TIMESTAMP(3)
+}
+
+#[derive(Debug, FromRow, Serialize)]
+pub struct DataIntentDbRowSummary {
+    pub id: Uuid,
+    pub eth_address: Vec<u8>, // BINARY(20)
+    pub data_len: u32, // INT
+    pub data_hash: Vec<u8>, // BINARY(32)
+    pub max_blob_gas_price: u64, // BIGINT
+    pub data_hash_signature: Option<Vec<u8>>, // BINARY(65), Optional
+    pub inclusion_tx_hash: Option<Vec<u8>>, // BINARY(32), Optional
+    pub updated_at: DateTime<Utc>, // TIMESTAMP(3)
+}
+
+pub(crate) async fn fetch_data_intent_db_full(
+    db_pool: &MySqlPool,
+    id: &Uuid,
+) -> Result<DataIntentDbRowFull> {
+    let data_intent = sqlx::query_as::<_, DataIntentDbRowFull>(
+        r#"
+SELECT id, eth_address, data, data_len, data_hash, max_blob_gas_price, data_hash_signature, inclusion_tx_hash, updated_at
+FROM data_intents
+WHERE id = ?
+        "#)
+    .bind(id)
+    .fetch_one(db_pool)
+    .await?;
+
+    Ok(data_intent)
+}
+
+pub(crate) async fn fetch_many_data_intent_db_full(
+    db_pool: &MySqlPool,
+    ids: &[Uuid],
+) -> Result<Vec<DataIntentDbRowFull>> {
+    let mut query_builder: QueryBuilder<MySql> = QueryBuilder::new(
+        r#"
+SELECT id, eth_address, data, data_len, data_hash, max_blob_gas_price, data_hash_signature, inclusion_tx_hash, updated_at
+FROM data_intents
+WHERE id in
+        "#,
+    );
+
+    // TODO: limit the amount of ids to not reach a limit
+    // TODO: try to use different API than `.push_tuples` since you only query by id
+    query_builder.push_tuples(ids.iter(), |mut b, id| {
+        b.push_bind(id);
+    });
+
+    let rows = query_builder.build().fetch_all(db_pool).await?;
+
+    rows.iter()
+        .map(|row| DataIntentDbRowFull::from_row(row).wrap_err("error decoding data_intent DB row"))
+        .collect::<Result<Vec<_>>>()
+}
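The `push_tuples` call in `fetch_many_data_intent_db_full` above expands the `WHERE id in` clause into a parenthesized tuple list, one bind per id. Roughly, per sqlx's documented QueryBuilder behavior (sketched as comments since the exact whitespace is an implementation detail):

    // let mut qb: QueryBuilder<MySql> = QueryBuilder::new("SELECT ... WHERE id in ");
    // qb.push_tuples(ids.iter(), |mut b, id| { b.push_bind(id); });
    // qb.sql() now reads approximately:
    //   SELECT ... WHERE id in ((?), (?), (?))
    // with one positional bind per id, which is why the TODO above wants a cap
    // on the length of `ids` (MySQL limits the number of placeholders).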
+
+pub(crate) async fn fetch_data_intent_db_summary(
+    db_pool: &MySqlPool,
+    id: &Uuid,
+) -> Result<Option<DataIntentDbRowSummary>> {
+    let data_intent = sqlx::query_as::<_, DataIntentDbRowSummary>(
+        r#"
+SELECT id, eth_address, data_len, data_hash, max_blob_gas_price, data_hash_signature, inclusion_tx_hash, updated_at
+FROM data_intents
+WHERE id = ?
+        "#)
+    .bind(id)
+    .fetch_optional(db_pool)
+    .await?;
+
+    Ok(data_intent)
+}
+
+/// Store data intent to SQL DB
+pub(crate) async fn store_data_intent<'c>(
+    db_tx: &mut sqlx::Transaction<'c, sqlx::MySql>,
+    data_intent: DataIntent,
+) -> Result<Uuid> {
+    let id = Uuid::new_v4();
+    let eth_address = data_intent.from().to_fixed_bytes().to_vec();
+    let data = data_intent.data();
+    let data_len = data.len() as u32;
+    let data_hash = data_intent.data_hash().to_vec();
+    let max_blob_gas_price = BigDecimal::from_u64(data_intent.max_blob_gas_price());
+    let data_hash_signature = data_intent.data_hash_signature().map(|sig| sig.to_vec());
+
+    // Persist data request
+    sqlx::query!(
+        r#"
+INSERT INTO data_intents (id, eth_address, data, data_len, data_hash, max_blob_gas_price, data_hash_signature)
+VALUES (?, ?, ?, ?, ?, ?, ?)
+        "#,
+        id,
+        eth_address,
+        data,
+        data_len,
+        data_hash,
+        max_blob_gas_price,
+        data_hash_signature
+    )
+    .execute(&mut **db_tx)
+    .await?;
+
+    // TODO: Prevent inserting duplicates
+
+    // match self.pending_intents.get(&id) {
+    //     None => {} // Ok insert
+    //     // TODO: Handle bumping the registered max price
+    //     Some(DataIntentItem::Pending(_)) | Some(DataIntentItem::Included(_, _)) => {
+    //         bail!("data intent {id} already known")
+    //     }
+    // };
+
+    Ok(id)
+}
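A usage sketch for `store_data_intent` above: it takes a `sqlx::Transaction` rather than a pool so the caller can bundle the insert with other checks and commit atomically; the caller below is hypothetical:

    async fn insert_with_checks(
        db_pool: &sqlx::MySqlPool,
        data_intent: DataIntent,
    ) -> eyre::Result<uuid::Uuid> {
        let mut db_tx = db_pool.begin().await?;
        // ...balance / nonce checks against the same transaction would go here...
        let id = store_data_intent(&mut db_tx, data_intent).await?;
        db_tx.commit().await?; // the new row is invisible to readers until this point
        Ok(id)
    }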
WHERE id = ?") + .bind(&new_inclusion_tx_hash) + .bind(id) + .execute(&mut *tx) + .await?; + } + + // Commit transaction + tx.commit().await?; + + Ok(()) +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct DataIntentSummary { + pub id: DataIntentId, + pub from: Address, + #[serde(with = "hex_vec")] + pub data_hash: Vec, + pub data_len: usize, + pub max_blob_gas_price: BlobGasPrice, + pub updated_at: DateTime, +} + +impl TryFrom for DataIntentSummary { + type Error = eyre::Report; + + fn try_from(value: DataIntentDbRowSummary) -> Result { + Ok(DataIntentSummary { + id: value.id, + from: address_from_vec(value.eth_address)?, + data_hash: value.data_hash, + data_len: value.data_len.try_into()?, + max_blob_gas_price: value.max_blob_gas_price, + updated_at: value.updated_at, + }) + } } -pub enum DataIntentItemStatus { - Pending, - Included(TxHash), - Evicted, - Unknown, +impl TryFrom for DataIntentItem { + type Error = eyre::Report; + + fn try_from(value: DataIntentDbRowSummary) -> std::result::Result { + Ok(match value.inclusion_tx_hash.clone() { + None => DataIntentItem::Pending(value.try_into()?), + Some(tx_hash) => DataIntentItem::Included(value.try_into()?, txhash_from_vec(tx_hash)?), + }) + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use chrono::DateTime; + use uuid::Uuid; + + use super::DataIntentDbRowFull; + + #[test] + fn serde_data_intent_db_row_full() { + let item = DataIntentDbRowFull { + id: Uuid::from_str("1bcb4515-8c91-456c-a87d-7c4f5f3f0d9e").unwrap(), + eth_address: vec![0xaa; 20], + data: vec![0xbb; 10], + data_len: 10, + data_hash: vec![0xcc; 32], + data_hash_signature: None, + max_blob_gas_price: 100000000, + inclusion_tx_hash: Some(vec![0xee; 32]), + updated_at: DateTime::from_str("2023-01-01T12:12:12.202889Z").unwrap(), + }; + + let expected_item_str = "{\"id\":\"1bcb4515-8c91-456c-a87d-7c4f5f3f0d9e\",\"eth_address\":\"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\"data\":\"0xbbbbbbbbbbbbbbbbbbbb\",\"data_len\":10,\"data_hash\":\"0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\",\"max_blob_gas_price\":100000000,\"data_hash_signature\":null,\"inclusion_tx_hash\":\"0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee\",\"updated_at\":\"2023-01-01T12:12:12.202889Z\"}"; + + assert_eq!(serde_json::to_string(&item).unwrap(), expected_item_str); + let item_recv: DataIntentDbRowFull = serde_json::from_str(expected_item_str).unwrap(); + // test eq of dedicated serde fiels with Option> + assert_eq!(item_recv.data_hash_signature, item.data_hash_signature); + assert_eq!(item_recv.inclusion_tx_hash, item.inclusion_tx_hash); + } } diff --git a/src/gas.rs b/src/gas.rs index 9409a42..55e54eb 100644 --- a/src/gas.rs +++ b/src/gas.rs @@ -8,6 +8,7 @@ const MIN_BLOB_GASPRICE: u128 = 1; const BLOB_GASPRICE_UPDATE_FRACTION: u128 = 3338477; const TARGET_BLOB_GAS_PER_BLOCK: u128 = 393216; +#[derive(Debug)] pub struct GasConfig { pub max_priority_fee_per_gas: u128, pub max_fee_per_gas: u128, diff --git a/src/kzg.rs b/src/kzg.rs index 08851e0..906be65 100644 --- a/src/kzg.rs +++ b/src/kzg.rs @@ -17,7 +17,7 @@ use crate::{ tx_eip4844::TxEip4844, tx_sidecar::{BlobTransaction, BlobTransactionSidecar}, }, - DataIntent, PublishConfig, MAX_USABLE_BLOB_DATA_LEN, + MAX_USABLE_BLOB_DATA_LEN, }; pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; @@ -42,24 +42,17 @@ pub fn kzg_to_versioned_hash(commitment: &c_kzg::KzgCommitment) -> B256 { pub(crate) fn construct_blob_tx( kzg_settings: &c_kzg::KzgSettings, - publish_config: &PublishConfig, + 
l1_inbox_address: ethers::types::Address, gas_config: &GasConfig, tx_params: &TxParams, wallet: &LocalWallet, - next_blob_items: Vec, + participants: Vec, + datas: Vec>, ) -> Result { - let participants = next_blob_items - .iter() - .map(|item| BlobTxParticipant { - address: *item.from(), - data_len: item.data().len(), - }) - .collect::>(); - let mut data = vec![]; - for item in next_blob_items.into_iter() { + for item in datas.into_iter() { // TODO: do less copying - data.extend_from_slice(item.data()); + data.extend_from_slice(&item); } // TODO: should chunk data in 31 bytes to ensure each field element if < BLS_MODULUS @@ -88,7 +81,7 @@ pub(crate) fn construct_blob_tx( max_fee_per_gas: gas_config.max_fee_per_gas, // TODO Adjust gas with input gas_limit: 100_000_u64, - to: Address::from(publish_config.l1_inbox_address.to_fixed_bytes()), + to: Address::from(l1_inbox_address.to_fixed_bytes()), value: <_>::default(), input: input.into(), access_list: <_>::default(), @@ -193,11 +186,12 @@ mod tests { use eyre::Result; use crate::{ + blob_tx_data::BlobTxParticipant, gas::GasConfig, kzg::{decode_blob_to_data, TxParams}, load_kzg_settings, reth_fork::tx_sidecar::BlobTransaction, - DataIntent, PublishConfig, ADDRESS_ZERO, MAX_USABLE_BLOB_DATA_LEN, + ADDRESS_ZERO, MAX_USABLE_BLOB_DATA_LEN, }; use super::{construct_blob_tx, encode_data_to_blob}; @@ -221,26 +215,26 @@ mod tests { max_priority_fee_per_gas: 1u128.into(), }; - let mut data_intents: Vec = vec![]; + let mut participants: Vec = vec![]; + let mut datas: Vec> = vec![]; for i in 0..2 { let wallet = LocalWallet::from_bytes(&[i + 1; 32])?; - data_intents.push( - DataIntent::with_signature(&wallet, vec![i + 0x10; 1000 * i as usize], 1) - .await - .unwrap(), - ); + let data = vec![i + 0x10; 1000 * i as usize]; + participants.push(BlobTxParticipant { + address: wallet.address(), + data_len: data.len(), + }); + datas.push(data); } - let participants = data_intents.iter().map(|p| *p.from()).collect::>(); let blob_tx = construct_blob_tx( &load_kzg_settings()?, - &PublishConfig { - l1_inbox_address: Address::from_str(ADDRESS_ZERO)?, - }, + Address::from_str(ADDRESS_ZERO)?, &gas_config, &TxParams { chain_id, nonce: 0 }, &wallet, - data_intents, + participants.clone(), + datas, )?; // EIP-2718 TransactionPayload @@ -275,15 +269,7 @@ mod tests { ); // Assert participants - assert_eq!( - blob_tx - .tx_summary - .participants - .iter() - .map(|p| p.address) - .collect::>(), - participants, - ); + assert_eq!(blob_tx.tx_summary.participants, participants); Ok(()) } diff --git a/src/lib.rs b/src/lib.rs index 229d475..8351e4f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,34 +4,32 @@ use clap::Parser; use data_intent_tracker::DataIntentTracker; use eth_provider::EthProvider; use ethers::{ - signers::{coins_bip39::English, LocalWallet, MnemonicBuilder, Signer}, + signers::{coins_bip39::English, MnemonicBuilder, Signer}, types::Address, }; -use eyre::{bail, eyre, Context, Result}; -use std::{ - collections::HashMap, env, io, net::TcpListener, path::PathBuf, str::FromStr, sync::Arc, - time::Duration, -}; -use tokio::{ - fs, - sync::{Notify, RwLock}, -}; +use eyre::{Context, Result}; +use sqlx::mysql::MySqlPoolOptions; +use std::{env, net::TcpListener, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; +use tokio::fs; use url::Url; use crate::{ + anchor_block::get_anchor_block, + app::AppData, blob_sender_task::blob_sender_task, block_subscriber_task::block_subscriber_task, metrics::{get_metrics, push_metrics_task}, routes::{ - get_balance_by_address, 
get_data, get_data_by_id, get_health, get_home, - get_last_seen_nonce_by_address, get_sender, get_status_by_id, get_sync, - post_data::post_data, + get_balance_by_address, get_data, get_data_by_id, get_health, get_home, get_sender, + get_status_by_id, get_sync, post_data::post_data, }, - sync::{AnchorBlock, BlockSync, BlockSyncConfig}, + sync::{BlockSync, BlockSyncConfig}, trusted_setup::TrustedSetup, utils::parse_basic_auth, }; +pub mod anchor_block; +mod app; pub mod beacon_api_client; mod blob_sender_task; mod blob_tx_data; @@ -53,7 +51,7 @@ mod utils; pub use blob_tx_data::BlobTxSummary; pub use client::Client; -pub use data_intent::DataIntent; +pub use data_intent::{BlobGasPrice, DataIntent}; pub use gas::BlockGasSummary; pub use metrics::{PushMetricsConfig, PushMetricsFormat}; pub use utils::increase_by_min_percent; @@ -70,6 +68,9 @@ pub(crate) use std::{println as error, println as warn, println as info, println /// Current encoding needs one byte per field element pub const MAX_USABLE_BLOB_DATA_LEN: usize = 31 * FIELD_ELEMENTS_PER_BLOB; +/// Max data allowed per user as pending data intents before inclusion +pub const MAX_PENDING_DATA_LEN_PER_USER: usize = MAX_USABLE_BLOB_DATA_LEN * 16; +/// Default target address const ADDRESS_ZERO: &str = "0x0000000000000000000000000000000000000000"; pub const TRUSTED_SETUP_BYTES: &[u8] = include_bytes!("../trusted_setup.json"); @@ -122,6 +123,10 @@ pub struct Args { #[arg(env, long, default_value_t = 6)] pub max_pending_transactions: u64, + /// Database URL to mysql DB with format `mysql://user:password@localhost/test` + #[arg(env, long)] + pub database_url: String, + /// Enable serving metrics #[arg(env, long)] pub metrics: bool, @@ -152,38 +157,14 @@ impl Args { } } -struct PublishConfig { - pub(crate) l1_inbox_address: Address, -} - struct AppConfig { + l1_inbox_address: Address, panic_on_background_task_errors: bool, anchor_block_filepath: PathBuf, metrics_server_bearer_token: Option, metrics_push: Option, } -struct AppData { - kzg_settings: c_kzg::KzgSettings, - data_intent_tracker: RwLock, - // TODO: Store in remote DB persisting - sign_nonce_tracker: RwLock>, - sync: RwLock, - provider: EthProvider, - sender_wallet: LocalWallet, - publish_config: PublishConfig, - notify: Notify, - chain_id: u64, - config: AppConfig, -} - -impl AppData { - async fn collect_metrics(&self) { - self.sync.read().await.collect_metrics(); - self.data_intent_tracker.read().await.collect_metrics(); - } -} - pub struct App { port: u16, server: Server, @@ -231,28 +212,18 @@ impl App { .await .wrap_err_with(|| "creating data dir")?; + // TODO: Should use connect_lazy_with + let db_pool = MySqlPoolOptions::new() + .max_connections(5) + .connect(&args.database_url) + .await?; + let anchor_block_filepath = data_dir.join("anchor_block.json"); // TODO: choose starting point that's not genesis - let anchor_block = { - // Attempt to read persisted file first if exists - match fs::read_to_string(&anchor_block_filepath).await { - Ok(str) => { - serde_json::from_str(&str).wrap_err_with(|| "parsing anchor block file")? - } - Err(e) => match e.kind() { - io::ErrorKind::NotFound => match starting_point { - StartingPoint::StartingBlock(starting_block) => { - anchor_block_from_starting_block(&provider, starting_block).await? 
- } - }, - _ => bail!( - "error opening anchor_block file {}: {e:?}", - anchor_block_filepath.to_string_lossy() - ), - }, - } - }; + let anchor_block = + get_anchor_block(&anchor_block_filepath, &db_pool, &provider, starting_point).await?; + debug!("retrieved anchor block: {:?}", anchor_block); let sync = BlockSync::new( BlockSyncConfig { @@ -262,43 +233,45 @@ impl App { }, anchor_block, ); - - let app_data = Arc::new(AppData { - kzg_settings: load_kzg_settings()?, - notify: <_>::default(), - data_intent_tracker: <_>::default(), - sign_nonce_tracker: <_>::default(), - sync: sync.into(), - publish_config: PublishConfig { - l1_inbox_address: Address::from_str(ADDRESS_ZERO)?, + // TODO: handle initial sync here with a nice progress bar + + let mut data_intent_tracker = DataIntentTracker::default(); + info!("syncing data intent track"); + data_intent_tracker.sync_with_db(&db_pool).await?; + info!("synced data intent track"); + + let config = AppConfig { + l1_inbox_address: Address::from_str(ADDRESS_ZERO)?, + panic_on_background_task_errors: args.panic_on_background_task_errors, + anchor_block_filepath, + metrics_server_bearer_token: args.metrics_bearer_token.clone(), + metrics_push: if let Some(url) = &args.metrics_push_url { + Some(PushMetricsConfig { + url: Url::parse(url) + .wrap_err_with(|| format!("invalid push gateway URL {url}"))?, + basic_auth: if let Some(auth) = &args.metrics_push_basic_auth { + Some(parse_basic_auth(auth).wrap_err_with(|| "invalid push gateway auth")?) + } else { + None + }, + interval: Duration::from_secs(args.metrics_push_interval_sec), + format: args.metrics_push_format, + }) + } else { + None }, + }; + + let app_data = Arc::new(AppData::new( + config, + load_kzg_settings()?, + db_pool, provider, - sender_wallet: wallet, + wallet, chain_id, - config: AppConfig { - panic_on_background_task_errors: args.panic_on_background_task_errors, - anchor_block_filepath, - metrics_server_bearer_token: args.metrics_bearer_token.clone(), - metrics_push: if let Some(url) = &args.metrics_push_url { - Some(PushMetricsConfig { - url: Url::parse(url) - .wrap_err_with(|| format!("invalid push gateway URL {url}"))?, - basic_auth: if let Some(auth) = &args.metrics_push_basic_auth { - Some( - parse_basic_auth(auth) - .wrap_err_with(|| "invalid push gateway auth")?, - ) - } else { - None - }, - interval: Duration::from_secs(args.metrics_push_interval_sec), - format: args.metrics_push_format, - }) - } else { - None - }, - }, - }); + data_intent_tracker, + sync, + )); info!( "connected to eth node at {} chain {}", @@ -323,8 +296,7 @@ impl App { .service(get_data) .service(get_data_by_id) .service(get_status_by_id) - .service(get_balance_by_address) - .service(get_last_seen_nonce_by_address); + .service(get_balance_by_address); // Conditionally register the metrics route if args.metrics && args.metrics_port == args.port { @@ -383,28 +355,3 @@ pub(crate) fn load_kzg_settings() -> Result { &trusted_setup.g2_points(), )?) } - -/// Initialize empty anchor block state from a network block -async fn anchor_block_from_starting_block( - provider: &EthProvider, - starting_block: u64, -) -> Result { - let anchor_block = provider - .get_block(starting_block) - .await? - .ok_or_else(|| eyre!("genesis block not available"))?; - let hash = anchor_block - .hash - .ok_or_else(|| eyre!("block has no hash property"))?; - let number = anchor_block - .number - .ok_or_else(|| eyre!("block has no number property"))? 
- .as_u64(); - Ok(AnchorBlock { - hash, - number, - gas: BlockGasSummary::from_block(&anchor_block)?, - // At genesis all balances are zero - finalized_balances: <_>::default(), - }) -} diff --git a/src/metrics.rs b/src/metrics.rs index 5c4aa3a..f139e78 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -55,8 +55,6 @@ lazy_static! { register_gauge!("blobshare_sync_anchor_number", "sync anchor number").unwrap(); pub(crate) static ref UNDERPRICED_TXS_EVICTED: Counter = register_counter!("blobshare_underpriced_txs_evicted_total", "underpriced txs evicted total").unwrap(); - pub(crate) static ref UNDERPRICED_INTENTS_EVICTED: Counter = - register_counter!("blobshare_underpriced_intents_evicted_total", "underpriced intents evicted total").unwrap(); pub(crate) static ref FINALIZED_TXS: Counter = register_counter!("blobshare_finalized_txs_total", "finalized txs total").unwrap(); // @@ -281,7 +279,6 @@ myprefix_test_counter{mykey=\"myvalue\"} 0 SYNC_HEAD_NUMBER.inc(); SYNC_ANCHOR_NUMBER.inc(); UNDERPRICED_TXS_EVICTED.inc(); - UNDERPRICED_INTENTS_EVICTED.inc(); FINALIZED_TXS.inc(); BLOB_SENDER_TASK_TIMES.observe(0.); BLOB_SENDER_TASK_ERRORS.inc(); diff --git a/src/packing.rs b/src/packing.rs index 8df6b00..3aa1499 100644 --- a/src/packing.rs +++ b/src/packing.rs @@ -1,11 +1,23 @@ use std::cmp; +use crate::increase_by_min_percent; + /// (len, max_len_price) -pub type Item = (usize, u128); +#[derive(Copy, Clone)] +pub struct Item { + pub len: usize, + pub max_len_price: u64, +} + +impl Item { + pub fn new(len: usize, max_len_price: u64) -> Self { + Self { len, max_len_price } + } +} const MAX_COUNT_FOR_BRUTEFORCE: usize = 8; -pub fn pack_items(items: &[Item], max_len: usize, cost_per_len: u128) -> Option> { +pub fn pack_items(items: &[Item], max_len: usize, cost_per_len: u64) -> Option> { if items.len() < MAX_COUNT_FOR_BRUTEFORCE { return pack_items_brute_force(items, max_len, cost_per_len); } @@ -13,7 +25,7 @@ pub fn pack_items(items: &[Item], max_len: usize, cost_per_len: u128) -> Option< // Filter items that don't event meet the current len price let mut items = items .iter() - .filter(|(_, max_len_price)| *max_len_price >= cost_per_len) + .filter(|item| item.max_len_price >= cost_per_len) .copied() .enumerate() .collect::>(); @@ -23,7 +35,7 @@ pub fn pack_items(items: &[Item], max_len: usize, cost_per_len: u128) -> Option< // special case // } - items.sort_by(|a, b| a.1 .0.cmp(&b.1 .0)); + items.sort_by(|a, b| a.1.len.cmp(&b.1.len)); let index_ordered = items.iter().map(|e| e.0).collect::>(); let items_sorted = items.into_iter().map(|e| e.1).collect::>(); @@ -51,7 +63,7 @@ pub fn pack_items(items: &[Item], max_len: usize, cost_per_len: u128) -> Option< pub fn pack_items_brute_force( items: &[Item], max_len: usize, - cost_per_len: u128, + cost_per_len: u64, ) -> Option> { let n = items.len(); // Max n to shift mask to @@ -59,17 +71,16 @@ pub fn pack_items_brute_force( let mut best_combination = None; let mut best_selected_len = 0; - let fixed_cost = max_len as u128 * cost_per_len; + let fixed_cost = max_len as u128 * cost_per_len as u128; // Iterate over all possible combinations 'comb: for mask in 0..(1_u32 << n) { let mut selected_len = 0; - let mut min_len_price_combination = u128::MAX; + let mut min_len_price_combination = u64::MAX; for (i, item) in items.iter().enumerate().take(n) { if mask & (1 << i) != 0 { - let (len, max_len_price) = item; - selected_len += len; + selected_len += item.len; // Invalid combination, stop early if selected_len > max_len { @@ -77,23 +88,23 @@ pub fn 
pack_items_brute_force( } // Track min len price of the combination - if *max_len_price < min_len_price_combination { - min_len_price_combination = *max_len_price; + if item.max_len_price < min_len_price_combination { + min_len_price_combination = item.max_len_price; } } } - if selected_len > 0 { + if selected_len > 0 && // Check if combination is valid - if item_is_priced_ok(fixed_cost, selected_len, min_len_price_combination) + fixed_cost / (selected_len as u128) <= min_len_price_combination as u128 + // Persist best combination && selected_len > best_selected_len - { - best_selected_len = selected_len; - best_combination = Some(mask); - // Found optimal combination - if selected_len == max_len { - break; - } + { + best_selected_len = selected_len; + best_combination = Some(mask); + // Found optimal combination + if selected_len == max_len { + break; } } } @@ -111,17 +122,13 @@ pub fn pack_items_brute_force( } } -fn item_is_priced_ok(fixed_cost: u128, selected_len: usize, max_len_price: u128) -> bool { - fixed_cost / (selected_len as u128) <= max_len_price -} - pub fn pack_items_knapsack( - items: &[(usize, u128)], + items: &[Item], max_len: usize, - _cost_per_len: u128, + _cost_per_len: u64, ) -> Option> { // TODO: consider max_cost - let item_lens = items.iter().map(|e| e.0).collect::>(); + let item_lens = items.iter().map(|e| e.len).collect::>(); Some(knapsack(max_len, &item_lens, &item_lens)) } @@ -154,9 +161,9 @@ fn knapsack(w_max: usize, wt: &[usize], val: &[usize]) -> Vec { /// Expects items to by sorted ascending by data len pub fn pack_items_greedy_sorted( - items: &[(usize, u128)], + items: &[Item], max_len: usize, - cost_per_len: u128, + cost_per_len: u64, ) -> Option> { // Keep only items that price at least the current cost @@ -174,7 +181,9 @@ pub fn pack_items_greedy_sorted( return None; } else { // Handles low values to at ensure that min_cost increases in each loop - min_cost_per_len_to_select = percent_mult_ceil(min_cost_per_len_to_select, 110); + min_cost_per_len_to_select = + increase_by_min_percent(min_cost_per_len_to_select, 1.1); + continue; } } PickResult::EmptySelection => return None, @@ -189,22 +198,22 @@ enum PickResult { } fn pick_first_items_sorted_ascending( - items: &[(usize, u128)], + items: &[Item], max_len: usize, - cost_per_len: u128, - min_cost_per_len_to_select: u128, + cost_per_len: u64, + min_cost_per_len_to_select: u64, ) -> PickResult { let mut len = 0; - let mut min_max_price = u128::MAX; + let mut min_max_price = u64::MAX; let mut indexes = vec![]; for (i, item) in items.iter().enumerate() { - if item.1 >= min_cost_per_len_to_select { + if item.max_len_price >= min_cost_per_len_to_select { // Ascending sort, any next item will be over the limit - if len + item.0 > max_len { + if len + item.len > max_len { break; } - len += item.0; - min_max_price = cmp::min(min_max_price, item.1); + len += item.len; + min_max_price = cmp::min(min_max_price, item.max_len_price); indexes.push(i); } } @@ -213,27 +222,20 @@ fn pick_first_items_sorted_ascending( // effective_cost_per_len = max_len * cost_per_len / len < min_max_price if len == 0 { PickResult::EmptySelection - } else if (max_len as u128 * cost_per_len) <= len as u128 * min_max_price { + } else if (max_len as u128 * cost_per_len as u128) <= len as u128 * min_max_price as u128 { PickResult::Some(indexes) } else { PickResult::InvalidSelection } } -fn percent_mult_ceil(value: u128, percent: u128) -> u128 { - let new_value = (value * percent) / 100; - if new_value == value { - new_value + 1 - } else { - 
new_value - } -} - #[cfg(test)] mod tests { use super::*; use proptest::prelude::*; + type ItemTuple = (usize, u64); + #[test] fn test_pack_items_brute_force_manual() { // Empty case @@ -271,9 +273,9 @@ mod tests { fn run_test_brute_force( max_len: usize, - cost_per_len: u128, - expected_best_combination: Option<&[Item]>, - extra_items: &[Item], + cost_per_len: u64, + expected_best_combination: Option<&[ItemTuple]>, + extra_items: &[ItemTuple], ) { let mut items = vec![]; if let Some(combination) = expected_best_combination { @@ -281,7 +283,7 @@ mod tests { } items.extend_from_slice(extra_items); - let best_combination = pack_items_brute_force(&items, max_len, cost_per_len); + let best_combination = pack_items_brute_force(&from_tuples(&items), max_len, cost_per_len); if best_combination != expected_best_combination.map(|v| (0..v.len()).collect()) { panic!( @@ -296,11 +298,11 @@ mod tests { proptest! { #[test] fn test_pack_items_brute_force_proptest( - items in prop::collection::vec((0..50usize, 1..1000u128), 1..10), // Generate vectors of items (length, max_price) + items in prop::collection::vec((0..50usize, 1..1000 as u64), 1..10), // Generate vectors of items (length, max_price) max_len in 1..100usize, // Random max length - cost_per_len in 1..10u128, // Random price per length unit + cost_per_len in 1..10 as u64, // Random price per length unit ) { - if let Some(indexes) = pack_items_brute_force(&items, max_len, cost_per_len) { + if let Some(indexes) = pack_items_brute_force(&from_tuples(&items), max_len, cost_per_len) { let selected_items = unwrap_items(indexes, &items); let selected_len = items_total_len(&selected_items); prop_assert!(selected_len <= max_len); @@ -311,12 +313,18 @@ mod tests { } } - fn is_priced_ok(item: &Item, max_len: usize, cost_per_len: u128, selected_len: usize) -> bool { - let effective_cost_per_len = (max_len as u128 * cost_per_len) / selected_len as u128; - effective_cost_per_len <= item.1 + fn is_priced_ok( + item: &ItemTuple, + max_len: usize, + cost_per_len: u64, + selected_len: usize, + ) -> bool { + let effective_cost_per_len = + (max_len as u128 * cost_per_len as u128) / selected_len as u128; + effective_cost_per_len as u64 <= item.1 } - fn items_total_len(items: &[Item]) -> usize { + fn items_total_len(items: &[ItemTuple]) -> usize { items.iter().map(|e| e.0).sum() } @@ -351,8 +359,11 @@ mod tests { fn run_test_knapsack_equals_bruteforce(item_lens: &[usize], max_len: usize) -> bool { let items = item_lens .iter() - .map(|len| (*len, 10 * max_len as u128)) - .collect::>(); + .map(|len| Item { + len: *len, + max_len_price: 10 * max_len as u64, + }) + .collect::>(); let selected_indexes_knapsack = pack_items_knapsack(&items, max_len, 1).unwrap(); @@ -362,7 +373,71 @@ mod tests { return selected_indexes_knapsack == selected_indexes_bruteforce; } - fn unwrap_items(indexes: Vec, items: &[Item]) -> Vec { + fn unwrap_items(indexes: Vec, items: &[T]) -> Vec { indexes.iter().map(|i| items[*i]).collect() } + + fn from_tuples(items: &[ItemTuple]) -> Vec { + items.iter().map(|(l, m)| Item::new(*l, *m)).collect() + } + + // + // pack items + // + const MAX_LEN: usize = 100_000; + + #[test] + fn select_next_blob_items_case_no_items() { + run_pack_items_test(&[], 1, None); + } + + #[test] + fn select_next_blob_items_case_one_small() { + run_pack_items_test(&[(MAX_LEN / 4, 1)], 1, None); + } + + #[test] + fn select_next_blob_items_case_one_big() { + run_pack_items_test(&[(MAX_LEN, 1)], 1, Some(&[(MAX_LEN, 1)])); + } + + #[test] + fn 
select_next_blob_items_case_multiple_small() { + run_pack_items_test( + &[ + (MAX_LEN / 4, 1), + (MAX_LEN / 4, 2), + (MAX_LEN / 2, 3), + (MAX_LEN / 2, 4), + ], + 1, + Some(&[(MAX_LEN / 4, 2), (MAX_LEN / 4, 1), (MAX_LEN / 2, 3)]), + ); + } + + fn run_pack_items_test( + items: &[ItemTuple], + price_per_len: u64, + expected_selected_items: Option<&[ItemTuple]>, + ) { + let selected_indexes = pack_items(&from_tuples(&items), MAX_LEN, price_per_len); + let selected_items = selected_indexes.map(|idxs| unwrap_items(idxs, &items)); + + assert_eq!( + items_to_summary(selected_items), + items_to_summary(expected_selected_items.map(|v| v.to_vec())) + ) + } + + fn items_to_summary(items: Option>) -> Option> { + items.map(|mut items| { + // Sort for stable comparision + items.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| b.1.cmp(&a.1))); + + items + .iter() + .map(|d| format!("(MAX / {}, {})", MAX_LEN / d.0, d.1)) + .collect() + }) + } } diff --git a/src/routes/mod.rs b/src/routes/mod.rs index e0380a1..d4f4409 100644 --- a/src/routes/mod.rs +++ b/src/routes/mod.rs @@ -3,19 +3,20 @@ use ethers::signers::Signer; use ethers::types::{Address, TxHash, H256}; use eyre::{eyre, Result}; use serde::{Deserialize, Serialize}; -use std::str::FromStr; use std::sync::Arc; pub mod post_data; -use crate::data_intent::{DataIntent, DataIntentId, DataIntentSummary}; -use crate::data_intent_tracker::DataIntentItemStatus; +use crate::data_intent::DataIntentId; +use crate::data_intent_tracker::{DataIntentDbRowFull, DataIntentSummary}; use crate::eth_provider::EthProvider; -use crate::sync::{AnchorBlock, TxInclusion}; -use crate::utils::{e400, e500}; +use crate::sync::AnchorBlock; +use crate::utils::e500; use crate::AppData; pub use post_data::{PostDataIntentV1, PostDataIntentV1Signed, PostDataResponse}; +// TODO: Add route to cancel data intents by ID + #[get("/")] pub(crate) async fn get_home() -> impl Responder { HttpResponse::Ok() @@ -39,9 +40,10 @@ pub(crate) async fn get_sender(data: web::Data>) -> impl Responder pub(crate) async fn get_sync( data: web::Data>, ) -> Result { + let (anchor_block, synced_head) = data.get_sync().await; Ok(HttpResponse::Ok().json(SyncStatus { - anchor_block: data.sync.read().await.get_anchor().into(), - synced_head: data.sync.read().await.get_head(), + anchor_block, + synced_head, node_head: get_node_head(&data.provider).await.map_err(e500)?, })) } @@ -54,56 +56,26 @@ pub(crate) async fn get_sync( pub(crate) async fn get_data( data: web::Data>, ) -> Result { - let items: Vec = { data.data_intent_tracker.read().await.get_all_pending() } - .iter() - .map(|item| item.into()) - .collect(); + let items: Vec = data.get_all_pending().await; Ok(HttpResponse::Ok().json(items)) } #[get("/v1/data/{id}")] pub(crate) async fn get_data_by_id( data: web::Data>, - id: web::Path, + id: web::Path, ) -> Result { - let id = DataIntentId::from_str(&id).map_err(e400)?; - let item: DataIntent = { - data.data_intent_tracker - .read() - .await - .data_by_id(&id) - .ok_or_else(|| e400(format!("no item found for ID {}", id)))? 
- }; + // TODO: Try to unify types, too many `DataIntent*` things + let item: DataIntentDbRowFull = data.data_intent_by_id(&id).await.map_err(e500)?; Ok(HttpResponse::Ok().json(item)) } #[get("/v1/status/{id}")] pub(crate) async fn get_status_by_id( data: web::Data>, - id: web::Path, + id: web::Path, ) -> Result { - let id = DataIntentId::from_str(&id).map_err(e400)?; - let status = { data.data_intent_tracker.read().await.status_by_id(&id) }; - - let status = match status { - DataIntentItemStatus::Unknown => DataIntentStatus::Unknown, - DataIntentItemStatus::Evicted => DataIntentStatus::Evicted, - DataIntentItemStatus::Pending => DataIntentStatus::Pending, - DataIntentItemStatus::Included(tx_hash) => { - match data.sync.read().await.get_tx_status(tx_hash) { - Some(TxInclusion::Pending) => DataIntentStatus::InPendingTx { tx_hash }, - Some(TxInclusion::Included(block_hash)) => DataIntentStatus::InConfirmedTx { - tx_hash, - block_hash, - }, - None => { - // Should never happen, review this case - DataIntentStatus::Unknown - } - } - } - }; - + let status: DataIntentStatus = data.status_by_id(&id).await.map_err(e500)?; Ok(HttpResponse::Ok().json(status)) } @@ -117,16 +89,6 @@ pub(crate) async fn get_balance_by_address( Ok(HttpResponse::Ok().json(balance)) } -#[tracing::instrument(skip(data))] -#[get("/v1/last_seen_nonce/{address}")] -pub(crate) async fn get_last_seen_nonce_by_address( - data: web::Data>, - address: web::Path
, -) -> Result { - let nonce: Option = data.sign_nonce_tracker.read().await.get(&address).copied(); - Ok(HttpResponse::Ok().json(nonce)) -} - #[derive(Serialize, Deserialize, Clone, Debug)] pub struct SenderDetails { pub address: Address, @@ -157,7 +119,6 @@ pub struct SyncStatus { #[derive(Serialize, Deserialize, Debug)] pub enum DataIntentStatus { Unknown, - Evicted, Pending, InPendingTx { tx_hash: TxHash }, InConfirmedTx { tx_hash: TxHash, block_hash: H256 }, @@ -168,17 +129,14 @@ impl DataIntentStatus { match self { DataIntentStatus::InConfirmedTx { .. } | DataIntentStatus::InPendingTx { .. } - | DataIntentStatus::Pending - | DataIntentStatus::Evicted => true, + | DataIntentStatus::Pending => true, DataIntentStatus::Unknown => false, } } pub fn is_in_tx(&self) -> Option { match self { - DataIntentStatus::Unknown | DataIntentStatus::Evicted | DataIntentStatus::Pending => { - None - } + DataIntentStatus::Unknown | DataIntentStatus::Pending => None, DataIntentStatus::InPendingTx { tx_hash } => Some(*tx_hash), DataIntentStatus::InConfirmedTx { tx_hash, .. } => Some(*tx_hash), } @@ -187,7 +145,6 @@ impl DataIntentStatus { pub fn is_in_block(&self) -> Option<(H256, TxHash)> { match self { DataIntentStatus::Unknown - | DataIntentStatus::Evicted | DataIntentStatus::Pending | DataIntentStatus::InPendingTx { .. } => None, DataIntentStatus::InConfirmedTx { @@ -212,15 +169,3 @@ async fn get_node_head(provider: &EthProvider) -> Result { .ok_or_else(|| eyre!("block number {} has not hash", node_head_number))?, }) } - -impl AppData { - #[tracing::instrument(skip(self))] - async fn balance_of_user(&self, from: &Address) -> i128 { - self.sync.read().await.balance_with_pending(from) - - self - .data_intent_tracker - .read() - .await - .pending_intents_total_cost(from) as i128 - } -} diff --git a/src/routes/post_data.rs b/src/routes/post_data.rs index c3d2d6f..a9f546b 100644 --- a/src/routes/post_data.rs +++ b/src/routes/post_data.rs @@ -7,11 +7,12 @@ use serde::{Deserialize, Serialize}; use serde_utils::hex_vec; use std::sync::Arc; -use crate::data_intent::{DataHash, DataIntent, DataIntentNoSignature}; +use crate::client::DataIntentId; +use crate::data_intent::{BlobGasPrice, DataHash, DataIntent, DataIntentNoSignature}; use crate::utils::{deserialize_signature, e400, e500, unix_timestamps_millis}; -use crate::AppData; +use crate::{AppData, MAX_PENDING_DATA_LEN_PER_USER, MAX_USABLE_BLOB_DATA_LEN}; -#[tracing::instrument(skip(body, data))] +#[tracing::instrument(skip(body, data), err)] #[post("/v1/data")] pub(crate) async fn post_data( body: web::Json, @@ -20,56 +21,54 @@ pub(crate) async fn post_data( // .try_into() verifies the signature let nonce = body.nonce; let data_intent: DataIntent = body.into_inner().try_into().map_err(e400)?; + let from = *data_intent.from(); + let data_len = data_intent.data_len(); - // Check user has enough balance to cover the max cost allowed - let balance = data.balance_of_user(data_intent.from()).await; - if balance < data_intent.max_cost() as i128 { + // TODO: Consider support for splitting data over mutliple blobs + if data_intent.data_len() > MAX_USABLE_BLOB_DATA_LEN { return Err(e400(eyre!( - "Insufficient balance, current balance {} requested {}", - balance, - data_intent.max_cost() + "data length {} over max usable blob data {}", + data_intent.data_len(), + MAX_USABLE_BLOB_DATA_LEN ))); } - // Check that the nonce is the next expected - // Unsafe channel, check that this message is new and not replayed - { - if let Some(last_seen_nonce) = 
data.sign_nonce_tracker.read().await.get(data_intent.from())
-        {
-            if nonce <= *last_seen_nonce {
-                return Err(e400(eyre!(
-                    "nonce {nonce} less than last seen {last_seen_nonce}"
-                )));
-            }
-        }
+    // TODO: Is this limitation necessary?
+    let pending_total_data_len = data.pending_total_data_len(&from).await;
+    if pending_total_data_len + data_len > MAX_PENDING_DATA_LEN_PER_USER {
+        return Err(e400(eyre!(
+            "pending total data_len {} over max {}",
+            pending_total_data_len + data_len,
+            MAX_PENDING_DATA_LEN_PER_USER
+        )));
+    }
+
+    // TODO: Review the time cost of syncing here
+    data.sync_data_intents().await.map_err(e500)?;
+    let balance = data.balance_of_user(&from).await;
+    let cost = data_intent.max_cost() as i128;
+    if balance < cost {
+        return Err(e400(eyre!(
+            "Insufficient balance {balance} for intent with cost {cost}"
+        )));
     }
 
-    debug!(
-        "accepted data intent from {} nonce {} data_len {} id {}",
-        data_intent.from(),
-        nonce,
-        data_intent.data_len(),
-        data_intent.id(),
-    );
-
-    // data_intent_tracker ensures no duplicates at this point, everything before this statement
-    // must be immutable checks
-    let id = data_intent.id();
-    data.data_intent_tracker
-        .write()
+    let id = data
+        .atomic_update_post_data_on_unsafe_channel(data_intent, nonce)
         .await
-        .add(data_intent)
         .map_err(e500)?;
 
+    debug!("accepted data intent from {from} nonce {nonce} data_len {data_len} id {id}");
+
     // Potentially send a blob transaction including this new participation
     data.notify.notify_one();
 
-    Ok(HttpResponse::Ok().json(PostDataResponse { id: id.to_string() }))
+    Ok(HttpResponse::Ok().json(PostDataResponse { id }))
 }
 
 #[derive(Serialize, Deserialize, Clone, Debug)]
 pub struct PostDataResponse {
-    pub id: String,
+    pub id: DataIntentId,
 }
 
 /// TODO: Expose a "login with Ethereum" function and expose the non-signed variant
@@ -81,7 +80,7 @@ pub struct PostDataIntentV1 {
     #[serde(with = "hex_vec")]
     pub data: Vec<u8>,
     /// Max price user is willing to pay in wei
-    pub max_blob_gas_price: u128,
+    pub max_blob_gas_price: BlobGasPrice,
 }
 
 /// PostDataIntent message for non-authenticated channels
@@ -91,7 +90,10 @@ pub struct PostDataIntentV1Signed {
     /// DataIntent nonce, to allow replay protection. Each new intent must have a nonce higher than
     /// the last known nonce from this `from` sender. Re-pricings will be done with a different
    /// nonce. For simplicity just pick the current UNIX timestamp in milliseconds.
-    pub nonce: u128,
+    ///
+    /// u64::MAX is 18446744073709551615, able to represent unix timestamps in milliseconds way into
+    /// the future.
+    pub nonce: u64,
     /// Signature over := data | nonce | max_blob_gas_price
     #[serde(with = "hex_vec")]
     pub signature: Vec<u8>,
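The nonce scheme above only needs each new intent from a sender to carry a strictly larger u64 than the last one seen, so any monotonic source works; unix milliseconds are the convenient choice, mirroring the `unix_timestamps_millis` helper in src/utils:

    // 2^64 ms is roughly 584 million years, so unix-millis never overflow a u64.
    fn timebased_nonce() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("clock before unix epoch")
            .as_millis() as u64
    }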
@@ -101,7 +103,7 @@ impl PostDataIntentV1Signed {
     pub async fn with_signature(
         wallet: &LocalWallet,
         intent: PostDataIntentV1,
-        nonce: Option<u128>,
+        nonce: Option<u64>,
     ) -> Result<Self> {
         if wallet.address() != intent.from {
             bail!(
@@ -121,7 +123,7 @@ impl PostDataIntentV1Signed {
         })
     }
 
-    fn sign_hash(intent: &PostDataIntentV1, nonce: u128) -> Vec<u8> {
+    fn sign_hash(intent: &PostDataIntentV1, nonce: u64) -> Vec<u8> {
         let data_hash = DataHash::from_data(&intent.data);
 
         // Concat: data_hash | nonce | max_blob_gas_price
diff --git a/src/sender/main.rs b/src/sender/main.rs
index 36c6517..d80240e 100644
--- a/src/sender/main.rs
+++ b/src/sender/main.rs
@@ -1,10 +1,7 @@
 use std::fs;
-use std::str::FromStr;
 use std::time::Duration;
 
-use blob_share::client::{
-    Client, DataIntentId, DataIntentStatus, EthProvider, GasPreference, NoncePreference,
-};
+use blob_share::client::{Client, DataIntentStatus, EthProvider, GasPreference, NoncePreference};
 use clap::Parser;
 use dotenv::dotenv;
 use ethers::middleware::SignerMiddleware;
@@ -134,7 +131,7 @@ async fn main() -> Result<()> {
     let response = client
         .post_data_with_wallet(&wallet, data, &gas, &nonce)
         .await?;
-    let id = DataIntentId::from_str(&response.id)?;
+    let id = response.id;
     println!("{:?}", id);
 
     loop {
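For context, the round trip this sender binary performs against the API, condensed into one sketch; the base URL is a placeholder, and the rest mirrors imports and calls visible in this diff:

    use blob_share::client::{Client, EthProvider, GasPreference, NoncePreference};
    use ethers::{providers::{Http, Provider}, signers::LocalWallet};

    async fn submit(wallet: &LocalWallet, provider: Provider<Http>, data: Vec<u8>) -> eyre::Result<()> {
        let client = Client::new("http://localhost:8000")?;
        // Bid at 1.0x the current head blob gas price, with a unix-millis replay nonce
        let gas = GasPreference::RelativeToHead(EthProvider::Http(provider), 1.0);
        let nonce = NoncePreference::Timebased;
        let response = client.post_data_with_wallet(wallet, data, &gas, &nonce).await?;
        println!("data intent id: {}", response.id); // a Uuid after this change
        Ok(())
    }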
diff --git a/src/sync.rs b/src/sync.rs
index a5efac0..61f7c8b 100644
--- a/src/sync.rs
+++ b/src/sync.rs
@@ -33,12 +33,28 @@ type Nonce = u64;
 /// 2. Wait for next block, tx included?
 /// 2.1. Tx included => Ok
 /// 2.2. Tx not included, blob gas price increase?
-/// 2.2.1. Increase => cancel tx
+/// 2.2.1. Increase => allow to replace
 /// 2.2.2. Same or decrease => Ok
 /// 3. Wait for re-org
 /// 3.1. Tx included in new head chain => Ok
 /// 3.2. Tx dropped => jump to 2.2.
 ///
+/// # Tx cancellation
+///
+/// If a transaction is underpriced, there are multiple intents tied to that transaction that
+/// should be packed into a new transaction. When an underpriced transaction is replaced with
+/// another transaction of a higher gas price, when can the previous transaction be forgotten?
+/// How should the intermediary state of intents still participating in an old under-priced
+/// transaction be handled?
+///
+/// 1. Add new pending data intent DataIntent(tx_hash = None)
+/// 2. Data intent included in tx DataIntent(tx_hash = TxHash)
+/// 3. Tx becomes underpriced
+/// 4. Attempt to include in Tx
+/// 5a. New transaction gets included
+/// 5b. Previous transaction gets included
+/// 5c. A re-org changes an included transaction
+///
 pub struct BlockSync {
     anchor_block: AnchorBlock,
     unfinalized_head_chain: Vec,
@@ -173,6 +189,18 @@ impl BlockSync {
         finalized_balance + balance_delta_block_inclusions - cost_of_pending_txs as i128
     }
 
+    pub fn pending_txs_data_len(&self, address: &Address) -> usize {
+        self.pending_transactions
+            .values()
+            .map(|tx| {
+                tx.participants
+                    .iter()
+                    .map(|p| if &p.address == address { p.data_len } else { 0 })
+                    .sum::<usize>()
+            })
+            .sum()
+    }
+
     pub fn get_tx_status(&self, tx_hash: TxHash) -> Option<TxInclusion> {
         for block in self.unfinalized_head_chain.iter() {
             for tx in &block.blob_txs {
diff --git a/src/utils.rs b/src/utils/mod.rs
similarity index 55%
rename from src/utils.rs
rename to src/utils/mod.rs
index 8fc6111..83c421a 100644
--- a/src/utils.rs
+++ b/src/utils/mod.rs
@@ -1,13 +1,13 @@
 use actix_web::http::header::AUTHORIZATION;
 use actix_web::HttpRequest;
-use ethers::types::{Address, Signature};
+use ethers::types::{Address, Signature, TxHash, H160, H256};
 use eyre::{bail, eyre, Context, Result};
 use reqwest::Response;
-use std::cmp::PartialEq;
-use std::fmt::{Debug, Display};
-use std::ops::{Add, Div, Mul};
+use std::fmt::{self, Debug, Display};
 use std::time::{SystemTime, UNIX_EPOCH};
 
+pub mod option_hex_vec;
+
 // Return an opaque 500 while preserving the error root's cause for logging.
 #[allow(dead_code)]
 pub(crate) fn e500<T>(e: T) -> actix_web::Error
 where
@@ -28,16 +28,8 @@
 
-/// Multiplies an integer value by `percent / 100`, if the resulting value is the same, returns the
-/// value + 1.
-pub fn increase_by_min_percent<T>(value: T, percent: T) -> T
-where
-    T: Copy + Mul + Div + Add + PartialEq + From,
-{
-    let new_value = (percent * value) / T::from(100);
-    if new_value == value {
-        value + T::from(1)
-    } else {
-        value
-    }
+/// Multiplies an integer value by `fraction`, rounding the result up. Goes through f64, so some
+/// precision loss is possible for values near u64::MAX (see tests below).
+pub fn increase_by_min_percent(value: u64, fraction: f64) -> u64 {
+    ((value as f64) * fraction).ceil() as u64
 }
 
 /// Post-process a reqwest response to handle non 2xx codes gracefully
@@ -54,22 +46,38 @@ pub async fn is_ok_response(response: Response) -> Result<Response> {
     }
 }
 
-/// Return 0x prefixed hex representation of address (not checksum)
-pub fn address_to_hex(addr: Address) -> String {
+/// Return 0x prefixed hex representation of address (lowercase, not checksum)
+pub fn address_to_hex_lowercase(addr: Address) -> String {
     format!("0x{}", hex::encode(addr.to_fixed_bytes()))
 }
 
+/// Convert `Vec<u8>` into ethers Address H160 type. Errors if v.len() != 20.
+pub fn address_from_vec(v: Vec<u8>) -> Result<Address> {
+    let fixed_vec: [u8; 20] = v
+        .try_into()
+        .map_err(|_| eyre!("address as vec not 20 bytes in len"))?;
+    Ok(H160(fixed_vec))
+}
+
+/// Convert `Vec<u8>` into ethers TxHash H256 type. Errors if v.len() != 32.
+pub fn txhash_from_vec(v: Vec<u8>) -> Result<TxHash> {
+    let fixed_vec: [u8; 32] = v
+        .try_into()
+        .map_err(|_| eyre!("txhash as vec not 32 bytes in len"))?;
+    Ok(H256(fixed_vec))
+}
+
 /// Deserialize ethers' Signature
 pub fn deserialize_signature(signature: &[u8]) -> Result<Signature> {
     Ok(signature.try_into()?)
 }
 
 /// Return unix timestamp in milliseconds
-pub fn unix_timestamps_millis() -> u128 {
+pub fn unix_timestamps_millis() -> u64 {
     SystemTime::now()
         .duration_since(UNIX_EPOCH)
         .expect("Time went backwards")
-        .as_millis()
+        .as_millis() as u64
 }
 
 /// Extract Bearer token from actix_web request, or return an error
@@ -105,3 +113,43 @@ pub fn parse_basic_auth(auth: &str) -> Result {
         bail!("Invalid auth format. Use 'username:password'")
     }
 }
+
+trait ResultExt<T, E>
+where
+    E: fmt::Display,
+{
+    fn prefix_err(self, prefix: &str) -> eyre::Result<T>;
+}
+
+impl<T, E> ResultExt<T, E> for Result<T, E>
+where
+    E: fmt::Display,
+{
+    fn prefix_err(self, prefix: &str) -> eyre::Result<T> {
+        self.map_err(|e| eyre::eyre!("{}: {}", prefix, e.to_string().replace('\n', "; ")))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::increase_by_min_percent;
+
+    #[test]
+    fn test_increase_by_min_percent() {
+        // Bumps to more than 101% if low resolution
+        assert_eq!(increase_by_min_percent(1, 1.01), 2);
+        // Bump by some percents
+        assert_eq!(increase_by_min_percent(100, 1.01), 101);
+        assert_eq!(increase_by_min_percent(1000000000, 1.01), 1010000000);
+        // Bump close to u64::MAX
+        assert_eq!(
+            increase_by_min_percent(10000000000000000000, 1.8),
+            18000000000000000000
+        );
+        // Don't bump with fraction exactly 1
+        assert_eq!(increase_by_min_percent(1, 1.), 1);
+        assert_eq!(increase_by_min_percent(1000000000, 1.), 1000000000);
+        // Precision loss
+        assert_eq!(increase_by_min_percent(u64::MAX - 512, 1.), u64::MAX);
+    }
+}
diff --git a/src/utils/option_hex_vec.rs b/src/utils/option_hex_vec.rs
new file mode 100644
index 0000000..ec00f77
--- /dev/null
+++ b/src/utils/option_hex_vec.rs
@@ -0,0 +1,56 @@
+//! Formats `Option<Vec<u8>>` as a 0x-prefixed hex string.
+//!
+//! E.g., `vec![0, 1, 2, 3]` serializes as `"0x00010203"`.
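A usage sketch for this serde helper, matching how the DB row structs in src/data_intent_tracker.rs apply it; `None` maps to JSON `null`, `Some(bytes)` to a 0x-prefixed string:

    #[derive(serde::Serialize, serde::Deserialize)]
    struct Example {
        #[serde(with = "crate::utils::option_hex_vec")]
        sig: Option<Vec<u8>>,
    }

    // { "sig": "0x00010203" }  <->  Example { sig: Some(vec![0, 1, 2, 3]) }
    // { "sig": null }          <->  Example { sig: None }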
+
+use serde::{
+    de::{self, Visitor},
+    Deserializer, Serializer,
+};
+use serde_utils::hex::PrefixedHexVisitor;
+use std::fmt;
+
+pub fn serialize<S>(bytes: &Option<Vec<u8>>, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    match bytes {
+        Some(bytes) => {
+            let mut hex_string: String = "0x".to_string();
+            hex_string.push_str(&hex::encode(bytes));
+
+            serializer.serialize_str(&hex_string)
+        }
+        None => serializer.serialize_none(),
+    }
+}
+
+pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<Vec<u8>>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    deserializer.deserialize_option(OptionPrefixedHexVisitor)
+}
+
+pub struct OptionPrefixedHexVisitor;
+
+impl<'de> Visitor<'de> for OptionPrefixedHexVisitor {
+    type Value = Option<Vec<u8>>;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str("a hex string with 0x prefix or null")
+    }
+
+    fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Ok(Some(deserializer.deserialize_string(PrefixedHexVisitor)?))
+    }
+
+    fn visit_none<E>(self) -> Result<Self::Value, E>
+    where
+        E: de::Error,
+    {
+        Ok(None)
+    }
+}
diff --git a/tests/api/geth_helpers.rs b/tests/api/geth_helpers.rs
index 1fa2a4a..dbd787a 100644
--- a/tests/api/geth_helpers.rs
+++ b/tests/api/geth_helpers.rs
@@ -5,6 +5,7 @@ use ethers::{
     types::{Address, H256},
 };
 use eyre::{bail, Result};
+use lazy_static::lazy_static;
 use rand::{distributions::Alphanumeric, Rng};
 use serde_json::json;
 use std::{
@@ -24,6 +25,10 @@ const GETH_BUILD_TAG: &str = "geth-dev-cancun:local";
 const DEV_PRIVKEY: &str = "392a230386a19b84b6b865067d5493b158e987d28104ab16365854a8fd851bb0";
 const DEV_PUBKEY: &str = "0xdbD48e742FF3Ecd3Cb2D557956f541b6669b3277";
 
+lazy_static! {
+    pub static ref GENESIS_FUNDS_ADDR: Address = Address::from_str(DEV_PUBKEY).unwrap();
+}
+
 pub fn get_jwtsecret_filepath() -> String {
     path_from_cwd(&["tests", "artifacts", "jwtsecret"])
 }
@@ -35,12 +40,16 @@ const IS_MACOS: bool = false;
 
 pub type WalletWithProvider = SignerMiddleware<Provider<Http>, LocalWallet>;
 
-pub fn get_wallet_genesis_funds(
+pub fn get_wallet_genesis_funds() -> LocalWallet {
+    LocalWallet::from_bytes(&hex::decode(DEV_PRIVKEY).unwrap()).unwrap()
+}
+
+pub fn get_signer_genesis_funds(
     eth_provider_url: &str,
     chain_id: u64,
 ) -> Result<WalletWithProvider> {
-    let wallet = LocalWallet::from_bytes(&hex::decode(DEV_PRIVKEY)?)?;
-    assert_eq!(wallet.address(), Address::from_str(DEV_PUBKEY)?);
+    let wallet = get_wallet_genesis_funds();
+    assert_eq!(wallet.address(), *GENESIS_FUNDS_ADDR);
 
     let provider = Provider::<Http>::try_from(eth_provider_url)?;
     Ok(SignerMiddleware::new(
@@ -77,7 +86,7 @@ impl GethInstance {
     }
 
     pub fn http_provider(&self) -> Result<WalletWithProvider> {
-        get_wallet_genesis_funds(self.http_url(), self.chain_id)
+        get_signer_genesis_funds(self.http_url(), self.chain_id)
     }
 
     pub fn genesis_block_hash_hex(&self) -> String {
@@ -97,16 +106,18 @@ pub async fn spawn_geth(mode: GethMode) -> GethInstance {
     log::info!("spawn geth with Dockerfile {}", geth_dockerfile_dirpath);
 
     // Make sure image is available
-    run_until_exit(
-        "docker",
-        &[
-            "build",
-            &format!("--build-arg='tag={geth_version}'"),
-            &format!("--tag={GETH_BUILD_TAG}"),
-            &geth_dockerfile_dirpath,
-        ],
-    )
-    .unwrap();
+    if env::var("OFFLINE_MODE").is_err() {
+        run_until_exit(
+            "docker",
+            &[
+                "build",
+                &format!("--build-arg='tag={geth_version}'"),
+                &format!("--tag={GETH_BUILD_TAG}"),
+                &geth_dockerfile_dirpath,
+            ],
+        )
+        .unwrap();
+    }
 
     let port_http = unused_port();
     let port_ws = unused_port();
diff --git a/tests/api/helpers.rs b/tests/api/helpers.rs
index 8b21aab..bbe5aad 100644
--- a/tests/api/helpers.rs
+++ b/tests/api/helpers.rs
@@ -1,27 +1,29 @@
 use ethers::{
     providers::{Http, Middleware, Provider},
     signers::LocalWallet,
-    types::{Address, Transaction, TransactionRequest, H256},
+    types::{Address, Block, Transaction, TransactionRequest, TxHash, H256, U256},
     utils::parse_ether,
 };
 use eyre::{bail, eyre, Result};
 use futures::future::try_join_all;
 use log::LevelFilter;
+use rand::{distributions::Alphanumeric, Rng};
+use sqlx::{Connection, Executor, MySqlConnection, MySqlPool};
 use std::{
-    collections::HashSet,
+    collections::{HashMap, HashSet},
     future::Future,
     hash::Hash,
     mem,
-    str::FromStr,
     time::{Duration, Instant},
 };
 use tempfile::{tempdir, TempDir};
 use tokio::time::{sleep, timeout};
 
 use blob_share::{
-    client::{DataIntentId, EthProvider, GasPreference, NoncePreference},
+    anchor_block::{anchor_block_from_starting_block, persist_anchor_block_to_db},
+    client::{DataIntentId, EthProvider, GasPreference, NoncePreference, PostDataResponse},
     consumer::BlobConsumer,
-    App, Args, Client, PushMetricsFormat,
+    App, Args, BlockGasSummary, Client, PushMetricsFormat,
 };
 
 use crate::{
@@ -56,21 +58,39 @@ pub enum AppStatus {
     Running,
 }
 
+#[derive(Default)]
+pub struct Config {
+    initial_topups: HashMap<Address, i128>,
+    initial_excess_blob_gas: u128,
+}
+
+impl Config {
+    pub fn add_initial_topup(mut self, address: Address, balance: i128) -> Self {
+        self.initial_topups.insert(address, balance);
+        self
+    }
+
+    pub fn set_initial_excess_blob_gas(mut self, value: u128) -> Self {
+        self.initial_excess_blob_gas = value;
+        self
+    }
+}
+
 impl TestHarness {
     pub async fn spawn_with_el_only() -> Self {
-        TestHarness::build(TestMode::ELOnly)
+        TestHarness::build(TestMode::ELOnly, None)
             .await
             .spawn_app_in_background()
     }
 
     #[allow(dead_code)]
     pub async fn spawn_with_chain() -> Self {
-        TestHarness::build(TestMode::WithChain)
+        TestHarness::build(TestMode::WithChain, None)
             .await
             .spawn_app_in_background()
     }
 
-    pub async fn build(test_mode: TestMode) -> Self {
+    pub async fn build(test_mode: TestMode, test_config: Option<Config>) -> Self {
         // Lazy::force(&TRACING);
 
         // From env_logger docs to capture logs in tests
@@ -100,6 +120,34 @@ impl TestHarness {
             }
         };
 
+        let eth_provider = Provider::<Http>::try_from(geth_instance.http_url()).unwrap();
+        let eth_provider = eth_provider.interval(Duration::from_millis(50));
+
+        // Randomise configuration to ensure test isolation
+        let database_name = random_alphabetic_string(16);
+        let database_url_without_db = "mysql://root:password@localhost:3306";
+        // Create and migrate the database
+        configure_database(database_url_without_db, &database_name).await;
+        let database_url = format!("{database_url_without_db}/{database_name}");
+
+        // Apply test config to anchor block
+        if let Some(test_config) = test_config {
+            let provider = EthProvider::Http(eth_provider.clone());
+            let mut anchor_block = anchor_block_from_starting_block(&provider, 0)
+                .await
+                .unwrap();
+            anchor_block.finalized_balances = test_config.initial_topups;
+            anchor_block.gas = block_gas_summary_from_excess(
+                test_config.initial_excess_blob_gas,
+                &anchor_block.gas,
+            );
+
+            let db_pool = connect_db_pool(&database_url).await;
+            persist_anchor_block_to_db(&db_pool, anchor_block)
+                .await
+                .unwrap();
+        }
+
         let temp_data_dir = tempdir().unwrap();
 
         let args = Args {
@@ -117,6 +165,8 @@
             panic_on_background_task_errors: true,
             finalize_depth: FINALIZE_DEPTH,
             max_pending_transactions: 6,
+            // TODO: De-duplicate or configure properly
+            database_url:
format!("{database_url_without_db}/{database_name}"),
             metrics: false,
             metrics_port: 0,
             metrics_bearer_token: None,
@@ -131,9 +181,6 @@ impl TestHarness {
         let base_url = format!("http://127.0.0.1:{}", app.port());
         let sender_address = app.sender_address();
 
-        let eth_provider = Provider::<Http>::try_from(geth_instance.http_url()).unwrap();
-        let eth_provider = eth_provider.interval(Duration::from_millis(50));
-
         let client = Client::new(&base_url).unwrap();
 
         Self {
@@ -162,7 +209,7 @@ impl TestHarness {
     pub async fn spawn_with_fn<F, Fut>(mut self, f: F) -> Result<()>
     where
         F: FnOnce(Self) -> Fut,
-        Fut: Future<Output = Result<()>>,
+        Fut: Future<Output = ()>,
     {
         let app = match mem::replace(&mut self.app_status, AppStatus::Running) {
             AppStatus::Running => panic!("app already running"),
@@ -182,7 +229,7 @@ impl TestHarness {
             result = f_future => {
                 // This branch is executed if f() finishes first
                 // f() can finish by completing the test successfully, or by encountering some error
-                return result;
+                return Ok(result);
             }
         }
     }
@@ -202,7 +249,23 @@ impl TestHarness {
             .unwrap()
     }
 
-    // $ curl -vv localhost:8000/data -X POST -H "Content-Type: application/json" --data '{"from": "0x00", "data": "0x00", "max_price": 1}'
+    /// Post data of a given length with default preferences; may return errors
+    pub async fn post_data_of_len(
+        &self,
+        wallet: &LocalWallet,
+        data_len: usize,
+    ) -> Result<PostDataResponse> {
+        self.client
+            .post_data_with_wallet(
+                wallet,
+                vec![0xff; data_len],
+                &GasPreference::RelativeToHead(EthProvider::Http(self.eth_provider.clone()), 1.0),
+                &NoncePreference::Timebased,
+            )
+            .await
+    }
+
+    /// Post data with default preferences, expecting no errors
     pub async fn post_data(
         &self,
         wallet: &LocalWallet,
@@ -220,7 +283,7 @@
             .await
             .unwrap();
 
-        DataIntentId::from_str(&res.id).unwrap()
+        res.id
     }
 
     pub async fn post_data_and_wait_for_pending(
@@ -381,13 +444,17 @@
         Ok(txs)
     }
 
-    pub async fn fund_sender_account(&self, wallet: &WalletWithProvider) {
+    pub async fn fund_sender_account(&self, wallet: &WalletWithProvider, value: Option<u128>) {
         let sender = self.client.get_sender().await.unwrap();
 
+        let value: U256 = value
+            .map(|value| value.into())
+            .unwrap_or(parse_ether("0.1").unwrap());
+
         let tx = TransactionRequest::new()
             .from(wallet.address())
             .to(sender.address)
-            .value(parse_ether("0.1").unwrap());
+            .value(value);
        let tx = wallet.send_transaction(tx, None).await.unwrap();
        timeout(Duration::from_secs(30), tx.confirmations(1))
            .await
@@ -395,11 +462,52 @@
            .unwrap();
    }
 
-    pub fn get_wallet_genesis_funds(&self) -> WalletWithProvider {
+    pub fn get_signer_genesis_funds(&self) -> WalletWithProvider {
         self.geth_instance.http_provider().unwrap()
     }
 }
 
+async fn connect_db_pool(database_url: &str) -> MySqlPool {
+    MySqlPool::connect(database_url)
+        .await
+        .expect(&format!("Failed to connect to DB {database_url}"))
+}
+
+async fn configure_database(database_url_without_db: &str, database_name: &str) -> MySqlPool {
+    println!("connecting to MySQL {database_url_without_db}, creating ephemeral database_name '{database_name}'");
+
+    // Create database
+    let mut connection = MySqlConnection::connect(database_url_without_db)
+        .await
+        .expect("Failed to connect to DB");
+    connection
+        .execute(&*format!("CREATE DATABASE {};", database_name))
+        .await
+        .expect("Failed to create database.");
+
+    // Migrate database
+    let database_url = format!("{database_url_without_db}/{database_name}");
+    let connection_pool = connect_db_pool(&database_url).await;
+    sqlx::migrate!("./migrations")
+        .run(&connection_pool)
+        .await
+        .expect("Failed to migrate the database");
+
+    connection_pool
+}
+
+/// Test mock BlockGasSummary from `excess_blob_gas` only
+fn block_gas_summary_from_excess(
+    excess_blob_gas: u128,
+    prev_gas_summary: &BlockGasSummary,
+) -> BlockGasSummary {
+    let mut block = Block::<TxHash>::default();
+    block.excess_blob_gas = Some(excess_blob_gas.into());
+    block.blob_gas_used = Some(0.into());
+    block.base_fee_per_gas = Some(prev_gas_summary.base_fee_per_gas.into());
+    BlockGasSummary::from_block(&block).unwrap()
+}
+
 pub async fn retry_with_timeout<T, F: FnMut() -> Fut, Fut>(
     mut f: F,
     timeout: Duration,
@@ -427,3 +535,13 @@ where
 pub fn unique<T: Eq + Hash>(v: &[T]) -> Vec<&T> {
     v.iter().collect::<HashSet<_>>().into_iter().collect()
 }
+
+fn random_alphabetic_string(length: usize) -> String {
+    let mut rng = rand::thread_rng();
+    std::iter::repeat(())
+        .map(|()| rng.sample(Alphanumeric))
+        .filter(|c| c.is_ascii_alphabetic())
+        .take(length)
+        .map(char::from)
+        .collect()
+}
diff --git a/tests/api/mock_el.rs b/tests/api/mock_el.rs
new file mode 100644
index 0000000..d898819
--- /dev/null
+++ b/tests/api/mock_el.rs
@@ -0,0 +1 @@
+pub struct
diff --git a/tests/api/post_intents.rs b/tests/api/post_intents.rs
index cbfd851..175e637 100644
--- a/tests/api/post_intents.rs
+++ b/tests/api/post_intents.rs
@@ -1,7 +1,13 @@
 use std::time::Duration;
 
-use crate::helpers::{retry_with_timeout, unique, TestHarness, TestMode, FINALIZE_DEPTH};
-use blob_share::{client::NoncePreference, MAX_USABLE_BLOB_DATA_LEN};
+use crate::{
+    geth_helpers::GENESIS_FUNDS_ADDR,
+    helpers::{retry_with_timeout, unique, Config, TestHarness, TestMode, FINALIZE_DEPTH},
+};
+use blob_share::{
+    client::{NoncePreference, PostDataIntentV1, PostDataIntentV1Signed},
+    MAX_PENDING_DATA_LEN_PER_USER, MAX_USABLE_BLOB_DATA_LEN,
+};
 use ethers::signers::{LocalWallet, Signer};
 use log::info;
 
@@ -11,9 +17,188 @@ async fn health_check_works() {
     testing_harness.client.health().await.unwrap();
 }
 
+#[tokio::test]
+async fn reject_post_data_before_any_topup() {
+    TestHarness::build(TestMode::ELOnly, None)
+        .await
+        .spawn_with_fn(|test_harness| async move {
+            let wallet = test_harness.get_signer_genesis_funds();
+            // Send data request before funding the sender address
+            let res = test_harness.post_data_of_len(wallet.signer(), 69).await;
+
+            assert_eq!(
+                res.unwrap_err().to_string(),
+                "non-success response status 400 body: Insufficient balance 0 for intent with cost 69"
+            );
+        })
+        .await
+        .unwrap();
+}
+
+#[tokio::test]
+async fn reject_post_data_after_insufficient_balance() {
+    TestHarness::build(
+        TestMode::ELOnly,
+        Some(Config::default().add_initial_topup(*GENESIS_FUNDS_ADDR, 1000)),
+    )
+    .await
+    .spawn_with_fn(|test_harness| async move {
+        let wallet = test_harness.get_signer_genesis_funds();
+
+        // First request should be ok
+        test_harness
+            .post_data_of_len(wallet.signer(), 1000)
+            .await
+            .unwrap();
+        // Second request must be rejected
+        let res = test_harness.post_data_of_len(wallet.signer(), 1000).await;
+
+        assert_eq!(
+            res.unwrap_err().to_string(),
+            "non-success response status 400 body: Insufficient balance 0 for intent with cost 1000"
+        );
+    })
+    .await
+    .unwrap();
+}
+
+#[tokio::test]
+async fn reject_single_data_intent_too_big() {
+    TestHarness::build(
+        TestMode::ELOnly,
+        Some(Config::default().add_initial_topup(*GENESIS_FUNDS_ADDR, 100000000)),
+    )
+    .await
+    .spawn_with_fn(|test_harness| async move {
+        let wallet = test_harness.get_signer_genesis_funds();
+        // Send a data request over the max usable blob data length
+        let res = test_harness
+            .post_data_of_len(wallet.signer(), MAX_USABLE_BLOB_DATA_LEN + 1)
+            .await;
+
+        assert_eq!(
+            res.unwrap_err().to_string(),
+            "non-success response status 400 body: data length 126977 over max usable blob data 126976"
+        );
+    })
+    .await
+    .unwrap();
+}
+
+// Without sending transactions, pending data intents are never included in any transaction
+#[tokio::test]
+async fn reject_posting_too_many_pending_intents_without_sending_txs() {
+    reject_posting_too_many_pending_intents(false).await
+}
+
+// With sending transactions, pending data intents live both in transactions and in the data
+// intent tracker. Because it uses `TestMode::ELOnly`, no blocks are mined, so all txs stay pending.
+#[tokio::test]
+async fn reject_posting_too_many_pending_intents_sending_txs() {
+    reject_posting_too_many_pending_intents(true).await
+}
+
+async fn reject_posting_too_many_pending_intents(send_blob_txs: bool) {
+    TestHarness::build(
+        TestMode::ELOnly,
+        Some(
+            Config::default()
+                .add_initial_topup(*GENESIS_FUNDS_ADDR, 100000000000000000) // 0.1 ETH
+                // An excess of 10 * BLOB_GASPRICE_UPDATE_FRACTION yields a blob gas price of
+                // e^10 ≈ 22026 wei, so a tx should never be sent
+                .set_initial_excess_blob_gas(if send_blob_txs { 0 } else { 3338477 * 10 }),
+        ),
+    )
+    .await
+    .spawn_with_fn(|test_harness| async move {
+        let wallet = test_harness.get_signer_genesis_funds();
+
+        for _ in 0..MAX_PENDING_DATA_LEN_PER_USER / MAX_USABLE_BLOB_DATA_LEN {
+            test_harness
+                .post_data_of_len(wallet.signer(), MAX_USABLE_BLOB_DATA_LEN)
+                .await
+                .unwrap();
+        }
+
+        // One more intent must push the user's pending data over the limit
+        let res = test_harness
+            .post_data_of_len(wallet.signer(), MAX_USABLE_BLOB_DATA_LEN)
+            .await;
+        assert_eq!(
+            res.unwrap_err().to_string(),
+            "non-success response status 400 body: pending total data_len 2158592 over max 2031616"
+        );
+    })
+    .await
+    .unwrap();
+}
+
+#[tokio::test]
+async fn reject_post_data_request_invalid_signature_mutate_nonce() {
+    reject_post_data_request_invalid_signature(|intent| {
+        intent.nonce += 1;
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn reject_post_data_request_invalid_signature_mutate_max_blob_gas_price() {
+    reject_post_data_request_invalid_signature(|intent| {
+        intent.intent.max_blob_gas_price += 1;
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn reject_post_data_request_invalid_signature_mutate_data() {
+    reject_post_data_request_invalid_signature(|intent| {
+        intent.intent.data[0] += 1;
+    })
+    .await;
+}
+
+async fn reject_post_data_request_invalid_signature<F>(mutate: F)
+where
+    F: FnOnce(&mut PostDataIntentV1Signed),
+{
+    TestHarness::build(
+        TestMode::ELOnly,
+        Some(Config::default().add_initial_topup(*GENESIS_FUNDS_ADDR, 100000000000)),
+    )
+    .await
+    .spawn_with_fn(|test_harness| async move {
+        let wallet = test_harness.get_signer_genesis_funds();
+        let nonce = 1;
+        let mut intent_signed = PostDataIntentV1Signed::with_signature(
+            wallet.signer(),
+            PostDataIntentV1 {
+                from: wallet.address(),
+                data: vec![0xaa; 1000],
+                max_blob_gas_price: 1,
+            },
+            Some(nonce),
+        )
+        .await
+        .unwrap();
+
+        // Mutate after signing
+        mutate(&mut intent_signed);
+
+        let res = test_harness.client.post_data(&intent_signed).await;
+
+        let err_str = res.unwrap_err().to_string();
+        assert!(
+            err_str.contains("Signature verification failed"),
+            "Expected error 'Signature verification failed' but got '{}'",
+            err_str
+        );
+    })
+    .await
+    .unwrap();
+}
+
 #[tokio::test]
 async fn post_two_intents_and_expect_blob_tx() {
-    TestHarness::build(TestMode::WithChain)
+    TestHarness::build(TestMode::WithChain, None)
         .await
         .spawn_with_fn(|test_harness| {
             async move {
@@ -21,12 +206,10 @@ async fn post_two_intents_and_expect_blob_tx() {
                 test_harness.wait_for_app_health().await;
 
                 // Fund account
-                let wallet = test_harness.get_wallet_genesis_funds();
-                test_harness.fund_sender_account(&wallet).await;
+                let wallet = test_harness.get_signer_genesis_funds();
+                test_harness.fund_sender_account(&wallet, None).await;
 
                 test_post_two_data_intents_up_to_inclusion(&test_harness, wallet.signer(), 0).await;
-
-                Ok(())
             }
         })
         .await
@@ -35,7 +218,7 @@
 #[tokio::test]
 async fn post_many_intents_series_and_expect_blob_tx() {
-    TestHarness::build(TestMode::WithChain)
+    TestHarness::build(TestMode::WithChain, None)
         .await
         .spawn_with_fn(|test_harness| {
             async move {
@@ -43,8 +226,8 @@ async fn post_many_intents_series_and_expect_blob_tx() {
                 test_harness.wait_for_app_health().await;
 
                 // Fund account
-                let wallet = test_harness.get_wallet_genesis_funds();
-                test_harness.fund_sender_account(&wallet).await;
+                let wallet = test_harness.get_signer_genesis_funds();
+                test_harness.fund_sender_account(&wallet, None).await;
 
                 // +4 for the time it takes the fund transaction to go through
                 let n = 4 + 2 * FINALIZE_DEPTH;
@@ -57,8 +240,6 @@
                         .await;
                     info!("test-progress: completed step {i}/{n}");
                 }
-
-                Ok(())
             }
         })
         .await
@@ -101,7 +282,7 @@ async fn test_post_two_data_intents_up_to_inclusion(
         .get_data_by_id(&intent_1_id)
         .await
         .unwrap();
-    assert_eq!(intent_1.data(), data_1);
+    assert_eq!(intent_1.data, data_1);
 
     // Check balance has decreased
     let balance_after_intent_1 = test_harness
@@ -182,7 +363,7 @@
 
 #[tokio::test]
 async fn post_many_intents_parallel_and_expect_blob_tx() {
-    TestHarness::build(TestMode::WithChain)
+    TestHarness::build(TestMode::WithChain, None)
         .await
         .spawn_with_fn(|test_harness| {
             async move {
@@ -190,8 +371,8 @@ async fn post_many_intents_parallel_and_expect_blob_tx() {
                 test_harness.wait_for_app_health().await;
 
                 // Fund account
-                let wallet = test_harness.get_wallet_genesis_funds();
-                test_harness.fund_sender_account(&wallet).await;
+                let wallet = test_harness.get_signer_genesis_funds();
+                test_harness.fund_sender_account(&wallet, None).await;
 
                 // Num of intents to send at once
                 const N: u64 = 32;
@@ -210,7 +391,7 @@
                         .post_data(
                             &wallet.signer(),
                             data.to_vec(),
-                            Some(NoncePreference::Value(i as u128)),
+                            Some(NoncePreference::Value(i as u64)),
                         )
                         .await,
                 )
@@ -263,8 +444,6 @@ async fn post_many_intents_parallel_and_expect_blob_tx() {
                     unique_intents_block_hash.len(),
                     unique_intents_txhash.len(),
                 );
-
-                Ok(())
             }
         })
        .await
diff --git a/tests/api/run_lodestar.rs b/tests/api/run_lodestar.rs
index 5fcf3f7..697931a 100644
--- a/tests/api/run_lodestar.rs
+++ b/tests/api/run_lodestar.rs
@@ -1,4 +1,5 @@
 use std::{
+    env,
     process::Command,
     time::Duration,
     time::{SystemTime, UNIX_EPOCH},
@@ -12,7 +13,7 @@ use crate::{
     helpers::retry_with_timeout,
 };
 
-const STARTUP_TIMEOUT_MILLIS: u64 = 10_000;
+const STARTUP_TIMEOUT_MILLIS: u64 = 30_000;
 const SECONDS_PER_SLOT: usize = 2;
 
 pub struct LodestarInstance {
@@ -35,8 +36,10 @@ pub async fn spawn_lodestar(runner_args: RunLodestarArgs) -> LodestarInstance {
     let lodestar_docker_tag = "chainsafe/lodestar";
 
     // Make sure image is available
-    run_until_exit("docker", &["pull", &lodestar_docker_tag]).unwrap();
-    log::info!("pulled lodestar image {}", lodestar_docker_tag);
+    if env::var("OFFLINE_MODE").is_err() {
+        run_until_exit("docker", &["pull", &lodestar_docker_tag]).unwrap();
+        log::info!("pulled lodestar image {}", lodestar_docker_tag);
+    }
 
     let port_rest = unused_port();
 
diff --git a/tests/packing.rs b/tests/packing.rs
index 9670302..9ba5a6b 100644
--- a/tests/packing.rs
+++ b/tests/packing.rs
@@ -1,4 +1,5 @@
-use blob_share::packing::{pack_items_brute_force, pack_items_greedy_sorted};
+use blob_share::packing::{pack_items_brute_force, pack_items_greedy_sorted, Item};
+use blob_share::BlobGasPrice;
 use eyre::Result;
 use rand::rngs::StdRng;
 use rand::{Rng, SeedableRng};
@@ -10,7 +11,9 @@ use std::{env, fs};
 
 const SEED: u64 = 0;
 const BLOB_MAX_SIZE: usize = 131072;
-const ONE_GWEI: u128 = 1000000000;
+const ONE_GWEI: BlobGasPrice = 1000000000;
+
+type ItemTuple = (usize, BlobGasPrice);
 
 #[test]
 fn test_greedy() -> Result<()> {
@@ -18,6 +21,7 @@ fn test_greedy() -> Result<()> {
         let entry = entry?;
         let path = entry.path();
         if path.is_file() {
+            println!("running test {}", path.to_string_lossy());
             let mut file = fs::File::open(path)?;
             let mut contents = String::new();
             file.read_to_string(&mut contents)?;
@@ -50,7 +54,7 @@
             test_vector.items.sort_by(|a, b| a.0.cmp(&b.0));
 
             let selected_indexes = pack_items_greedy_sorted(
-                test_vector.items.as_mut_slice(),
+                &from_tuples(&test_vector.items),
                 test_vector.max_len,
                 test_vector.cost_per_len,
             );
@@ -88,7 +92,7 @@
         max_len: BLOB_MAX_SIZE,
     });
     for price_mult in [1.1, 1.2, 1.5, 2.0] {
-        let upper_p = ((price_mult * 100.0) as u128 * ONE_GWEI) / 100;
+        let upper_p = ((price_mult * 100.0) as BlobGasPrice * ONE_GWEI) / 100;
         test_vectors_rand.push(TestVectorDef {
             name: format!("price_range_{price_mult}x_{n}_"),
             items: ItemsType::Rand((1..BLOB_MAX_SIZE, ONE_GWEI..upper_p, n)),
@@ -101,11 +105,15 @@
     for test_vector in test_vectors_rand {
         let file_path = format!("tests/packing_test_vectors/{}.json", test_vector.name);
         if !Path::new(&file_path).exists() {
+            println!("generating test vector {file_path}");
             let items = test_vector.items.generate(test_vector.max_len);
 
             let brute_force_solution = if items.len() < 20 {
-                match pack_items_brute_force(&items, test_vector.max_len, test_vector.cost_per_len)
-                {
+                match pack_items_brute_force(
+                    &from_tuples(&items),
+                    test_vector.max_len,
+                    test_vector.cost_per_len,
+                ) {
                     Some(indexes) => SolutionResult::Ok(indexes),
                     None => SolutionResult::NoSolution,
                 }
@@ -127,14 +135,18 @@
     }
 }
 
+fn from_tuples(items: &[ItemTuple]) -> Vec<Item> {
+    items.iter().map(|(l, m)| Item::new(*l, *m)).collect()
+}
+
 struct TestVectorDef {
     name: String,
     items: ItemsType,
-    cost_per_len: u128,
+    cost_per_len: BlobGasPrice,
     max_len: usize,
 }
 
-type ItemRanges = (Range<usize>, Range<u128>, usize);
+type ItemRanges = (Range<usize>, Range<BlobGasPrice>, usize);
 
 enum ItemsType {
     Rand(ItemRanges),
@@ -142,7 +154,7 @@
 }
 
 impl ItemsType {
-    pub fn generate(&self, max_len: usize) -> Vec<(usize, u128)> {
+    pub fn generate(&self, max_len: usize) -> Vec<ItemTuple> {
         match self {
             ItemsType::Rand(ranges) => ItemsType::generate_from_ranges(ranges),
@@ -170,7 +182,7 @@
         }
     }
 
-    fn generate_from_ranges((range_len, range_cost_per_len, n): &ItemRanges) -> Vec<(usize, u128)> {
+    fn generate_from_ranges((range_len, range_cost_per_len, n): &ItemRanges) -> Vec<ItemTuple> {
         let mut rng = StdRng::seed_from_u64(SEED);
         (0..*n)
             .map(|_| {
@@ -182,8 +194,6 @@
     }
 }
 
-type Item = (usize, u128);
-
 #[allow(dead_code)]
 #[derive(Debug)]
 struct SolutionSummary {
@@ -194,10 +204,10 @@ struct SolutionSummary
 
 impl SolutionSummary {
     fn from_solution(
-        items: &[Item],
+        items: &[ItemTuple],
         selected_indexes: &[usize],
         max_len: usize,
-        cost_per_len: u128,
+        cost_per_len: BlobGasPrice,
     ) -> Self {
         let selected_items = selected_indexes
             .iter()
@@ -241,8 +251,8 @@ impl SolutionResult {
 
 #[derive(Serialize, Deserialize, Debug)]
 struct TestVector {
     name: String,
-    items: Vec<Item>,
-    cost_per_len: u128,
+    items: Vec<ItemTuple>,
+    cost_per_len: BlobGasPrice,
     max_len: usize,
     brute_force_solution: SolutionResult,
 }
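
Editor's note on the Option-aware prefixed-hex serde module added near the top of this diff: its `serialize`/`deserialize` pair is shaped for serde's `#[serde(with = ...)]` field attribute. A minimal usage sketch follows, assuming the module is reachable at a path like `crate::utils::option_hex_vec` and that `serde`/`serde_json` are dependencies; the module path and the `Sample` struct are illustrative assumptions, not names confirmed by this diff.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Sample {
    // Routes this field through the module's serialize/deserialize functions
    // (hypothetical path; point it at wherever the module lives in the crate)
    #[serde(with = "crate::utils::option_hex_vec")]
    maybe_bytes: Option<Vec<u8>>,
}

fn main() {
    let sample = Sample {
        maybe_bytes: Some(vec![0xde, 0xad]),
    };
    // Some(bytes) round-trips as a 0x-prefixed hex string; None serializes as null
    let json = serde_json::to_string(&sample).unwrap();
    assert_eq!(json, r#"{"maybe_bytes":"0xdead"}"#);
    assert_eq!(serde_json::from_str::<Sample>(&json).unwrap(), sample);
}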
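
Editor's note on `set_initial_excess_blob_gas(3338477 * 10)` in `reject_posting_too_many_pending_intents`: 3338477 is EIP-4844's blob gas price update fraction, and the blob gas price is `MIN_BLOB_GASPRICE * e^(excess_blob_gas / UPDATE_FRACTION)`, approximated by the spec's integer `fake_exponential`. A standalone sketch of that arithmetic, with constants per EIP-4844 (this is not code from the repo):

// Integer Taylor-series approximation of factor * e^(numerator / denominator),
// as specified in EIP-4844.
fn fake_exponential(factor: u128, numerator: u128, denominator: u128) -> u128 {
    let mut i: u128 = 1;
    let mut output: u128 = 0;
    let mut numerator_accum = factor * denominator;
    while numerator_accum > 0 {
        output += numerator_accum;
        numerator_accum = (numerator_accum * numerator) / (denominator * i);
        i += 1;
    }
    output / denominator
}

fn main() {
    const MIN_BLOB_GASPRICE: u128 = 1; // wei
    const BLOB_GASPRICE_UPDATE_FRACTION: u128 = 3_338_477;

    // The excess value the test injects into the anchor block
    let excess_blob_gas = 10 * BLOB_GASPRICE_UPDATE_FRACTION;
    let price = fake_exponential(
        MIN_BLOB_GASPRICE,
        excess_blob_gas,
        BLOB_GASPRICE_UPDATE_FRACTION,
    );
    // e^10 ≈ 22026, matching the "≈ 22026 wei" figure in the test comment:
    // at that blob gas price the app should never send a blob tx
    println!("blob gas price: {price} wei");
}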