From 69b5f4dc80d4f5aa488d1102fba32f32b55bfcf9 Mon Sep 17 00:00:00 2001 From: Carlos Baez Date: Fri, 8 Nov 2024 16:25:18 +0100 Subject: [PATCH] Squashed commit of the following: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 7839b9501066108cb2322ba9039120a41781a1b0 Author: John Martin Date: Mon Nov 4 14:39:56 2024 -0800 add client-request-method to CORS allowed headers (#20160) (#20168) ## Description adds `client-request-method` to the allowed CORS headers ## Test plan --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ecffa454e86b994b79dfc6fb9032d02ee95a0697 Author: John Martin Date: Mon Nov 4 13:13:22 2024 -0800 fix: remove high cardinality address label from safe_client_latency (… (#20169) …#20165) ## Description safe_client_latency is the highest cardinality metric across our observability system due to the `address` label + being a histogram: ``` root@sui-node-mysten-rpc-0:/sui# curl -s localhost:9184/metrics | grep safe_client_latency_bucket | wc -l 8512 ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1493483238672e1d5c22dbff15eb670b1d57883a Author: Eugene Boguslavsky Date: Mon Nov 4 20:00:02 2024 +0000 Sui v1.37.1 Version Bump commit 6646b75dd68808b7bedb679c41706b12ae79a5a9 Author: Eugene Boguslavsky Date: Thu Oct 31 18:46:36 2024 -0700 Sui `v1.37.0` Framework Bytecode Snapshot (#20131) ## Description Sui `v1.37.0` Framework Bytecode Snapshot ## Test plan `cargo run --bin sui-framework-snapshot` commit fa5d48fcb3e06a11677dc14e90f12cac59e8f2fc Author: Ashok Menon Date: Fri Nov 1 01:03:44 2024 +0000 chore(indexer-alt): alphabetize Cargo.toml commit e116dfa6547e96608bee6767a483e080bb0a1e99 Author: Ashok Menon Date: Fri Nov 1 00:39:20 2024 +0000 indexer-alt: watermark timestamps and other logging improvements (#20120) ## Description Track the timestamp associated with a watermark, include it in tracing messages, and introduce gauges to track the timestamps of the latest checkpoint gathered and written to the DB. This change also introduces a couple of other timestamp/watermark related tracing improvements: - Measure watermarks gathered and in DB for the sequential pipeline (previously just tracked the watermark after they were written to the DB). These will usually be the exact same, but may differ if the pipeline needs to retry a write. - Gets rid of the ordering and equality impls for `CommitterWatermark` -- these are no longer required. - Standardise the ordering of watermark traces between the sequential and concurrent pipelines, so that they are easier to compare with each other (they now share a common prefix). - Standardised tracing messages during committer/watermark teardown: Now in each exit edge, we log the reason for the exit, and then a message with the final watermark. 
## Test plan Run `sum_obj_types` and `wal_obj_types` with a consistent range of `3600` and note that the difference in their timestamps is roughly an hour (to begin with, it's about 70 minutes): ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url 'postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt' \ indexer --remote-store-url 'https://checkpoints.mainnet.sui.io' \ --consistent-range 3600 --pipeline wal_obj_types --pipeline sum_obj_types` Running `/Users/ashokmenon/sui/idx-poc/target/release/sui-indexer-alt --database-url 'postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt' indexer --remote-store-url 'https://checkpoints.mainnet.sui.io' --consistent-range 3600 --pipeline wal_obj_types --pipeline sum_obj_types` 2024-10-31T15:16:26.867221Z INFO sui_indexer_alt::db: Running migrations ... 2024-10-31T15:16:26.889145Z INFO sui_indexer_alt::db: Migrations complete. 2024-10-31T15:16:27.024406Z INFO sui_indexer_alt: Skipping pipeline ev_emit_mod 2024-10-31T15:16:27.024413Z INFO sui_indexer_alt: Skipping pipeline ev_struct_inst 2024-10-31T15:16:27.024414Z INFO sui_indexer_alt: Skipping pipeline kv_checkpoints 2024-10-31T15:16:27.024414Z INFO sui_indexer_alt: Skipping pipeline kv_objects 2024-10-31T15:16:27.024415Z INFO sui_indexer_alt: Skipping pipeline kv_transactions 2024-10-31T15:16:27.024416Z INFO sui_indexer_alt: Skipping pipeline obj_versions 2024-10-31T15:16:27.024416Z INFO sui_indexer_alt: Skipping pipeline tx_affected_objects 2024-10-31T15:16:27.024417Z INFO sui_indexer_alt: Skipping pipeline tx_balance_changes 2024-10-31T15:16:27.024417Z INFO sui_indexer_alt: Skipping pipeline wal_coin_balances 2024-10-31T15:16:27.033550Z INFO sui_indexer_alt: Skipping pipeline sum_coin_balances 2024-10-31T15:16:27.033607Z INFO sui_indexer_alt::pipeline::processor: Starting processor pipeline="wal_obj_types" 2024-10-31T15:16:27.033856Z INFO sui_indexer_alt::pipeline::concurrent::collector: Starting collector pipeline="wal_obj_types" 
2024-10-31T15:16:27.033869Z INFO sui_indexer_alt::pipeline::concurrent::committer: Starting committer pipeline="wal_obj_types" 2024-10-31T15:16:27.033983Z INFO sui_indexer_alt: Skipping pipeline sum_packages 2024-10-31T15:16:27.034011Z INFO sui_indexer_alt::pipeline::processor: Starting processor pipeline="sum_obj_types" 2024-10-31T15:16:27.034183Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Starting watermark pipeline="wal_obj_types" watermark=CommitterWatermark { pipeline: "wal_obj_types", epoch_hi_inclusive: 0, checkpoint_hi_inclusive: 0, tx_hi: 0, timestamp_ms_hi_inclusive: 0 } 2024-10-31T15:16:27.034260Z INFO sui_indexer_alt::pipeline::sequential::committer: Starting committer pipeline="sum_obj_types" watermark=CommitterWatermark { pipeline: "sum_obj_types", epoch_hi_inclusive: 0, checkpoint_hi_inclusive: 0, tx_hi: 0, timestamp_ms_hi_inclusive: 0 } 2024-10-31T15:16:27.034305Z INFO sui_indexer_alt: Ingestion range first_checkpoint=0 last_checkpoint=None 2024-10-31T15:16:27.034339Z INFO sui_indexer_alt::metrics: Starting metrics service on 0.0.0.0:9184 2024-10-31T15:16:27.034351Z INFO sui_indexer_alt::ingestion::regulator: Starting ingestion regulator 2024-10-31T15:16:27.034355Z INFO sui_indexer_alt::ingestion::broadcaster: Starting ingestion broadcaster 2024-10-31T15:16:27.541400Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=212 transaction=213 timestamp=2023-04-13 13:28:25.836 UTC updated=true elapsed_ms=5.486208 2024-10-31T15:16:28.043533Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=515 transaction=516 timestamp=2023-04-13 13:36:17.240 UTC updated=true elapsed_ms=5.512666 2024-10-31T15:16:28.538421Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=1017 transaction=1018 timestamp=2023-04-13 13:48:04.584 UTC updated=true elapsed_ms=3.468209 
2024-10-31T15:16:29.041788Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=1422 transaction=1423 timestamp=2023-04-13 13:57:11.577 UTC updated=true elapsed_ms=7.184292 2024-10-31T15:16:29.540675Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=1824 transaction=1825 timestamp=2023-04-13 14:05:59.154 UTC updated=true elapsed_ms=5.7144580000000005 2024-10-31T15:16:30.040987Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=2226 transaction=2227 timestamp=2023-04-13 14:14:54.721 UTC updated=true elapsed_ms=5.955416 2024-10-31T15:16:30.539394Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=2630 transaction=2631 timestamp=2023-04-13 14:23:21.636 UTC updated=true elapsed_ms=5.0003340000000005 2024-10-31T15:16:31.035944Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=3142 transaction=3143 timestamp=2023-04-13 14:34:00.192 UTC updated=true elapsed_ms=1.368042 2024-10-31T15:16:31.532346Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=71 transaction=72 timestamp=2023-04-13 13:24:36.662 UTC 2024-10-31T15:16:31.535237Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=3646 transaction=3647 timestamp=2023-04-13 14:45:19.227 UTC updated=true elapsed_ms=0.7080409999999999 2024-10-31T15:16:31.556459Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=103 transaction=104 timestamp=2023-04-13 13:25:35.491 UTC 2024-10-31T15:16:31.605247Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=151 transaction=152 timestamp=2023-04-13 13:27:01.087 UTC 
2024-10-31T15:16:31.658676Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=203 transaction=204 timestamp=2023-04-13 13:28:14.431 UTC 2024-10-31T15:16:31.709093Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=251 transaction=252 timestamp=2023-04-13 13:29:41.350 UTC 2024-10-31T15:16:31.774101Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=304 transaction=305 timestamp=2023-04-13 13:31:05.962 UTC 2024-10-31T15:16:31.823847Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=351 transaction=352 timestamp=2023-04-13 13:32:06.748 UTC 2024-10-31T15:16:31.875613Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=404 transaction=405 timestamp=2023-04-13 13:33:26.122 UTC 2024-10-31T15:16:31.923673Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=452 transaction=453 timestamp=2023-04-13 13:34:41.672 UTC 2024-10-31T15:16:31.973523Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=502 transaction=503 timestamp=2023-04-13 13:35:54.245 UTC 2024-10-31T15:16:32.020968Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=553 transaction=554 timestamp=2023-04-13 13:37:12.723 UTC 2024-10-31T15:16:32.034336Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=4147 transaction=4148 timestamp=2023-04-13 14:56:30.470 UTC updated=true elapsed_ms=0.478916 2024-10-31T15:16:32.081115Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=603 transaction=604 timestamp=2023-04-13 13:38:26.226 UTC 
2024-10-31T15:16:32.143228Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=657 transaction=658 timestamp=2023-04-13 13:39:32.583 UTC 2024-10-31T15:16:32.201323Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=710 transaction=711 timestamp=2023-04-13 13:40:44.461 UTC 2024-10-31T15:16:32.239715Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=751 transaction=752 timestamp=2023-04-13 13:41:40.955 UTC 2024-10-31T15:16:32.306828Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=803 transaction=804 timestamp=2023-04-13 13:43:00.433 UTC 2024-10-31T15:16:32.351995Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=854 transaction=855 timestamp=2023-04-13 13:44:21.487 UTC 2024-10-31T15:16:32.408044Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=903 transaction=904 timestamp=2023-04-13 13:45:33.468 UTC 2024-10-31T15:16:32.478623Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=963 transaction=964 timestamp=2023-04-13 13:46:48.531 UTC 2024-10-31T15:16:32.517822Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1004 transaction=1005 timestamp=2023-04-13 13:47:43.997 UTC 2024-10-31T15:16:32.540701Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=4550 transaction=4551 timestamp=2023-04-13 15:05:30.690 UTC updated=true elapsed_ms=5.776375 2024-10-31T15:16:32.574600Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1052 transaction=1053 timestamp=2023-04-13 13:48:55.253 UTC 
2024-10-31T15:16:32.625726Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1102 transaction=1103 timestamp=2023-04-13 13:49:57.288 UTC 2024-10-31T15:16:32.679135Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1159 transaction=1160 timestamp=2023-04-13 13:51:06.819 UTC 2024-10-31T15:16:32.729858Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1203 transaction=1204 timestamp=2023-04-13 13:52:11.201 UTC 2024-10-31T15:16:32.777044Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1257 transaction=1258 timestamp=2023-04-13 13:53:26.750 UTC 2024-10-31T15:16:32.819603Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1301 transaction=1302 timestamp=2023-04-13 13:54:17.123 UTC 2024-10-31T15:16:32.870028Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1351 transaction=1352 timestamp=2023-04-13 13:55:24.124 UTC 2024-10-31T15:16:32.923059Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1402 transaction=1403 timestamp=2023-04-13 13:56:49.078 UTC 2024-10-31T15:16:32.969159Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1451 transaction=1452 timestamp=2023-04-13 13:57:56.351 UTC 2024-10-31T15:16:33.034831Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="wal_obj_types" epoch=0 checkpoint=5055 transaction=5056 timestamp=2023-04-13 15:16:38.038 UTC updated=true elapsed_ms=1.0442500000000001 2024-10-31T15:16:33.036266Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=1501 transaction=1502 timestamp=2023-04-13 
13:58:56.227 UTC ^C2024-10-31T15:16:33.091189Z INFO sui_indexer_alt::pipeline::concurrent::collector: Shutdown received, stopping collector pipeline="wal_obj_types" 2024-10-31T15:16:33.091195Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Shutdown received pipeline="wal_obj_types" 2024-10-31T15:16:33.091201Z INFO sui_indexer_alt::ingestion::regulator: Shutdown received, stopping regulator 2024-10-31T15:16:33.091213Z INFO sui_indexer_alt::metrics: Shutdown received, stopping metrics service 2024-10-31T15:16:33.091225Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Stopping watermark task pipeline="wal_obj_types" watermark=CommitterWatermark { pipeline: "wal_obj_types", epoch_hi_inclusive: 0, checkpoint_hi_inclusive: 5055, tx_hi: 5056, timestamp_ms_hi_inclusive: 1681398998038 } 2024-10-31T15:16:33.091410Z INFO sui_indexer_alt::pipeline::concurrent::committer: Batches done, stopping committer pipeline="wal_obj_types" 2024-10-31T15:16:33.091721Z INFO sui_indexer_alt::pipeline::sequential::committer: Shutdown received pipeline="sum_obj_types" 2024-10-31T15:16:33.091725Z INFO sui_indexer_alt::pipeline::sequential::committer: Stopping committer pipeline="sum_obj_types" watermark=CommitterWatermark { pipeline: "sum_obj_types", epoch_hi_inclusive: 0, checkpoint_hi_inclusive: 1546, tx_hi: 1547, timestamp_ms_hi_inclusive: 1681394391225 } 2024-10-31T15:16:33.093829Z INFO sui_indexer_alt::pipeline::processor: Shutdown received, stopping processor pipeline="wal_obj_types" 2024-10-31T15:16:33.094014Z INFO sui_indexer_alt::pipeline::processor: Shutdown received, stopping processor pipeline="sum_obj_types" 2024-10-31T15:16:33.094406Z INFO sui_indexer_alt::ingestion::broadcaster: Shutdown received, stopping ingestion broadcaster 2024-10-31T15:16:33.094513Z INFO sui_indexer_alt: Indexing pipeline gracefully shut down ``` ## Stack - #20089 - #20114 - #20116 - #20117 - #20119 --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4b727cbbfd89db7ebfd09b21e05faa58ac2d8c96 Author: Ashok Menon Date: Fri Nov 1 00:37:19 2024 +0000 indexer-alt: allow sequential pipeline immediate reset (#20119) ## Description If the sequential pipeline committer can guarantee that it could process more checkpoints by looking at its pending buffer, we now reset the polling interval immediately, so it does not wait to issue the next write. This mimics a similar behaviour in the concurrent pipeline. I made this change after noticing how the pipeline behaves when ingestion is stuck retrying a checkpoint, on my local machine. Usually when running locally, performance is limited by checkpoint download rate, but in a sequential pipeline, if a checkpoint failed to download, it is possible for many checkpoints to end up processed and pending. With the previous implementation, once ingestion had recovered (the checkpoint is fetched), the pending buffer kept growing because it was only able to land `MAX_BATCH_CHECKPOINTS / commit_interval`, so if checkpoints were getting added faster than that, it would never recover. With this change, the pipeline recovers almost instantly, and I expect that in GCP where bandwidth is not the rate limiting factor, this should improve throughput during backfill, and synthetic benchmarks. 
## Test plan Run the indexer with a large ingestion buffer and concurrency, wait for ingestion to fail to fetch a checkpoint, and then notice how the situation recovers (instead of getting worse until the pipeline eventually complains that it has too many pending checkpoints): ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ indexer --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 1200000 --pipeline sum_packages \ --checkpoint-buffer-size 50000 --ingest-concurrency 20000 ``` ## Stack - #20089 - #20114 - #20116 - #20117 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b65f34c6a00801620e11469575523895a8e074ac Author: Ashok Menon Date: Fri Nov 1 00:21:38 2024 +0000 indexer-alt: wal_coin_balances pipeline (#20117) ## Description `wal_coin_balances` is to `sum_coin_balances` what `wal_obj_types` is to `sum_obj_types`. 
## Test plan Run the indexer, and correlate the live object set calculated from the write-ahead log against the one that's already in the summary table: ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ indexer --remote-store-url https://checkpoints.mainnet.sui.io/ \ --last-checkpoint 5000 ``` ``` sui_indexer_alt=# SELECT COUNT(*) FROM sum_coin_balances; count ------- 178 (1 row) sui_indexer_alt=# SELECT COUNT(*) FROM ( SELECT DISTINCT ON (object_id) * FROM wal_coin_balances ORDER BY object_id, object_version DESC ) o WHERE o.owner_id IS NOT NULL; count ------- 178 (1 row) ``` ## Stack - #20089 - #20114 - #20116 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 71f1faa1a8557a178e0bf89f20829a8ea503f156 Author: Ashok Menon Date: Fri Nov 1 00:18:46 2024 +0000 indexer-alt: wal_obj_types pipeline (#20116) ## Description Adds the concurrent pipeline that writes the write-ahead log for `sum_obj_types`. It re-uses the `process` implementation from `sum_obj_types` and then writes it into an append-only table. Note that today: - The pipelines are completely independent, which means the WAL pipeline redoes the processing work of the summary pipeline (this is presumably not an issue because the repeated work is not particularly heavy). - This change does not include the pruning necessary to keep this table's size in check (in practice it should only be a couple of gigs in size). This will come in a follow-up PR. 
## Test plan Run the indexer and cross check the live-object set calculated from the write-ahead log with the summary: ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ indexer --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 5000 ``` ``` sui_indexer_alt=# SELECT COUNT(*) FROM sum_obj_types; count ------- 592 (1 row) ^ sui_indexer_alt=# SELECT COUNT(*) FROM ( SELECT DISTINCT ON (object_id) * FROM wal_obj_types ORDER BY object_id, object_version DESC ) o WHERE o.owner_kind IS NOT NULL; count ------- 592 (1 row) ``` ## Stack - #20089 - #20114 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7de02417c27844c8a086866b1279b4736ab862ec Author: Ashok Menon Date: Thu Oct 31 23:22:42 2024 +0000 indexer-alt: obj_versions pipeline (#20114) ## Description Pipeline to fill `obj_versions` table -- this is the table that serves "parent version" (dynamic field) queries. ## Test plan ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ indexer --remote-url "https://checkpoints.mainnet.sui.io" \ --last-checkpoint 5000 ``` ## Stack - #20089 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 53eaed5e300a67f0d01cf68119a145410925f9de Author: Ashok Menon Date: Wed Oct 30 00:06:07 2024 +0000 indexer-alt: delays for sequential pipelines ## Description Add the ability to include an optional delay to sequential pipelines. This can be used to implement behaviour similar to `objects_snapshot` in the new architecture. Sequential pipelines honour this configuration in their committer tasks: - They process checkpoints at the same rate as other pipelines - But hold back data in the committer's "pending" map until the committer witnesses a checkpoint that is at least `checkpoint_lag` checkpoints ahead. This also requires slightly complicating the set-up and tear-down logic: - When setting up, we may need to wait until the pending buffer has been primed enough to set a valid commit upperbound. - If the pipeline runs dry before that point, we need to add an extra check to close down the committer. - When tearing down the committer after its channel has closed, we need to check whether batching would have picked up the first pending checkpoint (whether it is a valid "next checkpoint" and it's not held up because of a lag parameter). ## Test plan Run the indexer with various parameterisations for the consistent range, and note that the watermark for the summary pipelines lag behind the watermarks for other pipelines. ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ indexer --remote-store-url https://checkpoints.mainnet.sui.io \ --pipeline kv_objects \ --pipeline sum_obj_types --pipeline sum_coin_balances \ --last-checkpoint 5000 --consistent-range 1000 [...] 
2024-10-30T00:24:45.148259Z INFO sui_indexer_alt::ingestion::regulator: Checkpoints done, stopping regulator 2024-10-30T00:24:45.161682Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3605 transaction=3606 2024-10-30T00:24:45.212394Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_coin_balances" epoch=0 checkpoint=2703 transaction=2704 2024-10-30T00:24:45.222986Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3656 transaction=3657 2024-10-30T00:24:45.274318Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3702 transaction=3703 2024-10-30T00:24:45.340714Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3753 transaction=3754 2024-10-30T00:24:45.355934Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="kv_objects" elapsed_ms=0.949083 updated=true epoch=0 checkpoint=4760 transaction=4761 2024-10-30T00:24:45.401262Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3801 transaction=3802 2024-10-30T00:24:45.463134Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3852 transaction=3853 2024-10-30T00:24:45.544933Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3904 transaction=3905 2024-10-30T00:24:45.609068Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_obj_types" epoch=0 checkpoint=3952 transaction=3953 2024-10-30T00:24:45.668560Z INFO sui_indexer_alt::ingestion::broadcaster: Checkpoints done, stopping ingestion broadcaster 2024-10-30T00:24:45.668593Z INFO sui_indexer_alt::pipeline::processor: Checkpoints done, stopping processor pipeline="kv_objects" 
2024-10-30T00:24:45.668605Z INFO sui_indexer_alt::pipeline::processor: Checkpoints done, stopping processor pipeline="sum_obj_types" 2024-10-30T00:24:45.668605Z INFO sui_indexer_alt::pipeline::processor: Checkpoints done, stopping processor pipeline="sum_coin_balances" 2024-10-30T00:24:45.674305Z INFO sui_indexer_alt::pipeline::sequential::committer: Processor closed channel, pending rows empty, stopping committer pipeline="sum_obj_types" watermark=CommitterWatermark { pipeline: "sum_obj_types", epoch_hi_inclusive: 0, checkpoint_hi_inclusive: 4000, tx_hi: 4001 } 2024-10-30T00:24:45.713856Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_coin_balances" epoch=0 checkpoint=3003 transaction=3004 2024-10-30T00:24:45.863837Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="kv_objects" elapsed_ms=1.83825 updated=true epoch=0 checkpoint=4960 transaction=4961 2024-10-30T00:24:46.120459Z INFO sui_indexer_alt::pipeline::concurrent::collector: Processor closed channel, pending rows empty, stopping collector pipeline="kv_objects" 2024-10-30T00:24:46.131407Z INFO sui_indexer_alt::pipeline::concurrent::committer: Batches done, stopping committer pipeline="kv_objects" 2024-10-30T00:24:46.214267Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_coin_balances" epoch=0 checkpoint=3303 transaction=3304 2024-10-30T00:24:46.356941Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Watermark pipeline="kv_objects" elapsed_ms=0.68 updated=true epoch=0 checkpoint=5000 transaction=5001 2024-10-30T00:24:46.356970Z INFO sui_indexer_alt::pipeline::concurrent::watermark: Committer closed channel, stopping watermark task pipeline="kv_objects" watermark=CommitterWatermark { pipeline: "kv_objects", epoch_hi_inclusive: 0, checkpoint_hi_inclusive: 5000, tx_hi: 5001 } 2024-10-30T00:24:46.714498Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_coin_balances" epoch=0 checkpoint=3603 
transaction=3604 2024-10-30T00:24:47.214758Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_coin_balances" epoch=0 checkpoint=3903 transaction=3904 2024-10-30T00:24:47.715841Z INFO sui_indexer_alt::pipeline::sequential::committer: Watermark pipeline="sum_coin_balances" epoch=0 checkpoint=4000 transaction=4001 2024-10-30T00:24:47.715917Z INFO sui_indexer_alt::pipeline::sequential::committer: Processor closed channel, pending rows empty, stopping committer pipeline="sum_coin_balances" watermark=CommitterWatermark { pipeline: "sum_coin_balances", epoch_hi_inclusive: 0, checkpoint_hi_inclusive: 4000, tx_hi: 4001 } 2024-10-30T00:24:47.716485Z INFO sui_indexer_alt: Indexing pipeline gracefully shut down 2024-10-30T00:24:47.716647Z INFO sui_indexer_alt::metrics: Shutdown received, stopping metrics service ``` commit d57326492b62ec8ba7ff8826ad473ec1b5008f11 Author: Ashok Menon Date: Wed Oct 30 00:01:45 2024 +0000 fix(indexer-alt): remove double logging on pipeline skip ## Description We were logging the pipeline being skipped twice. ## Test plan Check logs while the indexer starts up: ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ indexer --remote-store-url "https://checkpoints.mainnet.sui.io" \ --last-checkpoint 5000 --pipeline sum_obj_types ``` commit ef0d78c638e3ef0d0a3b28787b618ea56a140284 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Oct 31 19:30:51 2024 -0400 add sui-field-count for sui-indexer-alt (#20126) ## Description title, so that we can replace hard-coded field count in pipelines. a crate did very similar things but it's archived, so prob better to have our own impl, also b/c the functionality is relatively simple https://github.com/discosultan/field-count ## Test plan added tests in models/ --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c92dd1b4f16074ced8c1fe5c56a10e675f68bbe6 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Oct 31 18:28:34 2024 -0400 [kv store] bigtable. Add support for querying checkpoint by digest (#20122) ## Description Adds support for querying checkpoint by digest --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 74ace7b6166f10bddfd714568e0ddc20db2ca75d Author: JasonRUAN Date: Fri Nov 1 06:25:44 2024 +0800 [CLI] fix `sui keytool import` --alias command and default alias does not work (#20111) ## Description fix `sui keytool import` --alias command and default alias does not work Before Fix: image After Fix: - **--alias** image - **default alias** image ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: `sui keytool import --alias` is now fixed and correctly records the provided alias. - [ ] Rust SDK: - [ ] REST API: commit e41a759769fdea08f660160a23e945bbad80b337 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Oct 31 23:50:37 2024 +0200 Version Packages (#20127) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/kms@0.0.3 ### Patch Changes - 02c9e46: Fix exports on the bundled package Co-authored-by: github-actions[bot] commit 02c9e4691b429b4a0ec85b71eace37b72515d055 Author: Manolis Liolios Date: Thu Oct 31 23:38:15 2024 +0200 Fix build, re-publish @mysten/kms (#20125) ## Description Build will always treat `src` as the base for outputs when building. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 16f2683a99e7b8824aa5879d3f20ffef936a7546 Author: William Smith Date: Thu Oct 31 17:27:16 2024 -0400 [sui-benchmark] Add expected failure payloads (#19831) ## Description Introduce a new payload type into the benchmark tool - `ExpectedFailurePayload`. 
This payload type is configured with some expected failure type (currently only one implemented - user signature failure - but others can be added with minimal scaffolding) such that `payload.make_transaction()` generates a transaction that will fail in this manner. Note that the failures that this payload type is concerned with are failures to execute the transaction itself, rather than during execution by the MoveVM. In other words, it is expected that the failure mode will not consume gas or produce effects. Note that for this reason, its failure mode is inverted. It will be tallied for metrics purposes as an `error` if it succeeds and tallied as an `expected_error` (which is a success) if it fails. Also note that transaction responses for this type are handled by producing a `NextOp::Retry` (with some additional logging/metrics) since it is functionally equivalent to a retryable error. ## Test plan Ran the following: ``` SIM_STRESS_TEST_QPS=200 SIM_STRESS_TEST_WORKERS=20 RUST_LOG=debug cargo simtest --nocapture test_simulated_load_expected_failure_traffic_control ``` Observed the following in logs ``` 2022-01-03T02:05:47.734507Z INFO node{id=1 name="client"}: simtest::test: crates/sui-benchmark/tests/simtest.rs:1088: end of test BenchmarkStats { duration: 50.000002242s, num_error_txes: 0, num_expected_error_txes: 8049, num_success_txes: 348, num_success_cmds: 1977, total_gas_used: 3215776400, latency_ms: HistogramWrapper { histogram: Histogram { auto_resize: false, highest_trackable_value: 120000, lowest_discernible_value: 1, significant_value_digits: 3, bucket_count: 7, sub_bucket_count: 2048, sub_bucket_half_count: 1024, sub_bucket_half_count_magnitude: 10, sub_bucket_mask: 2047, leading_zero_count_base: 53, unit_magnitude: 0, unit_magnitude_mask: 0, max_value: 14359, min_non_zero_value: 72, total_count: 348, counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 8, 12, 9, 20, 25, 22, 19 ... ``` [Also ran in PTN](https://metrics.sui.io/d/adl51ctsvmkg0a/traffic-control-dos-protection-dashboard?from=2024-10-31T19:00:50.548Z&to=2024-10-31T20:20:03.673Z&timezone=browser&var-network=private-testnet&var-host=$__all&var-fullnode=$__all) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d95918cea0d8fa0f1a4007273f477e3c12b61302 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Oct 31 17:18:17 2024 -0400 [data ingestion] add support for bigtable in main workflow binary (#20018) ## Description adding BigTable KV as a variant to main binary that manages internal workflows --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c33374e735257b2a460a2f882e062619f8958bc5 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Oct 31 12:51:47 2024 -0700 [mvr] indexer schema subset and reduced persist tasks (#20100) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7c517e7e5ce7193d1f51ef7f20a31b56f0e66797 Author: Xun Li Date: Thu Oct 31 12:49:58 2024 -0700 Add system_invariant_violation macro (#20105) ## Description Introduce a system invariant violation macro. We can then set up alerts whenever the counter value changes. Used it in one place as an example. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b248f35879801b3bae9b76c4d204f36540d816f8 Author: Andrew Schran Date: Thu Oct 31 14:16:48 2024 -0400 Add support for TLS connections with self-signed cert on validator gRPC interface (#19796) Client side use of TLS will be enabled-by-default in a future PR. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [x] Nodes (Validators and Full nodes): Adds support for TLS on validator gRPC interface. - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e06610823b985bdc01c40e23c9a9a2f8d5b571d1 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Oct 31 19:58:58 2024 +0200 Version Packages (#20076) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/sui@1.14.0 ### Minor Changes - c24814b: Adds a custom header; 'Client-Request-Method' which will contain the method name used in each outgoing jsonrpc request ## @mysten/create-dapp@0.3.28 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 - @mysten/dapp-kit@0.14.28 ## @mysten/dapp-kit@0.14.28 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 - @mysten/wallet-standard@0.13.9 - @mysten/zksend@0.11.9 ## @mysten/deepbook@0.8.23 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 ## @mysten/deepbook-v3@0.12.2 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 ## @mysten/enoki@0.4.7 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 - @mysten/zklogin@0.7.24 ## @mysten/graphql-transport@0.2.25 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 ## @mysten/kiosk@0.9.23 ### Patch Changes - 4166d71: Fix doc comment on `getKiosk` command - Updated dependencies [c24814b] - @mysten/sui@1.14.0 ## @mysten/kms@0.0.2 ### Patch Changes - b3f3925: Introduces @mysten/kms which initially exposes a Sui AWS KMS signer - Updated dependencies [c24814b] - 
@mysten/sui@1.14.0 ## @mysten/suins-toolkit@0.5.23 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 ## @mysten/wallet-standard@0.13.9 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 ## @mysten/zklogin@0.7.24 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 ## @mysten/zksend@0.11.9 ### Patch Changes - Updated dependencies [c24814b] - @mysten/sui@1.14.0 - @mysten/wallet-standard@0.13.9 Co-authored-by: github-actions[bot] commit 093954a09bce1942a9f9773b845f59bf5124efbf Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Thu Oct 31 11:35:27 2024 -0600 [docs] Rpc update (#20094) ## Description Updates the RPC render to include component schemas. Also updates a few comments. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fa66617b7dfa7096fd64a2dc1b002d154223be4a Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Oct 31 10:18:57 2024 -0700 [mvr] renaming to sui-mvr-indexer and Dockerfile (#20099) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 434206f14a0a21bc62ea08480b4a5c4d36d6cc25 Author: Todd Nowacki Date: Thu Oct 31 10:18:46 2024 -0700 [move-compiler] Re-organize warning filters (#20103) ## Description - Re-organize warning filters. It should make it a bit cleaner to refactor diagnostic reporting ## Test plan - ran tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0d16b6f8b7c4ff79f70400e8a94d7486b2b2c3fe Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Oct 31 09:36:37 2024 -0700 [mvr] new crate for custom mvr-indexer, copied from sui-indexer crate (#20098) ## Description Simple copy-paste of the sui-indexer crate for the custom mvr indexer ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b3f39253ff0153b69b42bc298d1ecdbb0524e276 Author: John Naulty Jr. Date: Thu Oct 31 09:21:29 2024 -0700 AWS KMS TS Signer (#16139) ## Description Describe the changes or additions included in this PR. 
## Test Plan How did you test the new or updated feature? --- If your changes are not user-facing and do not break anything, you can skip the following section. Otherwise, please briefly describe what has changed under the Release Notes section. ### Type of Change (Check all that apply) - [ ] protocol change - [ ] user-visible impact - [ ] breaking change for a client SDKs - [ ] breaking change for FNs (FN binary must upgrade) - [ ] breaking change for validators or node operators (must upgrade binaries) - [ ] breaking change for on-chain data layout - [ ] necessitate either a data wipe or data migration ### Release notes --------- Co-authored-by: Manolis Liolios commit f78c7b006c28c4c1afd3f7d42e21e53970902f5d Author: Brandon Williams Date: Wed Oct 30 19:50:45 2024 -0500 rest: revert client change to requested protobuf and request bcs instead Revert the client change to request protobuf as a response type, and instead request bcs, because the support for protobuf hasn't rolled out to other networks yet. 
commit 108ad4c8ea53c68bd90d0b3fcdc88fbc75640c61 Author: Brandon Williams Date: Wed Oct 30 12:50:43 2024 -0500 rest: support protobuf format for committee endpoints commit 718a244bca584382faedde56f8247c6b26225b07 Author: Brandon Williams Date: Wed Oct 30 11:03:43 2024 -0500 rest: support protobuf format for resolve, simulate, and execute transaction endpoints commit 2987f5967ecffa8ddbec2b8ebf9ee2eec09dad60 Author: Brandon Williams Date: Wed Oct 30 08:55:51 2024 -0500 rest: support protobuf format for GetTransaction and ListTransactions endpoints commit 672ecbd2a58c7514aa24d139c442316a03d7f89e Author: Brandon Williams Date: Tue Oct 29 21:30:42 2024 -0500 rest: support protobuf format for GetObject endpoints commit c8472c057523ff88040ae688f1410d34164b0ebc Author: Brandon Williams Date: Tue Oct 29 19:00:39 2024 -0500 rest: remove ListFullCheckpoint api Remove the ListFullCheckpoint api as its a bit too heavy-weight and instead we'll look to introduce a streaming api in the future. commit 39f1ae21fd5467e71522729519720d2add9468dc Author: Xun Li Date: Wed Oct 30 22:25:50 2024 -0700 Clean up local execution (#20086) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 65b02e203ad897f438f87d0e5cfe46c56a1d4247 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Wed Oct 30 21:26:54 2024 -0400 [pruner] enable periodic compaction of individual SST files by default (#20101) ## Description enables periodic compaction of individual SST files by default --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f2670718f694edd59e8a062bc932d4efd96ba765 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Oct 30 16:49:01 2024 -0700 [execution] Update to deps-only mode (#20102) ## Description Disallow new modules from being added to packages in deps-only mode. ## Test plan Added new tests to check for both previous and new behavior. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [X] Protocol: Updated restrictions on deps-only packages to not allow adding new modules to the package. 
- [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7e6319fa33b7021a94ff7dcc07e479a4bae92b55 Author: Adam Welc Date: Wed Oct 30 15:51:37 2024 -0700 [trace-view] Added support for native functions and global locations (#20080) ## Description Adds support for native functions where the `CloseFrame` event happens right after `OpenFrame` event and both need to be skipped to skip over the native function. Also added support for handling global locations - they should be parsed properly but largely ignored in the trace as they cannot directly affect values of local variables ## Test plan All old and new tests must pass commit 21639a51847f1a04b6b221ac77df3875c0430be7 Author: Xun Li Date: Wed Oct 30 14:30:59 2024 -0700 [RPC] Fix coin metadata api inefficiency (#19794) ## Description This PR fixes a minor inefficiency in the coin metadata api. It does not need to fetch the transaction, as it only needs the effects. This likely avoids a remote KV fetch. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c24814b54853d70c42cfe17960043458a670e50d Author: John Martin Date: Wed Oct 30 13:22:18 2024 -0700 add Client-Request-Method header to all TS SDK requests (#20071) ## Description This will be used by the new edge proxy for routing execute transaction requests to fullnodes with lower consensus latency ## Test plan unit tests --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8f9c6fc2da199ef62d79f0e2bedf28ab30fb4a7c Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Oct 30 12:59:46 2024 -0700 [move] Add support for raw `abort`s in clever errors (#20063) ## Description This adds support for raw `abort`s in a similar manner to raw `assert!`ions. It will introduce the line number that the `abort` was called on (and behaves in the same manner as `assert!` with no error codes for macro line number remapping). ## Test plan Added parser tests as well as additional transactional tests, and verified that they produce the exact same output as the derived line number assertion tests that they copy. commit b261681d2135ebef9adeb6b09b89b8d0df12f695 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Oct 30 12:59:26 2024 -0700 [move] Unify `debugging` and `tracing` feature flags for tracing in the VM (#20084) ## Description This unifies the old tracing under the `tracing` flag, and removes the `debugging` feature flag from the VM (one less feature flag 🎉). Otherwise behavior is kept the exact same. ## Test plan Tested manually + CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ebffb7540b78c68522bf465fe5f73198f780a490 Author: Eugene Boguslavsky Date: Wed Oct 30 11:10:29 2024 -0700 Add emoji to slack error messages (#20092) ## Description Add emoji to slack error messages ## Test plan 👀 commit 831e97a52d63b043f642882b31974db33a757911 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Wed Oct 30 11:04:26 2024 -0700 [indexer] drop tx_recipients and tx_senders tables from schema (#20087) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 419bac683f46da4a7eadad4617ea0679be493eeb Author: Jort Date: Wed Oct 30 11:02:33 2024 -0700 [CLI] upgrade error formatting should use existing diag wrapper (#20014) ## Description Reuse the work within `move-compiler/src/diagnostics/` instead of reimplementing code span reporting usage directly. ## Test plan Snapshots --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 22b44ed3baf484bda849edff1355e7ce1b38d0ff Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Oct 30 09:18:24 2024 -0700 [docs] Update docs renaming `gas-profiler` to `tracing` (#20082) ## Description Updates to docs renaming the `gas-profiler` feature flag to `tracing` commit f43e8724602a92e7a4de56a3664f115384e9ca4b Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Oct 30 09:18:12 2024 -0700 Rename `gas-profiler` feature flag to `tracing` (#20081) ## Description Renames the `gas-profiler` feature flag to `tracing`. Otherwise everything else is unchanged. If you were previously building with `--features gas-profiler` you should use `--features tracing` and everything should work and behave as before. Renamings performed: 1. `gas-profiler` => `tracing` 2. `gas_profiler_feature` => `tracing_feature` ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: The `gas-profiler` Rust feature flag was renamed to `tracing`. If you were previously building the Sui CLI from source with `--features gas-profiler` this will no longer work, and you should instead use `--features tracing`. This will enable the same features as before. - [ ] Rust SDK: - [ ] REST API: commit d52d38d6737ca18b3cfe7a4226dfc10e988ffd0d Author: Andrew Schran Date: Wed Oct 30 12:14:57 2024 -0400 Reduce minimum number of random beacon shares (#20091) --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a7d6e2803579279a1b2a96a5a8a3ab3ab864bfe3 Author: Tony Lee Date: Wed Oct 30 08:32:43 2024 -0500 Deepbook Indexer Manager Balance (#20083) commit 72b76232bac96189c4fbce7fef15e783647d6986 Author: Brandon Williams Date: Mon Oct 28 11:21:51 2024 -0500 rest: rework proto format to better reuse of common types commit f6501deb7ce074014265ba7d066695cce81fc06b Author: Brandon Williams Date: Fri Oct 25 17:32:56 2024 -0500 rest: add list_checkpoint tests commit 57b88de7bcb99a13745f105f1a007ec936f8760b Author: Brandon Williams Date: Fri Oct 25 17:05:05 2024 -0500 rest: make it less error prone when requesting a particular accept format commit 15730318bd688c885a565d9175e4b60a0fbdeb7b Author: Brandon Williams Date: Fri Oct 25 15:46:49 2024 -0500 e2e-tests: restructure rest tests commit 3eeb8b3003834f3d41be3e0fd8da97c757532c24 Author: Brandon Williams Date: Thu Oct 24 15:32:31 2024 -0500 rest: introduce protobuf as an acceptable content type Given the BCS format is inherently un-evolvable (unless you utilize enums) it makes a poor wire format as any changes made lead to incompatibilities with existing clients. Today the REST api uses JSON as the default, and human-readable, format and BCS for the binary one. This PR introduces Protobuf as another possible format type and converts the `list_checkpoints` api to support requesting a page of checkpoints in a protobuf format. The plan is to introduce support for protobuf to other apis and to phase out bcs in most places. 
commit 29cb03e5b0a605a9acd9209629ceb34747939c5e Author: Todd Nowacki Date: Tue Oct 29 16:03:38 2024 -0700 [move-compiler] Remove WarningFiltersScope from CompilationEnv (#20065) ## Description - To make the compiler more parallel friendly, the warning filter scope needs to be local to the environment it is in - Conceptually, CompilationEnv was always the wrong place for this, and trying to add parallelism just exposed this - Added a RwLock around Diagnostics to help with linters. It was a bit hard to tie the knot without doing this as a part of this PR ## Test plan - Ran tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b2700dac7c2491b3fe3c4459d0fff437473c3540 Author: Ashok Menon Date: Mon Oct 28 15:23:08 2024 +0000 indexer-alt: sum_coin_balances pipeline ## Description Similar to `sum_obj_types` in that it is tracking the live object set, but this index only covers coin objects owned by addresses, and it orders them by balance, which allows queries to return them in decreasing balance order. 
## Test plan Manually run the indexer on the first 100,000 checkpoints: ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 100000 ``` commit cb836faead40214e9cc918fc913aa40ea0734a5d Author: Ashok Menon Date: Mon Oct 28 23:27:02 2024 +0000 indexer-alt: factor out object updates ## Description The `sum_coin_balances` pipeline also needs to represent object updates, so this change factors out a type that can represent an update to an object (at a particular version), or a deletion at that version. ## Test plan ``` sui$ cargo nextest run -p sui-indexer-alt ``` commit e8690c52f89bf5b56fa961e97f71e67850a61ac6 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Tue Oct 29 14:52:05 2024 -0600 [docs] Updates for dbv3 (#20073) ## Description Update for the move of dbv3 to mainnet. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4166d71653835aa3b57bfa52d0d57e2e26f31fd6 Author: Manolis Liolios Date: Tue Oct 29 22:40:58 2024 +0200 [kiosk sdk] Fix doc comment (#20068) ## Description Fix doc comment to match the expected arguments on `getKiosk` call. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d526a74b8d02de62a912d13e1d02758d48fa902a Author: Andrew Schran Date: Tue Oct 29 16:28:54 2024 -0400 Enable gas-budget-based congestion control with overage & absolute cap (#20072) ## Description - Changes congestion control mode to use gas budget with cap. - Sets a separate (lower) limit for randomness-using tx. - Allows up to one tx per-commit per-object to exceed the budget, with accumulated debt tracking. - Adds absolute capped cost at a multiple of per-commit budget. ## Test plan Unit tests & simtests in PRs adding these `ProtocolConfig` parameters. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Enables gas-budget-based congestion control with allowed overage & absolute cap - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 90fd92b4a8f5cf357a82ea5d9dd2021dae7ef5b4 Author: Ashok Menon Date: Tue Oct 29 18:49:30 2024 +0000 easy(indexer-alt): clarify names of tuning parameters (#20055) ## Description Trying to make the purposes of various tuning parameters clearer by renaming them. ## Test plan :eyes: ## Stack - #20050 - #20051 - #20052 - #20053 - #20054 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 27978f1649649055d0f527070e1c2fc6f02bce2f Author: Ashok Menon Date: Tue Oct 29 18:31:27 2024 +0000 indexer-alt: sum_obj_types pipeline (#20054) ## Description The `sum_obj_types` sequential pipeline is the `sui-indexer-alt` equivalent of `objects` and `objects_snapshot`. It is a `sum`-mary of the object owner and type information as of a particular checkpoint (its watermark). It differs from `objects` and `objects_snapshot` in the following ways: - It only contains owner and type information (no coin balance, content, digest, or dynamic field information), but has a more "complete" set of indices for this data. - The type information has been standardised into the same style as used by event indices. - It uses the painter's algorithm to avoid processing redundant object updates (process transactions in a checkpoint in reverse order). - Updates to the underlying table are atomic (all writes are sent out within a single transaction, along with a watermark update). - It avoids redundant writes across checkpoints as well (e.g. if an object is modified multiple times, only the last modification will be turned into a DB update). This PR does not include the logic to delay updates (necessary to replicate the behaviour of `objects_snapshot`). This will be added at a later date. 
## Test plan Ran the indexer on the first 1M checkpoints in mainnet (produced 2481 live objects by the end): ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 1000000 ``` ## Stack - #20050 - #20051 - #20052 - #20053 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5ee814a7d3c75ef4e95c31635a017a80e97af39b Author: Ashok Menon Date: Tue Oct 29 14:52:25 2024 +0000 indexer-alt: sequential pipeline (#20053) ## Description Introduce a new kind of pipeline for indexing that needs to commit data in checkpoint order. This will be used for indexing data that would previously have gone into `objects` or `objects_snapshot`, where rows are modified in place, and so can't be committed out-of-order. Sequential pipelines are split into two parts: - A `processor` which is shared with the existing concurrent pipeline, and is responsible for turning checkpoint data into values to be sent to the database. - A `committer` which is responsible for batching up prefixes of updates and sending them to the DB when they are complete (no gaps between the last write and what has been buffered). The key design constraints of the sequential pipeline are as follows: - Although the committer must write out rows in order, it can buffer the results of checkpoints processed out-of-order. 
- It uses the ingestion service's regulator for back-pressure: The ingestion service is only allowed to run ahead of all sequential pipelines by its buffer size, which bounds the memory that each pipeline must use to buffer pending writes. - Sequential pipelines have different tuning parameters compared to concurrent pipelines: - `MIN_BATCH_ROWS`: The threshold for eagerly writing to the DB. - `MAX_BATCH_CHECKPOINTS`: The maximum number of checkpoints that will be batched together in a single transaction. - They guarantee atomicity using DB transactions: All the writes for a single checkpoint, and the corresponding watermark update are put into the same DB transaction. - They support simplifying/merging writes to the DB: If the same object is modified multiple times across multiple checkpoints, only the latest write will make it to the DB. ## Test plan This change is primarily tested by the `sum_obj_types` pipeline introduced in the next change. ## Stack - #20050 - #20051 - #20052 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3f45c764764b1aee7d9313e65a05b923e1e906f7 Author: Ashok Menon Date: Mon Oct 28 11:19:14 2024 +0000 fix(indexer-alt): watermark update off-by-one ## Description Fix a quirk of the watermark update logic in the concurrent pipeline, where we would never issue an update for just the genesis checkpoint. 
This fix was identified while working on the sequential pipeline where watermark updates and row updates are coupled and we therefore cannot afford to ignore the first checkpoint (doing so would cause it to drop all the rows from the first checkpoint as well). ## Test plan Run the indexer on just one checkpoint: ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 1 ``` It will write a watermark. commit 60fdf465434a26d1dfc6d01ff9b134db02149c9a Author: Ashok Menon Date: Mon Oct 28 15:20:59 2024 +0000 docs(indexer-alt): Explain tx_affected_object affected field commit bb4a3cc976011297997048b30d0b9a0cfb3e397f Author: Ashok Menon Date: Mon Oct 28 11:14:50 2024 +0000 docs(indexer-alt): remove stale references to committer task ## Description Docs in the concurrent watermark task referred to it as the committer task. This is vestigial from the time when a single task handled both of these jobs. This change just updates the docs. ## Test plan :eyes: commit c8af1613084d4da6b2e4b2b87ff9d8897de06c7c Author: Ashok Menon Date: Sat Oct 26 17:52:19 2024 +0100 docs(indexer-alt): explain back-pressure for concurrent pipeline commit d28aafd75ba11d3a0d9d7865156b5806091dab32 Author: Ashok Menon Date: Sat Oct 26 16:32:45 2024 +0100 refactor(indexer-alt): make Handler concurrent-pipeline-specific ## Description Sequential pipelines need different parameters and processing logic than concurrent pipelines, so it no longer makes sense to share the `Handler` trait. This change factors out the shared part as its own trait -- `Processor` -- and moves the rest into the `concurrent` module. 
## Test plan ``` sui$ cargo build -p sui-indexer-alt sui$ cargo nextest run -p sui-indexer-alt ``` commit 688b70f29c941381799d408438a2227e39445d7f Author: Jort Date: Tue Oct 29 09:37:07 2024 -0700 simplify workspace equal true deps (#20066) ## Description Use more consistency with deps which only have `workspace = true` ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0c9c30157f512e4207c0f082e35cd9397c96e4f9 Author: Eugene Boguslavsky Date: Tue Oct 29 09:08:57 2024 -0700 Fix slack messages (#20060) ## Description Fix slack messages ## Test plan 👀 --------- Co-authored-by: John Martin commit d4b30e8a3321ee50c46b33e0af963a955c6ca100 Author: Andrew Schran Date: Tue Oct 29 12:05:03 2024 -0400 Add support for an absolute limit on capped tx cost based on a multiple of per-commit budget (#20010) ## Test plan Added tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0fc16ba603e72bbbee93fcf65b4a05e90de41812 Author: Xun Li Date: Tue Oct 29 08:50:33 2024 -0700 [indexer-alt] Add ResetDatabase command (#20067) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit aec94f8f11bdc6856258a88a1dc7f23c1155b521 Author: Tony Lee Date: Tue Oct 29 08:35:27 2024 -0500 Historical Volume Query (Deepbook) (#20061) commit 77662a6931d4e6d2e9c41e4beceae46dbb02508f Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Oct 28 18:26:35 2024 -0700 Fix lint with `cargo xclippy` (#20001) ## Description `cargo xclippy` has `--all-features` enabled. 
Seeing these warnings: ``` warning: unused import: `move_vm_profiler::GasProfiler` --> external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs:23:5 | 23 | use move_vm_profiler::GasProfiler; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: `#[warn(unused_imports)]` on by default warning: unused import: `move_vm_profiler::GasProfiler` --> external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs:23:5 | 23 | use move_vm_profiler::GasProfiler; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: `#[warn(unused_imports)]` on by default warning: unused import: `move_vm_profiler::GasProfiler` --> external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs:24:5 | 24 | use move_vm_profiler::GasProfiler; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: `#[warn(unused_imports)]` on by default ``` ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 68ac44a2b180547635cb6733b0fb9b2ab6d7d0a9 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Mon Oct 28 17:08:41 2024 -0700 [move] Update some calls to `is_empty` in loops (#19918) ## Description Optimizes some calls to `is_empty` in loop conditions. This saves on the function call overhead and uses the bytecode instruction directly. ## Test plan Make sure existing tests pass. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [X] Protocol: New protocol version added due to internal changes to the Sui framework. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7378f3e42eade218377c6d110aba0c3aac86971b Author: Jordan Gensler Date: Mon Oct 28 19:46:54 2024 -0400 Revert "Stake warning" (#20064) Reverts MystenLabs/sui#20027 commit cb8014022dd676af96382dae6271dbd29889d00f Author: Todd Nowacki Date: Mon Oct 28 15:38:58 2024 -0700 [move-compiler] Add macro for "simple" visitors (#20062) ## Description - Add a macro for the simple visitor declarations ## Test plan - Updated one test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 495dbb7516ff489e4b0aab4f1e0147630457507d Author: Adam Welc Date: Mon Oct 28 14:35:44 2024 -0700 [move-ide] Avoid analyzing unmodified dependencies (#20046) ## Description This PR implements an optimization to the symbolication algorithm that avoids analyzing dependencies if they have not changed. Previously we were avoiding re-compiling unchanged dependencies but we were still analyzing them which introduces unnecessary overhead. The implementation involved separating analysis of the main program and the dependencies, and merging the results of the two together in the end (whether the dependencies are computed fresh or obtained from the cache). 
In particular, we now create two analysis visitors per analysis phase, one for the main program and one for the dependencies. For a simple package with Sui framework as a dependency we observe a significant reduction in analysis time. Before: ![image](https://github.com/user-attachments/assets/06148508-9b22-4f52-b8c9-21968dcc4b1f) After: ![image](https://github.com/user-attachments/assets/d14f6b5e-f2fa-4312-a1c2-81f68ce210c2) ## Test plan All existing tests must pass. I also tested manually to verify that reporting references (now merged between the main program and the dependencies) works correctly. commit 1837a5e366553e51d1f87ff2d034720476ee1b39 Author: Antonio Yang Date: Mon Oct 28 23:08:40 2024 +0800 sui-move: drop duplicate `--path` options (#20005) ## Description Use `default_value_if` instead of using the same alias, and avoid a panic in debug builds. Fix #20003 ## Test plan Reuse existing test cases --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: add `--run-bytecode-verifier` and `--print-diags-to-stderr` for `sui-move` - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> commit 1709d22fa258d4135e302e1673a79071566f68db Author: Ashok Menon Date: Thu Oct 24 15:12:30 2024 +0100 fix(indexer-alt): check for closed and empty receiver ## Description When the collector checks for a closed channel, it should also check that it's empty. 
commit 60eb88a778bb9c5afefcd9ca527f04af315e9eee Author: Ashok Menon Date: Thu Oct 24 12:22:23 2024 +0100 indexer-alt: introduce a "Pending" type ## Description Split apart `Indexed` into two parts -- `Indexed` and `Pending`. Originally, the hope was that both the sequential and concurrent pipelines could use the previous `Indexed` and `collector` implementation, but in hindsight, that was not a good idea, because the existing collector is now designed to deal with chunks of data that are not checkpoint-aligned, and sequential pipelines must write data out checkpoint-by-checkpoint. This change corrects that by splitting out the part of `Indexed` that dealt with splitting apart a single checkpoint's data into a dedicated `Pending` type that is internal to the concurrent collector. ## Test plan Run the unit tests and run the pipeline on the first 5000 checkpoints: ``` sui$ cargo nextest run -p sui-indexer-alt sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 5000 ``` commit ab99ed8794b65ff1c5965fe044c931d1b31b3e2c Author: Ashok Menon Date: Thu Oct 24 10:35:32 2024 +0100 indexer-alt: concurrent writes ## Description Separate out the concurrent committer one more time: Into a collector and a committer, and add support for concurrent writes to the new committer. To support this we need to also deal with the fact that the watermark task might receive the watermark for a checkpoint that is only partially written out, which is done through the introduction of the `WatermarkPart` -- this tracks a watermark as well as a fraction of the rows that come from the checkpoint at the tip of this watermark. ## Test plan Run the indexer pipeline from scratch up to checkpoint 5000, re-run it, run just a single pipeline, and run it at some later checkpoint as well (e.g. 
9,000,000 to 9,002,000) and make sure it exits cleanly after writing a complete data stream, in all cases. commit 24229cc2f1cd56fbd9c36e4ff234ec95fba6a9ee Author: Ashok Menon Date: Thu Oct 24 10:31:07 2024 +0100 fix(indexer-alt): stop early termination of watermark task ## Description Checking whether the watermark task's receiver channel is closed is not sufficient -- we also need to check whether it is empty, because all its senders may be gone, but it needs to flush its queue as well. ## Test plan This issue was easiest to reproduce by running a single pipeline that ended with a large number of empty checkpoints, eg. `ev_struct_inst` up to checkpoint 5000. Without this change, it would drop the last 100 or so (empty) checkpoints, and with it, it will reliably update the watermark to 5000 before exiting: ``` sui$ cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 5000 --pipeline ev_struct_inst ``` commit 0bf3cb3e8dbc95fa993f1d616899962baae1cf05 Author: Ashok Menon Date: Thu Oct 24 10:29:07 2024 +0100 chore(indexer-alt): watermarks docs/trace changes ## Description - Update tracing to refer to the task as the "watermark task" everywhere, not just "watermark". - Update the help message for the interval to omit a full stop to be consistent with other help messages. ## Test plan :eyes: commit 305d395d202d6be60a4e7b9620aeeaa8a1120d97 Author: Ashok Menon Date: Thu Oct 24 10:18:59 2024 +0100 chore(indexer-alt): handler -> processor in log messages ## Description Missed some places in the codebase where the processor task was still being referred to as the "handler". ## Test plan Searched for more references for "handler" in the codebase -- there are no more stale references. 
commit ecf46689136fb442303ee25c1c2ff0a75eeb881e Author: Ashok Menon Date: Thu Oct 24 10:16:06 2024 +0100 chore(indexer-alt): More detailed CLI flags ## Description Make CLI flag names a bit longer to accommodate more detail. This is in preparation for adding more flags (e.g. `--write-concurrency`) which would have otherwise conflicted. ## Test plan :eyes: commit 818b174efbadfb48e8e610635aee81ec8360bebf Author: Damir Shamanaev Date: Sat Oct 26 20:16:19 2024 +0300 [vscode] Update workspace recommendations (#20045) ## Description Update workspace recommendations, so that VSCode and GH codespaces recommend the right set for the project. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d9c5b9543941c7a248be9bb30483d0f177f63e83 Author: Ashok Menon Date: Sat Oct 26 01:42:18 2024 +0100 sui-system: fix next epoch stake book-keeping (#20039) ## Description Update `next_epoch_stake` when redeeming a fungible staked sui. This value is used as a sanity check that everything matches up at the end of an epoch. ## Test plan ``` sui$ cargo nextest run -p sui-framework-tests ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [x] Protocol: Protocol bumped to 68, introducing a framework change to fix next_epoch_stake book-keeping while redeeming fungible staked sui. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Arun Koshy Co-authored-by: Sam Blackshear Co-authored-by: Emma Zhong commit a044c13ebbb93befccf58cbb5ece58351a0daa9e Author: Stefanos Pleros <36567567+StefPler@users.noreply.github.com> Date: Sat Oct 26 00:35:32 2024 +0300 [Docs] Update local-network.mdx (#20038) ## Description Fixes anchor to "Persist local network state" section ## Test plan Before: https://docs.sui.io/guides/developer/getting-started/local-network#persist-local-network After: https://sui-docs-git-sp-anchor-patch-sui-foundation.vercel.app/guides/developer/getting-started/local-network#persist-local-network-state --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4df3956df8588c8bfac560ae9751ca9378ad5b89 Author: Daniel Leavitt <71237296+dantheman8300@users.noreply.github.com> Date: Fri Oct 25 14:04:30 2024 -0700 Revert "Update e2e counter docs [Docs] (#19719)" (#20043) commit 83a3d52e1ef303d1812c433878cec4662aedf6bc Author: Tony Lee Date: Fri Oct 25 13:00:27 2024 -0400 Multi pool query 24hr endpoint (#20042) commit a4a8a8f84d53e2ddb54eb8ebdd4efb65b0d34cbb Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Fri Oct 25 12:57:42 2024 -0400 indexer: drop objects_history full indices (#20041) ## Description the partial indices have been in prod for tnt and mnt, we can now drop the full indices. I will proactively drop them CONCURRENTLY before the release so that the release migration run will be fast. ## Test plan local run of diesel migrations --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5fd33ae11bfc77bc16d72c7b849713e5d17c2d59 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Fri Oct 25 09:56:16 2024 -0700 [replay] Allow local override of framework (#20026) ## Description This allows the Sui replay tool to replay transaction's using a local version of the framework. 
This can be done by passing the protocol override version to use as `-1` -- so `replay -tx -p -1` ## Test plan Local/manual testing commit 183f1465bb2e3adbc3bbf862e09ff8efd16dad35 Author: Andrew Schran Date: Fri Oct 25 12:07:23 2024 -0400 Add separate congestion control budget limit for randomness tx (#20009) ## Test plan Updated simtest --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 316249457ba8a246e96cbf56727f4628bfa44f48 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Fri Oct 25 08:55:21 2024 -0700 [move] Use u256 instead of bigint for parsing (#19998) ## Description Switches to using u256 instead of bigint for parsing. ## Test plan CI + additional tests to make sure parity was preserved. commit c4826d0cb01cbf3eb29fee4d93953aba53b2b30f Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Fri Oct 25 10:58:44 2024 -0400 DeepBook Indexer: update net balances query (#20037) ## Description Describe the changes or additions included in this PR. Update the DeepBook indexer API. ## Test plan How did you test the new or updated feature? Local testing end to end. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 773fd58b7e2ed3f9c16a93084f309e0b4785031b Author: Jordan Gensler Date: Thu Oct 24 21:39:49 2024 -0400 Stake warning (#20027) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e0b51bcf85fa1e35a83142d4b1ffd8b3543d4d17 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Fri Oct 25 05:53:37 2024 +0700 [Linter] Needless else (#16874) # Description Detects empty `else` branches in conditional structures, suggesting their removal for cleaner code. Aims to flag potentially unnecessary or unimplemented placeholders within `if-else` statements. Encourages code clarity and maintainability by eliminating redundant branches. 
# Run the linter ``` cargo test move_check_testsuite ``` # Testing File testing: needless_else.move ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Todd Nowacki commit 3bf947d7ef8f1b2855d7dca91156a61694792bf5 Author: Eugene Boguslavsky Date: Thu Oct 24 14:41:48 2024 -0700 Move IDE tests into its own workflow file (#20019) ## Description Move IDE tests into its own workflow file ## Test plan https://github.com/MystenLabs/sui/actions/runs/11507121245/job/32032582303?pr=20019 commit b6237148fe2e9787f9a741ef186b61a950376b10 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Oct 24 17:33:23 2024 -0400 indexer: handle sui safe mode (#20015) ## Description title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ba49d2257b4379164993473494f6222224e27383 Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Thu Oct 24 16:34:48 2024 -0400 DeepBook indexer - add event_digest, replace id, add checkpoint_timestamp_ms (#20008) ## Description Describe the changes or additions included in this PR. * Added a new column event_digest - digest + event index. This is a unique value. It is used as primary key and will ensure an event is not added twice. * Added a new column checkpoint_timestamp_ms - best way to know when the event was emitted, since some events don't emit the onchain timestamp. 
## Test plan How did you test the new or updated feature? Local end to end testing. I manipulated the local progress_store to force reread checkpoints, which resulted in an error when adding duplicate events (as expected). --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit df3cc8b62fb14fed2a8be5681599e20c09e9d16f Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Oct 24 16:08:25 2024 -0400 [kv store] basic bigtable client (#19890) ## Description Initial implementation of a KV store on top of BigTable. notes: * includes instructions for setting up a local BigTable instance via the emulator and a basic script for data ingestion(later will be added as an option to main ingestion binary) * generated proto files are included in the repo, so users won't have to compile the definitions themselves or install the `protoc` dependency. We can add an optional build step in a follow-up PR --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cf2a573c6c117227992ad272e5034f063441ee1f Author: Xun Li Date: Thu Oct 24 13:03:35 2024 -0700 Support system tx in replay (#20016) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 786cf7317f40ae22f929ae77f0fe571a884db742 Author: Adam Welc Date: Thu Oct 24 12:43:13 2024 -0700 [trace-view] Added support for line breakpoints (#19961) ## Description Added support for line breakpoints. ## Test plan Old and new tests must pass commit eef06e50ee70af185db51955b6cbe0b26599b3c6 Author: Tony Lee Date: Thu Oct 24 13:53:15 2024 -0400 Faucet Filtering (#19988) ## Description Faucet filtering for paths /v1/gas, /gas, and group all /v1/status More conditional checks to prevent panic ## Test plan How did you test the new or updated feature? Tested locally, to confirm on testnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 82ab06a91deafefc300e9691df089b83a9014e14 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Thu Oct 24 10:32:23 2024 -0700 [move] Move parsing to core types (#19992) This move parsing from `move-command-line-common` to `move-core-types/parsing` in preparation for further changes. ## Test plan CI commit 463aa77ac291ab3ecc5908f03b2a4f8a0152dd9e Author: Eugene Boguslavsky Date: Thu Oct 24 09:29:21 2024 -0700 Upgrading slack api to v1.27.0 and using slack-message instead of json (#20012) ## Description Upgrading slack api to v1.27.0 and using slack-message instead of json # Test Plan 👀 commit 0de6c86795fa776f566195d92af651faf2fbdbab Author: Andrew Schran Date: Wed Oct 23 23:47:13 2024 -0400 Fix bug in `load_initial_object_debts` (#20002) ## Test plan Follow-up PR --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6b690129928ba7ae454a6c5962fca7ceb5613bed Author: Todd Nowacki Date: Wed Oct 23 16:54:59 2024 -0700 [move-compiler] Improve error message for if without else (#19995) ## Description - Improve error message for if without an else - Persisting through typing for lints ## Test plan - New test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 934166722b9f1d285a8153cb77f8dc90673be457 Author: Eugene Boguslavsky Date: Wed Oct 23 15:30:29 2024 -0700 [ci]: Adding pre-land tests for Trace Adapter changes (#19985) ## Description Adding pre-land tests for Trace Adapter changes ## Test plan https://github.com/MystenLabs/sui/actions/runs/11488029510/job/31973922001?pr=19985 commit d808e331de71b1ef9ff8b52296a01bd6302175c3 Author: jk jensen Date: Wed Oct 23 15:26:07 2024 -0700 [suiop][env] add load-env subcommand (#19957) ## Description Let users load pulumi ESC envs into their current shell by name. The user can supply an env name or select one from pulumi's listing. ## Test plan ``` suiop-cli [jkj/suiop-load-env●●] cargo run -- help Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.49s Running `/Users/jkjensen/mysten/sui/target/debug/suiop help` Usage: suiop Commands: docker iam incidents pulumi service ci load-env Load an environment from pulumi help Print this message or the help of the given subcommand(s) Options: -h, --help Print help -V, --version Print version ``` ``` suiop-cli [jkj/suiop-load-env●●] cargo run -- load-env --help Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.52s Running `/Users/jkjensen/mysten/sui/target/debug/suiop load-env --help` Load an environment from pulumi if no environment name is provided, the user will be prompted to select one from the list Usage: suiop load-env [ENVIRONMENT_NAME] Arguments: [ENVIRONMENT_NAME] the optional name of the environment to load Options: -h, --help Print help (see a summary with '-h') ``` ``` suiop-cli [jkj/suiop-load-env●●] cargo run -- load-env mysten/incident-management/im-tooling 
Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.55s Running `/Users/jkjensen/mysten/sui/target/debug/suiop load-env mysten/incident-management/im-tooling` 2024-10-22T12:31:41.094852Z INFO suioplib::cli::env: setting environment variable NOTION_API_TOKEN 2024-10-22T12:31:41.094988Z INFO suioplib::cli::env: setting environment variable PD_API_KEY 2024-10-22T12:31:41.094993Z INFO suioplib::cli::env: setting environment variable SLACK_BOT_TOKEN 2024-10-22T12:31:41.094998Z INFO suioplib::cli::env: finished loading environment ``` ``` suiop-cli [jkj/suiop-load-env●●] cargo run -- load-env Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.51s Running `/Users/jkjensen/mysten/sui/target/debug/suiop load-env` > Select an environment: mysten/default/gcp-app-env 2024-10-22T12:31:51.335249Z INFO suioplib::cli::env: setting environment variable AWS_ACCESS_KEY_ID 2024-10-22T12:31:51.335369Z INFO suioplib::cli::env: setting environment variable AWS_SECRET_ACCESS_KEY 2024-10-22T12:31:51.335381Z INFO suioplib::cli::env: setting environment variable AWS_SESSION_TOKEN 2024-10-22T12:31:51.335391Z INFO suioplib::cli::env: setting environment variable CLOUDSDK_AUTH_ACCESS_TOKEN 2024-10-22T12:31:51.335398Z INFO suioplib::cli::env: setting environment variable CLOUDSDK_CORE_PROJECT 2024-10-22T12:31:51.335407Z INFO suioplib::cli::env: setting environment variable GOOGLE_OAUTH_ACCESS_TOKEN 2024-10-22T12:31:51.335415Z INFO suioplib::cli::env: Failed to set environment variable: GOOGLE_PROJECT. Value is not a string. 2024-10-22T12:31:51.335422Z INFO suioplib::cli::env: setting environment variable TOKEN_EXPIRY 2024-10-22T12:31:51.335430Z INFO suioplib::cli::env: finished loading environment ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6f55a89d64c1fa692e2f6d871bde701e8048786e Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Oct 23 18:09:50 2024 -0400 indexer: support force start and end checkpoints (#19987) ## Description useful for benchmark so that we can replay the same traffic again and again wo DB ops ## Test plan - ci to make sure it does not break other ingestion - added an ingestion test for the new start and end functions --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4aa1303ee2a8e46ae496b85d83d8a7e7b12cd671 Author: Daniel Leavitt <71237296+dantheman8300@users.noreply.github.com> Date: Wed Oct 23 14:48:22 2024 -0700 Update e2e counter docs [Docs] (#19719) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit bf5a2b66b0dbc0f164ac47604ec06aa10dd62d46 Author: Jk Jensen Date: Wed Oct 23 14:03:27 2024 -0700 [suiop][pulumi] add runtime arg to support ts projects commit 31faa714d409e0d5e9dcd2fa7e0a3bc9a7a2cde2 Author: gorpig <146006860+chris-gorham@users.noreply.github.com> Date: Wed Oct 23 15:26:22 2024 -0500 Cg/sui indexer alt docker (#19939) ## Description Add Dockerfile for sui-indexer-alt testing. ## Test plan executed build.sh script locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1d62a22aadf9e3128b07a8d690f9c4f7c2128fb1 Author: Adam Welc Date: Wed Oct 23 12:29:27 2024 -0700 [trace-viewer] Added missing test dependencies (#19986) ## Description I was trying to reduce memory footprint of files pushed to the repo for testing the trace viewer but missed one Move stdlib dependency that still needs to be there. This PR fixes this. ## Test plan All tests must pass (tested on a fresh checkout of the repo that they do) commit e46477f430d7e4c306f988daa3111edc5c6ea5cd Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Wed Oct 23 10:29:59 2024 -0600 [docs][ci] Examples change notification (#19978) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 752fc35fe01d4caf29ba317d86d790188fcec2af Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Oct 23 09:12:40 2024 -0700 Fix logic for end-of-epoch checkpoint when dkg has failed (#19976) If DKG has failed, we might accidentally construct two end of epoch checkpoints. commit 6d2ff01d2c94eb50da7b68cd598e22ddc06b0bf7 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Tue Oct 22 23:32:32 2024 -0700 [indexer] committers should read watermark hi directly from table (#19980) commit 3a4089d13261770b64655c4b7bc7620ec407a687 Author: Cam Swords Date: Tue Oct 22 22:29:23 2024 -0700 [move][move-2024][matching] Preserve int subject annotation during match pattern typing (#19973) ## Description Preserve int subject annotation during match pattern typing ## Test plan New tests pass --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 72a6e25c68605cc4a136a0cd9ffa11c8f3d3f5c5 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Oct 22 22:03:06 2024 -0700 [Fastpath] support consensus-certified user transactions in consensus commit handler (#19682) ## Description - Extract common logic between handling fastpath certified and consensus certified transaction to `process_consensus_user_transaction()`. - Process consensus-certified transactions in commit handler and send them to execution. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cb0a21a34d87ca6913ef4711a9e10b6d63e6c998 Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Tue Oct 22 21:51:26 2024 -0700 [consensus] Update proposer metrics (#19655) ## Description Add metric for the interval between proposals. ## Test plan [private-testnet](https://metrics.sui.io/goto/UhZ-qDkNR?orgId=1) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4a2d6fbb5bdcde82f30236e49d6b94576dc8d3c0 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Tue Oct 22 20:36:35 2024 -0700 [CLI] Add dev inspect to CLI (#19972) ## Description This PR adds a `dev-inspect` flag to all executing-related commands. ## Test plan Existing tests. ``` sui client split-coin --coin-id 0x167f03292b0689800a149b62d6ac2f5163e1dd4995964195c521e0f3ff46183a --count 2 --dev-inspect Dev inspect completed, execution status: success ╭───────────────────────────────────────────────────────────────────────────────────────────────────╮ │ Transaction Effects │ ├───────────────────────────────────────────────────────────────────────────────────────────────────┤ │ Digest: EHA6riF3SUU7UCa6xi11yTs3pN8exdzhF8hoNfiTGFF6 │ │ Status: Success │ │ Executed Epoch: 1 │ │ │ │ Created Objects: │ │ ┌── │ │ │ ID: 0x3232cf6376bd59392f453b3ec15d0a98178e35c9966c2e23e797ccbc84c342e0 │ │ │ Owner: Account Address ( 0xa24c44f9e1f09e3675d39c50f6182b36e5e8a8c39b677666910c8091d681bd8c ) │ │ │ Version: 3 │ │ │ Digest: BqjHeBCvAwT3GdpqEK2JoeGTjmRSD68i8nuRQ51k8UBk │ │ └── │ │ Mutated Objects: │ │ ┌── │ │ │ ID: 0x167f03292b0689800a149b62d6ac2f5163e1dd4995964195c521e0f3ff46183a │ │ │ Owner: Account Address ( 0xa24c44f9e1f09e3675d39c50f6182b36e5e8a8c39b677666910c8091d681bd8c ) │ │ │ Version: 3 │ │ │ Digest: 3XpnrJY6nBve6GGVepHyN9j3ht7KyvunQiJdhGy1rbLH │ │ └── │ │ ┌── │ │ │ ID: 0x87b0a482a46961d091743e6669d797c836a49e9b8c6c8ba31b127d8c42ace233 │ │ │ Owner: Account Address ( 0xa24c44f9e1f09e3675d39c50f6182b36e5e8a8c39b677666910c8091d681bd8c ) │ │ │ Version: 3 │ │ │ Digest: 3PYdrT9b13m8zsG4gCcTiUEeJsEEvanmmCpZP6PE6BNc │ │ └── │ │ Gas Object: │ │ ┌── │ │ │ ID: 0x87b0a482a46961d091743e6669d797c836a49e9b8c6c8ba31b127d8c42ace233 │ │ │ Owner: Account Address ( 
0xa24c44f9e1f09e3675d39c50f6182b36e5e8a8c39b677666910c8091d681bd8c ) │ │ │ Version: 3 │ │ │ Digest: 3PYdrT9b13m8zsG4gCcTiUEeJsEEvanmmCpZP6PE6BNc │ │ └── │ │ Gas Cost Summary: │ │ Storage Cost: 2964000 MIST │ │ Computation Cost: 1000000 MIST │ │ Storage Rebate: 978120 MIST │ │ Non-refundable Storage Fee: 9880 MIST │ │ │ │ Transaction Dependencies: │ │ AFFmrzPEf73hmgZkeEQbAMr2br6JKQZsiFUNoAPrDq94 │ │ AJudmLDT8jJ6uyh1gqZVF5gZ61GwuKx882BJkbkJMrXa │ ╰───────────────────────────────────────────────────────────────────────────────────────────────────╯ ╭─────────────────────────────╮ │ No transaction block events │ ╰─────────────────────────────╯ Execution Result Mutable Reference Outputs Sui Argument: Input(0) Sui TypeTag: SuiTypeTag("0x2::coin::Coin<0x2::sui::SUI>") Bytes: [22, 127, 3, 41, 43, 6, 137, 128, 10, 20, 155, 98, 214, 172, 47, 81, 99, 225, 221, 73, 149, 150, 65, 149, 197, 33, 224, 243, 255, 70, 24, 58, 0, 232, 118, 72, 23, 0, 0, 0] Return values ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: You can now pass `--dev-inspect` flag to all relevant `sui client` commands, similarly to the existing `--dry-run` flag. 
- [ ] Rust SDK: - [ ] REST API: commit ac10df6e7ce5bee86ca401d47e48ea2326f4423f Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Oct 22 17:24:48 2024 -0700 Fix one more possible crash in execution_driver (#19870) Don't even try to execute certs from prior epochs commit 9bafc95e3d3f3c604b3d4170b32b267691477f8c Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Oct 22 14:57:35 2024 -0700 Remove execution driver retries everywhere except mainnet (#19952) commit 63ab19da3a61fc2aba79c08161df6665c00dae6d Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Tue Oct 22 17:57:25 2024 -0400 DBv3 proposal parsing bug (#19970) ## Description Fixed a bug where proposals were not being parsed correctly. ## Test plan Tested locally end to end --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 482bc5baabdc9d860401509d62fca540fcde5214 Author: Jort Date: Tue Oct 22 14:19:14 2024 -0700 Upgrade error formatting: missing from module (#19795) ## Description Add formatting to missing from module errors from upgrades. When a struct, enum, or function is missing from a module the relevant module definition is shown along with the specifics of what was missing. ## Test plan Snapshot testing --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7e01df826d676519cba3c0be4e1c68652adf7df0 Author: Yaroslav Nekryach <80450727+YaroslavNekryach@users.noreply.github.com> Date: Tue Oct 22 23:47:35 2024 +0300 CLI generate-struct-layouts fix (#19954) Depending on the platform, "write" function may fail if the full directory path does not exist. So we have to create it before. ## Description According to this doc https://doc.rust-lang.org/std/fs/fn.write.html "write" function may fail if the full directory path does not exist. And it does on macOS with an error "No such file or directory (os error 2)". I added a create_dir_all call before, to create dir first. ## Test plan build --generate-struct-layouts flag must work on all platforms. It did not work on macOS Sequoia --- ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Fixed an issue on the `sui move` command regarding writing into a non existing directory. - [ ] Rust SDK: - [ ] REST API: commit f1dd33b7f25a3977ab6a73d5e75f17007a09c07c Author: Andrew Schran Date: Tue Oct 22 16:20:07 2024 -0400 Add support for overages on congestion control limits, with accumulated debt tracking (#19911) ## Description "Allow overage" mode allows up to one tx per-object per-commit to exceed congestion control limits, as long as the limit was not already exceeded. Accumulated debts are applied to future commits. ## Test plan Added tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 101b731a9c959dc7ff7d4fdf4139c24a58c0a890 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Oct 22 12:28:11 2024 -0700 simplify tx ledger hash in multisig toolkit (#19971) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7c22ad312edbff6b42eb12ffec4f0f2cb2487d61 Author: John Naulty Jr. Date: Tue Oct 22 11:55:56 2024 -0700 Show Transaction Hash of Ledger in MultiSig Toolkit (#19965) ## Description Show the Transaction Hash the user sees on the Ledger as an option for the User in the Multisig Toolkit. Currently, any user signing a Sui transaction with a ledger has a hard time verifying the transaction they are signing is the transaction they expect to sign. 
This feature provides a way for the user to verify the Transaction Hash presented by the Ledger from the MultiSig Toolkit Interface ## Test plan Tested Locally created transaction: ``` sui client transfer-sui --to 0x497f1b4197a6d9f7db028d79d068af120cf37d0250acd392eedb312a537712f9 --sui-coin-object-id 0xa9692324f4fd9ea6771e57aa0c600d18852da022d4bfc1f32a7e9a0c99b7eef9 --gas-budget 300000000 --serialize-unsigned-transaction [warn] Client/Server api version mismatch, client api version : 1.34.0, server api version : 1.36.0 AAABACBJfxtBl6bZ99sCjXnQaK8SDPN9AlCs05Lu2zEqU3cS+QEBAQABAABJfxtBl6bZ99sCjXnQaK8SDPN9AlCs05Lu2zEqU3cS+QGpaSMk9P2epnceV6oMYA0YhS2gItS/wfMqfpoMmbfu+QUAAAAAAAAAIPBDd5NqCOpDQdJEBJ8Vwn9iC+AVUxn/Z8opYbvVTYgESX8bQZem2ffbAo150GivEgzzfQJQrNOS7tsxKlN3EvnoAwAAAAAAAACj4REAAAAAAA== ``` Compute Transaction hash in multisig toolkit: `0x46ac3b8e4f61981f7c016e0bcb53cc2bbd4bc52f8d5e51e335e0a5dc15ab9cb5` image Verify same hash Ledger shows ![image](https://github.com/user-attachments/assets/5688ae33-3f7e-4541-9acd-ea122248e450) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 77304be7015e20ddd2442577ecb580a8f4469e2f Author: Adam Welc Date: Tue Oct 22 11:53:36 2024 -0700 [move-ide] Add debugging info about compilation/analysis times (#19964) ## Description What the title says (to make it easier to get timing feedback from the users). 
commit 5d9237771a35aee85e9bdd6c754e9cab477f6b31 Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Tue Oct 22 14:22:51 2024 -0400 DeepBook V3 TVL endpoint (#19962) ## Description Added an endpoint that calculates the net deposits at some timestamp. ## Test plan Tested locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3aac32e2e5c97293a20644bd1fbc287dd61d8be0 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Oct 22 11:05:16 2024 -0700 Fix rare crash when a transaction executes after its shared object assignments have been deleted. (#19949) Fix rare crash when a transaction executes after its shared object assignments have been deleted. This is only possible if a second execution of the same tx starts concurrently, and the shared version assignments have been deleted as we are checking for object availability in TransactionManager commit aca726944422426ac908a24506b43feee7aab993 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Tue Oct 22 13:07:53 2024 -0400 [data ingestion] avoid ls in collocated setup (#19960) ## Description Avoid listing all files in the local directory, as this can increase CPU usage in some setups --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5c6968bbd45f3ce13627f966aaa55b780c12c31b Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Oct 22 10:01:51 2024 -0700 Fix possible (but probably rare) race condition (#19951) Fix crashes in execution_driver due to inability to execute transactions. -------- We must hold the lock for the object entry while inserting to the `object_by_id_cache`. Otherwise, a surprising bug can occur: 1. A thread executing TX1 can write object (O,1) to the dirty set and then pause. 2. TX2, which reads (O,1) can begin executing, because TransactionManager immediately schedules transactions if their inputs are available. It does not matter that TX1 hasn't finished executing yet. 3. TX2 can write (O,2) to both the dirty set and the object_by_id_cache. 4. The thread executing TX1 can resume and write (O,1) to the object_by_id_cache. Now, any subsequent attempt to get the latest version of O will return (O,1) instead of (O,2). This seems very unlikely, but it may be more likely under the following circumstances: - While a thread is unlikely to pause for so long, moka cache uses optimistic lock-free algorithms that have retry loops. Possibly, under high contention, this code might spin for a surprisingly long time. - Additionally, many concurrent re-executions of the same tx could happen due to the tx finalizer, plus checkpoint executor, consensus, and RPCs from fullnodes. Unfortunately I have not been able to reproduce this bug, so we cannot be sure that this fixes the crashes we've seen. But this is certainly a possible bug. commit d82ec1f0b15687b781b21efa9b54e5a25fb28644 Author: Patrick Kuo Date: Tue Oct 22 18:00:50 2024 +0100 [bridge indexer] - bridge indexer e2e test (#19880) ## Description e2e test using bridge test cluster and local database ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f6b1300ec953cc53f725a79dd3d36184dc7f43d3 Author: jk jensen Date: Tue Oct 22 09:58:02 2024 -0700 [suiop][inc] minor improvements (#19941) ## Description - create the cache dir if it doesn't exist - print out the days when given ## Test plan tested locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a1459053f1174796771a23767904b9ed84b1df9e Author: Chaofan Shou Date: Tue Oct 22 09:02:43 2024 -0700 examples: fix custom indexer (#19930) ## Description The custom indexer example is broken: ``` error[E0195]: lifetime parameters or bounds on method `process_checkpoint` do not match the trait declaration --> remote_reader.rs:14:14 | 14 | async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetimes do not match method in trait ``` This PR fixes it. ## Test plan ``` cd examples/custom-indexer/rust cargo run --bin local_reader --release cargo run --bin remote_reader --release ``` --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fb6ec113647758e20166528edfadebf1c761924f Author: JasonRUAN Date: Tue Oct 22 23:51:43 2024 +0800 fix spelling mistake (#19956) ## Description fix spelling mistake ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 547c039c80b71123553deaee663b9d4dbc721619 Author: Xun Li Date: Tue Oct 22 08:31:15 2024 -0700 [Indexer] Save workload in benchmark (#19959) ## Description Be able to save generated workload in a specified dir instead of temp dir. This requires adding a flag to ask the reader to not gc files. ## Test plan Run locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 832da303fdcdbfde5dd0c43ee0b63f17dc4c3a1e Author: Brandon Williams Date: Mon Oct 21 18:39:57 2024 -0500 jsonrpc: don't explicitly remove old tables Don't explicitly remove old jsonrpc index tables as well as remove a possible panic when reporting rocksdb metrics. commit fc3259ad32717cae4bc37ca53862326d3a98420f Author: Ashok Menon Date: Tue Oct 22 12:38:14 2024 +0100 indexer-alt: pull out watermark task (#19943) ## Description Move logic for updating watermarks out from the committer and into its own task. This serves multiple purposes: - Simplifies the committer - Prepares the committer for supporting multiple concurrent writes (each write can then push its watermarks to this task to accumulate and update). - We may be able to re-use some of these pieces in a sequential pipeline (this is not a sure thing because the sequential pipeline may have slightly different requirements for all parts). ## Test plan Ran the indexer with the following configurations: - With no limit and shutdown by interrupt, - From scratch, up to checkpoint 3000, - Between checkpoints 9,000,000 and 9,002,000, - With and without `--skip-watermarks` enabled, - From a clean DB and on a re-run. And ensured it made progress and successfully shutdown at the expected time in each case. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5aea124ff83e8fe6961a6393cf8421cfc05f7f3f Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Oct 21 20:23:46 2024 -0700 Must not wait on notify_read_effects after epoch ends, on a transaction that might be reverted (#19934) Found this crash after adding the delay failpoint seen in this PR. I don't quite understand why that exposed this crash. commit 5a346e61755cb682ef7bba85f4b41846aed38157 Author: Xun Li Date: Mon Oct 21 17:52:34 2024 -0700 [Indexer] Add --skip-migrations option to --reset-database (#19945) ## Description This is useful when I need to manually run migrations. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4092045e83ab72ae47ba0ec9ed7f3ae56c6abd54 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Mon Oct 21 17:11:06 2024 -0700 add tracking for provider (#19946) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 368fc4b5748aea7a21202f0f3f3d82be477ec338 Author: Ashok Menon Date: Mon Oct 21 23:04:21 2024 +0100 refactor(indexer-alt): pull out `pipeline` (#19933) ## Description Separate the `pipeline` abstraction from the `handler` abstraction, and put it into its own module. This is in preparation for: - Splitting up the batching and committing processes. This will make it possible to test committing multiple chunks in parallel, and it may be possible to re-use some of this logic in... - an `in_order::pipeline` to use for pipelines that need to write out data strictly in checkpoint order and along checkpoint boundaries. ## Test plan Existing tests and CI. ## Stack - #19926 - #19932 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 54e4738486c3208455ebf2498f54a9a3538f94c7 Author: Ashok Menon Date: Mon Oct 21 22:53:14 2024 +0100 indexer-alt: introduce regulator for ingestion (#19932) ## Description The regulator is responsible for stopping the flow of checkpoints if it runs too far ahead of a high watermark. The watermark is optionally set by any ingestion subscriber, and will be used by pipelines that need to commit data in checkpoint order (like the `objects`/`objects_snapshot` pipelines) to prevent their internal buffers from growing without bound. 
This complements the existing back-pressure that's based on channel buffers, which is a useful mechanism for out-of-order/concurrent pipelines, but not so useful for in-order pipelines. ## Test plan The regulator is not used by any existing pipelines, but has been plumbed into the indexer and new tests have been added: ``` sui$ cargo nextest run -- ingestion::regulator ``` ## Stack - #19926 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e636d4daf29770aac8b5117b1abb71387d7c16f5 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Mon Oct 21 14:29:40 2024 -0700 [indexer] test events query forward and backward pagination using indexer backed rpc in cluster test (#19914) ## Description Add an e2e test that creates a package to emit events, and then execute a transaction to emit them. Then query indexer-backed rpc for expected behavior. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 06574f755f2bb0e17c05aad427dedab25c82ec64 Author: Manolis Liolios Date: Tue Oct 22 00:15:47 2024 +0300 [wallet] Disconnect active origin on dapp-kit disconnect call (#19916) ## Description Currently, when disconnecting from any dapp, the internal connection of the wallet for the given origin is not reset, making the experience unpredictable and confusing (main UX confusion we've been facing on every single bug bash). ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 11ddc9e6f58d2d6140ad27cc41413ab55e3fec13 Author: Sam Blackshear Date: Mon Oct 14 09:52:03 2024 -0700 [sui-types] avoid deserialization when fetching ID of non-upgraded packages The `original_package_id` function is showing up in some slow traces, so make it less expensive in the (common) case of grabbing the ID of a non-upgraded package commit 82b2907bd8cae3eabd07e8e224523ce4778b1d29 Author: John Martin Date: Mon Oct 21 13:56:31 2024 -0700 [fix] support pre-existing paths in provided bridge urls (#19940) ## Description ![image](https://github.com/user-attachments/assets/f3e20264-3b7f-4687-a839-fd054901a7df) ## Test plan will apply fix to mainnet proxy --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9bb6b499268a0fa528e5fce4a4cb6a4765ce4124 Author: Ashok Menon Date: Sat Oct 19 18:12:20 2024 +0100 indexer-alt: simplify event indexing ## Description Instead of having a separate table for each component of the cascading index, have a single table, and add multiple indices to it for each of the cascading cases. This should reduce the footprint and ingress to the DB, but mildly increases the risk that the DB picks a bad query plan. ## Test plan Run all the existing tests, and also run an experiment to confirm that the DB can successfully plan queries against this kind of schema. Our initial fear was that if we had multiple indices on a single table, then the DB may pick the wrong index, and there is still a chance that might happen if we add indices for disparate filters to the same table (i.e. we combine the event emit module and event struct instantiation fields into one table), but we can guarantee that the reader will only issue one query to each of these merged tables, and it should entirely overlap with one of its indices. commit 62bea940b25fd78d56d1cb617c4ad6dc25211dc9 Author: Ashok Menon Date: Sat Oct 19 15:46:19 2024 +0100 indexer-alt: event indices ## Description Adding pipelines to index all tables used to filter events. They differ from the equivalent schemas in the existing indexer in the following ways: - They only mention the transaction sequence number, and not the event sequent number. To use these tables, we first filter down to the transaction containing the event, and then scan the events in that transaction. - Struct instantiations are stored as a separate name field and then a BCS encoded type tag. 
This is to reduce their footprint (package IDs weigh twice as much when stored as text compared to BCS), and because we only ever filter using an exact match, so we don't need to store the instantiation as text. ## Test plan Ran the indexer locally and spot checked the events. commit bd4b4cefe262c8e70c5dab2188564d23b33b5210 Author: Ashok Menon Date: Sat Oct 19 16:53:05 2024 +0100 indexer-alt: committer handles empty batches, checkpoint stream ## Description This change handles two edge cases related to out-of-order commits that were uncovered through the work on event indices. In both cases the committer could get stuck, because none of the conditions guarding select arms were met, but the scenario was different in each case: - In the first case, the committer could get stuck because there were no more pending rows to write, but there were still pre-committed checkpoints to handle, and the logic to update watermarks based on the pre-committed checkpoints was guarded by a test that `pending_rows > 0`. - In the second case, the committer could get stuck because the pipeline shut down before any checkpoints came through, meaning it had no work to do. The fix was to allow the main `poll.tick()` arm to run if the receiving channel was closed, or there were pending precommits left. A short circuit was also added to move empty batches directly into the precommit list because we can treat them as already written out. 
## Test plan Ran the following pipeline twice: The first time, it should exit without writing any data (except the watermark), and the second time it should just exit because there is no data between its watermark and the given last checkpoint: ``` sui$ cargo run -p sui-indexer-alt -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 1000 --pipeline ev_struct_inst ``` commit 6bc9f54956bac2489540c0df705f56bee9ba7e44 Author: Hao Huynh Nhat <54271806+huynhnhathao@users.noreply.github.com> Date: Tue Oct 22 02:30:31 2024 +0700 deepbook doc query-the-pool fix typo (#19927) ## Description Fix typo in deepbook doc commit 9b79eb3ca038513b01f42b18fa2353db03b6ad98 Author: Brandon Williams Date: Thu Oct 10 10:20:59 2024 -0500 passkey: use same signing message as other schemes for consistency commit 84bd893f88191b842ca7afb77376d2408897caf1 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Mon Oct 21 10:53:42 2024 -0700 [Faucet] Add faucet version metrics for Grafana (#19938) ## Description This PR adds the faucet version metrics so that it can be displayed in Grafana. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b7bc2199050af9730b39ac5881e8bc271d7a94e7 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Oct 21 10:50:54 2024 -0700 [doc] add overview and limiter for sui bridge (#19931) ## Description as title ## Test plan markdown preview in IDE --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Ronny Roland commit 4a13cb193aa9a6e7c2e431ebfdcf409942abbd7e Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Oct 21 10:49:37 2024 -0700 allow dry run for eth claim (#19937) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 314960e2e857e4011bc353aa56e017b0847202d2 Author: Andrew Schran Date: Mon Oct 21 13:13:29 2024 -0400 Remove unsigned version of Discovery protocol (#19936) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [x] Nodes (Validators and Full nodes): removes unsigned version of Discovery protocol - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ee5a129edaee52cc1a0c06eed6d233df7f0389a4 Author: Andrew Schran Date: Mon Oct 21 12:59:04 2024 -0400 Remove assert preventing disabling random beacon on mainnet/testnet (#19935) ## Description This is safe because the corresponding commit is now in a mainnet release. The assert prevents current full nodes from starting from epochs before random beacon was enabled. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [x] Nodes (Validators and Full nodes): restores ability to launch fullnode from epochs before random beacon was enabled - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6831e94cadcd248a61fe9e472e7bbe129cdcb224 Author: Xun Li Date: Mon Oct 21 09:37:29 2024 -0700 Add local ingestion client to indexer-alt (#19924) ## Description This PR adds an IngestionClientTrait that abstracts over checkpoint fetching. The existing one is moved into RemoteIngestionClient. Added a new one LocalIngestionClient to read from files. The retry logic is kept in a top-level struct, which then calls into the traits. Each trait decides the type of error for various cases. ## Test plan Added a test. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a2eac6491e3722ec8ca1f306328cc15c74b55385 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Oct 21 09:18:49 2024 -0700 add readme for bridge indexer (#19912) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit df0cb674415537ff628bf98f459cd500f28d4f23 Author: Jonas Lindstrøm Date: Mon Oct 21 12:03:54 2024 +0200 Uncompressed G1 group elements (#19684) ## Description Currently, group elements are represented in compressed form and must be decompressed before each operation. This is a rather expensive operation to do, in particular if many elements are used. This PR introduces a new uncompressed representation of BLS12381-G1 elements and a function to add a vector of uncompressed elements. For 500 terms, computing a sum will be about 100x faster to compute than using compressed representation. ## Test plan Unit tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Introduce an uncompressed representation of BLS12381-G1 group elements for faster addition of group elements. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: benr-ml <112846738+benr-ml@users.noreply.github.com> commit 4e79cd11722b4ee204ed33be4a3f66d79581ed06 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sun Oct 20 15:39:09 2024 -0700 [bridge] remove disable-eth (#19923) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cc167f6135096a7276eda6795f28829349c61be1 Author: Emma Zhong Date: Sat Oct 19 17:20:30 2024 -0700 [indexer][test cluster] support indexer backed rpc in cluster test and add some indexer reader tests (#19906) ## Description This PR adds support in TestCluster to start indexer writer and indexer backed jsonrpc so that any testing done using TestCluster and fullnode jsonrpc can now be easily switched to using indexer jsonrpc. It's a step needed towards deprecation of fullnode jsonrpc. And a few tests are added/adapted from existing rpc tests. ## Test plan Added tests. --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 05b60183fb73308d0e7ab48b2c288b89ef5949e7 Author: Brandon Williams Date: Fri Oct 18 10:09:07 2024 -0500 consensus: use tonic-rustls for building tonic client with custom rustls config commit 80b00a51b146b345af190fe76229458d68053a01 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Fri Oct 18 16:12:49 2024 -0700 [indexer] align watermarks table schema in live indexer to alt indexer (#19908) ## Description Since watermarks table isn't being written to yet, modify the db schema to match alt-indexer. The changes are to rename entity -> pipeline, tx_hi_inclusive -> tx_hi, and pruner_hi_inclusive -> pruner_hi and make it a non-null column. This works out nicely for graphql, since the transactions query implementations expect a half-open interval. Also simplifies pruner logic, since it can write the `reader_lo` as `pruner_hi` after delay, and table pruners will delete between `[table_data, pruner_hi)`. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8519d53ae3c060c453deff0e487faba9f044a0fc Author: Brandon Williams Date: Fri Oct 18 17:07:29 2024 -0500 jsonrpc_index: bump coin index version to 1 commit 5102c1bc461f619746b8a216f8d9549f7a4bb840 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Oct 18 15:31:37 2024 -0700 Abort checkpoint service tasks on epoch change (#19915) ## Description Checkpoint builder may never finish because it misses transactions and consensus on peers has shut down. ## Test plan CI `test_simulated_load_reconfig_with_crashes_and_delays` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1f30f8cdc099577868acda36ed34cf458a33c6a5 Author: William Smith Date: Fri Oct 18 17:47:11 2024 -0400 [Bridge] Retry on finalized transaction not observed (#19882) ## Description In some cases, the client observes a finalized transaction before most bridge authorities. In such cases, today the code will return an error without retries, causing this validator to be skipped in terms of signature aggregation. If bridge validators are using slow ethereum fullnode providers, for example, this can be a large amount of the committee, resulting in failed signature aggregation attempts in some cases (due to not achieving quorum), or high gas costs in others (due to not getting fewer higher staked validator signatures). The solution here is to add retry logic in the map function. 
## Test plan Will run on a testnet node and see what happens. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 799591b88bf281cd5ff833915c321c15fc4e28c9 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Fri Oct 18 17:20:29 2024 -0400 indexer handler: limit unprocessed buffer size (#19913) ## Description without this change, the unprocessed buffer will grow unboundedly until OOM it did not manifest on previous processor b/c it has sleep codes of ``` _ = tokio::time::sleep(std::time::Duration::from_secs(config.sleep_duration)) ``` and `sleep_duration` is 5 seconds. ## Test plan - correctness via the added objects_snapshot test - oom via experiment result on benchmark env with mem usage [link](https://app.datadoghq.com/metric/explorer?fromUser=false&start=1729280070529&end=1729283670529&paused=false#N4Ig7glgJg5gpgFxALlAGwIYE8D2BXJVEADxQEYAaELcqyKBAC1pEbghkcLIF8qo4AMwgA7CAgg4RKUAiwAHOChASAtnADOcAE4RNIKtrgBHPJoQaUAbVBGN8qVoD6gnNtUZCKiOq279VKY6epbINiAiGOrKQdpYZAYgUJ4YThr42gDGSsgg6gi6mZaBZnHKGABuMMiZUggYojoAdOqqblhNeBoY8MAA1ngARnBOkb7yGNnI2vKZALTDIpmMHtp9AAQU67Ui9Y3ao1FwyBp4EHOiAsQ6POuDWOvADlCH6jwgPAC6VK7ueJihcK-VT-DAxUrxD7fEAaORoHKgCbwhAIHJJHAwJyZAEaCCZRJoRpOOSKZTpQlQAlE+hMZQiNweNAffgQeyYLDEhRowkiJRfHh8GHyQkIADCUmEMBQIn+aB4QA) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 54bc3f053dea89e68ffabbaf50c8f898d6242aa9 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Oct 18 13:32:24 2024 -0700 [bridge] add watchdog to bridge node (#19878) ## Description 1. run watchdogs on bridge nodes. 2. merges `sui-bridge-watchdog` to `sui-bridge` crate, so there is no circular dependencies ## Test plan unit tests, will deploy locally and test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3d9d5f25d79bc7547266fae790dd74a859b8da45 Author: Xun Li Date: Fri Oct 18 11:03:14 2024 -0700 [Indexer] Add synthetic ingestion and benchmark framework (#19899) ## Description This PR adds two things: 1. A synthetic workload generator for the indexer. It generates synthetic checkpoints and transactions which are written to the ingestion dir. Currently only transfer transactions are generated, but we could extend it later. 2. A benchmark framework that allows us to connect the synthetic ingestion with any indexer implementation. The indexer implementation will need to implement a wrapper for a trait type. ## Test plan Added an e2e test. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f78712373aed59080129700e154592f3518c3790 Author: Adam Welc Date: Fri Oct 18 10:36:36 2024 -0700 [trace-view] Added support for tracking reference values (#19864) ## Description This PR adds support for tracking reference values and displaying reference type when hovering over the variable name. The idea is really simple - for references, simply remember where they are pointing at and when asked to display their value, display the value of the original variable ## Test plan Tested manually commit 0ad70e1af06c22857627895c8949e437b5716eb0 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Fri Oct 18 04:38:50 2024 -0700 [indexer] quick fix for events in descending order without cursor (#19902) ## Description This PR fixes a bug where events queries in descending order without a cursor return no results ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit db54dac9442f52589403ebc91c5e61e43713e37f Author: Ashok Menon Date: Fri Oct 18 12:10:03 2024 +0100 move: extracting visitor (#19732) ## Description Introduce a visitor that can extract a value from some sub-structure, by using a path of `Element`s to find the sub-structure and then delegating to another visitor to deserialize it. This can be used as a building block in other visitors. It will be used to replace existing logic to create GraphQL representations of Move Values, and also to expose functionality to extract sub-values in the GraphQL schema. Paths work by either selecting some sub-structure (by field name, or offset), or filtering on that sub-structure's type (or variant name, if it is an enum variant). Offsets work for selecting elements of vectors, but also fields in structs and variants, this is to simplify the process of describing the location of a field in a positional struct/variant without leaking how its field names are generated. There is no affordance for selecting multiple values, to avoid introducing a complicated return value representation: The extractor either produces `Ok(Some(v))` if the path exists and the inner visitor succeeds, `None` if the path is not found or some error if the path is found but the inner visitor failed. ## Test plan New unit tests for this functionality ``` move-core-types$ cargo nextest run -- extractor_test ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 27595b424899e0d08cfd212186c5923d5c9ccfdc Author: Ashok Menon Date: Fri Oct 18 10:50:55 2024 +0100 chore(indexer-alt): note potential experiments commit e8eea2193d6eceaf294f08ae293d59cdd8027261 Author: Ashok Menon Date: Fri Oct 18 00:46:35 2024 +0100 indexer-alt: --skip-watermark ## Description Add a flag to skip writing to the watermark table. This would be useful when an indexer is running in parallel with another indexer and it's just not even productive to try writing to the watermarks table, because it will always be behind. ## Test plan Tested locally. commit eb821d3364f80279010f303b62c07598af5e57d3 Author: Ashok Menon Date: Fri Oct 18 00:41:40 2024 +0100 fix(indexer-alt): Flush pending rows before shutdown ## Description When running the indexer on a finite range of checkpoints, make sure committers' buffers are completely empty before shutting down their task, otherwise we may not fully write out all the intended rows for the range of checkpoints provided (there may be some data left at the bottom of the barrel). ## Test plan Ran the following: ``` cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 2000 ``` Corroborated that the data that results in the DB at the end: - Stops at the expected checkpoint (not before or after) - Matches counts of rows in the production mainnet DB for the equivalent tables at the same checkpoints. This can/should be made into an automated test, but that requires tempdb and migrations to be implemented (a comment has been added to this effect). 
commit f847ad930074db22a9ff25c11da034e98973f64e Author: Ashok Menon Date: Thu Oct 17 16:46:27 2024 +0100 indexer-alt: support only running some pipelines ## Description This makes it easier to perform selective backfills. ## Test plan ``` sui$ RUST_LOG="info" cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 1000 \ --pipeline kv_transactions \ --pipeline tx_affected_objects ``` Note that for now, the system does clip off the end of the checkpoint stream, rather than waiting for all workers to finish, and that still needs to be fixed. commit d3fa2b345b29f23fe193cf221ce3d27cecc365a4 Author: Ashok Menon Date: Thu Oct 17 16:27:26 2024 +0100 indexer-alt: support checkpoint upperbound for graceful shutdown ## Description Previously, the service would just hang after having wound down all pipelines during a graceful shutdown. This is because the graceful shutdown process was left waiting for an explicit interrupt signal to unblock the whole shutdown process. This change relaxes that constraint: If the service is already shutting down, then there is no need to wait for the explicit Ctrl-C. This makes it feasible to run the indexer for specific ranges of checkpoints. Note that we don't want to call the global cancellation token once ingestion is done, because that might stop a downstream task before it has acted on the downloaded checkpoints. Instead, the ingestion service communicates completion by closing its channel, which propagates through the handlers and committers. 
## Test plan ``` RUST_LOG="info,sui_indexer_alt=debug" cargo run -p sui-indexer-alt --release -- \ --database-url "postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" \ --remote-store-url https://checkpoints.mainnet.sui.io \ --last-checkpoint 1000 ``` commit a2384b6e0ca0d996945846d3033664afec916009 Author: Ashok Menon Date: Thu Oct 17 15:25:47 2024 +0100 easy(indexer-alt): Process checkpoint message mentions sequence number ## Description Add the sequence number for the checkpoint that the pipeline processed to the tracing message, so it's easier to follow along. ## Test plan Run the indexer locally. commit d0ad1361c031464bd188600bc0c974fd0b58f297 Author: Ashok Menon Date: Thu Oct 17 15:24:11 2024 +0100 chore(indexer-alt): Mention patch file in `diesel.toml` ## Description This makes it so that calls to `diesel setup` or `diesel migration run` will not forget to add the copyright notice to the schema file. ## Test plan Run the following: ``` sui-indexer-alt$ diesel migration run --database-url="postgres://postgres:postgrespw@localhost:5432/sui_indexer_alt" --migration-dir migrations ``` Notice that the schema doesn't change (the copyright notice is not removed) commit f74a74cbb1f2ae1fc0d5dadf27454f7e63d7d7ec Author: Ashok Menon Date: Thu Oct 17 15:15:11 2024 +0100 indexer-alt: committer watermarks ## Description Introduce the watermarks table, and update the committer to update it, taking into account out-of-order writes to the underlying tables. This change also allows the indexer to pick up where it left off by consulting the watermarks table for the high watermarks it had previously written. It also introduces the ability to configure the range of checkpoints being indexed (set an upper and lower bound). The metrics introduced by this change can be used to track per-pipeline checkpoint rate, epoch rate and transaction rate. Some TODOs have also been added for various tests and experiments that should be run based on this work. 
## Test plan A lot of manual testing. Some TODOs have been left for more extensive testing once there is more of a test set-up (depends on tempdb and auto-migration work). commit 3c32b64bc4bed11a7e4d15aed9f43cb8a0fe93f3 Author: Ashok Menon Date: Wed Oct 16 23:47:09 2024 +0100 chore(indexer-alt): Fix typos commit 75ae28b872f43972953e1858296175e6eb7e3653 Author: Ashok Menon Date: Wed Oct 16 23:40:45 2024 +0100 refactor(indexer-alt): Introduce Indexer abstraction ## Description Introduce an `Indexer` type to reduce the boilerplate of introducing a new pipeline, and gracefully shutting everything down. This also prepares the codebase for watermarks support: The initial checkpoint to start ingestion at can be calculated by taking the MIN checkpoint in the watermark table across all pipelines. ## Test plan Unit tests: ``` sui$ cargo nextest run -p sui-indexer-alt ``` And run the indexer locally. commit 995dac1e44724ffae04406c944ffeb07f7d93420 Author: Ashok Menon Date: Wed Oct 16 22:59:47 2024 +0100 indexer-alt: Make tx_digest the key for kv_transactions commit ae150cea897d6375c9ec0ea01dd9400bad19de6b Author: Ashok Menon Date: Wed Oct 16 22:42:01 2024 +0100 chore(indexer-alt): unpluralize StoredTxAffectedObjects commit 8667ef9f17594e29daf002b7e85b57f8661c6366 Author: Ashok Menon Date: Wed Oct 16 22:41:17 2024 +0100 indexer-alt: split out tx balance changes commit f7be6fd130d5a7821a398ca739fa2fba3898fb0d Author: Ashok Menon Date: Wed Oct 16 01:48:31 2024 +0100 indexer-alt: tx affected objects pipeline commit c59aee1c33458cd46e6cbbc3a253bbe65e61aee0 Author: Ashok Menon Date: Wed Oct 16 01:09:15 2024 +0100 indexer-alt: kv transactions pipeline ## Description Port over an idealised KV transactions table: - Removes the following fields: - `transaction_digest` - `transaction_kind` - `success_command_count` - Standardise naming for `tx_sequence_number` and `cp_sequence_number`. - Standardise representation of arrays of BCS encoded values (use a BCS-encoded array instead). 
So that in the end, the schema matches the schema we would eventually store in the KV store. This PR also re-implements balance change calculation (rather than relying on the implementation in `sui-json-rpc`). ## Test plan Pipeline run locally. commit a4c8b75d544cfd9dc6aa31d2895ea0478345ab85 Author: Ashok Menon Date: Tue Oct 15 17:59:01 2024 +0100 indexer-alt: kv objects pipeline ## Description A pipeline to fill what was previously `full_objects_history`. ## Test plan Pipeline was run locally. commit 7946b3cffe233573048bde808867d121f805a52d Author: Ashok Menon Date: Tue Oct 15 15:16:45 2024 +0100 indexer-alt: indexing pipelines + checkpoint indexing ## Description Introduce a framework for writing indexing pipelines and use it to add a pipeline for checkpoints. ## Test plan Tested the pipeline locally. commit 94614c86dc16ad486134f12437078314b907ca48 Author: Ashok Menon Date: Mon Oct 14 20:35:00 2024 +0100 indexer-alt: graceful shutdown on panic ## Description Handle the case where one part of the indexer panics and we need to cleanly shut down. ## Test plan Introduced a panic in the dummy ingester and make sure everything winds down correctly. commit 87879dcd3dfed263130e5c62cf1765024c487c47 Author: Ashok Menon Date: Mon Oct 14 19:27:57 2024 +0100 indexer-alt: integration diesel ## Description Initial set-up integrating a DB connection pool into the indexer. ## Test plan Run the indexer and check the stats from the DB pool are propagated to prometheus. commit a4868a39400c9ec2766624ab05d0f1c655490f2c Author: Ashok Menon Date: Mon Oct 14 13:05:44 2024 +0100 indexer-alt: cancel fetch retry ## Description If ingestion is stuck talking to a server that is repeatedly producing transient errors, it can get stuck. This change gives us a way to tell the ingestion service to give up from outside. 
## Test plan New unit tests: ``` sui$ cargo nextest run -p sui-indexer-alt -- fail_on_cancel ``` commit 26d5b4c81cf0c62724498b9f3fe2225b41e28845 Author: Ashok Menon Date: Mon Oct 14 12:33:44 2024 +0100 indexer-alt: retry on request errors ## Description Recover from failures to send a request to the remote store in the first place (Initially, I mistakenly thought that `send` would only fail if the request was malformed, and that we would otherwise get a Response of some kind, but this is not true). ## Test plan New unit test: ``` sui$ cargo nextest run -p sui-indexer-alt -- retry_on_reuest_error ``` commit 930f8c9cf883853fa0d5beaefd145061b467af2f Author: Ashok Menon Date: Mon Oct 14 11:44:15 2024 +0100 refactor(indexer-alt): helper to log retries ## Description Add a helper function to log retrying a fetched checkpoint. Updates a counter, and traces the error and reason. commit fd1ed4c9efe1a69e17861c9c067411bd29bd684a Author: Ashok Menon Date: Mon Oct 14 11:31:08 2024 +0100 indexer-alt: tag retries with reason ## Description Add a label to the transient retries counter to mark the reason why we're retrying, in case there are trends in particular retries. commit 835925ec13421443a15e13fea7e63eb1c7c2ea1c Author: Ashok Menon Date: Mon Oct 14 11:01:08 2024 +0100 indexer-alt: treat bcs deserialization errors as transient ## Description Assume that whatever store checkpoint data is being fetched from is correct. If it was not possible to get a full response back from the store, or that response could not be successfully deserialized, back-off and try again. ## Test plan New unit test: ``` sui$ cargo nextest run -p sui-indexer-alt -- retry_on_deserialization_error ``` commit 12e1f094234fd79cfbcb8e2601d932b89c182a8a Author: Ashok Menon Date: Mon Oct 14 01:39:51 2024 +0100 indexer-alt: testing ingestion service ## Description Adding tests for the ingestion service. In particular, how it handles various error scenarios, back-pressure, buffering, etc. 
## Test plan ``` sui$ cargo nextest run -p sui-indexer-alt ``` commit 719cff028696260ff512f4ba9087f033a71b380e Author: Ashok Menon Date: Wed Oct 16 22:06:56 2024 +0100 chore(indexer-alt): factor out ingestion clients and errors commit b5a5ea9e9e4f26f40c8b3303a353b2748073e1a4 Author: Ashok Menon Date: Sun Oct 13 23:07:30 2024 +0100 indexer: dummy backfill, printing digests ## Description Creating an ingestion backfill that just prints the checkpoint digest, for comparison with the dummy ingester in `sui-indexer-alt`. ## Test plan See previous commit for details -- this backfill is run with: ``` sui$ cargo run --bin sui-indexer --release -- \ --database-url postgres://postgres:postgespw@localhost:5432/sui_indexer \ run-back-fill $CP 70000000 \ ingestion digest https://checkpoints.mainnet.sui.io ``` commit 6b3841cdcc40596fde816a80b4796fe8dc5a0c53 Author: Ashok Menon Date: Sun Oct 13 22:54:55 2024 +0100 indexer-alt: ingestion service ## Description A service that fetches checkpoints and pushes them down to subscribers to process. ## Test plan Created a dummy ingester that prints the checkpoint sequence number and digest to test throughput. This was run for 100s on my local machine at two different points in mainnet's history, around checkpoint 9M, and checkpoint 64M. The former is around the time of Sui8192 and when on-chain checkpoint rate was around 1 per second, and the latter is from the FOMO mint when checkpoint rate on-chain was around 5 per second: ``` sui$ cargo run -p sui-indexer-alt --release -- \ --remote-store-url https://checkpoints.mainnet.sui.io \ --start-checkpoint $CP ``` Locally, this simple ingester was limited in both cases by bandwidth (around 100Mbps), which translated to checkpoint rates of roughly 70 and 160 per second at checkpoints 9M and 64M respectively. In a follow-up PR, I introduce an equivalent dummy ingester to the existing data ingestion framework, and it reached rates of around 50 and 130 per second.
It's possible that this is still due to bandwidth, because the existing framework can end up dropping a partially downloaded checkpoint if it times out (and has quite an aggressive timeout). commit 9e15b42b5e5fc833877effef24b5d27550aa044a Author: Ashok Menon Date: Sun Oct 13 00:00:29 2024 +0100 indexer-alt: tracing and telemetry ## Description Set-up the telemetry subscriber, and a prometheus metrics service. Instrument the `IngestionClient` to count statistics related to checkpoints being ingested. ## Test plan ``` sui$ cargo run -p sui-indexer-alt -- --remote-store-url https://checkpoints.mainnet.sui.io ``` commit 39ebf29194835fa5333603c09f505abc12402a2f Author: Ashok Menon Date: Sat Oct 12 19:03:35 2024 +0100 indexer-alt: detect transient errors and retry ## Description Add logic to detect specific errors and retry on just those, while surfacing client errors back up. The retry backoff is configured to not ever give up under an assumption that an ingestion pipeline is designed to keep chugging along regardless of what happens around it, and the world will shift around it to correct for transient errors.
## Test plan New unit tests: ``` sui$ cargo nextest run -p sui-indexer-alt ``` commit 871d6f18b4d47f3792e02e8cc5f5fda91c545c8d Author: Ashok Menon Date: Sat Oct 12 17:40:34 2024 +0100 indexer-alt: Fetch checkpoints from remote store commit e7caecd4e12d84811e591870ca549e9f39169e02 Author: Ashok Menon Date: Sat Oct 12 16:15:13 2024 +0100 indexer-alt: argument parsing commit bba750ee0256097cd3e9c6b65fa8fb45404f22de Author: Ashok Menon Date: Sat Oct 12 16:08:06 2024 +0100 indexer-alt: initial commit commit 8d2bb84a1cc1773077208e48182283fc20e58aba Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Oct 17 22:09:33 2024 -0700 [Checkpoint] wait for checkpoint service to stop during reconfig (#17556) ## Description Currently during reconfig, `CheckpointService` tasks, including `CheckpointBuilder` and `CheckpointAggregator`, are [notified to shut down](https://github.com/MystenLabs/sui/blob/b1540cdb2019f0501d2cd7f6dad208f65a66b6d2/crates/sui-node/src/lib.rs#L1551). But reconfig does not wait for them to finish shutting down. There can be a race between the reconfig loop proceeding to [drop the epoch db handle](https://github.com/MystenLabs/sui/blob/b1540cdb2019f0501d2cd7f6dad208f65a66b6d2/crates/sui-node/src/lib.rs#L1633), while `CheckpointBuilder` tries to [read from epoch db when creating a new checkpoint](https://github.com/MystenLabs/sui/blob/b1540cdb2019f0501d2cd7f6dad208f65a66b6d2/crates/sui-core/src/checkpoints/mod.rs#L1378). The race can result in panics. ## Test plan CI Simulation --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit ae86147e9149cea898c29a31d161d1b70c7742db Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Oct 17 20:07:35 2024 -0700 [bridge] add responses metrics in bridge auth aggregator (#19877) ## Description so we can monitor this in a dashboard. ## Test plan unit tests & test in production --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 15b559a9ed6e4244983b296192aa65d512b77eb7 Author: Eugene Boguslavsky Date: Thu Oct 17 20:05:45 2024 -0700 Sui v1.37.0 Version Bump (#19905) ## Description Sui v1.37.0 Version Bump ## Test plan 👀 commit 775f2cb85e6113639f15877127ed66c1f8ad4671 Author: Eugene Boguslavsky Date: Thu Oct 17 19:25:36 2024 -0700 Sui `v1.36.0` Framework Bytecode snapshot (#19904) ## Description Sui `v1.36.0` Framework Bytecode snapshot ## Test plan `cargo run --bin sui-framework-snapshot` commit 3e8d2312106e66e9b24e6acfcbdb721aa1a78651 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Thu Oct 17 18:44:08 2024 -0700 Revert congestion control change, use 3 txn per commit limit. (#19900) commit d239be81239619feefeb9fbc007f6d6c78335961 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Oct 17 15:51:31 2024 -0700 [indexer][watermarks][3/n] pruner updates watermarks lower bound (#19650) ## Description With the committer writing upper bounds, the pruner can now read from watermarks and determine whether the lower bounds need to be updated. 
Pruner does this by spawning a separate task, without touching the extant pruning logic (so all things are as is.) It will ignore any entries from watermarks that do not correspond to a `PrunableTable` variant. Part of a stack of PRs for watermarks simplify setting up test indexer: https://github.com/MystenLabs/sui/pull/19663 update pruner config: https://github.com/MystenLabs/sui/pull/19637 committer writes upper bounds https://github.com/MystenLabs/sui/pull/19649 pruner writes lower bounds: https://github.com/MystenLabs/sui/pull/19650 pruner prunes (wip) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 446d7d5f111330bf915f9d5bf4e96ff4cf7aa857 Author: Bridgerz Date: Thu Oct 17 21:57:20 2024 +0100 Fix ViewSuiBridge Bridge CLI command (#19869) ## Description Handle the case where a bridge committee member is no longer a validator. ## Test plan Tested locally. commit 67ac5c85a7df1c897336f987a7080f5887923f9a Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Oct 17 13:27:15 2024 -0700 Version Packages (#19898) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/sui@1.13.0 ### Minor Changes - 477d2a4: Add new errors to ExecutionFailureStatus enum ## @mysten/create-dapp@0.3.27 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 - @mysten/dapp-kit@0.14.27 ## @mysten/dapp-kit@0.14.27 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 - @mysten/wallet-standard@0.13.8 - @mysten/zksend@0.11.8 ## @mysten/deepbook@0.8.22 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 ## @mysten/deepbook-v3@0.12.1 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 ## @mysten/enoki@0.4.6 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 - @mysten/zklogin@0.7.23 ## @mysten/graphql-transport@0.2.24 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 ## @mysten/kiosk@0.9.22 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 ## @mysten/suins-toolkit@0.5.22 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 ## @mysten/wallet-standard@0.13.8 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 ## @mysten/zklogin@0.7.23 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 ## @mysten/zksend@0.11.8 ### Patch Changes - Updated dependencies [477d2a4] - @mysten/sui@1.13.0 - @mysten/wallet-standard@0.13.8 Co-authored-by: github-actions[bot] commit 477d2a41ae8c6a2af6afe4a241cdce73cda6aef9 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Thu Oct 17 13:12:59 2024 -0700 [ts sdk] Add new errors to ExecutionFailureStatus (#19897) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c3562a362bc04802e7ae074ab9947fa9697e4488 Author: Andrew Schran Date: Thu Oct 17 16:04:36 2024 -0400 Enable signed Discovery messages by default (#19895) ## Description Followup to PR #19587. ## Test plan As tested in first PR. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [x] Nodes (Validators and Full nodes): Adds authentication signatures to Discovery protocol messages. - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 491dcfe38a6d23e6771ebe1a33a674a0dbbe7d05 Author: Brandon Williams Date: Thu Oct 17 14:30:02 2024 -0500 ci: set log level to error commit a762240611457bf262699713bf3db71004631139 Author: Jonas Lindstrøm Date: Thu Oct 17 20:26:58 2024 +0200 Extend fixed-point numbers module (#19336) ## Description - Deprecated `fixed_point32` - Added `uq32_32` to replace it ## Test plan - New tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move `fixed_point32` has been deprecated for a new `uq32_32` module - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Todd Nowacki commit 0393579cd214511d5af61ba6fe052c42a34ff8a9 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Oct 17 12:03:38 2024 -0400 Version Packages (#19892) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.12.0 ### Minor Changes - 60f96ee: New stablecoin pool params Co-authored-by: github-actions[bot] commit 60f96ee37c2c7c251ab07f495190ef89ad479ed8 Author: Tony Lee Date: Thu Oct 17 11:28:22 2024 -0400 New Deepbook Pool Params (#19891) ## Description New Deepbook Pool Params ## Test plan How did you test the new or updated feature? Mainnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit df56cb03c8c04653a43cc076b4450d2cecb32e8e Author: Patrick Kuo Date: Thu Oct 17 12:59:38 2024 +0100 [Rosetta] - serialize total_coin_value to string instead of number to prevent precision lost (#19580) ## Description As titled ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: nikos.kitmeridis commit 8adfe733b6977acb879583a1cba354d419d5c707 Author: jk jensen Date: Wed Oct 16 16:29:58 2024 -0700 [suiop][image] enable q or esc to quit watching (#19881) ## Description Make it so the user can hit 'q' or 'esc' to exit the image watch interface ## Test plan https://github.com/user-attachments/assets/578ab67e-763e-4cd8-a4a7-1ac6006a4004 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6d87bd24f2f5e88757d697a21cc13398f0f932a1 Author: Jort Date: Wed Oct 16 16:01:58 2024 -0700 [move] add return locations to source map (#19885) ## Description store the return loc's in source maps ## Test plan --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6f3ce94e08d727635236980d6332bd91d6d46c78 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Oct 16 14:18:48 2024 -0700 [move] Initial trace format and implementation (#18729) (#19858) This PR adds the initial trace format, and the implementation of this trace format in the VM. I've gone through the tracing format with most of you synchronously so won't write out all the details here (but I will take it on to write a spec for the trace format once this is done so other folks can use it when consuming this format). Happy to go through it at any point in real time. Other TODO: right now the `MoveValue`s only support serialize and not deserialize back into Rust so we can only push a trace out from Rust but not consume it. The next thing I'm working on right now is adding support for that, and it may be either an update to this PR, or an additional PR depending on how involved that is... Tests and integration of this into the Move CLI (not the Sui CLI yet) is in the PR above this one. This keeps the tracing largely feature-gated in the VM, and the only additional overhead/change at runtime with `gas-profiling` turned off is the additional argument, but this argument is unused and should be optimized away (and at worst only add essentially no overhead). I kept the `gas-profiling` feature flag and gated the new tracing under it. The plan being to eventually rename that flag at the same time that we update test coverage and gas profiling to use this new tracing format as well (but that's for a couple future PRs!). https://github.com/MystenLabs/sui/pull/19452 is stacked on top of this and cleans up the insertion points of the tracer into the VM. --- Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cbffe5b8522ed7723371cf8fcfa802f47c1178ed Author: Eugene Boguslavsky Date: Wed Oct 16 12:51:13 2024 -0700 Update mac os runner (#19883) ## Description `macos-latest-xl` is being deprecated. ![Screenshot 2024-10-16 at 12 44 20 PM](https://github.com/user-attachments/assets/d0f49345-2538-4836-bdbf-9975d6df5299) ## Test plan 👀 commit b78eb1098ee2fcd08fb1576d049c354dd57fc8ca Author: Anastasios Kichidis Date: Wed Oct 16 20:00:19 2024 +0100 [Consensus] fix amnesia recovery boot run (#19774) ## Description Currently if someone recovers their node within epoch `R` with a snapshot that's from epoch `< R-1` , consensus will not start in amnesia recovery mode in epoch R as the boot counter will have already been incremented as the node is trying to catch up from earlier epochs. This is problematic as it defies the whole point of the automatic amnesia recovery. Instead on this PR we track consensus participation activity from earlier epochs/run and only then we increment the boot counter.
Otherwise we keep the boot counter to `0` so we effectively enforce amnesia recovery until we get to an epoch where the node is able to participate. In this case participate means "able to have performed at least one commit". ## Test plan CI/PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 62f72cdea33c0782f3116dfd194ae81c562a0b43 Author: jk jensen Date: Wed Oct 16 10:57:07 2024 -0700 [suiop][image] add --watch/-w to continuously query image build (#19872) ## Description Add new arg to continuously query image remote builds ## Test plan https://github.com/user-attachments/assets/a6b3ec5d-605d-48f2-a241-9ade8bfbfbf3 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 692f6c6a89b2e32659574fa20828222d289b102d Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Oct 16 10:55:35 2024 -0700 [move][ir-to-bytecode] Add source locations to `Type` in the Move IR (#19875) ## Description Plumbs in source locations for `Type`s in the Move IR. This will be useful for adding return types to the Move source maps. ## Test plan Make sure existing types pass. 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 31b15dde1758a6ba7d7029ecbd74804180f4800c Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Wed Oct 16 08:51:49 2024 -0700 [bridge] remove test-cluster's dependency on sui-bridge crate (#19840) ## Description as title. It also removes the transitive dependency of other crates on sui-bridge. ## Test plan tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8fec5f8caeaf0d0d5a9d13c776fc8dd80219719e Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Wed Oct 16 01:48:58 2024 -0700 Add known peers for p2p network connection monitor (#19815) ## Description This will enable quinn stats for p2p network --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f8e33f58104e6ed0d38e756beb0572e3bc263f10 Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Wed Oct 16 00:58:39 2024 -0700 [consensus] Migrate sui to Mysticeti connection monitor (#19814) ## Description Will follow up to ensure that known peers are set in sui via discovery so that metrics will be updated. Also will be adding TonicConnectionMonitor for Mysticeti to use. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2224cf355dc25d6ded1d9a4b93c6f31939b80a01 Author: Brandon Williams Date: Tue Oct 15 20:53:11 2024 -0500 rest: enable resolving of literals (#19857) Improve the format of `UnresolvedTransaction` as well as introduce the ability to resolve un-serialized pure argument literals. 
Example of JSON payload of an `UnresolvedTransaction`: ``` { "inputs": [ { "object_id": "0x2d7f57570815c43eb485be9018caabd11ac863e9d49b1d9e33b3f4ac40cadc72" }, { "value": 1 }, { "value": "0xc6fc0e38458632b1dd1d60b3a833865268a0faebe36864c71fb9805bd5a116cf" } ], "commands": [ { "command": "split_coins", "coin": { "input": 0 }, "amounts": [ { "input": 1 }, { "input": 1 } ] }, { "command": "transfer_objects", "objects": [ { "result": [ 0, 1 ] }, { "result": [ 0, 0 ] } ], "address": { "input": 2 } } ], "sender": "0xff69fdb72bfc6ff5a337ff01c650fb0ce72447105ff050c2039c6b5b267b04a7" } ``` which is resolved into the following `Transaction`: ``` { "version": "1", "kind": { "kind": "programmable_transaction", "inputs": [ { "type": "immutable_or_owned", "object_id": "0x03517c0699f36a1df2e93b6c18db815d8f247a853465aec9cc48f9ceae4561ca", "version": "1", "digest": "7WyoNoiZQmTj75viHKYhA48tCSJ5CFqA6HtzJ55hehxP" }, { "type": "pure", "value": "AQAAAAAAAAA=" }, { "type": "pure", "value": "JtcBTFgpW/n7HipqY6oz4bka0J8PyUPlSQbjR5lCq0Y=" } ], "commands": [ { "command": "split_coins", "coin": { "input": 0 }, "amounts": [ { "input": 1 }, { "input": 1 } ] }, { "command": "transfer_objects", "objects": [ { "result": [ 0, 1 ] }, { "result": [ 0, 0 ] } ], "address": { "input": 2 } } ] }, "sender": "0xb73663359e72a36122aaf3f08629fa684b667e0fe6e356b119c623c7c9459888", "gas_payment": { "objects": [ { "object_id": "0x94b1bef12a8db7b60fa89ad9bc2966d661a3a1002d921ada981e700648470304", "version": "1", "digest": "9kcUt38E4i8g5DartpUdBxW9m5n1u8AaJLyintWiddd6" }, { "object_id": "0xacc757731db589ef093130e0d6c839e809f9673a51be92667ecbcd486db73995", "version": "1", "digest": "2U3xxN1G9vf4raCGUHz6AejqVMWJCkEBmsbLgqwae5be" }, { "object_id": "0xd0891f6c419f3dd1a531e70779979f3c7aa91d13ae9125ffbae05f3960ee4249", "version": "1", "digest": "DkJRVUKfwV9pZ1NYEydvKwGpJei7YDDRemfRahruBDsQ" }, { "object_id": "0xde8bdc786f18e7d1b9d2ac975acd640952fd3c75303e4f35d0657f90ab7e947e", "version": "1", "digest": 
"8RJuNzFawuVbFz6zSH1GMeJuwiHfT2ZzfHKHdr6LrJrU" } ], "owner": "0xb73663359e72a36122aaf3f08629fa684b667e0fe6e356b119c623c7c9459888", "price": "1000", "budget": "5952000" }, "expiration": null } ``` commit 6cc663c639ca3fa1421db7021e0af33b0466e1be Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Tue Oct 15 18:42:57 2024 -0700 [indexer][writer] Add first_tx_sequence_number to epochs table to decouple from checkpoints table (#19773) ## Description The `epoch_total_transactions` field on `epochs` table is calculated today from `checkpoints.network_total_transactions`, which is not ideal since the latter is prunable and `epochs` will not be pruned for the foreseeable future. To remove this dependency, we add `first_tx_sequence_number` to the `epochs` table at epoch boundary. That is, the network total transaction count from the final checkpoint of the epoch becomes the first tx sequence number of the new epoch. This also means that at epoch boundary, the current-to-be-previous epoch's `epoch_total_transactions` is derived from the checkpoint's network total transactions - the epoch's `first_tx_sequence_number`. Consequently, this will also help in the pruner implementation, as given an epoch we'd like to know the corresponding cp and tx. This encompasses just the writer change. Before updating the read path, we will need to backfill the instance. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 71f9203abbf9bf0a3a887e1f2157a7caed7bcef2 Author: Brandon Williams Date: Tue Oct 15 10:19:36 2024 -0500 jsonrpc: introduce a meta column family for tracking index initialization Introduce a meta column family that is used to track initialization of the Index DB itself as well as the status of the various column families themselves. commit 814af2a99e0e66bda4fec1748082ce4af7fd3137 Author: Brandon Williams Date: Tue Oct 15 10:18:02 2024 -0500 jsonrpc, rest: reduce batch size to 128MB before writing Reduce the minimum batch size to write out during indexing the live object set from 256MB to 128MB, these smaller batch sizes resulted in a small improvement to index initialization. commit d7e977a0ac58b885a98c4967e3e82cce43e720ff Author: Brandon Williams Date: Fri Oct 11 15:24:26 2024 -0500 jsonrpc: add test for sorted coin balances commit c3371a28c783ff1e54b71c873b98f80b394f30a3 Author: Brandon Williams Date: Fri Oct 11 14:07:40 2024 -0500 jsonrpc: initialize new coin_index_2 Initialize the new coin_index_2 index and clear the old coin_index column family. commit 47ba7e4684147b91cbf0fa628ce83f4a4b80d3bc Author: Brandon Williams Date: Fri Oct 11 12:47:08 2024 -0500 jsonrpc: introduce coin_index_2 with coins sorted by balance Introduce a new coin_index_2 index with coins sorted by balance in decreasing value (sorted high to low). commit 6159973ae01f7b74505271d5ae76681f6594fe59 Author: Brandon Williams Date: Fri Oct 11 10:25:19 2024 -0500 core: factor out parallel live object indexing Factor out the parallel live object indexing, used for initializing the rest indexes, into more general purpose and reusable logic.
commit 4ad76b5679ec0e11b635537074b552cc96d584ba Author: Brandon Williams Date: Fri Oct 11 09:07:27 2024 -0500 jsonrpc: move indexes from sui-storage to sui-core commit 99cebd9469dea1c2bbbd73d4cd6fb8284203a93a Author: Brandon Williams Date: Fri Oct 11 08:45:29 2024 -0500 db_tool: remove loaded_child_object_versions search match arm commit 455e6c981e2af4da7bcd91e1e6642b3f8a10cb5a Author: Eugene Boguslavsky Date: Tue Oct 15 16:00:17 2024 -0700 Fix node operator doc quote (#19874) ## Description Fix node operator doc quote ## Test plan Before: ![Screenshot 2024-10-15 at 3 31 55 PM](https://github.com/user-attachments/assets/c6aabc7b-328c-4404-be4a-b8b5c124e111) After: ![Screenshot 2024-10-15 at 3 32 00 PM](https://github.com/user-attachments/assets/3ef6d6dc-516a-4771-9a2f-a898610893bb) commit 3be7841e208731a69b8dd7ad5b39fa94f92fe59f Author: Eugene Boguslavsky Date: Tue Oct 15 15:28:22 2024 -0700 Add release info for sui-full-node doc (#19808) ## Description Add release info for sui-full-node doc ## Test plan @ronny-mysten ! --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit ee100a7ed7e4cc6503fb5e7b94349151f24597e0 Author: techdebt-99 <150741822+techdebt-99@users.noreply.github.com> Date: Tue Oct 15 16:21:56 2024 -0600 Update sponsor-txn.mdx (#19873) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8fa9f571a26f3e3faeec78afca706c8eedb2b283 Author: Zhe Wu Date: Tue Oct 15 14:31:16 2024 -0700 Use more realistic cap factor in simtest (#19862) ## Description So that we can exercise cap factor is higher and lower than the gas budget. ## Test plan Updating tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e2c7aa1f4e3d0facbff0af78aad5bda283bc145e Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Tue Oct 15 15:31:04 2024 -0600 [docs] Update dbv3 content (#19867) ## Description DBv3 is now on Mainnet. Makes the sentence removed somewhat pointless. ## Test plan 👀 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 25c52c659ff00c0c54834de73760b38b6dbca938 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Oct 15 13:57:16 2024 -0700 [gql-transport] fix rawInput for transaction queries (#19866) ## Description Describe the changes or additions included in this PR. 
## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit aa2ca1d79fc3de862e398716f044337b6757acc1 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Wed Oct 16 03:31:17 2024 +0700 [Linter] Redundant ref deref (#16491) ## Description This lint rule detects and reports unnecessary temporary borrow operations followed by a dereference and a local borrow in Move code. It aims to improve code efficiency by suggesting direct usage of expressions without redundant operations. Implementation The lint rule is implemented as follows: A temporary borrow (TempBorrow) Followed by a dereference (Dereference) Where the dereferenced expression is either a BorrowLocal, Borrow, or another TempBorrow ## Test plan Added more use case including false positive, false negative case ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Co-authored-by: Cameron Swords commit 6b231597e707bae887ca038d670ba3aa02775d37 Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Tue Oct 15 11:59:35 2024 -0700 [docker] fix dockerfile for kaniko (#19865) ## Description kaniko doesn't like `*` wildcard, so have to be a little bit smarter. ## Test plan had a successfully built from kaniko. 
I pulled it down and inspected manually, binaries are there --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8aac6c2cfcbc48efd1ccb34ea8cfd28eae99b701 Author: Tony Lee Date: Tue Oct 15 14:44:56 2024 -0400 Faucet Routes (#19692) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e510867bd334fdd7b95bf0dab2ea7ce6b49f2409 Author: nikos-terzo <139557324+nikos-terzo@users.noreply.github.com> Date: Tue Oct 15 21:17:19 2024 +0300 Update typescript sdk examples (#19816) ## Description Update Typescript SDK examples in README to work ## Test plan Copy-pasted changed code to a new project depending on '@mysten/sui' and checked for no relevant typescript errors. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4241c352fff0d71b40ec3148e62b558877c82d13 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Tue Oct 15 11:06:54 2024 -0700 [move][docgen] Render string constants for error constants (#19823) ## Description This updates how constants annotated with `#[error]` are rendered in docgen. This also adds the `#[error]` annotation on them in the generated documentation. Note that we don't try to render all bytearrays as strings, but only constants with the `#[error]` annotation to avoid rendering normal bytearrays strings. ## Test plan Added a test to make sure we render error-annotated consts as we expect. commit 4357bfa20c4a4eb1675f2f31e32a34b8f5956db2 Author: Eugene Boguslavsky Date: Tue Oct 15 10:35:55 2024 -0700 Remove mysten-tap sui.rb update (#19861) ## Description Remove mysten-tap sui.rb update ## Test plan 👀 commit 1e72cfbe5a269b880245c4de777a8126c4d3557f Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Oct 15 10:07:29 2024 -0700 [db] allow putting larger layer in higher levels (#19854) ## Description A followup to #19770. For cfs larger than 250GB, they need to be able to shard the files. ## Test plan Applied the new setting to other large cfs (transactions, effects) on fullnodes. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 34416bfce4f949c7ba07e08a47535f15f59dbf4a Author: Anastasios Kichidis Date: Tue Oct 15 13:26:52 2024 +0100 [Consensus] DagState to evict blocks based on GC round (#19465) ## Description Currently we evict the cached ref entries in DagState whenever we `flush`. At this point we evict the entries for each authority by dropping all the blocks which are `<= evict_round`, where `evict_round = authority_latest_commit_round - CACHED_ROUNDS` . The `CACHED_ROUNDS` here allow us to keep around for a little longer committed blocks. Of course all the blocks that are `> evict_round` are kept. This can work fine so far where we don't use GC , as we expect eventually to include blocks from other peers as weak links - no matter how far back they are - and that will move the `authority_latest_commit_round` and trigger the eviction of their blocks from our memory. Now with GC we don't have those guarantees. It is possible to get to a scenario where even a group of slow nodes that are constantly behind `gc_round`, they keep proposing but their blocks never get committed. Although their blocks should not end up in others DAGs , they will remain in their own and fill up their memory. Overall, the current approach will provide weaker guarantees. This PR is changing the eviction strategy so it's driven by the `gc_round`. Doing though the eviction purely on the `gc_round` will change a lot the semantics of the `DagState` as one of the intentions was to keep recent cached data from each authority. That would also be particularly visible for authorities for which we do not have frequent block proposals, as we could end up always evicting all their blocks if they are behind the `gc_round`. Then this would not allow us to do certain operations we used to do before with cached data(ex get latest cached block per authority). 
For that reason this PR is changing a bit the semantics of the `CACHED_ROUNDS` and from now on it will be the minimum/desired amount of rounds we want to keep in cache for each of authority. The eviction algorithm will still attempt to clean up records that are `<= gc_round`, but it will also make sure that `CACHED_ROUNDS` worth of data are kept around. Especially for more edge case situation where a node has not produced blocks `> gc_round`, we guarantee to keep `CACHED_ROUNDS` even when all of them are `<= gc_round`, but we'll eventually evict anything before - practically like a moving window. ## Test plan CI/PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 72e54057fc8fe651334772d5918c687a28d21a78 Author: Zhe Wu Date: Mon Oct 14 23:33:00 2024 -0700 Implement gas budget based congestion control with PTB/Obj cap (#19853) ## Description Introducing a new congestion control mechanism using gas budget as estimate but cap transaction cost based on transaction shape. ## Test plan Unit test Integration test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2e681a0063cecb4c31ff7aa3c42f1821d8237799 Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Mon Oct 14 21:56:42 2024 -0700 [consensus] Enable distributed vote scoring in mainnet v63 (#19799) ## Description Bump to protocol version 63 ## Test plan How did you test the new or updated feature? Has been running nightly in private-testnet and also running successfully in testnet https://metrics.sui.io/goto/KDyf-3kHR?orgId=1 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [X] Protocol: Enable distributed vote scoring in mainnet v63 - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e920c3e0cfc8673e0858c69a94d8bbc261b0fa27 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Oct 14 21:36:13 2024 -0700 Lower per object queue age and length thresholds (#19851) ## Description The original limits on the object queue are set with 1s checkpoint interval. Having the oldest transaction on an object queued for 1s is almost tolerable. In hindsight it should be lower than the checkpoint interval to ensure healthy system. Now checkpoint interval is targeting 0.2s - 0.25s, so lowering the limits further to 0.2s, to ensure smooth checkpoint constructions. ## Test plan CI PT Reading `num_rejected_cert_during_overload` metric from validators. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6592989f0985347985f20b5ea4f037575c031d7b Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Mon Oct 14 14:00:28 2024 -0400 Version Packages (#19846) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.11.0 ### Minor Changes - 7b8e8ad: Mainnet pool packages ## @mysten/graphql-transport@0.2.23 ### Patch Changes - 5299c18: Update the GraphQL transport to account for the removal of recvAddress and the introduction of affectedAddress. Co-authored-by: github-actions[bot] commit 7b8e8ad552eb3c1e13b7554d2d6ed251a6e0c141 Author: Tony Lee Date: Mon Oct 14 13:48:31 2024 -0400 Mainnet Pools (#19850) ## Description Mainnet Pools Deploy ## Test plan How did you test the new or updated feature? Mainnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7227a4f59da9e46a9a7cde281ca8c6873561fbfe Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Mon Oct 14 09:13:17 2024 -0600 [docs][ci] Update parameters (#19829) Updating parameters. ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5299c18aed1b181743af14e91b378e0a774e42de Author: Ashok Menon Date: Mon Oct 14 15:36:55 2024 +0100 indexer: stop indexing tx_senders and tx_recipients (#19806) ## Description All references to these fields have been removed from readers, so we can stop keeping it up-to-date. Once this change lands, we can also clean these tables from the schema. ## Test plan ``` sui$ cargo nextest run -p sui-indexer ``` ## Stack - #19708 - #19802 - #19803 - #19804 - #19805 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: Indexer no longer fills the `tx_sender` and `tx_recipient` tables. 
- [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7e72423c1ea16b6ace00bfd7bc3e5ca4d0c3f14d Author: Vassilis Legakis Date: Mon Oct 14 17:07:47 2024 +0300 Trace prod (#19841) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 833ca51de608d6e47e6d5ed510e69746f691a7ee Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Mon Oct 14 08:02:42 2024 -0400 Version Packages (#19801) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.10.0 ### Minor Changes - 23c3a3a: DEEP Mainnet Redeploy ## @mysten/create-dapp@0.3.26 ### Patch Changes - @mysten/dapp-kit@0.14.26 ## @mysten/dapp-kit@0.14.26 ### Patch Changes - Updated dependencies [af39b6a] - @mysten/zksend@0.11.7 ## @mysten/graphql-transport@0.2.22 ### Patch Changes - af39b6a: Update to reflect GraphQL schema renaming TransactionBlockFilter.signAddress to .sentAddress. 
- 4d63e50: Update GraphQL transport layer to accommodate change in schema - 2cddd9d: Update the GraphQL transport to account for the removal of recvAddress and the introduction of affectedAddress. ## @mysten/zksend@0.11.7 ### Patch Changes - af39b6a: Update to reflect GraphQL schema renaming TransactionBlockFilter.signAddress to .sentAddress. Co-authored-by: github-actions[bot] commit 23c3a3a6f4a5cf887e26bf32c222161bf67db3c4 Author: Tony Lee Date: Mon Oct 14 07:49:04 2024 -0400 DEEP Pool Redeploy (#19845) ## Description DEEP Pool Redeploy ## Test plan How did you test the new or updated feature? Mainnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c5a0f83247438bfe5753ad4bd4868bc64f6cd22b Author: Patrick Kuo Date: Sun Oct 13 13:36:17 2024 +0100 [Rosetta] - skip balance change response in rosetta if symbol is empty (#19838) ## Description This is a fix to disregard coin balance change response for coins with empty symbol. ## Test plan Unit test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c4a0f35bb04f5817daac5cc4441a4c6094ac2cab Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Sat Oct 12 09:21:41 2024 -0400 DeepBook Server (#19764) ## Description Run a DeepBook server along with the indexer. ## Test plan Integration testing --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: longbowlu commit 2cddd9d8d403e41541a451940ac9f14458b2dba2 Author: Ashok Menon Date: Sat Oct 12 00:58:45 2024 +0100 graphql: Remove TransactionBlockFilter.recvAddress and AddressTransactionBlockRelation.RECV (#19805) ## Description These fields were flagged for deprecation in 1.34 and have been replaced in 1.35 by - `TransactionBlockFilter.affectedAddress` and - `AddressTransactionBlockRelation.AFFECTED` Which offer a similar (but not exactly the same) semantics, but without confusion around the sender address which was often also an implicit recipient of a transaction. ## Test plan ``` sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-rpc --features staging sui$ cargo nextest run -p sui-graphql-e2e-tests ``` ## Stack - #19708 - #19802 - #19803 - #19804 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: `TransactionBlockFilter.recvAddress` and `AddressTransactionBlockRelation.RECV` have been replaced by `TransactionBlockFilter.affectedAddress` and `AddressTransactionBlockRelation.AFFECTED` which offer similar semantics, but without confusion around the sender address which was also often (but not always) an implicit recipient. Now we don't distinguish between senders and recipients -- we only have senders and "affected" addresses which were touched by the transaction in some way. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ec535cf74a9abaa246a7b166cb1c35f0ed4f5c0e Author: Kevin Zeidler Date: Fri Oct 11 16:27:12 2024 -0700 [move-ide] Fix startup errors in move-analyzer VSCode extension (#19573) ## Description This PR fixes issues in the `move-analyzer` VSCode extension that were causing startup errors and failing tests. The two big ones are: * The command IDs in `package.json` have been updated from `sui.*` to `move.*` in the `menus` section to match those in the `commands` section, resolving the startup errors where VSCode reported undefined commands. * The JSON syntax in `language-configuration.json` has been corrected by properly formatting the "brackets" array, ensuring the language configuration is correctly parsed by VSCode. Also includes a fix for a small, unrelated problem: * Missing types dependencies for `parse-json` and `ws` have been added, which resolves an error when trying to run the test suite. ## Test plan I also introduced a small tweak in an attempt to fix an unrelated issue with the test suite. With the fixes introduced to the `package.json` file above, `npm test` now successfully kicks off the test suite, compiles the crate, etc. 
But it fails when it tries to launch Electron in a subprocess. Here's what that looked like on my machine (macOS 14.6.1, m3 chip): ```zsh $ npm test > move@1.0.13 pretest > npm run compile && npm run lint && npm run copy-tests-files > move@1.0.13 compile > tsc -p ./ && cd ../../ && cargo build info: syncing channel updates for '1.81-aarch64-apple-darwin' info: latest update on 2024-09-05, rust version 1.81.0 (eeb90cda1 2024-09-04) info: downloading component 'cargo' Downloaded num-complex v0.4.6 Downloaded num-rational v0.4.2 ... Compiling move-disassembler v0.1.0 (/Users/kz/vcs/sui/external-crates/move/crates/move-disassembler) Compiling move-model v0.1.0 (/Users/kz/vcs/sui/external-crates/move/crates/move-model) Compiling move-docgen v0.1.0 (/Users/kz/vcs/sui/external-crates/move/crates/move-docgen) Compiling move-package v0.1.0 (/Users/kz/vcs/sui/external-crates/move/crates/move-package) Compiling move-analyzer v1.0.0 (/Users/kz/vcs/sui/external-crates/move/crates/move-analyzer) Compiling move-analyzer v1.0.0 (/Users/kz/vcs/sui/external-crates/move/crates/move-analyzer) Finished `dev` profile [unoptimized + debuginfo] target(s) in 42.17s > move@1.0.13 lint > eslint . 
--ext ts --max-warnings 0 > move@1.0.13 copy-tests-files > copyfiles "tests/**/*.move" "tests/**/*.exp" "tests/**/*.toml" "tests/**/*.code-workspace" out > move@1.0.13 test > node ./out/tests/runTests.js Downloading VS Code 1.64.0 from https://update.code.visualstudio.com/1.64.0/darwin-arm64/stable Downloading VS Code [==============================] 100% Downloaded VS Code into /Users/kz/vcs/sui/external-crates/move/crates/move-analyzer/editors/code/.vscode-test/vscode-darwin-arm64-1.64.0 Downloaded VS Code into /Users/kz/vcs/sui/external-crates/move/crates/move-analyzer/editors/code/.vscode-test/vscode-darwin-arm64-1.64.0 Test error: Error: spawn /Users/kz/vcs/sui/external-crates/move/crates/move-analyzer/editors/code/.vscode-test/vscode-darwin-arm64-1.64.0/Visual Studio Code.app/Contents/MacOS/Electron ENOENT Exit code: -2 Failed to run tests ``` I was able to resolve the `ENOENT` by introducing one other small tweak to the test suite: * In `tests/runTests.ts`, the path to the VSCode executable has been adjusted by erasing the `'Visual Studio Code.app/'` path component from the `vscodeExecutablePath` variable. While this fixes the file not found error, it exposes a new issue where the Electron executable is reported as damaged in a MacOS system alert. That's a bit harder to fix, but hopefully this new error gives a clearer idea why the test suite isn't working. 🙁 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Kevin Zeidler Co-authored-by: Adam Welc commit fee600d948a962399406ebfc822a68e4fd0e69e7 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Fri Oct 11 16:42:35 2024 -0600 [docs][ci] Example changes (#19827) ## Description Notifies Slack when examples directory has changes. Updates recent PR to move to an action that is not skipped when doc changes don't exist. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit af39b6ad1ad5c8e032b2f7bba78ab3746e8df90e Author: Ashok Menon Date: Fri Oct 11 23:08:21 2024 +0100 graphql: Remove TransactionBlockFilter.signAddress and AddressTransactionBlockRelationship.SIGN (#19804) ## Description Formally remove - `TransactionBlockFilter.signAddress` and - `AddressTransactionBlockRelationship.SIGN` which were deprecated two releases ago. They have been replaced by - `TransactionBlockFilter.sentAddress` and - `AddressTransactionBlockRelationship.SENT` which offer the same features under clearer names. ## Test plan ``` sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-rpc --features staging sui$ cargo nextest run -p sui-graphql-e2e-tests ``` ## Stack - #19708 - #19802 - #19803 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Formally remove `TransactionBlockFilter.signAddress` and `AddressTransactionBlockRelationship.SIGN` which were deprecated two releases ago. They have been replaced by `TransactionBlockFilter.sentAddress` and `AddressTransactionBlockRelationship.SENT` which offer the same features under clearer names. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ce5829ba175dafe484a70b8cc02703d85c66742a Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Fri Oct 11 14:14:26 2024 -0600 [docs][ci] Updated examples check (#19792) ## Description Use existing action to filter change location. ## Test plan yolo --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a9cd80942cf6c216be89dacdc790ac44f1213521 Author: William Smith Date: Fri Oct 11 15:58:39 2024 -0400 [Bridge Node] Better optimize sig aggregation (#19700) ## Description Adds path to provide min timeout for sig aggregation with preference such that, when specified, we will wait at least `min_timeout` to collect as many sigs as possible before ordering based on the provided preference. Note that the const min timeout value provided here is for verifying functionality for now, and should ideally be replaced with something like P95 latency. ## Test plan Added unit test. 
Also ran on testnet node and observed that the client running this generated a certificate with 11 signatures, while other client generated a certificate with 28 signatures. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5dc3f9189ac80ce52b8dabdc8816b39c57097376 Author: nikos-kitmeridis <145983019+nikos-kitmeridis@users.noreply.github.com> Date: Fri Oct 11 22:45:29 2024 +0300 Adds arden to prod providers (#19819) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 43fdf00599fcd9dd6a2532d770754202a90cc647 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Fri Oct 11 12:17:28 2024 -0700 [GraphQL] Serialize bcs bytes of `TransactionData` instead of `SenderSignedData` (#19768) ## Description This PR modifies which data is serialized to bcs for `TransactionBlock` type. Instead of serializing to bcs `SenderSignedData`, which includes signatures and intent message, it only serializes the transaction data. 
## Test plan Existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: The `bcs` field of `TransactionBlock` has new data. Instead of serializing to `bcs` from `SenderSignedData`, which includes signatures and intent message, it only serializes the `TransactionData` object. Note that this breaks the semantics compared to previous JSON RPC which serialized the intent and signatures. `TransactionData` only includes tx data, but no signatures or intent. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 54dc4d02bdb78dbb234fec1e4ec62fdbb55e18d7 Author: Adam Welc Date: Fri Oct 11 11:14:59 2024 -0700 [trace-view] Fixed skipping same line instructions (#19812) ## Description This PR implements a more principled way to handle skipping instructions located on the same source code line for a smoother user experience. The main issue is handling call instructions, particularly if more than one of them is located on the same source code line. 
If not handled properly, the following effects could take place - if a call instruction was preceded by another instruction on the same source code line, when executing `step` action, the user would have to click `step` button twice on the same line to actually step into the call making an impression that the debugger is somehow stuck (or at least stuttering) - if more than one call instruction was on the same line, two things could happen: - executing the `next` action would not skip over all calls but only over a single one instead (the user would have to click `next` button multiple times to get to the next line) - executing the `step` would cause control flow to immediately enter the subsequent call on the same line instead of waiting for another `step` command from the user This PR also removes code related to stepping backwards in the trace as it's not being used nor maintained making a false impression that it's simply disabled and ready to roll ## Test plan Tested manually that the scenarios described above are handled correctly commit cc2a65fa5921a3ae34475fbab18eb41f3ce16af5 Author: Ashok Menon Date: Fri Oct 11 17:43:32 2024 +0100 graphql: expose MovePackage as BCS (packageBcs) (#19818) ## Description Add a new field -- `MovePackage.packageBcs` -- that exposes the BCS representation of the MovePackage. This complements `bcs` (which is the BCS representation of its `Object` form), and `moduleBcs` (which is the BCS representation of its `Vec`s). ## Test plan New E2E test: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests -- packages/bcs.move ``` ## Stack - #19817 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Adds `MovePackage.packageBcs` to expose the BCS representation of the `MovePackage` struct (as opposed to the BCS representation of the outer `Object` struct. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a0bddfff2929dfdf1d8ab24587d55fbaa271f4fd Author: Ashok Menon Date: Fri Oct 11 15:26:05 2024 +0100 graphql: port MovePackage queries to full_objects_history (#19817) ## Description Spotted that MovePackage queries were still using `objects_history`, which is not going to work after that table gets pruned to two epochs. This PR replaces queries to `objects_history` with queries to `full_objects_history`, with support from `objects_version` and `packages` to fetch the checkpoint sequence number where necessary. ## Test plan Existing tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests ``` And ran the following queries on the mainnet DB: ```graphql query AllPackages($after: String) { packages(first: 5 after: $after) { pageInfo { hasNextPage endCursor startCursor } nodes { address version } } } query AllPackagesBack($before: String) { packages(last: 5 before: $before) { pageInfo { hasNextPage endCursor startCursor } nodes { address version } } } query SystemPackages($after: String) { packageVersions(address: "0x2" first: 5 after: $after) { pageInfo { hasNextPage endCursor startCursor } nodes { address version } } } query SystemPackagesBack($before: String) { packageVersions(address: "0x2" last: 5 before: $before) { pageInfo { hasNextPage endCursor } nodes { address version } } } query UserPackage($after: String) { packageVersions( address: "0xbc3df36be17f27ac98e3c839b2589db8475fa07b20657b08e8891e3aaf5ee5f9" first: 5 after: $after ) { pageInfo { hasNextPage endCursor startCursor } nodes { address version } } } ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 35221c7851e7bfcc54128359f498ed8fe55fed33 Author: Anastasios Kichidis Date: Fri Oct 11 14:24:24 2024 +0100 [Consensus] Disable periodic synchronizer when commit is lagging and far missing blocks (#19763) ## Description This is fixing/improving the behaviour of the periodic synchronizer to: (1) allow the backfilling of missing blocks when commit is lagging but we do have missing blocks that are in a tolerable threshold range from the current highest accepted round (2) stop the periodic synchronizer when commit is lagging and missing blocks are too far in the future Point (1) will allow bypassing a recently observed issue where equivocating blocks make their way into the DAG but they are not committed (due to the current commit rule this is not allowed). Then nodes that are using the commit syncer to catch up might come across blocks that appear on the fetched committed sub dags but do have ancestor dependencies on equivocated blocks that never got committed. Currently, as the synchronizer is disabled, nothing will attempt to fetch the missing block, effectively making the catch up stop. Now with change (1) we will be able to do so. ## Test plan CI/PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a4d9207443eca2926d9ee0131d7785aa2df10c83 Author: Ashok Menon Date: Fri Oct 11 12:40:04 2024 +0100 graphql: tx_senders -> tx_affected_addresses (#19803) ## Description Replace references to `tx_senders` with `tx_affected_addresses` so that we can eventually get rid of the former table entirely. ## Test plan This should be a behaviour preserving transformation: ``` sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-e2e-tests ``` ## Stack - #19708 - #19802 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 86e6e8813cf69b2bab646131e02d496dae688478 Author: Ashok Menon Date: Fri Oct 11 10:56:53 2024 +0100 indexer-reader: remove references to tx_senders (#19802) ## Description Replace uses of `tx_senders` in `IndexerReader`, with other tables that can offer equivalent functionality, so that we can eventually get rid of this table. In transaction filtering, we support filtering by sender using `tx_affected_addresses` (where setting both `sender` and `affected` is equivalent to querying `tx_senders` by `sender`). In event filtering, we support filtering by sender using the `event_senders` table. 
## Test plan Manually tested Indexer Reader on the following package: ```move module test::test; use sui::event; public struct Event(u64) has copy, drop, store; public fun emit() { 10u64.do!(|i| event::emit(Event(i))) } ``` Start the local network: ``` sui$ sui start --force-regenesis --with-faucet --with-indexer ``` Set-up two addresses, and call the test function: ``` sui$ sui client faucet sui$ sui client ptb --call "$PKG::test::emit" sui$ sui client switch --address X sui$ sui client faucet sui$ sui client ptb --call "$PKG::test::emit" ``` Run the query against both addresses: ``` curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryEvents", "params": [ { "Sender": "'(sui client active-address)'" }, null, 5 ] }' | jq . ``` ## Stack - #19708 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7d006efeb900e9b4c56778657e31f4886085fecc Author: Adam Welc Date: Thu Oct 10 16:02:14 2024 -0700 [trace-view] Added support for tracking struct/enum values (#19724) ## Description This PR adds support for tracking variable values for structs and enums. It also does some code cleanup (mostly via renamings) ## Test plan Tested manually commit b97638014d2e188534a2c1e31775fc921a0a81bb Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Thu Oct 10 15:57:35 2024 -0700 [suiop] add --force & image target to suiop image build (#19810) ## Description as title, 1. 
with `--force`, we don't need to manually delete the k8s pod if we need to rebuild an image 2. `--image-target` or just `-t` can allow us build image from a multi-stage dockerfile ## Test plan tested locally, works as expected. backend code already updated & redeployed as well --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e7948ccceeccaf197a85db2ea321366f17efef11 Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Thu Oct 10 15:34:01 2024 -0700 [consensus] Remove unused mysticeti_leader_scoring_and_schedule flag (#19800) ## Description Leader scoring & leader schedule are enabled in mainnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 75264b965769e339e3d478ab8eb5b16e7280037c Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Oct 10 15:05:52 2024 -0700 [indexer] rename watermarks table fields to clarify inclusive upper bounds (#19793) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a49ca578fa995a3ac4e1878f04ab9844024b2193 Author: Ashok Menon Date: Thu Oct 10 22:52:15 2024 +0100 subscriptions: reinstate EventFilter::Any (#19807) ## Description Temporarily re-enable `EventFilter::Any` as it is still in use in some places. This is a partial revert of #19617. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [x] Nodes (Validators and Full nodes): Temporarily re-enable `EventFilter::Any` as a kind of event subscription filter. Note that subscriptions are deprecated. This means they are not officially supported, nor actively maintained. - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d46416f8a275c584c0671b73b5d97e23b12a10f7 Author: Ashok Menon Date: Thu Oct 10 22:11:29 2024 +0100 graphql: unstage affected_addresses (#19708) ## Description Ungate access to the parts of the GraphQL schema that depend on `tx_affected_addresses` as this has now been fully rolled out in the indexer ## Test plan ``` sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-e2e-tests ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Adds support for `TransactionBlockFilter.affectedAddress`, and `AddressTransactionBlockRelationship.AFFECTED` to find transactions associated with any address they touch (sender, recipient, or payer). - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 27b07b7bc214ee97b2af60a5e9a9f210f9e87e24 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Thu Oct 10 12:42:41 2024 -0700 add source to swap (#19797) ## Description Add source for swap wallet address: https://github.com/MystenLabs/apps-backend/blob/main/src/routes/legacy/swap.ts#L85 ![Screenshot 2024-10-10 at 11 02 57 AM](https://github.com/user-attachments/assets/977c2eb8-7109-47c1-8fd8-55521ca58e26) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4d63e5042981add10343f8bd93f249d30189bf29 Author: Ashok Menon Date: Thu Oct 10 20:39:17 2024 +0100 graphql: ObjectOwner::Parent exposed as Owner (#19785) ## Description Now that we no longer expose wrapped objects, we need a way to expose the addresses of object parents when they are other objects that have been wrapped. 
## Test plan Updated tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests sui$ cargo nextest run -p sui-graphql-rpc ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Change `Parent.parent` from an `Object` to an `Owner`. Although it's guaranteed to be an object if it exists, it may be wrapped, in which case it will not exist. Exposing it as an Owner allows queries to extract its ID and also fetch other dynamic fields from it even if it is wrapped. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e97402cbb714cc430596e9e04c4b77a485b5d0b1 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Oct 10 12:13:00 2024 -0700 remove unused fields for bridge indexer (#19783) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e0ebade8e11f4261ee3733074d9d59c62de008d7 Author: John Martin Date: Thu Oct 10 12:09:31 2024 -0700 refactor the main sui-full-node doc (#19759) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 938e4fd3bf12a4c21bf0d6d37aeddf27c38d6a6f Author: Andrew Schran Date: Thu Oct 10 18:48:58 2024 +0100 Consolidate `HashSetAllow` and `AllowedPublicKeys` types (#19755) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1e01b40ea1e182b8caa93abc6e9d45b29159753c Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Oct 10 10:01:30 2024 -0700 [Consensus] improve compression and reduce compaction on `blocks` column family (#19770) ## Description - Switch to sst storage for `blocks` with large block size, to reduce disk footprint. - Switch to universal compaction to reduce write amplifications due compaction. ## Test plan PT: the changes shows no visible effect on performance of the network, with significant (~60%) reduction in `blocks` column family foot print. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit adde1a9908fd3e5ca7cafd01544a08a22b33c717 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Thu Oct 10 09:31:19 2024 -0700 Do not continue transaction execution attempts after epoch has ended (#19780) Previously, we could hit the panic here due to continually retrying a transaction from the previous epoch. commit 605f166f033607d484fda35b5b9eea53e04c8b51 Author: Manolis Liolios Date: Thu Oct 10 19:25:33 2024 +0300 [gql][mvr] Named typed queries go through the package resolver (#19772) ## Description Make sure named type queries go through the package resolver so `repr` has the correct type (not using latest). ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8be878a684054c26362049e097c464c75d157f43 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Thu Oct 10 09:01:42 2024 -0700 Reduce log verbosity for object writes (#19776) Individual object writes should log at TRACE commit e2cc3dfec3eca6256bda9dc5f445754a5d24bbdb Author: Vassilis Legakis Date: Thu Oct 10 17:51:26 2024 +0300 Trace OAuth provider on Prod (#19784) Enable Trace OAuth provider on Prod commit 2534aa4a0d55a08e9d440e84079ee2f9a3942a2a Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Oct 10 10:40:45 2024 -0400 Version Packages (#19788) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.9.0 ### Minor Changes - 89f2e59: Mainnet packages Co-authored-by: github-actions[bot] commit 89f2e591d4b2b41598a2184eed59627844bf54f1 Author: Tony Lee Date: Thu Oct 10 10:22:25 2024 -0400 Mainnet Packages (#19787) ## Description Constants update for mainnet ## Test plan How did you test the new or updated feature? Mainnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 157c732bda8026f56a63157a1c560725fe0b6b7f Author: Ashok Menon Date: Thu Oct 10 14:34:53 2024 +0100 graphql: Event.transactionBlock (#19669) ## Description Add the ability to fetch the transaction block that emitted the event. ## Test plan New E2E tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests sui$ cargo nextest run -p sui-graphql-rpc -- test_schema_sdl_export sui$ cargo nextest run -p sui-graphql-rpc --features staging -- test_schema_sdl_export ``` ## Stack - #19670 - #19671 - #19672 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Add `Event.transactionBlock` for fetching the transaction that emitted the event, as long as the event is indexed (not just executed). - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6a579d6be31245b360df5dd76afc7b8baafd1429 Author: Ashok Menon Date: Wed Oct 9 23:34:26 2024 +0100 indexer: backfill for tx_affected_addresses ## Description This backfill has already run -- recording it in the repo for posterity. ## Test plan Already run on the production DB. commit 81fcff61ded72986fa4e8065736174c72494fdac Author: Ashok Menon Date: Wed Oct 9 18:46:31 2024 +0100 fix(indexer): fix default db path in SQL ingestion scripts ## Description The DB paths need to include a password or they will fail to connect. ## Test plan Run scripts locally. 
commit 372f6b62e3746ac3445305b5211d7e39cb99bfa8 Author: Ashok Menon Date: Wed Oct 9 18:45:23 2024 +0100 chore(indexer): reduce visibility of crates ## Description They don't need to be public, they are only referred to from within the crate. commit a9b5784e6183d9f83ef91bc6c29d5506247c5e5a Author: Ashok Menon Date: Wed Oct 9 18:45:07 2024 +0100 chore(indexer): drop redundant curly braces commit b902e8213d5ff7f62d2ad6baf4091527f21d227d Author: Ashok Menon Date: Wed Oct 9 18:41:41 2024 +0100 indexer: tx_affected_objects ingestion-based backfill ## Description Re-implement the `tx_affected_objects` backfill to use the ingestion-based backfill system, as the previous approach was too slow. ## Test plan - Run the indexer locally, against a remote store. - Create a copy of its `tx_affected_objects` table and create a fresh table to backfil into. - Find the backfill high watermark: ```sql SELECT MAX(sequence_number) FROM checkpoints; ``` - Run the backfill against the same remote store, between `0` and the high watermark. - Confirm that the table you created with backfill has the same contents as the backup: ```sql -- All of these should be the same count SELECT COUNT(1) FROM tx_affected_objects; SELECT COUNT(1) FROM tx_affected_objects_backup; SELECT COUNT(1) FROM tx_affected_objects INNER JOIN tx_affected_objects_backup USING ( affected, tx_sequence_number, sender ); ``` commit 778e78a6b825378ae3f804b327a72404032be86a Author: Ashok Menon Date: Thu Oct 10 11:44:38 2024 +0100 feat(package): canonicalize type tag (#19778) ## Description Add a function to the package resolver to canonicalize a type (ensure all the package IDs it mentions are defining IDs) ## Test plan New unit tests: ``` sui$ cargo nextest run -p sui-package-resolver ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ba47b6cd292cb70f0b858f492e64b768c9384128 Author: Anastasios Kichidis Date: Thu Oct 10 10:51:32 2024 +0100 [AuthorityAggregator] EpochEnded to be retryable (#19736) ## Description E2e localnet tests appear to fail sporadically due to the `EpochEnded` error. Although it should not be the common case it's possible to happen when a transaction has reached a node's RPC endpoint, but epoch changes in the middle. In this case an `EpochEnded` can be thrown when trying to access the AuthorityEpochStore. Making the error retryable will allow the quorum driver to retry the transaction and hopefully succeed. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 101760e662bec619ab95c420f0dd1e19ae02f3a1 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Wed Oct 9 22:55:07 2024 -0700 remove `ContinueWithTimeout` in authority aggregation (#19775) ## Description as title. This state is not used today. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 066a6b6a9087101acba990ccf818e8c928a141fe Author: Brandon Williams Date: Mon Aug 19 09:07:30 2024 -0500 rest: introduce transaction resolve endpoint commit ea27dff85d82685aa64dc9d5d96d1a3442984562 Author: Brandon Williams Date: Mon Aug 19 09:35:30 2024 -0500 rest: introduce transaction simulation endpoint commit bf9ac998ed330d754e6ffe0b14a4285568a56a54 Author: Brandon Williams Date: Mon Aug 19 09:06:03 2024 -0500 transaction-executor: add simulate_transaction trait method commit 8d4797718e19c4dba5781e677a1f67745bff0400 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Oct 9 21:54:39 2024 -0400 graphql: return live objects from consistent object query (#19737) ## Description we plan to remove deleted / wrapped objects from objects_snapshot, this prep pr is to make the consistency query no longer return deleted / wrapped objects as a whole, after the removal. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b7518305c8fa3239c158ef793e8db37f3c817095 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Oct 9 17:11:13 2024 -0700 [DB] remove min level to compress (#19777) ## Description Lz4 should be cheap enough to enable on level 1. ## Test plan PT: looks like disk usage growth at the start is lower, while validator CPU usage doesn't seem to change. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c34376dc9399acccb2a5b93eefbdf5caba9d2e5e Author: Sam Barani Date: Wed Oct 9 18:31:30 2024 -0500 [docs] USDC + Stablecoins in docs (#19709) ## Description Describe the changes or additions included in this PR. Added docs for USDC / stablecoins ## Test plan How did you test the new or updated feature? `cd docs/site && pnpm start` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Sam Barani Co-authored-by: Ronny Roland commit 3335e61d65c39fefe2820ade4ddd43c0fb4bfaad Author: Anastasios Kichidis Date: Wed Oct 9 22:48:55 2024 +0100 [Consensus] abort submitting checkpoint sigs to consensus (#19223) ## Description As part of the PR https://github.com/MystenLabs/sui/pull/18398 we are skip submitting checkpoint signatures when we have received verified checkpoints. Although this prevents the node from sending unnecessarily signatures, it's still possible to unnecessarily attempt to submit a signature tx after this has reached to consensus adapter. This PR is addressing that part. ## Test plan CI/PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b898e3267a17b71f8d2779be2a912391ccebb4ca Author: jk jensen Date: Wed Oct 9 15:08:06 2024 -0600 [suiop] simplify pulumi init subcommand (#19718) ## Description Preparing for the addition of the typescript ## Test plan non-functional change --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d09e3ed58981a8b82f0e555e9b2ee7618aae963f Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Oct 9 16:44:08 2024 -0400 indexer: exclude deleted/wrapped objects from snapshot (#19455) ## Description https://mysten-labs.slack.com/archives/C0578KFD9D2/p1726765436840499 this will cut storage further down ## Test plan objects snapshot ingestion test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit dcab1a58409f276f088c32883bc587379f22e61b Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Wed Oct 9 12:05:53 2024 -0700 [CI] Fix action workflow (#19771) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 95792043bb29cd1b42a4596f3d238f6353329a76 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Oct 9 13:33:21 2024 -0400 indexer: execute objects mutation & deletion in parallel (#19766) ## Description - before this pr, deletion futures wait for mutation futures, this pr makes it parallel - also added object ingestion tests ## Test plan object ingestion tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d923c836f3fee95e2f278d61c13267e3838352a4 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Oct 9 13:22:59 2024 -0400 indexer ingestion: more eager commit (#19711) ## Description commit more eagerly, prev we only commit either filling the whole batch, or drained the un-processed, or at end of epoch, this pr changes it to commit more eagerly. I was looking at average indexer extra data lag: it's about 2s from download to commit https://metrics.sui.io/goto/mj7m62zHg?orgId=1 however commit latency itself is about 1.7s and index is all in mem and very fast https://metrics.sui.io/goto/W2FGehkHg?orgId=1 ## Test plan indexer ingestion test, also added one for end of epoch ingestion. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4cebe0371e8c45cc799d6f9880e39d15544e32d8 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Wed Oct 9 10:55:10 2024 -0600 [docs] DBv3 updates (#19704) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4412f8f15e963921b7519f723df7c4bfb4b8c1db Author: Eugene Boguslavsky Date: Wed Oct 9 09:06:02 2024 -0700 Fix ts-e2e test (#19765) ## Description Fix ts-e2e test ## Test plan 👀 commit 2c421c3451a7dfbb553d7a91d7007816587b624b Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Wed Oct 9 08:43:57 2024 -0400 [data ingestion] introduce reducer functionality (#19656) ## Description The PR introduces a new `reduce` functionality to the data ingestion framework. The functionality is optional and will not impact existing workflows. When enabled, it allows for the accumulation of a batch of checkpoints for post-processing. The watermark in the progress store is now updated after the reducer has completed its job. The old method, `Worker::save_progress`, is deprecated in favor of this approach. 
As a proof of concept and to demonstrate the functionality, the archival workflow has been migrated to utilize a reducer. Note that the previous implementation would still work, but using a reducer for batch aggregation is more intuitive compared to accumulating state within a worker --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0773e2f568ea09aaade3bcbac2c923e0897fe5a0 Author: Maria Siopi Date: Wed Oct 9 13:21:25 2024 +0300 [graphql] add query epochs functionality (#19357) ## Description Implementation of paginated query for epochs in GraphQL ## Test plan How did you test the new or updated feature? Added a pagination.move test under the sui-graphql-e2e-tests crate. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Added support for paginated queries for epochs. This allows users to fetch epochs data in a paginated format. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a574ef074791d56c798864ebfbbefd7ff789d6af Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Oct 9 02:41:04 2024 -0700 [CI] rebalance tests (#19758) ## Description `test-extra` finishes much earlier than `test`, so moving graphql test to `test-extra`. 
## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ace69fa8404eb704b504082d324ebc355a3d2948 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Oct 8 14:33:43 2024 -0700 [Fastpath] execute transactions certified via fastpath (#19638) ## Description - Add support to execute certified transactions from consensus output. - Refactor how consensus commit handler processes blocks from consensus commits, so the logic can be reused for consensus transaction handler. - Small refactors in CommitConsumer and DagState. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a7863b75d4763b106747f636c6e0a4b3812bf344 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Tue Oct 8 17:07:20 2024 -0400 indexer fix: chunk to avoid PG parameter limit (#19754) ## Description - fixed a bug that caused mainnet indexer to stop, also got reported in https://github.com/MystenLabs/sui/issues/19542, specifically here when tx has many input objects / affected objects / recipients / affected addresses etc. 
the expanded query will exceed the PG parameter limit of 65535 - also added ingestion tests for big tx indices & event indices and better error tracing ## Test plan added ingestion tests for tx and event indices --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 037f13e3e413dced1ea6d6ac6b52d7ac27642ba8 Author: Eugene Boguslavsky Date: Tue Oct 8 12:36:38 2024 -0700 Create GraphQl staging build for `ci` and `devnet` environments (#19749) ## Description Create GraphQl staging build for `ci` and `devnet` environments. See https://linear.app/mysten-labs/issue/DVX-329/devnet-and-ci-graphql-built-with-staging-feature-enabled for details ## Test plan Will be testing this after it lands commit f0a892c540bc15b865c3e3b170c258be4cb58a50 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Oct 8 18:45:13 2024 +0000 Version Packages (#19753) commit 50e8c8a6b4f3eda7bc72daf47f12fb92981a7523 Author: Ashok Menon Date: Tue Oct 8 19:36:51 2024 +0100 indexer: latency metrics for objects version (#19750) ## Description Add missing metrics and logs for latency on objects version and objects version chunks. Also standardized some of the function names (object versions -> objects version). ## Test plan Ran local indexer against mainnet remote store. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c0fb6d61cce3a87f77b44f7d3c941d18b483c771 Author: Tony Lee Date: Tue Oct 8 14:28:52 2024 -0400 Deepbook SDK constants update (#19752) ## Description Update constants ## Test plan How did you test the new or updated feature? Mainnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c5647d166b42e9f5693b895288b6056c710624d2 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Oct 8 13:51:42 2024 -0400 Version Packages (#19751) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/deepbook-v3@0.8.4 ### Patch Changes - 5df4e5e: Test Mainnet Packages Co-authored-by: github-actions[bot] commit 5df4e5ec528a7a2c3b27dcfd014ba482e1d0ab21 Author: Tony Lee Date: Tue Oct 8 13:39:40 2024 -0400 Mainnet Packages (#19748) commit aaa6fc8d6680c19c4971921c93ae252deb6dee25 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Oct 8 10:38:53 2024 -0700 [bridge-doc] make bridge runbook more visible (#19746) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 298d90b221f824c5cb50493401d10bd775d07b44 Author: Jort Date: Tue Oct 8 10:23:34 2024 -0700 [move] Flag compatibility rework, reduce necessary flags (#19725) ## Description Flags have been updated as such: - never allow breaking of public linking, removes flag `check_datatype_and_function_linking` (check remains) - merged flags which are always set together `check_datatype_layout`, `disallow_change_datatype_type_params`, `disallow_new_variants`, into `check_datatype_layout` - remove `check_friend_linking` flag ## Test plan Changed the tests to better align with the new usage of the flags. No cases check for `check_datatype_and_function_linking` set false, individual cases of `check_datatype_layout`, `disallow_change_datatype_type_params`, `disallow_new_variants` merged together. Test cases where friend linking is set true are removed. --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8991fb8e7a867e707eca9fac0a64d8eaec34b2c0 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Tue Oct 8 08:50:25 2024 -0700 [Faucet] Add balance metric (#19681) ## Description Added a balance metric for alerting when we need to top it up. ## Test plan Manual checks vs dashboard and `sui client balance` image ``` ➜ ~ sui client balance ╭────────────────────────────────────────╮ │ Balance of coins owned by this address │ ├────────────────────────────────────────┤ │ ╭──────────────────────────────────╮ │ │ │ coin balance (raw) balance │ │ │ ├──────────────────────────────────┤ │ │ │ Sui 1959956440480 1.95K SUI │ │ │ ╰──────────────────────────────────╯ │ ╰────────────────────────────────────────╯ ➜ ~ ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3bea024e0ddeb880784473281bc7fea63140cacc Author: Eugene Boguslavsky Date: Tue Oct 8 08:30:44 2024 -0700 Fix release type trigger (#19729) ## Description Fix release type trigger ## Test plan 👀 commit 6d9b1f98f595ff9cfa5a45ab2ec13b3a46cb1236 Author: Anastasios Kichidis Date: Tue Oct 8 09:59:56 2024 +0100 [Consensus] Garbage Collection - 2 (#19385) ## Description This is the second part of Garbage Collection which implements the following ticked next steps as outlined on the previous PR - [x] BlockManager to respect the gc_round when accepting blocks and trigger clean ups for new gc rounds - [x] Skip blocks that are received which are <= gc_round - [x] Not propose ancestors that are <= gc_round - [x] Subscriber to ask for blocks from `gc_round` when `last_fetched_round < gc_round` for a peer to prevent us from fetching unnecessary blocks Next steps: - [ ] Re-propose GC'ed transactions (probably not all of them) - [ ] Implement new timestamp approach so ancestor verification is not needed - [ ] Harden testing for GC & edge cases ## Test plan CI/PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f780402278d0e4d4028e80b1b10c1be19d3f919d Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Mon Oct 7 22:56:02 2024 -0700 [GraphQL] Rework Event type (#19654) ## Description Accessing the `bcs` field of an `Event` is rather confusing, as it is not the whole event BCS encoded, but just the contents of it, more specifically, the BCS bytes of a `MoveValue`. This PR removes the #flatten in `Event`, adds a `contents` field to the type, and adds the `bcs` field to the `Event` type which encodes the whole `Event`. ## Test plan Existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: The `Event` type has a new field called `contents`, which is the event's content value as a `MoveValue`. This replaces the previous scheme that flattened the `MoveValue` type in the `Event` type. A `bcs` field was also added, which represents the Base64 encoded BCS serialized event. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 872af5a12f85b8cd54833c1115f3945dca749fc7 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Oct 7 22:05:21 2024 -0700 [bridge-doc] add how to update url after finalization (#19728) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Bridgerz commit 689e69a4031d713b094d3ab54bce6a8fe1ad8c06 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Mon Oct 7 21:05:31 2024 -0700 [indexer][watermarks][2/n] committer writes upper bounds to watermarks table (#19649) commit ff9762e75fb96f5f9fa744add25bbe83ae12865b Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Mon Oct 7 20:44:36 2024 -0700 [indexer][watermarks][1/n] Modify PruningOptions to point to a toml file of epochs_to_keep and optional per-table overrides (#19637) commit 906dce8ae3135c1a7bc729986a729a89770ae6ac Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Oct 7 20:14:32 2024 -0700 Delete user sigs in process_pending_checkpoint (#19743) Confirmed fix via: ./scripts/simtest/seed-search.py --test simtest test_simulated_load_rolling_restarts_all_validators commit 15b64b408ddef9c32044d8da1d61ea0e656b0a96 Author: Brandon Williams Date: Mon Oct 7 21:37:03 2024 -0500 jsonrpc: add instrumentation to read apis (#19738) commit faa3eb04e650dbbaf1f23bbeeaa85122e2b2c5fa Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Oct 7 18:43:28 2024 -0700 [Consensus] minor fixes (#19742) ## Description - Avoid busy looping in commit sync, when failing to fetch from every peer. - Make the error to serve connections debug, because the issue is on the requestor's side. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 60867d4b9d0906786b1479a64323be6dfc697d23 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Oct 7 16:48:38 2024 -0700 Add service.name when it is not found (#19740) Tested with: $ cd docker/jaeger-local $ docker compose up $ cargo run --bin import-trace -- --trace-file /tmp/suitrace.out Compiling telemetry-subscribers v0.2.0 (/Users/marklogan/dev/sui/crates/telemetry-subscribers) Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.60s Running `target/debug/import-trace --trace-file /tmp/suitrace.out` importing trace with service name "sui-node-1728342184" sending 271 spans to otlp collector sending 512 spans to otlp collector .... sending 512 spans to otlp collector all spans imported Imported traces are then visible in the jaeger UI with the correct service.name commit 8ecb26f2e7115930b9931f382b87367bca1514b5 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Oct 7 16:04:33 2024 -0700 Remove debug output (#19730) commit 6b67106afcce971fcada3384d9a780b03240f9eb Author: Adam Welc Date: Mon Oct 7 14:29:00 2024 -0700 [move-ide] Added support for displaying diagnostic notes (#19727) ## Description This PR adds support for displaying the "notes" part of the compiler diagnostics. The issue it was not happening before is that compiler diagnostics do not map 1:1 to LSP diagnostics - a note does not have location and the only "message" without location in the LSP diagnostic is the main one. 
The challenge was to display notes without cluttering the output too much and after considering multiple options (e.g., appending notes to the main message, using `source` field of the LSP diagnostic) we settled on displaying notes the same as secondary compiler diagnostic labels with the location of a note being set to that of the main message. For what it's worth, it seems like this is what `rust-analyzer` is doing as well. Here are some examples of how the notes are now displayed: 1. Main message and single note: ![image](https://github.com/user-attachments/assets/3e5eb07d-7cdd-42bf-aa95-aa1368d218ac) 2. Main message, single secondary label and single note: ![image](https://github.com/user-attachments/assets/a909c1c7-6146-4e85-990f-2b05c2892fb7) 3. Main message and two notes: ![image](https://github.com/user-attachments/assets/148bbcdb-3468-4691-bbdd-a7a6ff495f6b) ## Test plan All existing tests must pass ## Test plan All existing tests must pass --------- Co-authored-by: Todd Nowacki commit dcb51904c166447dc03922bb74e3820da11c1758 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Mon Oct 7 21:21:54 2024 +0000 Version Packages (#19734) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/sui@1.12.0 ### Minor Changes - 5436a90: Update GraphQL schemas - 5436a90: add deriveDynamicFieldID util ## @mysten/create-dapp@0.3.25 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 - @mysten/dapp-kit@0.14.25 ## @mysten/dapp-kit@0.14.25 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 - @mysten/wallet-standard@0.13.7 - @mysten/zksend@0.11.6 ## @mysten/deepbook@0.8.21 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 ## @mysten/deepbook-v3@0.8.3 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 ## @mysten/enoki@0.4.5 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 - @mysten/zklogin@0.7.22 ## @mysten/graphql-transport@0.2.21 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 ## @mysten/kiosk@0.9.21 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 ## @mysten/suins-toolkit@0.5.21 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 ## @mysten/wallet-standard@0.13.7 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 ## @mysten/zklogin@0.7.22 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 ## @mysten/zksend@0.11.6 ### Patch Changes - Updated dependencies [5436a90] - Updated dependencies [5436a90] - @mysten/sui@1.12.0 - @mysten/wallet-standard@0.13.7 Co-authored-by: github-actions[bot] commit a88389718d06eaf49d6e6e7c3f92bea46baa259d Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Mon Oct 7 17:09:09 2024 -0400 [data ingestion] kv store: skip items over the DDB limit (#19735) ## Description 
skips items over the DDB limit ## Test plan already applied to existing kv pipeline --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5dd49e7a4d53312a053a8d169982a5d0fc0e4215 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Oct 7 14:08:30 2024 -0700 [Consensus] adjust sync params (#19714) ## Description Reduce the blocks per request from synchronizer, since synchronizer generally uses smaller timeouts. ## Test plan CI PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5436a9065e54438eb749c75cbf6e9055d40bdde7 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Oct 7 12:26:21 2024 -0700 derive dynamic field object and update GraphQL schemas in ts sdk (#19733) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e97a3b2789441788ec400f6f39c720ca8a5c60fc Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sun Oct 6 22:44:41 2024 -0700 [proxy] use validator name when logging metrics for bridge (#19706) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f23da6602ea5f33d368e02c318166a8492228304 Author: Jort Date: Sun Oct 6 21:55:42 2024 -0700 [cli] Implement upgrade compatibility checks client side (#19562) ## Description Introduce client side upgrade compatibility checking, allowing users to check before TX submission if an upgrade will be successful. Improves the output of errors by relying on the move-binary-format crates checks to list the issues with the error to the user that are found. ## Test plan manually tested see comment below --- ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: User will see a different error when an upgrade error is thrown which includes the details of each error. 
- [ ] Rust SDK: - [ ] REST API: commit 2c1b6e24d25b219aa3272e0d9bed89e06b9bc629 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Oct 4 19:42:18 2024 -0700 [bridge] report bridge voting power when node starts (#19694) ## Description so we can use this in the dashboard ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2aa1d024a2a812d5bcea83d58a5540c149533028 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Oct 4 15:31:37 2024 -0700 [Consensus] use dynamic timeouts in commit sync (#19705) ## Description Using a flat high timeouts in commit sync can stall fetching for too long and hurt throughput, when slow hosts block fetching blocks for the max timeout. Using timeouts that are too short can be problematic too. Instead, start the request timeout at 10s and gradually increase it with retries. ## Test plan CI PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a6539b1ec99a1f034dde8af2d0139a5aeb6f9f75 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Fri Oct 4 15:22:37 2024 -0700 swap in extension (#19689) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fedfb0dae81b568349de968d98e50bccb450423e Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Oct 4 13:54:05 2024 -0700 Delete consensus data from tables when it is no longer needed (#19697) This PR is in preparation for data quarantining. The DQ PR will move the tables affected by this PR entirely into memory, and crash recovery will be driven by re-processing consensus commits. However, when we deploy DQ to a validator for the first time, we will restart in a state where uncertified consensus commits have already been marked as processed, so they will not be re-processed on startup. This means that upon starting up for the first time, some data will be on disk instead of in memory. There are two possible solutions to this: - All reads fall back to the database. This is a lot of ugly code, and is slow. - All data in the database is read into memory at startup. This is fast and simple. This PR bounds the amount of data we will have to read at startup in order to make the second option feasible. 
commit 3b665eb7cfe0c73d55883d1b3ca272e1067a1de4 Author: Brandon Williams Date: Thu Oct 3 17:47:18 2024 -0500 rest: enable all unstable apis by default Until the rest service as a whole is "stabalized" with a sane set of default stable apis, have the default be to enable all unstable apis. commit 13c1ec7b2a7a18e304444d63277c20f8f52a1bc2 Author: Brandon Williams Date: Thu Oct 3 16:08:27 2024 -0500 rest: stabalize some checkpoint apis Stabalize the get and list checkpoints apis as well as introduce a new list full checkpoints api. commit 591f96c95ee4cdef7c10c405993f5b6abd8273eb Author: Brandon Williams Date: Thu Oct 3 12:51:24 2024 -0500 rest: update openapi docs for stable apis commit ea81ea510829cd2a11ff333a776f259e26d625bd Author: Brandon Williams Date: Wed Oct 2 16:41:01 2024 -0500 rest: use software_version as service version commit 7d0548675713272027d194b553fce2e3ad0a64c4 Author: Brandon Williams Date: Wed Oct 2 12:42:35 2024 -0500 rest: enable marking apis as stable or unstable Enable marking APIs as either stable or unstable. By default unstable APIs are disabled and not served but the `enable_unstable_apis` can be set to `true` in order to enable serving of unstable APIs. Example enabling enable_unstable_apis in fullnode.yaml: ```yaml enable_experimental_rest_api: true rest: enable_unstable_apis: true ``` commit 512a4e0d28ddf151239821f97c2152c1b5bd427c Author: Brandon Williams Date: Wed Oct 2 09:04:40 2024 -0500 chore: pull in sui-sdk-types from crates.io commit e67a2f40db3c68d879ec723c7a73d3ba27f4099b Author: Joe Hrbek <123987499+suiwombat@users.noreply.github.com> Date: Fri Oct 4 14:28:20 2024 -0500 [sui-proxy/dockerfile] (#19717) ## Description add a sui-proxy dockerfile ## Test Plan tsia --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f316594164c4bd2830ffa9955dc17ccb3d68c255 Author: Eugene Boguslavsky Date: Fri Oct 4 11:52:39 2024 -0700 Sui v1.36.0 Version Bump (#19715) ## Description Sui v1.36.0 Version Bump ## Test plan `cargo build --release` commit 2e97e71ec438d1e8e1217c4e0b87a6bbc875d4f5 Author: Eugene Boguslavsky Date: Fri Oct 4 11:22:29 2024 -0700 Sui v1.35.0 Framework Bytecode Snapshot (#19713) ## Description Sui v1.35.0 Framework Bytecode Snapshot ## Test plan `cargo run --bin sui-framework-snapshot` commit be82841de3a971a959c0295f3e132b18001e2910 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Fri Oct 4 11:21:01 2024 -0700 [indexer] Simplify setting up a test indexer writer or reader (#19663) ## Description Currently, tests that require spinning up an indexer do so through `start_test_indexer_impl` or `start_test_indexer`. This is further complicated by a single `start` function that accepts a `ReaderWriterConfig`. We can simplify this by exposing two functions with optional parameters that can be configured by the caller. Part of a stack of PRs for watermarks 1. simplify setting up test indexer: https://github.com/MystenLabs/sui/pull/19663 2. update pruner config: https://github.com/MystenLabs/sui/pull/19637 3. committer writes upper bounds https://github.com/MystenLabs/sui/pull/19649 4. pruner writes lower bounds: https://github.com/MystenLabs/sui/pull/19650 5. pruner prunes (wip) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d61f3f498de9f694f26392bd15d5dfef8beb4c9a Author: jk jensen Date: Fri Oct 4 12:20:00 2024 -0600 [suiop][incidents] add notion integration (#19422) ## Description Automagically create incidents in the selection notion db when we select them for review. - make combined `User` abstraction - make it so we can insert suiop incidents into the db - ask the user for confirmation to insert Next steps - combine this db with the postmortems one so we can update in one place ## Test plan Tested e2e today in #incident-postmortems --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4c1d51408c6c6e6886be4922b71785872e0f631b Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Fri Oct 4 11:03:14 2024 -0700 [consensus] return quorum rounds from round prober (#19703) ## Description To be used in smart ancestor selection [PR#19605](https://github.com/MystenLabs/sui/pull/19605) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9d23ec4e86c9662d915451b29957c02c395d0c39 Author: Eugene Boguslavsky Date: Fri Oct 4 17:06:20 2024 +0000 Sui v1.35.0 Framework Bytecode Snapshot commit 34a399568fd5bcaf2915802171cac82a2c61b71f Author: Emma Zhong Date: Fri Oct 4 10:40:06 2024 -0700 [fix] return empty results when provided object lookup keys are empty (#19712) ## Description This PR mitigates the issue where we select all unfiltered results when the `load` function uses `or_filter` and the provided keys are empty. ## Test Plan Tested locally against testnet db. The symptom of this bug was a dry run timeout when the user attempts to fetch gas object info so I don't know how we can repro the issue in local unit test without a big db. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bfe1c64a258c6c629404f789306fcafbb28dd81e Author: Ashok Menon Date: Fri Oct 4 17:38:47 2024 +0100 fix(graphql): restore --node-rpc-url flag (#19710) ## Description I accidentally turned it into a positional argument in a recent refactoring. This change changes it back. ## Test plan ``` sui$ cargo run --bin sui-graphql-rpc -- start-server --help ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0a293017773ee27fd09b9c8b85017f0a259e0d3e Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Oct 3 17:10:06 2024 -0700 [Consensus] remove PeerState from CommitSyncer (#19696) ## Description - With less fetch parallelism, randomized target authorities should be acceptable for load distribution. And it is easier to reason about. - Add a metric for fetch failures per peer. ## Test plan CI PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 59f115babfe9b31b0634d7b14cbd806957cd525d Author: Ashok Menon Date: Fri Oct 4 00:12:44 2024 +0100 core: event sending module relocation (#19672) ## Description Make it so that the module that is associated as the "sending module" for an event is relocated by linkage. This fixes an issue (captured in a regression test) where if the function that is called by the PTB is from some upgraded version of the package, it cannot be found because the Event's package ID still points to the original version of the package. ## Test plan Introduced a regression test -- before the change, the "sending module" in the response returned `null`. 
``` sui$ cargo nextest run -p sui-graphql-e2e-tests -- sending_module ``` ## Stack - #19670 - #19671 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Protocol version bump to 62, changing the semantics around the package/module in the PTB that the event originated from. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8213cfe11930d49355882c82626f9cf5c25d98c4 Author: Ashok Menon Date: Fri Oct 4 00:12:34 2024 +0100 graphql: add flag to skip db compatibility (#19671) ## Description Add a flag to optionally skip database migration compatibility checks. This is helpful when trying to connect a local build up to a production database to test a specific change, even if you are aware that other queries may not be compatible. To accommodate this change, the compatibility check was also moved into `ServerBuilder` where the config is readily available. This also slightly simplifies the `Server` itself, which no longer needs to hold onto its own instance of the `Db`. ## Test plan Connected a local build of GraphQL to the production DB, which it is not compatible with. ## Stack - #19670 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Add `--skip-migration-consistency-check` to allow bypassing the database compatibility checks. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 80602301b6634e2a9ad5bfdc4c71c40a3e528193 Author: Ashok Menon Date: Fri Oct 4 00:12:24 2024 +0100 chore(graphql): deduplicate configs and clap args (#19670) ## Description Avoid duplicating fields for configs that are accepted as flags from the command-line and can also be read from TOML file. Use the same struct for both purposes but deocrate it with both `clap` and `serde` (or `GraphQLConfig`) annotations so that it can serve both purposes. ## Test plan This change should preserve the config/command-line interface -- checked by running the GraphQL service with the same invocation, referencing all flags, before and after the change. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 66f9da1f2a572eabb3513589c00d6b4c35adc67d Author: Ashok Menon Date: Fri Oct 4 00:04:54 2024 +0100 fix(backfill): chunk up writes to DB (#19699) ## Description When a given range of transactions generated too many affected object IDs, the backfill script would stop, because of limits in postgres' protocol over the wire. By chunking up the writes into batches of 1000, we avoid hitting this limit. ## Test plan Running this backfill against the DBs now -- and it is not crashing. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2e58d1d04e4c02fd6795d53a772637db93f66c1b Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Thu Oct 3 15:40:26 2024 -0700 Remove use of assigned_shared_object_versions (#19688) The new table can be used with or without the random beacon. The only requirement is that we don't deploy this commit to a validator that is currently using the old table. An assert is added which will stop us from doing this accidentally. commit 183b14b785a7e6b8a60c2309f8aa73bd4af36eb9 Author: Xun Li Date: Thu Oct 3 15:10:35 2024 -0700 [Index] Fix a bug in epochs system state json backfill (#19695) ## Description The backfill for system state json was missing data for epoch 0. ## Test plan Run against testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8761bf8b70de387bf9ec35025493688482793c60 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Thu Oct 3 13:04:08 2024 -0700 Enable writeback cache by default (#19508) Writeback cache has been stable on all mysten fullnodes plus our testnet validator for several months. 
Time to enable it by default everywhere commit 50ddd15ff617eb0c5bd9a696c8f2fe31e57e03d4 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Oct 3 12:19:50 2024 -0700 [bridge indexer] retry getting block number and add more logs when it err (#19653) ## Description as title. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3970fe973d9eb7b3e2f554362d539963c5358309 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Oct 3 14:27:56 2024 -0400 segregated indexer: handler trait and migrate snapshot_objects table (#19623) ## Description main changes are: - `handler` trait for each segregated indexer handler including impl. of common codes - migrate objects_snapshot table to the handler - integration tests for objects_snapshot table after migration ## Test plan added integration tests for objects_snapshot table --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d6adff2b8c8f1a14291122c0a510ebb1abb7300c Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Oct 3 10:34:49 2024 -0700 [bridge] add sui watch dog and a few observables (#19612) ## Description This PR adds 1. `Observable` traits that each perform an observation task to monitor bridge status. 1. `SuiWatchDog` that facilitates running `Observable` tasks 2. `EthVaultBalance` `SuiBridgeStatus`, and `EthBridgeStatus` that monitors vault balances on eth vault, whether sui bridge is paused and whether eth bridge is paused respectively. ## Test plan running locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit dfd2b944da91345ea1ceb65e5b35d0d97c0ad696 Author: Jort Date: Thu Oct 3 10:16:04 2024 -0700 [faucet] request status should return bad request for invalid UUIDs (#19662) ## Description faucet changes the request status for status queries when UUIDs are not parsable. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5ee7eb53f1f1d311c4161c1ec76377913f09c5e0 Author: Jort Date: Thu Oct 3 10:08:07 2024 -0700 ValidateMetadata Move struct doesn't match typo (#19647) ## Description rename to "match" for typo in validator error string ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 43642883698f486037f7fa429bc0bc6028d34bb4 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Thu Oct 3 09:44:02 2024 -0700 Add amplitude and minor fixes (#19683) ## Description Add amplitude and minor fixes ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bd3c68b0647b23284cca2af2aea1edadb858d39e Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Oct 3 09:58:09 2024 -0400 indexer json rpc tests: checkpoint apis (#19643) ## Description title ## Test plan cargo test --package sui-indexer --test read_api_tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c851c1cde3120e290c98de8c911eac75b0f8d384 Author: benr-ml <112846738+benr-ml@users.noreply.github.com> Date: Thu Oct 3 15:27:00 2024 +0300 Update documentation of Random (#19609) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit a4b474b4d8173fffff79b95c09f0544683d79d53 Author: Ashok Menon Date: Thu Oct 3 13:01:44 2024 +0100 indexer: backfill tx_affected_objects (#19675) ## Description Custom backfill script for populating `tx_affected_objects` by loading the raw effects from the database and pulling the `object_changes` from it. Includes a small clean-up parameterising the script-based SQL backfills. ## Test plan Ran an updated version of this script locally, just printing the values to be added and ran that locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2701d43217a8575414cff7024bbabef3529423f7 Author: Xun Li Date: Wed Oct 2 22:54:07 2024 -0700 [Indexer] Add ingestion based backfill (#19636) ## Description This PR adds a backfill pipeline based on ingestion. It also implement backfilling the raw checkpoints table. ## Test plan Run backfill locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 78bd7b397f5bd69f20b70722d14b645735ed4225 Author: Brandon Williams Date: Wed Oct 2 22:14:18 2024 -0500 chore: fix github actions warnings (#19680) commit b5c7f39d1f878acc9b4b35508a674446299b6800 Author: Brandon Williams Date: Wed Oct 2 21:44:35 2024 -0500 chore: separate cargo-deny advisory checks into its own job (#19679) commit 928b981dc5feaae852846ab621e07bdd85d30827 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Oct 2 16:57:20 2024 -0700 Update opentelemetry (#19665) - Upgrade opentelemetry - consensus: properly configure http2 server --------- Co-authored-by: Brandon Williams commit 337e6a578e18e32f3f3fb89b3e983ee668640a01 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Wed Oct 2 16:25:39 2024 -0600 [docs][ci] Adding steps to notify when /examples change (#19664) ## Description See title ## Test plan 👀 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b7dab062c17c852af5cebb90332bce1b911ee755 Author: Ashok Menon Date: Wed Oct 2 22:29:17 2024 +0100 docs: examples of things only GraphQL can do (#19668) ## Description Adding some examples of things that we can do with GraphQL that can't be done with JSON-RPC. ## Test plan :eyes: (and trying out the queries) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a9a5d45f783e3788c50ec6ea2d18c19d384b81de Author: John Martin Date: Wed Oct 2 12:53:43 2024 -0700 [docs] improve db snapshots instructions (#19661) ## Description see title --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 14b89bd15d030176517bb6ab127e0f697dcd76df Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Wed Oct 2 12:47:27 2024 -0700 add banners (#19658) ## Description Add USDC Banner ![usdc-banner-ext](https://github.com/user-attachments/assets/25d226be-5d53-4204-8cd1-bafeb8d1fa4f) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b22cb248c9d50914934049c4931b06c34c58d5ad Author: John Martin Date: Wed Oct 2 10:16:52 2024 -0700 [docs] Fix formatting issues (#19657) ## Description Noticed some strange formatting, I think the problem is these vars are un-escaped image ## Test plan --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c4bca811a6a170788159650378cdc244f4651af0 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Wed Oct 2 11:01:39 2024 -0600 [docs] Event updates (#19603) ## Description Updates guides/sui-101/using-events to remove subscriptions Removes concepts/event because it was always a topic for enhancement and now the Move Book is a better resource for the info. Updates links to events. Adds a redirect from concepts/events to guides/events, which contains a link to the topic in the move book. Adds a deprecation note to the coin flip example for subscriptions. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ff0a47cdeddd944e9d66609202a8aa49e76ae571 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Wed Oct 2 09:39:58 2024 -0600 [docs] Bridge Node configuration (#19645) ## Description Moves the configuration/runbook from crates to docs. Adds a nav node to new topic in Guides/Operator. Replaces original content with location of current. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 13f738876eee848f8d7e0537bfc7de6be7bf9a71 Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Wed Oct 2 11:15:33 2024 -0400 DeepBook Indexer - improve event logging (#19624) ## Description Parse events before logging. ## Test plan No test, just rearranged log lines. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b516d265b27ff69f8fe37946c7c50b522de5e7ac Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Oct 1 16:34:21 2024 -0700 Reduce retries for nextest (#19646) commit 73d37b6333996c64fb34745212263d97031c2aa5 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Oct 1 13:29:07 2024 -0700 [bridge] etherum mainnet contract deployment config (#19492) ## Description check in the mainnet config for eth contract deployment, for future references. ## Test plan production deployment. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c9af6ea8a5c3a3ac5ff1769542eeb1503c27fde8 Author: Joe Hrbek <123987499+suiwombat@users.noreply.github.com> Date: Tue Oct 1 15:10:11 2024 -0500 [sui-proxy/timeouts+errors] (#19454) ## Description add an on_error to the trace layer add a timeout to all routes, configurable via env vars or using the default. ## Test Plan locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ x ] Nodes (Validators and Full nodes): nodes sending metrics will have an enforced timeout of 20 seconds. 
If metrics cannot be sent in that time, the sui-proxy metrics server will disconnect the client. This will not affect the blockchain or any related activity. It only applies to metrics transmission. - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 782461fd3d066f651df20be8c8a07471279fa237 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Oct 1 12:06:20 2024 -0700 [Consensus] increase min round delay in tests (#19644) ## Description It seems the default delay can result in too many commits per sec. Also turn another per-commit log into debug, since we have not seen related issues recently. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6930fea7313c800aff632a344be6d9284439aac4 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Oct 1 15:05:09 2024 -0400 Version Packages (#19642) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/deepbook-v3@0.8.2 ### Patch Changes - f026ec6: Deepbook Package Upgrade Co-authored-by: github-actions[bot] commit 7994c22fd10bd8d73875141caee287c45c137b21 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Tue Oct 1 11:04:17 2024 -0600 [docs][easy] Adding research papers (#19635) ## Description Adds additional research papers. Alphabetizes the list, as well. Adds - Sui Lutris - Mysticeti - zkLogin - HammerHead ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f026ec6294443bc7d9cc44b61ee1d0285dba5ae0 Author: Tony Lee Date: Tue Oct 1 12:51:18 2024 -0400 Deepbook Testnet Upgrade (#19641) ## Description Upgrade Deepbook SDK package ## Test plan Testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9df417729339429219a5c5a373a063e2b0d5e9f3 Author: Adam Welc Date: Tue Oct 1 15:47:37 2024 +0200 [move-ide] Improvements to on hover for module members (#19619) ## Description This PR fixes a problem with constants designated to be used for clever errors not displaying on-hover information when used in assertions. 
It also updates symbolicator to use compiler's doc comment extraction facility instead of the much flakier hand-rolled implementation originally used in `move-analyzer`. Among other things it allows clean doc comment extraction in presence of attributes (new tests added). Finally, it updates `symbols` tests to Move 2024 ## Test plan All new and old tests must pass commit ff1aeddf8a672096cf7bde585a1f7c74d5141a21 Author: Xun Li Date: Mon Sep 30 19:12:23 2024 -0700 Replace local execution with wait for checkpoint execution (#19629) ## Description This PR replaces active local execution with waiting for checkpoint execution. There is some cleanup needed later to remove all the local execution code, but we can do that separately. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 045352dead7924dfdcabff9bf52fa801312d1066 Author: Ashok Menon Date: Tue Oct 1 00:10:45 2024 +0100 indexer: no need to deserialize dynamic fields (#19622) ## Description Further simplification to dynamic field indexing, where we avoid deserializing dynamic fields altogether and extract the only piece of information we need (whether the DF is a dynamic field or a dynamic object field) from its type. DF deserialization was also the only reason to perform package resolution during indexing, so this change also means we can remove the package resolver, package buffer and logic to keep those up-to-date (pushing in new packages before indexing a checkpoint, and "evicting" packages after a checkpoint had been processed). 
## Test plan ``` sui$ cargo nextest run -p sui-graphql-e2e-tests ``` ## Stack - #19517 - #19518 - #19519 - #19520 - #19521 - #19548 - #19554 - #19565 - #19576 - #19621 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4dce691cea47d21606bcff816b96a55ce1b6fccf Author: Ashok Menon Date: Tue Oct 1 00:06:24 2024 +0100 framework(visitor): test_scenario wrapped object traversal (#19621) ## Description Use a custom annotated visitor to implement detecting wrapped objects in test scenario. Unlike other instances where we have switched to a custom visitor, there isn't an OOM risk or risk of failure at this call site, because it is only used in Move tests, which are only run locally, but the motivation for this change is to avoid a reliance on `simple_deserialize` in the codebase, as it is too easy to copy this pattern and introduce a vulnerability elsewhere. Eventually `simple_deserialize` will go away and be replaced by something that is based on the new annotated visitor, but looks scary enough to use that people think twice before reaching for it. ## Test plan ``` sui$ cargo nextest run -p sui-framework-tests ``` ## Stack - #19517 - #19518 - #19519 - #19520 - #19521 - #19548 - #19554 - #19565 - #19576 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Timothy Zakian commit b7333cdd9a4442470f8b0b077da36ee978459d9f Author: Ashok Menon Date: Tue Oct 1 00:06:02 2024 +0100 adapter(visitor): Use UIDTraversal across all execution layers (#19576) ## Description Replace the legacy implementations of `get_all_uids` with the one that has been used in `latest` for some time now. This version avoids inflating the whole struct just to read its UIDs. The main motivation in making the switch is to reduce the number of call-sites that use `simple_deserialize`, so that we can eventually get rid of it entirely, and replace it with a visitor based implementation that includes the appropriate caveats around inflating a whole struct in memory. ## Test plan ``` sui$ cargo simtest ``` ## Stack - #19517 - #19518 - #19519 - #19520 - #19521 - #19548 - #19554 - #19565 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Timothy Zakian commit fd81820de265a74cd2e2eec55b74088a233bd0e6 Author: Ashok Menon Date: Tue Oct 1 00:05:49 2024 +0100 indexer(visitor): avoid fully deserializing dynamic field on write path (#19565) ## Description Use `FieldVisitor` to extract the dynamic field kind from a serialized MoveObject without deserializing the whole thing. This makes it so that the indexing process cannot fail if presented with an overly large dynamic field. 
This PR also simplifies dynamic field indexing, taking advantage of the fact that we now only store the `df_kind`, and not any other part of `df_info`. ## Test plan ``` sui-indexer$ cargo nextest run sui-graphql-e2e-tests$ cargo nextest run ``` ## Stack - #19517 - #19518 - #19519 - #19520 - #19521 - #19548 - #19554 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Timothy Zakian commit dbe3433bbda994d0ad0a0c1ab5978868f3ad9125 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Mon Sep 30 16:05:36 2024 -0700 [visitor] use dynamic field visitor in analytics indexer (#19554) ## Description Switches the analytics indexer to use the new dynamic field visitor so that we inflate the value as little as possible during the indexing process. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Ashok Menon commit 706a26464c262dafd3b216edfb6c83dca57dd0db Author: Ashok Menon Date: Tue Oct 1 00:05:22 2024 +0100 authority(visitor): avoid fully deserializing dynamic field (#19548) ## Description Make it so that deserializing dynamic field info does not inflate the whole field -- just the outer struct -- which in turn means that this operation cannot fail. ## Test plan ``` sui-core$ cargo simtest sui-adapter-transactional-tests$ cargo simtest ``` ## Stack - #19517 - #19518 - #19519 - #19520 - #19521 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d224a1abd325554ac37f15a581c2f3a2f5633417 Author: Ashok Menon Date: Tue Oct 1 00:05:11 2024 +0100 rest(visitor): avoid fully deserializing dynamic field (#19521) ## Description Make it so that deserializing dynamic field info does not inflate the whole field -- just the outer struct -- which in turn means that this operation cannot fail. ## Test plan ``` sui$ cargo simtest sui-adapter-transactional-tests$ cargo simtest ``` ## Stack - #19517 - #19518 - #19519 - #19520 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a745d72bfe6c098bf84065a5924a5eea33a6353b Author: Ashok Menon Date: Tue Oct 1 00:04:57 2024 +0100 indexer(visitor): avoid fully deserializing dynamic field (#19520) ## Description Use `FieldVisitor` to extract necessary information from a dynamic field object, rather than fully expanding it, which can fail if the overall size is too large. Unfortunately, because `DynamicFieldInfo` includes a structured representation of the field name, this operation can still fail if the name alone is too large to deserialize using `BoundedVisitor`, but we can at least avoid deserializing the value. ## Test plan Build and run the indexer reader against a mainnet DB, and check for `0x5`: ``` sui$ cargo run --bin sui-indexer -- --database-url "$DB" --pool-size 10 \ json-rpc-service --rpc-client-url "https://fullnode.mainnet.sui.io:443" ``` Make the request: ``` $ curl -LX POST "https:/fullnode.mainnet.sui.io:443" \ --header 'Content-Type: application/json' \ --data-raw '{ "jsonrpc": "2.0", "method": "suix_getDynamicFields", "id": 1, "params": ["0x5"] }' | jq -C . | less -r ``` Expects a response like: ``` { "jsonrpc": "2.0", "result": { "data": [ { "name": { "type": "u64", "value": "2" }, "bcsName": "LQM2cdzDY3", "type": "DynamicField", "objectType": "0x3::sui_system_state_inner::SuiSystemStateInnerV2", "objectId": "0x5b890eaf2abcfa2ab90b77b8e6f3d5d8609586c3e583baf3dccd5af17edf48d1", "version": 339253281, "digest": "89nPMzp31fiSy39a7fg6TBzALdm3xA4Byd4hWr2QLahg" } ], "nextCursor": "0x5b890eaf2abcfa2ab90b77b8e6f3d5d8609586c3e583baf3dccd5af17edf48d1", "hasNextPage": false }, "id": 1 } ``` ## Stack - #19517 - #19518 - #19519 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 31d284c7c105cd14b6fb2c12d894fb73cb05eb85 Author: Ashok Menon Date: Tue Oct 1 00:01:04 2024 +0100 graphql(visitor): avoid DynamicField to-and-fro (#19519) ## Description Use the annotated visitor pattern to avoid deserializing a whole DynamicField (include its name and value) to only fetch one part of it. ## Test plan ``` sui-graphql-rpc$ cargo nextest run -j 1 sui-graphql-e2e-tests$ cargo nextest run ``` ## Stack - #19517 - #19518 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c47b362b320152a0063bc1341adf13e462dc7298 Author: Ashok Menon Date: Tue Oct 1 00:00:47 2024 +0100 visitor: FieldVisitor for partially deserializing dynamic fields (#19518) ## Description Introduce `FieldVisitor` as a new `annotated_visitor::Visitor` for extracting details from a `sui::dynamic_field::Field` without fully deserializing its name and value. This is helpful in a couple of places where not all the information is needed at once. - When indexing, we can extract just the information that aids indexing, and not anything else. - In GraphQL, the name and value fields are accessed separately, and we currently unnecessarily move back-and-forth between the serialized and structured representations to support this use case. 
This commit does not replace any instances of dynamic field serialization, but introduces the visitor and associated tests. ## Test plan ``` sui-types$ cargo nextest run -- dynamic_field::visitor ``` ## Stack - #19517 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 26f393887db7900679178689c35aae6aa56f52e5 Author: Ashok Menon Date: Sun Sep 22 21:09:54 2024 +0100 visitor: Expose layout for values via driver ## Description Initialise `ValueDriver` with a layout so that visitor functions can query the layout in all cases (not just for structs, vectors and enums). ## Test plan ``` move-core-types$ cargo nextest run sui-types$ cargo nextest run ``` commit b43d87f866962087e3ba2d46b164f5409e4fb7e0 Author: Ashok Menon Date: Sun Sep 22 20:47:50 2024 +0100 visitor: parametrize Visitor by lifetimes ## Description Add lifetime parameters for the underlying byte stream and the layout to `Visitor` and `Traversal` so that `Visitor` can return values that are derived from references to that byte stream and layout. ## Test plan ``` move-core-types$ cargo nextest run sui-types$ cargo nextest run ``` commit 534b79ec1544af985ede25a3e3e3ef7eea04e6fc Author: Ashok Menon Date: Sat Sep 21 23:37:06 2024 +0100 visitor: Expose byte offsets through ValueDriver ## Description Introduce `ValueDriver`, by analogy to `StructDriver`, `VecDriver`, and `VariantDriver`. 
While other drivers expose functions to continue visiting the substructure of the value, `ValueDriver` solely serves as a shared context to expose the bytes being visited, the start of the current value being parsed and the position into the byte stream that the next byte will come from. This can be used to implement visitors/traversals that extract sub-slices from the underlying bytes, which is useful for dynamic field deserialization. ## Test plan Introduced a new unit test for byte offsets, and run the existing unit tests: ``` move-core-types$ cargo nextest run sui-types$ cargo nextest run ``` commit 1f8ed3d6fffe7d60272b6f09c28c2f2d70998d55 Author: Ashok Menon Date: Sat Sep 21 21:06:25 2024 +0100 refactor(visitor): introduce ValueDriver ## Description All this does for now is encapsulate the cursor in the `ValueDriver` and include a place where we can store the starting position for a value. ## Test plan ``` move-core-types$ cargo nextest run ``` commit 3f25826d44c990820d64e17d925e3930249ceb41 Author: Ashok Menon Date: Sat Sep 21 20:30:12 2024 +0100 refactor(visitor): Use Cursor<&[u8]> ## Description ...instead of `&mut &[u8]` we use `&mut Cursor<&[u8]>` and this allows us to query the position in the byte stream at any point in time. ## Test plan ``` move-core-types$ cargo nextest run ``` commit 06628c2dcdf97e168ab35ce2d75030d30ef197a5 Author: Ashok Menon Date: Sat Sep 21 20:00:39 2024 +0100 chore(visitor): Create visit_vector ## Description Standardize pattern for visiting non-scalar Move values by introducing `visit_vector`, like `visit_struct` and `visit_variant`. ## Test plan ``` move-core-types$ cargo nextest run ``` commit fe8982b16c3d62bfd9df7d8f805cd2cac8ffc1d3 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Sep 30 17:47:28 2024 -0700 [Cleanup] remove rolling hash from consensus handler (#19628) ## Description - Each consensus commit includes its previous commit's digest. Commit digests are logged inside consensus. 
So I believe we have what is needed to debug forks without the rolling hash inside consensus handler now. - Deprecate the `last_consensus_index` table. Its replacement `last_consensus_stats` has been populated for a while. - Remove Narwhal implementation of ConsensusOutputAPI. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7321c67d764099860aa72f0cb523b74aef608faf Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Tue Oct 1 06:23:43 2024 +0700 [Linter] Checks for explicit self-assignments. (#17124) ## Description This code implements a linter check to detect self-assignments in Move code. The check focuses on two types of expressions: 1. Mutate expressions (`*a = b`) 2. Assign expressions (`a = b`) In each case, the compiler tracks for the same "memory locations" of the assignment, attempting to track the same local variable. It does not have any sort of aliasing notion nor does it track references in any meaningful way. ## Test plan Added more use cases ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move lint now warns against unnecessary self-assignments. 
- [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Todd Nowacki commit ff9e78f834dd8392668bbd746df31c2a1d855ee0 Author: Damir Shamanaev Date: Tue Oct 1 02:04:33 2024 +0300 [framework] output human readable debug in `std::unit_test` (#19632) commit dd9c262180b576484c768cef6784060843bec09e Author: John Martin Date: Mon Sep 30 15:58:28 2024 -0700 Update fullnode-template.yaml to include archival fallback (#19630) ## Description template update --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit dffc29802f6d2d0398fe88b740fb04a7156d6b8e Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Sep 30 15:21:42 2024 -0700 [Fastpath] Support transactions certified through consensus (#19601) ## Description The fields in the `CertificateProof::Consensus` variant is chosen to enhance debuggability (including the transaction's position in the DAG: round, authority index, transaction index), without unnecessary data (consensus block digest is excluded). ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5505e624feccbd623f130358653e1fe613fd3181 Author: Vassilis Legakis Date: Mon Sep 30 23:46:55 2024 +0300 Add Trace as OAuth provider for dev (#19606) Add Trace as OAuth provider for dev ``` 2024-09-30T20:02:38.090458Z INFO node{name=k#99f25ef6..}: sui_core::authority::authority_per_epoch_store: received jwk vote from k#99f25ef6.. for jwk (JwkId { iss: "https://cognito-idp.eu-west-3.amazonaws.com/eu-west-3_gGVCx53Es", kid: "FIqQqeRU1EQP75fBeds+wL2yczwvbDy1NF6T/f5Oe5o=" }, JWK { kty: "RSA", e: "AQAB", n: "va55QtSTFhvqx0IL8UAPNZf6x5NO9jK88Wyct-i67grbDJZyXLSxz5oSttoIHkaJZQWgl_qsgyxDpHeSMznliS0K7rXFwaw81PwDIB_ek8uKhXmaQ6ryPAaO-8kvR46o-EYyVk9wWPDHTtN6NEbiUxGoDqOoiVCrVJLsFdoia7-MMVJnR2VokbEgGGbJkZu2MipikQvk_b7BGF__5mKvCJc6hCEJcxIUFvETKA9AOhjgxLvE9U8Ke6XRfmDLKovDAsDaZsWP6VPN0HoBi5mak9CgUqmxwnvNJFLhpjawD9E23O_l4aBcie56eFcC_knUa2VOHymBoJ0On3p03yxs7Q", alg: "RS256" }) ``` --------- Co-authored-by: Joy Wang <108701016+joyqvq@users.noreply.github.com> commit 96b5a149dff31731453baf59fe67e33ccb9fdd27 Author: Andrew Schran Date: Mon Sep 30 19:59:55 2024 +0100 Add mode to support signed variant of Discovery protocol. (#19587) ## Description Adds a new version of discovery where all NodeInfo messages are signed by the originator. commit f30dfc9f54fc424a52bb711e4e7ba0dad9f4314f Author: Ashok Menon Date: Mon Sep 30 19:50:04 2024 +0100 indexer: affected objects include created+wrapped/unwrapped+deleted (#19618) ## Description Broaden the definition of "affected object" to include objects that a transaction creates and then immediately wraps, or objects that it unwraps and then immediately deletes. This ensures that a transaction will show up in the response to an `affectedObject` query if and only if the Object's ID shows up in its `objectChanges` response. 
## Test plan Updated GraphQL E2E test: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests \ --features staging -- affected_object ``` ## Stack - #19474 - #19614 - #19615 - #19616 - #19617 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: A transaction's "affected objects" include objects that it created and wrapped, or unwrapped. - [x] JSON-RPC: A transaction's "affected objects" include objects that it created and wrapped, or unwrapped. - [x] GraphQL: A transaction's "affected objects" include objects that it created and wrapped, or unwrapped. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9c6029ba97c5aa9dfc67975eca373c5c7b92ecea Author: Ashok Menon Date: Mon Sep 30 19:02:45 2024 +0100 events: Remove unsupported filters (#19617) ## Description This PR removes EventFilters from JSON-RPC that aren't supported already by fullnode-backed JSON-RPC: - Combination filters (`All`, `Any`, `Not`, `And`, `Or`) - `Package` - `EventField` These filters were, however, supported by the now-deprecated subscription system, so a question remains whether they are safe to remove. 
## Test plan Manually tested, in particular `All: []` support, which we do still need to keep as a way to get all filters (this was also added to the `IndexerReader` implementation): ``` sui$ cargo run --bin sui -- --force-regenesis \ --with-indexer --with-faucet ``` ...run for some time, so that some system events accumulate, then test that we get them from both the fullnode-backed JSONRPC and indexer-backed: Fullnode: ``` curl -LX POST "http://localhost:9000" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryEvents", "params": [ { "All": [] }, null, 50, true ] }' | jq . ``` Indexer: ``` curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryEvents", "params": [ { "All": [] }, null, 50, true ] }' | jq . ``` ## Stack - #19474 - #19614 - #19615 - #19616 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [x] Nodes (Validators and Full nodes): Remove unsupported compound filters for querying events from the JSON-RPC schema to avoid confusion. Remove support for compound filters from the deprecated events subscription system. - [x] Indexer: Remove compound filters for querying events. - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f3784b48718809bebc077ea2bb33a80e26aeed4e Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Mon Sep 30 13:21:45 2024 -0400 indexer easy: add sender to events to deprecate senders (#19626) ## Description clean up events senders to use sender instead, as this is a schema breaking change, following the process and splitting this into steps 1. 
this pr, adds new `sender` column; backfill instance to backfill `sender` with `senders` 2. then: remove usage of senders and drop `senders` column https://linear.app/mysten-labs/issue/DP-53/clean-up-multi-sender-in-the-db-schema-and-code ## Test plan ci --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6bfc91b2c4cda1640fadd7ee71058420d8a63c7b Author: Ashok Menon Date: Mon Sep 30 17:37:17 2024 +0100 indexer: deprecate TransactionFilter::ToAddress (#19616) ## Description Remove support for filtering by `ToAddress` in PG-backed JSON-RPC. At the same time, change implementations for `FromAndToAddress` and `FromOrToAddress` to use the new `tx_affected_addresses` table, which should be more efficient. 
## Test plan Manually tested: ``` sui$ cargo run --bin sui -- --force-regenesis \ --with-faucet --with-indexer ``` ``` sui$ $SUI client faucet sui$ $SUI client ptb --transfer-objects [gas] @0x42 sui$ curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryTransactionBlocks", "params": [ { "filter": { "FromOrToAddress": { "addr": "'($SUI client active-address)'" } } }, null, 50, true ] }' | jq .result.data.[].digest "4NDjddQA8Q158EuskHm73AVoo4Gmr6SknuTv1nzghVd1" "5EECPcG6ZbUaH6nXCZWazpg6vcsC33nfnEw6qVkAnN9W" sui$ curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryTransactionBlocks", "params": [ { "filter": { "FromOrToAddress": { "addr": "0x0000000000000000000000000000000000000000000000000000000000000042" } } }, null, 50, true ] }' | jq .result.data.[].digest "4NDjddQA8Q158EuskHm73AVoo4Gmr6SknuTv1nzghVd1" sui$ curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryTransactionBlocks", "params": [ { "filter": { "FromAndToAddress": { "from": "'($SUI client active-address)'", "to": "0x0000000000000000000000000000000000000000000000000000000000000042" } } }, null, 50, true ] }' | jq .result.data.[].digest "4NDjddQA8Q158EuskHm73AVoo4Gmr6SknuTv1nzghVd1" ``` ## Stack - #19474 - #19614 - #19615 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: Remove support for filtering transactions by `ToAddress`, (instead of `FromOrToAddress`). 
- [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 416a7520698a2741516e2f6b656f54599590180b Author: Eugene Boguslavsky Date: Mon Sep 30 09:05:53 2024 -0700 Fixing nightly workflow: Remove profile release option (#19627) ## Description Fixing nightly workflow: Remove profile release option ## Test plan 👀 commit 17702bc93a69411e4d2525a4894d03ebc46a4ca4 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Mon Sep 30 11:28:15 2024 -0400 [docs] fix interface reference for data ingestion (#19625) ## Description Applies doc change to data ingestion's Worker trait ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 10b8c792b51ed76c1ec3450e1bd5d66859a2e681 Author: Ashok Menon Date: Mon Sep 30 16:03:34 2024 +0100 indexer: deprecate TransactionFilter::{Input, Output}Object (#19615) ## Description Deprecate `InputObject` and `OutputObject` as filters in IndexerReader, to be replaced by `AffectedObject`. ## Test plan Tested manually: ``` sui$ cargo run --bin sui -- start --with-indexer --force-regenesis ``` ``` curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryTransactionBlocks", "params": [ { "filter": { "ChangedObject":"0x2" } }, null, 50, true ] ' | jq . 
{ "jsonrpc": "2.0", "error": { "code": -32000, "message": "Indexer does not support the feature with error: `InputObject and OutputObject filters are not supported, please use AffectedObject instead.`" }, "id": 1 } ``` ## Stack - #19474 - #19614 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: Remove support for `InputObject` and `OutputObject` transaction filters (replaced by `AffectedObject`). - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b764276a7abd526d775317e422d3068219f52258 Author: Ashok Menon Date: Mon Sep 30 15:54:01 2024 +0100 indexer: support TransactionFilter::AffectedObject (#19614) ## Description Add an `AffectedObject` filter on `TransactionFilter` and implement it in `IndexerReader` and in the subscription system. This filter is not implemented on fullnode indices, and in a follow-up PR, support for `InputObject` and `ChangedObject` will be removed from the PG-backed implementation of JSON-RPC. It was difficult to replace `InputObject`/`ChangedObject` support on fullnode with `AffectedObject` because we don't have (and don't want to add) the necessary indices, and the current filters are referred to in certain tests. 
## Test plan Manually tested with a local network: ``` sui$ cargo run --bin sui -- start --with-indexer --force-regenesis ``` ``` sui$ curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data '{ "jsonrpc": "2.0", "id": 1, "method": "suix_queryTransactionBlocks", "params": [ { "filter": { "AffectedObject":"0x2" } }, null, 50, true ] }' | jq '.result.data.[].digest' ``` ## Stack - #19474 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: Adds support for filtering transactions by their affected object - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 841f5e397273689fdae73f5afe774e40d8aca404 Author: Ashok Menon Date: Mon Sep 30 15:32:29 2024 +0100 graphql: filter transactions by affected object (#19474) ## Description Add support for filtering transactions by the objects they touch, which will supersede filtering by a transaction's input and output objects. This feature will exist only in staging mode until the underlying data has been fully backfilled. ## Test plan New staging-only tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests --features staging -- affected_object ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 56361ed6996adb9270858953b2b82d64b0b79085 Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Mon Sep 30 08:14:10 2024 -0400 DeepBook Indexer - update events (#19600) ## Description Small event changes. ## Test plan Tested locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7c9339816abba3c2f0ba342c01d7b0235d290444 Author: Adam Welc Date: Mon Sep 30 09:26:24 2024 +0200 [move-ide] Improvements to on hover for structs and enums (#19599) ## Description This PR improves on certain aspects of on hover for structs and enums. Mainly, it shows more meaningful information for struct/variant field values when packing structs/variants. 
Before, it would show information about the field itself, which was not very useful when inspecting the pack operation: ![image](https://github.com/user-attachments/assets/77753a0b-c0de-4230-9ce5-1e9d95192f49) In the case above, it makes a lot more sense to show information about constant `TMP` which is what is implemented in this PR: ![image](https://github.com/user-attachments/assets/e7c2563d-7dae-4001-9529-4a568067c69b) This PR also includes a fix to displaying on-hover information for variants when inspecting the pack operation ## Test plan Old and newly added tests must pass commit b3b80f75d07d0760026742123f00084439105d66 Author: Andrew Schran Date: Sun Sep 29 21:03:08 2024 +0100 Add transaction handler RPC for Mysticeti fastpath (#19401) This will not function until consensus adapter/handler side is implemented. commit 3864dcaec78ca245d8e77bf917ededaf5208877a Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 28 00:01:16 2024 -0700 [bridge-indexer] move current checkpoint metric to retrieval stage (#19570) ## Description Currently indexer reports `current_checkpoint` when saving progress to DB. This makes the metrics a bit out of date when we cache the metrics in memory. This PR: 1. moves the metrics to data retrieval stage, 2. rename it to `latest_retrieved_checkpoints` 3. in EthSubscription task, periodically fetch the latest block height and update this metric ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2b1a4e8ea5514e768be12c456f47d722dd1c2b0e Author: Todd Nowacki Date: Fri Sep 27 17:10:14 2024 -0700 [move-compiler] Make visitors more parallel friendly (#19421) ## Description - Removed the mutability of the visitor itself, since once invoked, it can create mutability - Removed the RefCells around the visitors ## Test plan - Added a send+sync check --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 688fcbb12e279539616c09fc05b607fcb6370fa3 Author: 0xripleys <105607696+0xripleys@users.noreply.github.com> Date: Sat Sep 28 06:10:54 2024 +0800 FungibleStakedSui Implementation (#18759) ## Description Implementation for https://github.com/sui-foundation/sips/pull/31 ## Test plan Unit tests + random tests for the math --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Emma Zhong commit df41d44893038acd21c791df1329c7f3a588a32b Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Sep 27 14:32:25 2024 -0700 [bridge] report gas coin balance on node start (#19593) ## Description 1. report gas coin balance on node start 2. set the minimal gas balance to 10 SUI on start 3. some other metrics updates ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: William Smith commit 1f2ac8e73b5a14bff0d9dc8c110ba194b96c780b Author: Bridgerz Date: Fri Sep 27 20:58:13 2024 +0100 Add Governance action event support (#19252) ## Description Governance action support ## Test plan Spot check on my local machine. Indexer validity checker tool coming soon... --------- Co-authored-by: patrick commit 1eca8ba220601dcbc1c7a6d2408ef07d52bc6e56 Author: Anastasios Kichidis Date: Fri Sep 27 18:34:11 2024 +0100 [Consensus] use authority_index to derive hostname (#19597) ## Description ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 92a104aaf89d34626913dee4db781e8dec83a651 Author: Nick Pappas <143171113+1NickPappas@users.noreply.github.com> Date: Fri Sep 27 17:11:45 2024 +0000 Np/new provider arden (#19571) Description This PR adds the Arden OIDC provider. Test plan The following steps were taken to test the new integration: 1. Initially ran cargo test -p sui-swarm-config, but not all tests passed. 2. Reviewed the snapshot using cargo insta review and proceeded with the test process. 3. Re-ran cargo test -p sui-swarm-config as per the guide, and all tests passed successfully on the second run. --- commit 138aabf734547989cd4b87c8fe25a5de108830d5 Author: Andrew Schran Date: Fri Sep 27 17:16:17 2024 +0100 Further reduce random beacon min shares to 700 (#19522) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Reduces minimum shares for random beacon protocol to 700. This may reduce costs of running the protocol. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f57e027b5e2969767004882a9b90458b5c27b968 Author: Xun Li Date: Fri Sep 27 08:57:25 2024 -0700 [Indexer] Add a generic backfill template (#19510) ## Description This PR introduces a generic backfill task trait and a runner. It supports querying the DB in parallel, processing the results, and committing them back to the DB. The original SqlBackfill for full_objects_history is then added as one instance. This PR also adds another instance for system_system_summary_json backfill. ## Test plan Run locally. 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a21e63f9bd700b96c3dd6982cbeec396349b152d Author: Krešimir Klas Date: Fri Sep 27 15:20:14 2024 +0200 move: load package version when using external resolver in dependency graph (#19595) ## Description Small fix to correctly load dependency version when using external resolver. Changed `echo` in `successful_package_batch_response.sh` to `printf` for more consistent handling of `\0` across systems (it fails on my machine). This is part of the work to enable compiling against on-chain dependencies https://github.com/MystenLabs/sui/pull/14178. cc @rvantonder @amnn ## Test plan Updated the unit test to have a dependency with `version` field. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 54ccf09ef3764b5b94082860bcba858a15ce6ab7 Author: Rijnard van Tonder Date: Thu Sep 26 22:47:29 2024 -0700 move: external resolver reads multiple lock contents (#19057) ## Description This makes changes to the external resolver so that multiple package graphs are read per resolver (null-separated strings). 
This also makes it so that an externally resolved dependency entry does not actually have to correspond to the package name. E.g., from https://github.com/MystenLabs/sui/pull/19561, `A` can be any name when `r.foo` is specified: ```toml A = { r.foo = "bar", } ``` ## Test plan Added a test to exercise parsing multiple null-separated lock contents. This also tests that it is not required for the dependency to be named as a recognized package. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Changes to external dependency resolution: allows processing multiple null-separated contents to extend the package graph per resolution call. - [ ] Rust SDK: - [ ] REST API: commit cc13b433d65b9171e544c407d6703e6ec816e718 Author: Brandon Williams Date: Thu Sep 26 21:52:29 2024 -0500 jsonrpc: populate object cache with input objects (#19589) Also populate the object cache used for calculating balance changes with the input objects returned from the validators. commit 3ace00d8c6e0a707b50a2e38f3419471ec8dc2a0 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Sep 26 17:03:21 2024 -0700 Version Packages (#19588) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/bcs@1.1.0 ### Minor Changes - 489f421: Updated hex, base64, and base58 utility names for better consistency All existing methods will continue to work, but the following methods have been deprecated and replaced with methods with improved names: - `toHEX` -> `toHEX` - `fromHEX` -> `fromHex` - `toB64` -> `toBase64` - `fromB64` -> `fromBase64` - `toB58` -> `toBase58` - `fromB58` -> `fromBase58` ## @mysten/sui@1.11.0 ### Minor Changes - 489f421: Updated hex, base64, and base58 utility names for better consistency All existing methods will continue to work, but the following methods have been deprecated and replaced with methods with improved names: - `toHEX` -> `toHEX` - `fromHEX` -> `fromHex` - `toB64` -> `toBase64` - `fromB64` -> `fromBase64` - `toB58` -> `toBase58` - `fromB58` -> `fromBase58` - 489f421: support Bech32 secrets in the Keypair.fromSecretKey methods ### Patch Changes - Updated dependencies [489f421] - @mysten/bcs@1.1.0 ## @mysten/create-dapp@0.3.24 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 - @mysten/dapp-kit@0.14.24 ## @mysten/dapp-kit@0.14.24 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 - @mysten/wallet-standard@0.13.6 - @mysten/zksend@0.11.5 ## @mysten/deepbook@0.8.20 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 ## @mysten/deepbook-v3@0.8.1 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 ## @mysten/enoki@0.4.4 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 - @mysten/zklogin@0.7.21 ## @mysten/graphql-transport@0.2.20 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 - @mysten/bcs@1.1.0 ## @mysten/kiosk@0.9.20 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies 
[489f421] - @mysten/sui@1.11.0 ## @mysten/suins-toolkit@0.5.20 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 ## @mysten/wallet-standard@0.13.6 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 ## @mysten/zklogin@0.7.21 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 - @mysten/bcs@1.1.0 ## @mysten/zksend@0.11.5 ### Patch Changes - Updated dependencies [489f421] - Updated dependencies [489f421] - @mysten/sui@1.11.0 - @mysten/wallet-standard@0.13.6 Co-authored-by: github-actions[bot] commit ec0fe9dabc39b1d4a5a592767bad4404a99848ec Author: Denys Kozak <117740720+denyskozak@users.noreply.github.com> Date: Fri Sep 27 00:49:18 2024 +0100 [sdk docs] Remove import duplication for dapp-kit sui client provider (#19582) ## Description Remove import duplication `from '@mysten/dapp-kit'` for Sui Client Provider "Using network specific configuration" code example (dapp-kit doc) ## Test plan Sui Client Provider "Using network specific configuration" code example, link (https://sdk.mystenlabs.com/dapp-kit/sui-client-provider#using-network-specific-configuration) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Denis Kozak Co-authored-by: Michael Hayes commit 29126ad5a19e39a2bc8f9dc8fe2eecfbba6dc437 Author: John Martin Date: Thu Sep 26 14:47:52 2024 -0700 Update sui_for_node_operators.md (#19579) ## Description Describe the changes or additions included in this PR. 
## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 489f42152bd2bdcc5ba4021a7bfca1e44fc7e09d Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Thu Sep 26 14:33:52 2024 -0700 Fix some docs issues and improve fromSecretKey methods on keypair classes (#19581) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a0ae6e2c7fc7d1df49871048c29ae4191340177a Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Sep 26 14:25:22 2024 -0700 [bridge] add logs and metrics for bridge indexer (#19567) ## Description 1. add more logs in indexer, especially in error cases 2. add inflight live tasks metrics ## Test plan tested running locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 31ef917b259e4ad31c5e9865a2bed1789cef51ff Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Sep 26 10:52:33 2024 -1000 Version Packages (#19583) commit 0d17307567c726abf82014b402beecf8f4917cd0 Author: Tony Lee Date: Thu Sep 26 09:47:18 2024 -1000 Deepbook SDK update (#19585) ## Description Package ID Updates ## Test plan Testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 640b757ffda0b531b44e92baea775c891af87893 Author: Jordan Gensler Date: Thu Sep 26 14:39:44 2024 -0400 Add new router utilities to dapp-kit (#19568) ## Description This adds two new features to dapp-kit: - `getSuiClientQuery`, which returns a `queryOptions` config that can be used with the query client directly. This is important for data prefetching where we need to load data in the route loader, not in the react render. - `useSuiClientSuspenseQuery`, which works well with new routers like tanstack router. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit be7160aa28042fd36a79de7a975bd3621711de31 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Thu Sep 26 11:11:52 2024 -0700 [move] Update syntax and usage for external resolvers in Move packages (#19561) ## Description This updates the way external package resolvers are declared and used inside Move packages. In particular, external resolvers can be prefixed by `r.` so to use an external resolver binary `foo` you would declare a dependency like `A = { r.foo = "bar", }`. This additionally adds support for resolver-specific configs in the `Move.toml` however these are treated opaquely -- any top-level item prefixed with `r.` will be allowed by the parser. Bottom commit are the changes + new tests, and top commit are the test updates. ## Test plan Added new tests, as well as updated existing tests and made sure they continued to work as expected. commit 3fa3ec9dca28cfcca39c30063cee2e1716d4c0bf Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Thu Sep 26 10:46:04 2024 -0700 Add fatal! and debug_fatal! macros (#19566) Add two macros as superior alternatives to panic! - `fatal!` logs its message at error level, and then panics. Logged messages are often easier to find than panic messages, both in loki and in simtest logs. - `debug_fatal!` is a variant of `fatal!` that only logs an error in release mode. commit 2362d043b3ad33b1ac6a3fb9ed20b9519f8b8930 Author: 0xaslan <161349919+0xaslan@users.noreply.github.com> Date: Thu Sep 26 12:51:40 2024 -0400 DeepBook indexer - modify events (#19550) ## Description Modified the OrderFilled event and added a new OrderExpired event. 
## Test plan Integration test. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8bd13d00f79c7630ba3fdc145af227d7fe0b1447 Author: Xun Li Date: Thu Sep 26 09:36:24 2024 -0700 [Indexer] Do not prune epochs table (#19533) ## Description We will need a copy of the full epochs history somewhere. We either create a separate KV table or just simply don't prune the existing table. Given that this table is quite small, it's probably ok just to not prune it. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 98ed88d525dab5e6c469666ad98b1387a33f7c4e Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Thu Sep 26 09:50:24 2024 -0600 [sdk docs] Added mermaid ref (#19528) ## Description Was unable to build site locally to test. Trying to get mermaid graph to render here: https://sdk.mystenlabs.com/typedoc/index.html ## Test plan https://sui-typescript-docs-7fcypvvd6-mysten-labs.vercel.app/typedoc/index.html --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6d1c4902f774279a316cf8348139d1d52df8b240 Author: Vassilis Legakis Date: Thu Sep 26 17:43:27 2024 +0300 Enable 3DOS oauth provider for Prod (#19574) Enable 3DOS oauth provider for Prod commit 5fca05275b1b86da618e88f101385a0da9555bfd Author: Krešimir Klas Date: Thu Sep 26 13:28:14 2024 +0200 [on-chain-deps][manifest] change `Custom` dependency to `OnChain` (#19540) ## Description Refactor `Custom` dependency to `OnChain` (in the manifest) and streamline it. It now requires only the `id` field which specifies dependency's on-chain id (e.g. `Foo = { id = "0x123" }`. `Custom` dependencies aren't supported on Sui so this should be a fairly safe change. This is part of the work to enable compiling against on-chain dependencies https://github.com/MystenLabs/sui/pull/14178. cc @rvantonder @amnn ## Test plan Updated existing unit tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Remove legacy support for custom package hooks, replacing it with initial logic for on-chain dependencies. 
- [ ] Rust SDK: - [ ] REST API: commit c9acf8338f8ecedf186c0df79f56bc0e4f22df3a Author: Ashok Menon Date: Thu Sep 26 12:21:51 2024 +0100 fix(ci): clean-up final use of --features indexer (#19575) ## Description The TS E2E tests were trying to build the Sui CLI with the `indexer` feature enabled, but that feature no longer exists (and the functionality it guarded is now available by default). ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 15de3f24ac3bf9b32b93ef2e2c41c0bf040f1c92 Author: Anastasios Kichidis Date: Thu Sep 26 09:49:17 2024 +0100 [Consensus] Small enhancement to the invalid blocks metric (#19515) ## Description Use the hostname whenever possible. Add the error type as well. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5473d94ea8fb7a40b8e9df987a8fc0020bc1f888 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Sep 25 22:55:49 2024 -0700 Fix sorting in peer discovery (#19569) ## Description It seems `connection_rtt()` can get updated concurrently. This breaks total ordering, which becomes required for sorting in rust 1.81. 
Other callsites of `sort_by()` are reviewed and they don't seem to have the same issue. ## Test plan PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fffce53a57acee206ab5108d14f63829bfbe1b0a Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Wed Sep 25 18:49:57 2024 -0700 Enable distributed vote scoring in testnet v61 (#19553) ## Description This is a fix for CI. Bumping protocol version and pushing new feature change to that version --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [X] Protocol: Enable distributed vote scoring in testnet - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 49175e2a2336c14e3196daeccdef1a04faf2ba2d Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Sep 25 18:49:23 2024 -0700 [Rust] upgrade to 1.81 (#19529) ## Description Upgrade rust toolchain to 1.81. Fix lint. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c838c1e2abce6b3109288743796bacefc99670ed Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Wed Sep 25 13:28:45 2024 -1000 Version Packages (#19559) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.7.1 ### Patch Changes - 37d259a: Locked balance feature Co-authored-by: github-actions[bot] commit a5f807e16e86f0c48d21f583e1184454e8ae907e Author: Ashok Menon Date: Thu Sep 26 00:17:33 2024 +0100 indexer: relax migration check (#19558) ## Description It's okay for the list of locally known migrations to be a subset of the applied migrations, rather than a prefix (The only clear issue is if there is a migration that has been embedded locally, but that hasn't been run on the DB). ## Test plan New unit test: ``` sui-indexer$ cargo nextest run ``` And confirm against the production DB. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a942c6e243fe5267474d9ecce5bc703b9644bc19 Author: Zhe Wu Date: Wed Sep 25 15:27:30 2024 -0700 Update outdated event id doc (#19551) ## Description As title says ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 15ef20aca1d21ff043c144b9656394bf7305065e Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Sep 25 18:20:05 2024 -0400 indexer: fix unwrap after wrap bug in a checkpoint batch (#19468) ## Description title ## Test plan ``` cargo nextest run objects/wrap_unwrap PASS [ 7.594s] sui-graphql-e2e-tests::tests run_test::stable/objects/wrap_unwrap.move ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1c75247c04e5e58253fd93bcbce4db7c3d6e2d63 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Sep 25 15:03:08 2024 -0700 Terminate checkpoint builder before end of epoch (#19555) Otherwise it is possible for this panic to be hit: https://github.com/MystenLabs/sui/blob/34c9e3ef6fff67df7f6ec89b4ccee462fb111df2/crates/sui-core/src/checkpoints/mod.rs#L957 commit 37d259a46060fa4e29751d46a5d40e84a701886b Author: Tony Lee Date: Wed Sep 25 11:58:13 2024 -1000 Deepbook Locked Balance SDK (#19557) ## Description Locked balance function for sdk ## Test plan Testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7a4ecf8083c06bd46a109c9b9f9608360177920a Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Wed Sep 25 14:57:58 2024 -0700 [gql] affix protocol version to e2e test (#19556) ## Description Hope this addresses ci test issues ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fe17fe2788c2c9c361d7dbb2d69dab3cf822faae Author: Adam Welc Date: Wed Sep 25 23:41:26 2024 +0200 [move-ide] On-hover improvements (#19437) ## Description This PR improves on several instance of on-hover. 1. Doc comments now preserves indentation ![image](https://github.com/user-attachments/assets/2380db0c-1619-4080-ba48-b438b1b7f0d5) 2. Clean up the way function signatures (in particular type params and regular params) are displayed on-hover instead of always displaying them on a single line: ![image](https://github.com/user-attachments/assets/17056202-4469-4ddb-a2be-a43564e7e0e2) 3. Use information about implicit modules/datatype when displaying type information on-hover to abbreviate types by omitting package (for implicit modules, e.g., `vector::singleton` instead of `std::vector::singleton`) or both package and modules (for implicit datatypes, e.g., `Option`, instead of `std::option::Option`) ![image](https://github.com/user-attachments/assets/ad639db4-a30a-4b70-852f-8614adca812f) ## Test plan All new and old tests must pass commit 897aab252bc3afe139063c20828e0bcad5f50a89 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Sep 25 16:11:38 2024 -0400 indexer restorer: simplify commands (#19552) ## Description - we made `archives` and `display` buckets public, thus restorer no longer needs gcs cred - remove un-necessary required args / env vars so that to restore from mainnet, only start-epoch & local snapshot download dir needs to be specified. ## Test plan local run of restorer --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c22271c935dcc2cb627dc56584f2a7dc459d3d4c Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Sep 25 15:57:28 2024 -0400 fn json rpc: retry on empty events (#19469) ## Description when querying a transaction with showEvents set, it's possible that the transaction effects are available while events are not, as events are outputs of fn local execution; thus, instead of returning empty events and making the whole transaction response incomplete, we should retry. In that sense, as long as fn responds, the response is complete. ## Test plan ci --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 34c9e3ef6fff67df7f6ec89b4ccee462fb111df2 Author: Jort Date: Wed Sep 25 11:16:36 2024 -0700 add compatibility trait to move-binary-format (#19450) ## Description Add a trait which will allow both the execution and CLI to gather the necessary information for checking upgrade compatibility. The current implementation supports determining layout, linking and enum variant problems but does not hold information about each instance where it occurs; furthermore, information is lost when mapped to an enum with no associated data. 
To ensure users can extract more information this new type will accumulate the struct, enum, and function's where each error occurs for upgrades, while preserving the previous error type for execution. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b816c4981e44bc5ba47900257332d8dda1a6dd5b Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Sep 25 10:36:43 2024 -0700 Fixes for compatibility checker (#19549) - Revert workaround that is no longer necessary - Support running without a metrics api key - Show diffs if the script fails commit c28a2130e4c67507b34e86bb7ccc92964000fd44 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Wed Sep 25 07:30:38 2024 -1000 Version Packages (#19434) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/deepbook-v3@0.7.0 ### Minor Changes - 7923ed5: Newest deepbook package constants ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 ## @mysten/sui@1.10.0 ### Minor Changes - 830b8d8: Introduce new naming scheme for named packages plugin ## @mysten/create-dapp@0.3.22 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 - @mysten/dapp-kit@0.14.22 ## @mysten/dapp-kit@0.14.22 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 - @mysten/wallet-standard@0.13.5 - @mysten/zksend@0.11.4 ## @mysten/deepbook@0.8.19 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 ## @mysten/enoki@0.4.3 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 - @mysten/zklogin@0.7.20 ## @mysten/graphql-transport@0.2.19 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 ## @mysten/kiosk@0.9.19 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 ## @mysten/suins-toolkit@0.5.19 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 ## @mysten/wallet-standard@0.13.5 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 ## @mysten/zklogin@0.7.20 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 ## @mysten/zksend@0.11.4 ### Patch Changes - Updated dependencies [830b8d8] - @mysten/sui@1.10.0 - @mysten/wallet-standard@0.13.5 Co-authored-by: github-actions[bot] commit 22fb154b24d3740d763112144e873da154d774ca Author: Petr Makhnev <51853996+i582@users.noreply.github.com> Date: Wed Sep 25 21:03:24 2024 +0400 [docs] Fixed typos and formatting (#19546) ## Description - Use `move` instead of `rust` for some code blocks - Fix alignment in `conventions.md` with standard 4 spaces - Fix minor formatting issues elsewhere commit c76e196213c550b6cfec2b7d3d635cd70cc8a454 Author: William Smith Date: Wed Sep 25 12:23:32 2024 -0400 [TrafficControl] Support allowlisting (#19242) ## Description In some 
cases, we may want to enable a more restrictive policy wherein the node must explicitly specify all IP's from which it will accept requests. Because `TrafficController` policies do not easily support allowlisting, this is instead supported by introducing a separate mode of operation for traffic controller, enabled by providing `allow_list` field in the `PolicyConfig`, which should map to a list of strings all parseable to `IpAddr`. When this config is present, we skip spawning a tally thread and ignore all calls to `TrafficController::tally`, and instead initialize an in memory allowlist of IPs. On subsequent calls to `TrafficController::check`, we check this list against the requestor IP. Dry run mode in this mode still works as expected, as do block metrics (any request that is not in the allowlist is tallied against the block metric). ## Test plan Added simtest --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 23fa67bdc4afe68518e8f3536151e221ed33a53b Author: Brandon Williams Date: Wed Sep 25 10:47:05 2024 -0500 rosetta: don't panic when accessing inputs (#19544) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9e19e5a3c4c717aaa2adbbb585de25bdc6c65b3c Author: gorpig <146006860+chris-gorham@users.noreply.github.com> Date: Wed Sep 25 10:23:32 2024 -0500 [sui-node][json-rpc] log origin headers for json-rpc (#19536) ## Description This PR adds origin header logging into the json-rpc middleware. ## Test plan Ran sui-node locally and tested RPC calls using suiscan.xyz custom node option. example log line output: `2024-09-25T01:55:05.077872Z INFO json-rpc-request{origin=https://custom.suiscan.xyz}:get_validators_apy: sui_json_rpc::governance_api: get_validator_apy` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0dfcfe9cd9aa057f9c9251732e87ebe7f159d585 Author: Denys Kozak <117740720+denyskozak@users.noreply.github.com> Date: Wed Sep 25 16:02:35 2024 +0100 [sdk docs] Add missed react import for dapp-kit doc (#19541) ## Description Add missed import useState from react lib for Connect Modal " Controlled example" code example (dapp-kit doc) ## Test plan Review Connect Modal "Controlled example", link (https://sdk.mystenlabs.com/dapp-kit/wallet-components/ConnectModal) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: Co-authored-by: Denis Kozak commit 87298641ad79f1bc5e7ce8870a3b45ea00dfd684 Author: Ashok Menon Date: Wed Sep 25 16:02:01 2024 +0100 indexer: add indices to aid pruning (#19543) ## Description All the transaction and events lookup tables are missing indices that allow the pruner to efficiently pick out the ranges of rows to remove. Note that because these indices are being added to existing tables, they are being added using `CONCURRENTLY IF NOT EXISTS`, and because of that, they need to each be in their own migration file (concurrent index creation cannot go in a DB transaction). ## Test plan ``` sui$ ./scripts/generate_indexer_schema.sh ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: Add indices to support efficient pruning by tx sequence number. - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e30c5fa39350548e87cba14ae360e45d95335ae4 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Wed Sep 25 07:17:38 2024 -0700 Set the right path to get a coin from faucet (#19537) ## Description As title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a219f198ca7b01d7ed9f6f692a9d770e6fbd1113 Author: Damir Shamanaev Date: Wed Sep 25 12:59:50 2024 +0300 [framework] Formatting: coin, token, balance, config, transfer 6/N (#19513) ## Description Final framework PR (excluding tests) in the formatting series. ## Test plan All tests must pass, framework must produce the same bytecode. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 63fa5c0079ceb5191bbf4eb47a63da5b83ecbb65 Author: Adam Welc Date: Wed Sep 25 09:23:20 2024 +0200 [move-ide] Added support for variable shadowing (#19462) ## Description This PR adds handling of shadowed variables to the trace viewer: - shadowed variables appear in separate scopes - shadowed scope appears when a variable with the same name as an already existing one is declared - shadowed scope disappears when all variables it includes stop being live Additionally, this PR (temporarily) suppresses on-hover information that VSCode shows during debug session. The reason for it is that the default VSCode behavior in this department breaks in presence of shadowed variables ## Test plan Tested manually commit fd5a0523c1d40e88193890127d24600000987983 Author: Xun Li Date: Tue Sep 24 21:48:15 2024 -0700 [Indexer] Support 2-day retention for objects_history table (#19318) ## Description Add support to override epoch retention for individual tables. This can be passed from the command line. 
It only supports partitioned table today. For non-partitioned tables it will take some wiring to make it work and not urgent. We need this today mainly to prune the objects_history table more aggressively. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 857de336cef8e2e580cfa9118bf86e085ec1c342 Author: Xun Li Date: Tue Sep 24 20:30:41 2024 -0700 [Indexer] Break dependency between pruner and epoch watermark (#19535) ## Description This PR makes a few improvements to the pruner: 1. It stops depending on the epochs table data range as the watermark for global pruning decisions. Instead, it keeps track of the next prune epoch and last seen max epoch locally in the task. Once we have the actual watermark table we probably could further improve it. 2. It also fixes an issue where we ignore failed pruning error. ## Test plan Unfortunately we don't have any tests for pruner. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f0591bdfb5ebc6db8804e3d6cd9da49b920577dd Author: Xun Li Date: Tue Sep 24 17:37:37 2024 -0700 [Indexer] Stop reading old system_state in epochs (#19532) ## Description This PR makes the old system_state column nullable, and stop reading from it. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5aac8768c84bcb089801b6f578ed80ac34bd8d5d Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Tue Sep 24 17:14:44 2024 -0600 [docs] trustless swap updates (#19409) ## Description Original PR melted on rebase. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Ashok Menon commit 12fbb11ce0b0f1a9001fded8e71b11a67864d72c Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Tue Sep 24 16:04:36 2024 -0700 add paddings to interstitial (#19534) ## Description BEFORE ![Screenshot 2024-09-24 at 1 52 40 PM](https://github.com/user-attachments/assets/8e462d8b-cbe4-462a-b63b-3084bebecb1b) AFTER ![Screenshot 2024-09-24 at 3 49 27 PM](https://github.com/user-attachments/assets/3e690903-be14-4b88-a591-f302c5ea7370) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2de49a40fb3880e19d4d7aacd8855370ee78f22e Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Sep 24 14:32:24 2024 -0700 Support concurrent threads locking the same transaction (#19526) Fix crash when two threads concurrently lock the same transaction. The new test case fails reliably if the fix is not present. 
commit b39e43d6030a1872600c033527be490269a66944 Author: John Martin Date: Tue Sep 24 13:49:47 2024 -0700 [sui-proxy] set 10s timeout for bridge metrics key query (#19527) ## Description some bridge endpoints take very long to return and eventually fail, this slows down the key fetching process commit ec8e921f95a83b6ead9bb16b0c19888c8488bfba Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Sep 24 13:31:05 2024 -0700 [bridge] add metrics push section to runbook (#19525) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f7d926b93a475cc256f49ccfc905d01633dd1e25 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Tue Sep 24 14:15:29 2024 -0600 [docs] Updated example and linked to source (#19278) ## Description Updates a broken SDK example. Need to eventually recreate in the sui repo for ease of maintenance; ticket created. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7923ed51f792137d2f148cec4161d1de3561e7e3 Author: Tony Lee Date: Wed Sep 25 03:31:31 2024 +0800 Deepbook SDK package constants (#19524) ## Description Deepbook SDK package constants update, coin IDs update ## Test plan How did you test the new or updated feature? Tested on testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ff17714bbd6823e504177c483bea4aa9dbad6faf Author: Manolis Liolios Date: Tue Sep 24 22:19:06 2024 +0300 [NS Indexer] adds README (#19511) ## Description Adds a README file to explain some of the env variables that were kinda unclear. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 68497e32faf369ede9bbfb7ab7487c46058ff3d2 Author: Brandon Williams Date: Tue Sep 24 09:49:17 2024 -0500 types: add CheckpointData to bcs sui.yaml description file commit 4b0e0aceee7c3a8382b6798b8db084a3925aafbf Author: gorpig <146006860+chris-gorham@users.noreply.github.com> Date: Mon Sep 23 18:55:02 2024 -0500 suins-indexer: support tls db connections by skipping server cert verification The default behavior of libpq is to skip server cert verification and our db requires a tls connection...so we'll do the same thing for the time being. commit a94f65567aa477f0e79cbf2d59391e56edbd7797 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Sep 23 17:07:07 2024 -0700 [bridge] fix token price index (#19495) ## Description self descrptive ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 08a4ef986fdf44a2233f010c5e3764ae888c54a2 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Sep 23 16:22:25 2024 -0700 [bridge] log last synced eth blocks and default to end block (#19507) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7336840e36b9cdc2aebf59467d3b05ac125ebf3a Author: Damir Shamanaev Date: Tue Sep 24 01:53:14 2024 +0300 [framework] Formatting: utils, object, tx_context, types, random 5/N (#19498) ## Description Continues the formatting. ## Test plan Tests must pass, framework must be compatible. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6a83f64ac0b2987830765c4b43059e500c8ff8a6 Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Tue Sep 24 06:50:33 2024 +0800 [consensus] Enable distributed vote scoring in testnet (#19503) ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [X] Protocol: Enable distributed vote scoring in testnet - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a3c1b54935f7e07afa1c37771f3b371ff66c5b9a Author: Damir Shamanaev Date: Tue Sep 24 01:01:26 2024 +0300 [framework] Formatting: collections and dynamic fields 4/N (#19497) ## Description Another PR in the formatting series, covers collection modules and dynamic fields. See also: - #19496 - #19463 - #19461 ## Test plan Tests must pass, framework must produce the same bytecode. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8d369a090f19e3292fbbc050d3d33b5fc540440a Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Mon Sep 23 14:45:48 2024 -0700 [indexer] Adjust transactions queries to the latest db schema (#19418) commit 29e63476ca2a273fd56e5fa91359823e40c49b33 Author: Damir Shamanaev Date: Tue Sep 24 00:17:22 2024 +0300 [framework] Formatting: crypto, test, kiosk 3/N (#19496) ## Description Formats 3 parts of the Sui Framework: `crypto`, `kiosk` and `test` Previous PRs: - #19461 - #19463 ## Test plan All existing tests pass. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d2a5f4449365510ff6179a6dc1fa9fa7fd49f97f Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Mon Sep 23 13:40:56 2024 -0700 [cli] Make prerender clever errors to use context instead of client (#19506) ## Description Fixes the issue that if the active env cannot be connected to, `active-env` and `envs` do not display the output but instead a low level network error appears. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Fixes an issue with `active-env` and `envs` commands that were not showing an output if the active RPC url was not correct. - [ ] Rust SDK: - [ ] REST API: commit 830c42b8fd23e603adb2b2806f53afa9c87b8632 Author: Eugene Boguslavsky Date: Mon Sep 23 13:10:00 2024 -0700 Use PAT token for release creation (#19505) ## Description Use PAT token for release creation ## Test plan Will be testing this next week commit 50675d5248d047a63306b4baa85cf655b2a95b9c Author: Jort Date: Mon Sep 23 12:24:46 2024 -0700 fix(rpc): reword errors to transaction validator signing (#19504) ## Description SUI JSON RPC returns errors which suggest a transaction was executed, however transactions which throw `NonRecoverableTransactionError`, do not execute the transaction but instead attempt to retrieve validator signatures. This change updates the wording to better describe which step this error is thrown. 
## Test plan Existing tests sufficiently cover this change. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0caf315994fff737986ca7193817b2485a9e9516 Author: William Smith Date: Mon Sep 23 14:52:46 2024 -0400 [object store] Disable more timeouts (#19486) commit 7c9b1d1f091e810e8cf22c913589a8f43c215604 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Sep 23 09:51:53 2024 -0700 Hold reconfig lock while handling transaction (#19320) This fixes a crash where we clear all pending locks at reconfig (https://github.com/MystenLabs/sui/blob/dec33d0a303a7f83e778fcc5a136ab9776162e68/crates/sui-core/src/execution_cache/object_locks.rs#L121) while trying to acquire locks for a transaction. Without this fix, the node can reconfigure while we are trying to acquire locks for a transaction. 
If `clear_locks()` linked above is called while we are trying to call `clear_cached_locks` at https://github.com/MystenLabs/sui/blob/74d6d564970406e1b3191a07cf207af1ab6b3356/crates/sui-core/src/execution_cache/object_locks.rs#L245, we hit the panic at https://github.com/MystenLabs/sui/blob/74d6d564970406e1b3191a07cf207af1ab6b3356/crates/sui-core/src/execution_cache/object_locks.rs#L160 commit 283f954adf25e825558ee6e3806406f8c9f3550b Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Mon Sep 23 09:46:21 2024 -0700 [cli] Better error message when a sui config folder exists without genesis metadata (#19487) ## Description Improves the error message when running `sui start` when a sui config folder exists without genesis metadata. ## Test plan Existing tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Improved the error message when running `sui start` when a sui config folder exists without genesis metadata. 
- [ ] Rust SDK: - [ ] REST API: commit 16e6c7b69d712712085013afab128d38d09e8d1b Author: Brandon Williams Date: Mon Sep 23 11:05:34 2024 -0500 discovery: limit the number of peers shared during discovery (#19501) commit aa71f238844d67ac9fc40849443e370b83306fad Author: Sadhan Sood <106645797+sadhansood@users.noreply.github.com> Date: Mon Sep 23 08:46:24 2024 -0700 Add an option to disable fullnode pruning in test cluster (#19490) ## Description Currently we aggressively prune FNs by default, which causes tests that depend on reading checkpoints from the rest api to fail ## Test plan Existing tests commit 41961edfd81e980f526c72d8e60b5f81bcbc755a Author: William Robertson Date: Mon Sep 23 10:31:47 2024 -0400 Fix failing `pnpm audit` CI job (#19500) ## Description This PR fixes two high level audit warnings to make the `npm audit` CI job pass again: image ## Test plan - The audit CI job passes now + there are no high-level warnings locally - The docs site still works --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d43c532435b9bb6e9e57babd66480120ab318126 Author: Krešimir Klas Date: Mon Sep 23 13:58:11 2024 +0200 [move lockfile] rename dependency `name` field to `id` and add a separate `name` field to store manifest name (#19387) ## Description As discussed in https://github.com/MystenLabs/sui/pull/14178#issuecomment-1924760426 (second and third point), a few changes to the lockfile: - the `name` field in `dependencies` is renamed to `id` to better reflect the meaning in the dependency graph (the packages are discriminated by their identifier, as resolved by the hook, which is not necessarily their manifest name) - added a `name` field which will store the dependency manifest name (this is needed to show user-friendly error messages using the package manifest name instead of identifier which may be confusing) - bumped lockfile version to 3 This is part of the work to enable compiling against on-chain dependencies https://github.com/MystenLabs/sui/pull/14178. cc @rvantonder @amnn ## Test plan All changes are covered by existing unit tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Introduce lock file version 3, which renames a dependency's `name` field to `id` and introduces a separate `name` field that stores the package's name from its manifest for improved error reporting. Older lock files will be ignored when resolving dependencies so that this information is always available. 
- [ ] Rust SDK: - [ ] REST API: commit 0816afc69e4e5ccf343a494226dc75aab665ec8f Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Sep 23 00:54:52 2024 -0700 [bridge] add height metrics (#19488) ## Description This PR reworks `last_synced_sui_checkpoints` and `last_synced_eth_blocks`. * `last_synced_sui_checkpoints`: when SuiSyncer gets events with false `has_next_page`, it means it has synced to the tip of the blockchain. Then it notifies the metrics update task to query the latest checkpoint from chain and update it. Apparently this is not accurate, but it is the best we can do today, because event query does not return checkpoint number. In the future when we switch to graphql, checkpoint num will be available and we can get rid of this. * Also note that when the bridge node is catching up, `has_next_page` will be true and the metrics is not going to be updated. * `last_synced_eth_blocks`: make it a vector to track all contract progresses. ## Test plan added a unit test for sui side. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 795cf17bf921859d0bb1671ba5be99c48b74f571 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sun Sep 22 20:40:01 2024 -0700 [bridge] put token id and sui decimal in deploy configs (#19491) ## Description 1. Add `suiDecimals` and `tokenIds` to config so we don't need default values for sui btc and so on. 2. the json parsing logic in forge does not work for the updated struct. I created `parseDeployConfig` to parse each value individually. 3. 
because of 2, rename `supportedChainIDs` and `WETH` for json naming conventions. ## Test plan 1. all evm tests 2. tested deployed locally with different configs 4. e2e tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1fc1412deb50ba309a6a3c110703219701004605 Author: Damir Shamanaev Date: Sun Sep 22 22:37:01 2024 +0300 [stdlib] Formats tests in MoveStdlib 2/N (#19463) commit 8451dd038d0a1600d16d51a5ec1cdf67dc7c905b Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 21 18:16:33 2024 -0700 [bridge] Update abi (#19483) ## Description update bridge abi for the recent changes in solidity code ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1d4df7f82e6f68464776d12bf30626b2bb6fd2e4 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 21 17:49:19 2024 -0700 [bridge] sort sui bridge abi (#19482) ## Description This PR sorts sui_bridge.json based on name field. This is to make future abi changes more readable. 
jq -S 'sort_by(.name)' sui_bridge.json > sui_bridge.sorted ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit acd910dd30a6b68f4cd44870e22c30d27a06e2b4 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 21 17:48:34 2024 -0700 [bridge] sort bridge committee abi (#19481) ## Description This PR sorts bridge_committee.json based on name field. This is to make future abi changes more readable. jq -S 'sort_by(.name)' bridge_committee.json.json > bridge_committee.json.sorted ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e97b2f883e960d04e76c3e3eb3f84fe7ae0db77d Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 21 17:47:56 2024 -0700 [bridge] sort bridge committee upgradeable abi (#19480) ## Description This PR sorts bridge_committee_upgradeable.json based on name field. This is to make future abi changes more readable. 
jq -S 'sort_by(.name)' bridge_committee_upgradeable.json > bridge_committee_upgradeable.sorted ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d500750d18c045caef4e55e73b6fd41c6467159d Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 21 17:47:42 2024 -0700 [bridge] sort bridge limiter abi (#19479) ## Description This PR sorts bridge_limiter.json based on name field. This is to make future abi changes more readable. jq -S 'sort_by(.name)' bridge_limiter.json > bridge_limiter.sorted ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 31419273dab6676369e78ae77d0e9167dbfde3d9 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 21 17:45:26 2024 -0700 [bridge] Sort bridge vault abi (#19478) ## Description This PR sorts bridge_vault.json based on name field. This is to make future abi changes more readable. jq -S 'sort_by(.name)' bridge_vault.json > bridge_vault.sorted ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8d4338be25b8b66b750bd2a987a8932a66891adc Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 21 17:38:04 2024 -0700 [bridge] sort bridge config abi (#19477) ## Description This PR sorts bridge_config.json based on name field. This is to make future abi changes more readable. jq -S 'sort_by(.name)' bridge_config.json > bridge_config.sorted ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b4176b6e7109ccd56c59218802a5b860899d4243 Author: Bridgerz Date: Sun Sep 22 00:19:37 2024 +0100 Update Events to include message nonce (#19475) ## Description Updated the governance action events to include the message nonce for indexing purposes. ## Test plan Unit tests --------- Co-authored-by: longbowlu commit 5efd2c4b32df36983220a8a3ce25bd148a610f47 Author: José Cerqueira Date: Sat Sep 21 22:07:08 2024 +0800 feat: update sui-move new to use module labels (#19485) ## Description Updated the sui move new command to use module labels. ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5afef3c9e60ed59f3f99f1af548fad48dc300265 Author: mamos-mysten <122397493+mamos-mysten@users.noreply.github.com> Date: Fri Sep 20 21:47:58 2024 -0700 fix: show prior transaction warning onChange (#19472) ## Description Adjusts the Send Form to show the "No prior transactions" warning onChange instead of onBlur. This helps ensure that the user sees it before moving onto the Review screen. https://github.com/user-attachments/assets/743c58f8-f9c5-4530-8677-6511530ee98a commit 260b0df922d4c38902887c9dc6a90094413a1bed Author: William Smith Date: Fri Sep 20 22:30:37 2024 -0400 [formal snapshots] Fix retry logic in GCS and improve logging (#19470) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3ced52d87a6748ebaf7cdf79b1a679ccfa23d472 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Sep 20 14:43:38 2024 -0700 Improve metrics push (#19460) ## Description A few improvements to metrics push logic. 
## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 667874c3e623857501ec5528553f001552e32a2d Author: John Martin Date: Fri Sep 20 14:05:28 2024 -0700 Include the aws region in example config (#19466) ## Description follow up to #19464 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 17aebf1e01c108e21f977f984481bb9b2c6e514e Author: John Martin Date: Fri Sep 20 13:59:45 2024 -0700 [sui-node] Add health check router to jsonrpc server (#19433) ## Description Copying over the health check from the https://github.com/MystenLabs/sui/blob/main/crates/sui-rest-api/src/health.rs into a route that's enabled by default on the fullnode. This should be removed once the REST api is enabled for all fullnodes ## Test plan tested locally usage example (on a local sui-node starting from genesis): ``` $ curl localhost:9000/health up% $ curl 'localhost:9000/health?threshold_seconds=10000' down% $ curl 'localhost:9000/health?threshold_seconds=1000000000' up% ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [x] JSON-RPC: feature: adds a configurable health check endpoint for json rpc fullnodes to report down if too far behind - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0c3ec2de9908c3b6e4d4f0c62636e82596ef3629 Author: Damir Shamanaev Date: Fri Sep 20 23:13:08 2024 +0300 [stdlib] Formats MoveStdlib package 1/N (#19461) ## Description Uses module label in the MoveStdlib, runs the current build of the formatter on the code. ## Test plan All tests must pass, no snapshot regen should be required. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d87662a20dec3d61774e36d22825509197931eb6 Author: John Martin Date: Fri Sep 20 10:03:20 2024 -0700 add no sign request to archives docs (#19464) ## Description small docs update so users don't need to supply aws creds --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 96ee5fabf8d5809549a4f9750a457ab6584243dd Author: nikos-kitmeridis <145983019+nikos-kitmeridis@users.noreply.github.com> Date: Fri Sep 20 16:00:04 2024 +0300 [rosetta] Replace coins hashmap cache with an lru cache (#19393) ## Description Replaces coins hashmap cache with an lru cache ## Test plan Unit tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 42aa935a805605658fe93944ef109b48f5830d69 Author: Anastasios Kichidis Date: Fri Sep 20 12:42:08 2024 +0100 [Consensus] Garbage Collection - 1 (#19315) ## Description This is the first part that implements Garbage Collection for consensus. Compared to Narwhal/Bullshark where the GC round was calculated and driven by the consensus component itself, here the single threaded nature of the system and the fact that we do have DagState - which is shared amongst all the components - allow us to base the gc round calculations purely on the latest committed round as derived from DagState. On this PR: * `Linearizer` has been refactored to respect the `gc_round`. When linearizer tries to commit leader `R`, it attempts to commit everything up to the `gc_round` that has been set from leader `R-1` (as already discussed this is the desired approach). * `DagState` is calculating the current `gc_round` based on the latest commit. 
* `gc_depth` has been added as a protocol config variable * Basic testing has been added to `Linearizer` Next steps: - [ ] BlockManager to respect the `gc_round` when accepting blocks and trigger clean ups for new gc rounds - [ ] Skip blocks that are received which are `<= gc_round` - [ ] Not propose ancestors that are `<= gc_round` - [ ] Subscriber to ask for blocks from `gc_round` when `last_fetched_round < gc_round` for a peer to prevent us from fetching unnecessary blocks - [ ] Implement new timestamp approach so ancestor verification is not needed - [ ] Re-propose GC'ed transactions (probably not all of them) - [ ] Harden testing for GC & edge cases ## Test plan CI/PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 35dacad303ea0829892eba0793fb89d8a7baca4c Author: Xun Li Date: Thu Sep 19 20:25:24 2024 -0700 [Indexer] Add raw_checkpoints table (#19451) ## Description This PR adds a raw_checkpoints table that stores the raw BCS data for each checkpoint. This will serve as the KV table for checkpoints. ## Test plan CI. Once I add some code for the reads there will be some test coverage. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 06b1e4df8ce3ee2707796c56b14fa080472d8d16 Author: Eugene Boguslavsky Date: Thu Sep 19 18:56:34 2024 -0700 Sui v1.35.0 Version Bump (#19459) ## Description Sui v1.35.0 Version Bump ## Test plan 👀 commit 8e09680b6df847ddff4f6edb568b796ea88f886d Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Sep 19 21:31:45 2024 -0400 indexer: drop df columns and refactoring (#19308) ## Description df_ columns except df_kind references have been removed from both indexer reader and graphql, this pr removes them from the ingestion path. to make it happen, some refactoring was necessary, previously `StoredObject` was the `From` source Stored* of `objects_history` and `objects_snapshot`, this pr changes the source to `IndexedObject`, which is more intuitive as `StoredObject` is supposed to be coupled with `objects` table while `IndexedObject` is table agnostic. ## Test plan ci and local run --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Will Yang commit 003ac15a2a9160b4f96ccb32ae12644ef555ba68 Author: Eugene Boguslavsky Date: Thu Sep 19 18:14:44 2024 -0700 Sui `v1.34.0` framework bytecode snapshot (#19458) ## Description Sui `v1.34.0` framework bytecode snapshot ## Test plan `cargo run --bin sui-framework-snapshot` commit dcdf415602ddffdb6325b9bde5c425d67123c04f Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Sep 19 15:38:18 2024 -0700 [Consensus] reduce commit sync fetch parallelism (#19449) ## Description For validators with limited bandwidth, too much parallelism can increase fetch jitters and slow down the overall progress. Before we make the algorithm more adaptive, start with less parallelism by default. ## Test plan PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 72603de6260795d5c9ed60f885a4ebe717a9430e Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Sep 19 16:08:29 2024 -0400 indexer restore 3/N: restore checkpoints and chain identifier (#19341) ## Description title ## Test plan local run and verify checkpoints and chain_identifier table. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b7182a940d296bbee6aaef94cea384832a736e32 Author: Ashok Menon Date: Thu Sep 19 15:57:46 2024 +0100 docs(graphql): Update deprecation notices for fields to be removed ## Description Introduce deprecation notices for `recvAddress`, `inputObject`, `changedObject`, indicating that they will be removed. ## Test plan CI + :eyes: commit 58127579f895f4edeb0868d91283b2f436872784 Author: Ashok Menon Date: Thu Sep 19 15:54:50 2024 +0100 feat(indexer): Prune `tx_affected_objects` ## Description Prune `tx_affected_objects` (added in #19447) along with other transaction index tables. ## Test plan TBD. commit 0ae651460263df8893fc9e2906c7cb4dab63abeb Author: Xun Li Date: Thu Sep 19 12:27:41 2024 -0700 [Indexer] Add json serialized SuiSystemStateSummary to epochs table (#19428) ## Description The existing system_state column is a BCS serialization of a JSON-RPC type, which is not evolvable. This PR adds a new column that is the JSON serialization of SuiSystemStateSummary without BCS. It also fixes two bugs along the way: 1. We were storing the wrong version of the system state, offsetting by 1 epoch. This PR fixes it by storing the right version of the json column at the beginning of the epoch instead of at the end. 2. We were ignoring a deserialization error and swallowing it in some places. Made that throw instead. ## Test plan Added a debug assert to see that this column is populated with the correct epoch. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5cf3b93745a8f2d6900630688825e28b1937bbd3 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Sep 19 14:46:37 2024 -0400 indexer restore 2/N: restore display table (#19324) ## Description title ## Test plan upload table to remote and restore from remote to make sure that tables match ``` (SELECT * FROM display EXCEPT SELECT * FROM display_backup) UNION ALL (SELECT * FROM display_backup EXCEPT SELECT * FROM display); ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 94d68bfe8529bcf5108e786cc68590aeb5850a56 Author: nikos-kitmeridis <145983019+nikos-kitmeridis@users.noreply.github.com> Date: Thu Sep 19 20:22:55 2024 +0300 Adds testnet and mainnet support for one provider (#19444) ## Description Adds testnet and mainnet support for one provider ## Test plan Devnet tests, according to the predefined procedure --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d783e8009c02748aa00b7aea71a29ae1db461c24 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Thu Sep 19 11:06:35 2024 -0600 [docs] libpq updates (#19448) ## Description Updated the requirement for libpq. Also, highlighting the quick install method. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ee1af5fd6f57576b271c2fb234239ce50d41b1e0 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Sep 19 09:53:58 2024 -0700 Test MAX_PROTOCOL_VERSION is indeed the max supported version (#19405) ## Description Otherwise it is relatively easy to miss updating the max protocol version. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6d8ceed4d727a6c9ce9f5879b3cd1b2e8605affa Author: Ashok Menon Date: Thu Sep 19 17:39:15 2024 +0100 fix(core): deserialize malformed types in tx (#19446) ## Description Make it possible to represent a transaction that includes a bad type (a type that does not parse correctly). Such transactions will fail to execute but may exist on-chain and so we need to be able to represent them. To avoid confusion in future, we will perform an early (signing) check for type correctness, preventing more examples of this pattern from appearing on-chain. GraphQL has also been updated so that it can represent such a transaction without emitting an error. ## Test plan ``` sui$ cargo simtest sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-e2e-tests ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Transactions that include unparseable types will not be signed by validators from protocol 60 onwards. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: `MoveType` can fail to provide a layout or abilities in cases where it represents an unparseable type. 
- [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f3825b1117027a81d3db502036a62f48946045f3 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Sep 19 12:11:32 2024 -0400 indexer formal restore 1/N: restore packages and move objects (#18886) ## Description first of the stack PR to restore indexer from sui archives & formal snapshot ## Test plan test locally with GCS buckets, local snapshot dir and local PG server ``` ██████████████████████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 75 out of 555 move object files restored (Restored 569395 live move objects and 0 wrapped or deleted objects from epoch_500/1_53.obj)2024-09-10T14:21:39.854508Z INFO sui_indexer::restorer::formal_snapshot: Finished downloading move object file Path { raw: "epoch_500/1_125.obj" } 2024-09-10T14:21:44.111960Z INFO sui_indexer::restorer::formal_snapshot: Start persisting 565556 move objects from epoch_500/1_125.obj 2024-09-10T14:22:12.142760Z INFO sui_indexer::store::pg_indexer_store: Persisted 563126 objects snapshot elapsed=7058.20103075 2024-09-10T14:22:12.142830Z INFO sui_indexer::restorer::formal_snapshot: Finished persisting 0 wrapped or deleted objects from epoch_500/1_86.obj [10:46:20] ``` need to benchmark in the production env --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 959bf75e4a1f1c7d28b007eab3c9473415b329ac Author: nikos-kitmeridis <145983019+nikos-kitmeridis@users.noreply.github.com> Date: Thu Sep 19 18:48:59 2024 +0300 Adds testnet and mainnet support for playtron provider (#19445) ## Description Adds testnet and mainnet support for playtron provider ## Test plan Devnet tests, according to the predefined procedure --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b8d819468a0f545f4d08a7b1e43bc81153d07041 Author: Xun Li Date: Thu Sep 19 08:29:25 2024 -0700 [Indexer] A few improvements to backfill tool (#19441) ## Description This PR adds 3 improvements to the sql backfill tool: 1. It allows ON CONFLICT DO NOTHING, so that we can safely backfill gaps without being too precise. 2. It tunes down the default concurrency and chunk size, and allows for override through command line args. 3. It prints out the minimum in-progress checkpoint so that if it ever stops for some reason, you can restart using that number. ## Test plan Run again locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f9698a6d1fd4d1d4133d5dfbbcc4a01928406dcb Author: Ashok Menon Date: Thu Sep 19 14:54:05 2024 +0100 feat(indexer): index tx_affected_objects (#19447) ## Description Similar to #19355, introduce `tx_affected_objects` table -- a combination of `tx_input_objects` and `tx_changed_objects` which will both eventually be removed in favour of the new table. ## Test plan ``` sui$ cargo build -p sui-indexer ``` Tests based on reading this field will be included with the PR introducing changes to GraphQL. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: Index the objects affected by a transaction (either because they are an input object or are changed by the transaction). - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3ac44edd34defe0c5c902708b1974358ab2e30df Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Wed Sep 18 22:19:24 2024 -0700 [bridge] add bridge node info to validator run book (#19429) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 695ff9a35381b3258c72c74c02cef486d989b79c Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Wed Sep 18 22:19:05 2024 -0700 [bridge] add uptime metrics for bridge node (#19411) ## Description As title. ## Test plan tested by running a bridge network locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2d7c37c3fd7a506aeef924fe855c5caeb0388459 Author: Andrew Schran Date: Thu Sep 19 02:01:08 2024 +0100 Add help text to sui start for ProtocolConfig overrides (#19279) commit 940149d7d6cccae3f7c4675ec32eaebfa074df1d Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Wed Sep 18 17:49:51 2024 -0700 [cli] Remove indexer feature for `sui` and remove the `sui-pg` binary (#19436) ## Description Recent improvements on GraphQL and Postgres code removed the dynamic linking to `libpq`. Due to this linking (GH actions was linking to postgres@14), we had to have a separate `sui-pg` binary that was built with the `indexer` feature such that one can have access to `sui start --with-indexer` and `--with-graphql` due to the required dependencies to Postgres. This PR removes the `indexer` feature, all the instances of building the binary with `--features indexer` in workflows, actions, etc, and updates the documentation (docs + sui-test-validator crate). This is possible because `libpq` is no longer a dependency that gets dynamically linked to the binary. 
Note that in order to use those flags, a running Postgres DB is still required just like before. ## Test plan Existing tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: the `indexer` feature was removed from the `sui` crate as the dynamic linking to `libpq` was removed. Therefore, the `sui-pg` binary will not be part of releases anymore. This `sui-pg` binary was used for starting a network with `--with-indexer` and `--with-graphql` flags. These commands will still work as before and it is still required to have installed a Postgres database. If you used `sui-pg` binary previously, you can simply use `sui` binary from this version on. - [ ] Rust SDK: - [ ] REST API: commit 83f76b1a9ccc54a3ad4c4547e5aac1d53d12c4c3 Author: Ashok Menon Date: Thu Sep 19 00:10:27 2024 +0100 fix(graphql): flakiness in pruning test (#19440) ## Description Fix some flakiness in the `prune.move` by removing the initial checkpoints query which may witness different degrees of pruning. ## Test plan Run the E2E tests in a loop -- previously this would trigger the failure after some time (usually fewer than 10 runs) and after this change, the same thing does not happen: ``` sui$ while true; do cargo nextest run -p sui-graphql-e2e-tests done ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3c1c5ef2cd1da89ac5ac086e98fe75846aa6c2c4 Author: Ashok Menon Date: Wed Sep 18 23:35:16 2024 +0100 feat(json-rpc): get transaction block raw effects (#19438) ## Description Add the ability to get transaction effects in BCS form, from JSON-RPC's read path, by passing the `showRawEffects` option. ## Test plan ``` sui$ cargo build --bin sui --features indexer sui$ $SUI start --force-regenesis --with-indexer --with-graphql --with-faucet ``` Then in another session: ``` sui$ $SUI client faucet ``` Find the transaction `$DIGEST` of the faucet transaction, and then fetch it with: ``` curl -LX POST "http://localhost:9000" \ --header 'Content-Type: application/json' \ --data-raw '{ "jsonrpc": "2.0", "method": "sui_getTransactionBlock", "id": 1, "params": ["'$DIGEST'", { "showRawEffects": true }] }' | jq . ``` And corroborate it against the following GraphQL query: ``` query ($digest: String!) { transactionBlock(digest: $digest) { effects { bcs } } } ``` Which can be requested at `localhost:9125`. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [x] Nodes (Validators and Full nodes): `sui_getTransactionBlock` and `sui_multiGetTransactionBlock` JSON-RPC endpoints will now heed the `showRawEffects` option, and return the BCS representation of the transaction effects. 
- [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 81e9fdbcd7093e4dbf1ef30c66a55307ea99ff9f Author: Ashok Menon Date: Wed Sep 18 23:00:56 2024 +0100 feat(graphql)!: deprecate tx signer filters (#19430) ## Description `TransactionBlockFilter.signAddress` and `AddressTransactionBlockFilter.SIGN` both claim to support filtering by sender and sponsor, but this is not true, they only support sender, and they should not be adapted to also filter by sponsor because we rely on there being only one sender per transaction to ensure filters that combine sender with something else remain both space and time efficient. ## Test plan Existing tests and CI, plus some new tests to make sure the new filter options work, and interactions between the old and new filters work. ``` sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-e2e-tests ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Deprecates `TransactionBlockFilter.signAddress`, replacing it with `TransactionBlockFilter.sentAddress` which behaves identically. Similarly `AddressTransactionBlockRelationship.SIGN` is deprecated and replaced by `AddressTransactionBlockRelationship.SENT`. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f2ab91ecea73c739657a158d1ec11d7612573507 Author: Joe Hrbek <123987499+suiwombat@users.noreply.github.com> Date: Wed Sep 18 16:50:55 2024 -0500 [sui_proxy/pod health] (#19416) ## Description * add a route for pod health in k8s i am using the consumer_operations_submitted value but more values can be added later if we need to. 
when this value is non-zero, it means the service is processing data correctly and can be used to consider it healthy enough to handle traffic. it will be implemented as a liveness check. ## Test Plan local --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fd3ffac8b05fcbe056e84156cfb6311a88fa31fe Author: Brandon Williams Date: Wed Sep 18 16:50:10 2024 -0500 rest: avoid panic when converting from TypeTag (#19431) commit 70ff0c7a5fce042d57dbc94451fd540620769d38 Author: Adam Welc Date: Wed Sep 18 20:58:12 2024 +0200 [trace-view] Support for differently named variables moving forward (#19394) ## Description This PR is the first in the series of PRs aiming to implement support for inspecting variables when viewing the trace (splitting this into multiple PRs to make the reviewing easier). 
The current focus is on primitive values (no structs/enums or references), but even with this limited focus it leaves the following work to subsequent PR: - support for shadowed variables (a variable declared in an inner scope/block that has the same name as the one declared in the outer scope/block) - support for moving backwards in trace ## Test plan Tested manually to make sure that things work correctly commit 23ee1e84a5859154084c5c80f52aae3233fce269 Author: Bridgerz Date: Wed Sep 18 18:46:45 2024 +0100 Bridge evm implementation initialization (#19424) ## Description Addressing: https://dashboard.hackenproof.com/manager/sui/sui-protocol/reports/SUIPR-150 Update the CommitteeUpgradeable contract to disable all upgradeable bridge contracts' implementations from initialising. ## Test plan Added a unit test. commit 830b8d85b7ee5f5bb4c140d2d980db9f4e043ae6 Author: Manolis Liolios Date: Wed Sep 18 20:22:03 2024 +0300 [SDK][MVR] Move to new naming standard (#19432) ## Description 1. After the recent decision to change our naming scheme to `{suins_name}/{app}/{optional_version}`, I am updating the plugin to also match the expected naming. 2. Also parse `MakeMoveVec` types. ## Test plan Updated tests to match this, added extra test case for `MakeMoveVec` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4f5756569a98227bbd362d94ee6cb00882771c64 Author: Ashok Menon Date: Wed Sep 18 16:59:04 2024 +0100 feat(graphql): Filter by affected address (#19365) ## Description Add `TransactionBlockFilter.affectedAddress` as a way to filter transactions by their relationship to an address without worrying about what kind of relationship that is. ## Test plan New E2E ## Stack - #19363 - #19371 - #19402 - #19403 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0b276e5a532bcc040d28b83d0b2c4326fa3413a0 Author: Brandon Williams Date: Wed Sep 18 06:39:22 2024 -0500 types: failable conversions to/from sdk and core types (#19414) commit d12505f8edd347df45ed40e967f25bda452f37a5 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Tue Sep 17 21:36:06 2024 -0400 [kv store] explicitly disable for testnet (#19426) ## Description disable kv store for testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 30048a065178b7af8487733756851583d1ee5a5d Author: Joe Hrbek <123987499+suiwombat@users.noreply.github.com> Date: Tue Sep 17 18:32:55 2024 -0500 [sui-proxy/mypkg] add mypkg (#19397) ## Description add mypkg target ## Test plan locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 66262619c9f2c2b4bf2f7fb5974931870ade961f Author: John Martin Date: Tue Sep 17 16:14:44 2024 -0700 change extract_bridge errors to warn (#19420) ## Description logging level change --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 94d9e01ffce772b9edb8c9e3ed6b3cfd7f0838eb Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Tue Sep 17 16:12:23 2024 -0700 Revert "[cli] Switch to `WaitForEffectsCert` for CLI (#19375)" (#19423) ## Description This reverts PR #19375. Unclear how to handle `waitForLocalExecution` in the CLI for now. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5563665da28442f94e00bfdfa3b27d69d376a8a7 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Sep 17 15:27:47 2024 -0700 [bridge] handle eth events in monitor (#19389) ## Description 1. in `bridge.move` rename `ETokenAlreadyClaimed` to `ETokenAlreadyClaimedOrHitLimit` and update comments accordingly. 2. implements `handle_eth_events`, mostly bumping metrics for governance action events 3. use a macro to update sui side metrics as well 4. adds `UpdateRouteLimitEvent` from Sui. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bf86ff934b5323e371e8aff8431647cb173f0ea7 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Tue Sep 17 14:28:18 2024 -0700 [cli] Switch to `WaitForEffectsCert` for CLI (#19375) ## Description Added a new function `execute_tx` that is used only in the CLI that uses `WaitForEffectsCert` instead of `WaitForLocalExecution` to align with the deprecation of waiting for local execution. Also added the polling for transaction like in https://github.com/MystenLabs/sui/pull/18996/files ## Test plan Existing tests. 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: The CLI switched to using `WaitForEffectsCert` in the quorum driver for transaction execution. Due to this, it simulates `WaitForLocalExecution` via polling as JSON RPC ignores `WaitForLocalExecution` requests now. - [ ] Rust SDK: - [ ] REST API: commit 95f0a4daa7ea8c95cc9cbe798be84e2c31e0abb3 Author: Xun Li Date: Tue Sep 17 12:12:22 2024 -0700 [indexer] Use full_objects_history in reads (#19290) ## Description This PR queries the full_objects_history table for objects query. It does so in a backward-compatible way. We always return data from the objects_history table if it's found, and otherwise we return from the full_objects_history table. ## Test plan Added e2e test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bce84008a815f94e8ab9361e9ac73ca68c41092e Author: Ashok Menon Date: Tue Sep 17 18:21:54 2024 +0100 test: adds gas payment to transactional test runner ## Description Add the ability to specify an explicit gas payment object to a programmable transaction in transactional test runner. ## Test plan TBD -- to be used in a future test. 
commit 6691ca34d718079118b1dcf265be73294334c1b1 Author: Ashok Menon Date: Mon Sep 16 18:51:11 2024 +0100 test: adds sponsor to transactional test runner ## Description Add a way to create programmable transactions with gas from a sponsor address, rather than a sender address ## Test plan TBD -- will be used in a follow-up PR. commit 5fca65afaaca94d380d5e0708ae689b8bf79b9bb Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Sep 17 11:31:34 2024 -0700 [bridge] let monitor subscribe to eth governance events (#19380) ## Description This PR lets `Monitor` subscribe to eth governance events as well. Next PR (https://github.com/MystenLabs/sui/pull/19389) implements the actual handling of these events. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e7b77404192af11c6001dcd1b5594b64414e901b Author: Ashok Menon Date: Tue Sep 17 19:01:31 2024 +0100 chore(graphql): Align all e2e tests on protocol 51 (#19402) ## Description Some of these tests are on older versions because they landed around the time of the last bump. Bringing everything in line with protocol 51. ## Test plan ``` $ cargo nextest run -p sui-graphql-e2e-tests ``` ## Stack - #19371 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 493efd3d75cf8516a6568960b5daebb23775e83a Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Sep 17 10:33:44 2024 -0700 Fix protocol config after patching 1.33 (#19417) ## Description Now protocol version 59 will be dominant on `testnet`, update protocol config on `main`. ## Test plan ``` eugene@eugene-dev ~/code/sui/scripts/compatibility (tmw/config-fix) $ ./check-protocol-compatibility.sh testnet .... running 1 test test test::snapshot_tests ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 4 filtered out; finished in 0.48s ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6395d539636f9c0220c23db08f40eba80f0522d8 Author: Ashok Menon Date: Tue Sep 17 18:19:25 2024 +0100 ci(graphql): tests for staging variant (#19371) ## Description Set-up testing for `staging` variant of `sui-graphql-rpc`: - Split E2E tests into `stable` and `staging` variants (each in their own sub-directories -- existing tests are all `stable`). Only run the stable tests by default, and additionally run the staging tests if the `staging` feature is enabled. - Generate a separate `staging` schema and snapshot test. - Add a CI step to run GraphQL tests with the staging flag enabled. ## Test plan ``` sui$ cargo nextest run --profile ci --features staging \ -E 'package(sui-graphql-rpc)' \ -E 'package(sui-graphql-e2e-tests)' ``` And also check CI. 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduce `staging.graphql`. This schema includes changes that are being developed and tested but have not been productionised yet. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a725da97959f061d511ae174e4b498487e6987ec Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Tue Sep 17 08:07:34 2024 -0700 [GraphQL] Remove version related routes (#19410) ## Description Removes the version related routes. ## Test plan Existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: GraphQL only supports one version instead of `beta, stable, legacy`. This change removes the previous routes and only allows for `/` and `/graphql`, which will always point to the latest version. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 02dbe53ded9ef21ddbb90d0e4caa5e8a4ad3fd89 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Tue Sep 17 10:23:15 2024 -0400 Revert "repair coin index" (#19406) ## Description This reverts commit bb778828e36d53a7d91a27e55109f2f45621badc It was a temporary fix for one release only --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c092832b3cc5d0c0eb08ee23ccb50a6af3e1c15f Author: Todd Nowacki Date: Mon Sep 16 19:29:23 2024 -0700 [move-compiler] More lint filtering (#19364) ## Description - Updated filtering for absint lints ## Test plan - ran tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bf03567342d29e69b6e6ed3b76169e9b30dc2cb7 Author: Xun Li Date: Mon Sep 16 15:53:31 2024 -0700 Do not request local execution from Rust SDK (#19373) ## Description In the code below these lines we emulate local execution by polling the transaction. This means that we no longer need to pass through the request type to the RPC server, otherwise if user specified WaitForLocalExecution it would still be passed through. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0f6c1293b44c77989ee90dbf4d13de97c787ae5b Author: Xun Li Date: Mon Sep 16 14:23:54 2024 -0700 Fix a few flaky tests (#19398) ## Description This PR fixes a few flaky tests: 1. The multisig tests. If epoch duration is too small, sometimes it triggered epoch changes in the middle of the test and the max epoch becomes insufficient. 2. The transaction e2e tests. The query includes system transactions too, of which there can be a lot more than 2 pages if enough checkpoints have passed by. 3. In rosetta with_draw_stake test, similar issue to (1), we need to manually control epoch change otherwise accidental epoch change could lead to wrong stake results. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4797bd643477a0375c7087f6c8308fa198d6d309 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Mon Sep 16 14:29:22 2024 -0600 [docs] Beta tag for graphql (#19358) ## Description Adds a beta tag to the frontmatter. Applied the tag to the graphql topics. If networks are provided as a value, they're added to the admonition box. Format is ``` --- beta: --- ``` ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e75c9627fd0966057ecb4e81e243bb00eb1f02d7 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Sep 16 13:25:00 2024 -0700 [Consensus] small metrics improvements (#19395) ## Description - Add metric to measure the gap between local last proposed round and low quorum round of each peer. - Rename subscriber connection count metrics to be more intuitive. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c36d37a6b37db7df05bfb3417e4bedbfc26adddd Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Sep 16 12:57:57 2024 -0700 [Metrics] add timeout and retries to metrics push task (#19399) ## Description We have observed `mainnet` validators cannot push metrics until restart. And metrics data points are dropped from some validators occasionally. Adding request timeouts and retries hopefully should mitigate some of these issues. Interval ticks are skipped if missed, so it is ok to retry until success there. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5856d750f7b030ed773a1e801a7343ff09f85b1d Author: Xun Li Date: Mon Sep 16 12:31:14 2024 -0700 [Indexer] SQL Backfill command (#19359) ## Description This PR adds a command to sui-indexer that allows us to backfill using SQL queries, split by checkpoint ranges. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 92441a4bd6cfc301a241b1566bee886af2e0b9ac Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Sep 16 11:02:43 2024 -0700 Move protocol config 59 to 60 (#19372) ## Description Testnet is using protocol config 58 right now. Protocol config 59 will be used for patching testnet. Move `main` to protocol config 60. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bfeaf47e3fba2bc07eb643515027ac7edbb328a8 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Mon Sep 16 13:40:24 2024 -0400 indexer json rpc: batch object db reads (#19392) ## Description The original impl. fires a DB query for each object, which is wasteful ## Test plan ci --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 33d442d132b9febc8395fe871e0cfe88c5db9913 Author: Cam Swords Date: Mon Sep 16 10:27:39 2024 -0700 [move][move-2024] Fix match parsing by adding Match to expression start set (#19342) ## Description Match did not end up in the expression start set due to these features growing separately. Now match parses in more places. This PR also slightly changes parsing error reporting to report what symbol was there in comma list parse failures, allowing for slightly better error reporting(?) ## Test plan New tests added. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f27caabe60b1408f829c25bef74c32890a7d418d Author: Cam Swords Date: Mon Sep 16 10:27:04 2024 -0700 [move] Revise dead code detection (#19332) ## Description Dead code was written without respecting named blocks and loops, leading to false negatives in cases that should work just fine. This revises. that analysis to do the correct thing, though the implementation is still a bit rough and full of heuristics. The main idea is that instead of tracking "diverged" versus "did not", we track "might have" versus "did" and "did not." This allows us enough breathing room to avoid saying we diverged when that divergence was guarded by conditional control flow. In writing tests, I also found an issue in how HLIR lowered named blocks, due to conflicting assumptions about how things are typed. This is fixed now, allowing this sort of code: ``` fun test(): u64 { 'a: { loop { return 'a 5 } } } ``` ## Test plan A bunch of new tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5771116cea2c8d7af5c912530f579cdf5a1ab8a6 Author: Manolis Liolios Date: Mon Sep 16 20:18:24 2024 +0300 [GraphQL][MVR] Move to new naming standard (#19376) ## Description Move things left and right (gets rid of the `dot_move` naming), and update our parsing logic to use our new naming standard. The naming standard used is `{ns_name}/{app_name}/{version}` with `/{version}` being optional. 
## Test plan Tweaked the existing e2e & unit tests to work with the new naming standard, and resolution works as expected on the e2e ones. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c337a96b49b4c265824d531bf8c408c8ae3eea50 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Mon Sep 16 09:53:31 2024 -0700 [CLI] Add debug prints for execution time of a tx / dry run (#19374) ## Description Adds debug prints for preparing a tx and executing it. Similarly, adds debug prints for executing a dry run. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Added debug prints for preparing a tx and executing it. Similarly, adds debug prints for executing a dry run. Use `RUST_LOG=debug sui` to see the extra information printed on the terminal. 
- [ ] Rust SDK: - [ ] REST API: commit 66eda7e901c959bc012e846cbb92744e19f5c32f Author: 0xripleys <105607696+0xripleys@users.noreply.github.com> Date: Mon Sep 16 12:52:27 2024 -0400 get_validator_address_by_pool_id function (#19333) ## Description Add a function called `get_validator_address_by_pool_id` There is currently no way to verify on-chain that a validator address and staking pool id are related. This is now possible with the function above ## Test plan added some unit tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0550e6f72bde871ccefbf11b138ea84bd4527171 Author: Brandon Williams Date: Mon Sep 16 11:05:17 2024 -0500 deny: temporarily allow RUSTSEC-2023-0086 to unblock ci commit cf1cc2fe303a9f8a03b0b69a2765f5f2c2c81796 Author: Brandon Williams Date: Mon Sep 16 10:41:31 2024 -0500 ingestion: reduce memory footprint by wrapping checkpoints in Arcs commit ae756cb831501ce5901fb5025ba1f58a9ce30511 Author: Brandon Williams Date: Mon Sep 16 10:19:13 2024 -0500 ingestion: change Worker api to take &CheckpointData commit 000d4197f89b012daebdc6c380d616d2a10bf1a6 Author: Eugene Boguslavsky Date: Mon Sep 16 09:03:24 2024 -0700 Start building binaries for ubuntu-arm64 (#19362) ## Description Start building binaries for ubuntu-arm64 ## Test plan https://github.com/MystenLabs/sui/actions/runs/10853752222/job/30122694354 commit 99ebd8f082f222ea6e662a4b3a2653e2c8f6eb12 Author: Eugene Boguslavsky Date: Sun Sep 15 20:01:00 2024 -0700 Revert "Add additional release trigger types (#19281)" (#19384) This reverts commit 
6bbb00776f6445c979c219a7de2c1f4f8226d1c3. ## Description revert commit f28674511c84e4f314d71f630a0e9e04ebe2981e Author: Ashok Menon Date: Sun Sep 15 08:14:34 2024 +0100 rest: discard failed dynamic field indices (#19377) ## Description Allow dynamic field indexing to fail when building up fullnode REST indices to bring it in line with behaviour in `sui-indexer`, `sui-analytics-indexer`, and the existing fullnode indices. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c0b807b1e095bc33ae17115f2806add8e84f585b Author: Adam Welc Date: Sat Sep 14 11:28:20 2024 +0200 [trace-view] Added support for continue (#19331) ## Description This PR adds support for "continue" debugger action. It also makes implementation of "next" ("step over") and "step out" actions faithful to their intended semantics (i.e., executing all instructions in a function stepped over or stepped out of). These changes do not actually change the current behavior of the viewer - they are done in preparation for implementing variable value tracking and displaying. This PR also includes a refinement of the "step out" action implementation. 
As also explained in the code comment, previously in the following line of code, after entering `foo` and stepping out of it, we would immediately step into `bar`, which could be quite confusing for the user: ``` assert(foo() == bar()); ``` Finally, this PR also includes some formatting changes for lines that were a bit too long ## Test plan Tested manually commit 12cf3a07b5d48b959f68d82ab121b761d1201504 Author: vegetabledogdog Date: Sat Sep 14 15:30:08 2024 +0800 [cli] improve git output (#18636) ## Description Check if git is installed. Output the git clone or git fetch message ## Test plan 1. git is not installed ``` sui move build Git is not installed or not in the PATH. Failed to build Move modules: Failed to resolve dependencies for package 'gitt' Caused by: 0: Fetching 'Sui' 1: Git is not installed or not in the PATH.. ``` 2.network error ``` sui move build FETCHING GIT DEPENDENCY https://github.com/MystenLabs/sui.git Cloning into '/home/sun/.move/https___github_com_MystenLabs_sui_git_framework__testnet'... fatal: unable to access 'https://github.com/MystenLabs/sui.git/': Could not resolve host: github.com Failed to build Move modules: Failed to resolve dependencies for package 'up' Caused by: 0: Parsing manifest for 'Sui' 1: No such file or directory (os error 2). ``` 3.Show the progress of git ``` sui move build FETCHING GIT DEPENDENCY https://github.com/MystenLabs/sui.git Cloning into '/home/sun/.move/https___github_com_MystenLabs_sui_git_framework__testnet'... remote: Enumerating objects: 345462, done. remote: Counting objects: 100% (5589/5589), done. remote: Compressing objects: 100% (2204/2204), done. Receiving objects: 9% (33800/345462), 35.71 MiB | 4.38 MiB/s ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Improve error and status messages for `sui move build`. - [ ] Rust SDK: commit c0a1fe8517b7e7f6c023035bb66b99416cd4a75e Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Sep 13 23:12:20 2024 -0700 [doc] add configs and client description to bridge node runbook (#19361) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 494f2520a063a6b725a9e393c0dc45b37072b55f Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Sep 13 17:29:03 2024 -0700 At startup, wait until all previously-observed commits have been sent to the consensus handler. (#19344) This is necessary for proper crash recovery with data quarantining, as we will need to wait until all previously-built checkpoints have been rebuilt before starting CheckpointExecutor commit 9c6906f14e7b956fabe368cb06b624e50d010a09 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Fri Sep 13 16:55:40 2024 -0700 Version Packages (#19367) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. 
When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/create-dapp@0.3.21 ### Patch Changes - @mysten/dapp-kit@0.14.21 ## @mysten/dapp-kit@0.14.21 ### Patch Changes - Updated dependencies [0db770a] - @mysten/zksend@0.11.3 ## @mysten/zksend@0.11.3 ### Patch Changes - 0db770a: Check transaction status when creating and claiming transactions Co-authored-by: github-actions[bot] commit 92a5b1d94ae7c34015b26a1ee89c330c326188f1 Author: John Martin Date: Fri Sep 13 16:36:03 2024 -0700 WAF details in validator_runbook.md (#19370) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2c47636b2e3a0f4f11cba63f1ddddb3dc5b309f3 Author: Adam Welc Date: Sat Sep 14 00:49:39 2024 +0200 [move-ide] Split symbolication implementation into smaller functions (#19338) ## Description This PR contains a refactoring to split a super-long `get_symbols` function into two smaller ones. It not only makes the code more readable but also enables a more optimized implementation of the test suite. Previously, all symbols had to be recomputed when tests involved a cursor, even though it was only the cursor that was changing, rather than symbols themselves. 
With this PR, we can recompute the cursor only, which brings the time to run all `move-analyzer` tests from minutes to (tens of) seconds. ## Test plan All existing tests must pass commit 3fd78458acf96df386203dafc977df85fac6e894 Author: John Martin Date: Fri Sep 13 15:10:10 2024 -0700 [sui-proxy] Allow sui bridge validator metrics (#19076) ## Description Introduces the ability for sui proxy to accept metrics that are pushed to it from sui-bridge. Works by looking up the `http_rest_url` for each proxy in the on-chain metadata, and querying each endpoint for the metrics pub key Depends on https://github.com/MystenLabs/sui/pull/18877. ## Test plan Tested in testnet commit c2687c0e5c0513a6f8d9c6313fb56d27534c21f1 Author: Bridgerz Date: Fri Sep 13 14:57:01 2024 -0700 Account for Eth block re-orgs in eth indexer (#19158) Description Account for Eth block re-orgs in eth indexer #19158 Test plan Spot check on my local machine. Indexer validity checker tool coming soon... commit 2de2e0369614ceea73bf15ae7035076d96e8fa14 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Fri Sep 13 14:27:03 2024 -0700 [move][test_scenario] Fix bug in `test_scenario` where we weren't returning allocated tickets (#17991) ## Description Fixes a bug where we weren't returning allocated receiving tickets in a transaction at the end of the transaction. This fixes the bug, and adds tests to make sure allocated tickets are properly handled at the end of a test scenario transaction. ## Test plan Added new tests to test the failing behavior and made sure they pass. commit cb17e68e0d5713380ba48a73193a47c35518b62a Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Sep 13 14:07:55 2024 -0700 [Consensus] stop proposing when propagation delay is high (#19328) ## Description Introduce `RoundProber` to gather information from each authority, on their received rounds from others. The gathered data on block propagation delays across the network are exported to metrics. 
The propagation delay for own blocks is used to control whether proposing blocks can happen. ## Test plan CI PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c413391cd54be6cb50d6fbd6b9b986300dd773ef Author: devan-ko <143364659+devan-ko@users.noreply.github.com> Date: Sat Sep 14 04:43:20 2024 +0900 Add FanTV as Zklogin OIDC provider onboarding (#19334) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Joy Wang <108701016+joyqvq@users.noreply.github.com> commit 19db7d3b806579d1df9264ac73c166b683aa46c9 Author: jk jensen Date: Fri Sep 13 12:01:04 2024 -0700 [suiop] module refactor (#19356) ## Description Refactor modules and shuffle things around to clean up the codebase and prepare for other integrations (notion) The only actual functional change is to separate the PD incident struct that's returned from the API from the more robust incident, which includes slack association and will also include notion metadata. 
## Test plan Functionality remains ([example](https://mysten-labs.slack.com/archives/C03QJ1B6BC0/p1726233363961019)) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6d558b18552a0ad72990ca4542ed90b0be10ae37 Author: Jonas Lindstrøm Date: Fri Sep 13 20:49:47 2024 +0200 Update fastcrypto + new vdf test vector (#19360) ## Description Update fastcrypto pointer. ## Test plan Unit tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0db770aa709d683749833572ff2a49bb8a31895c Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Fri Sep 13 11:39:43 2024 -0700 [zksend] check transaction status when creating and claiming links (#19366) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 018cbcb362faa30833c5c3763a42bef1c031c6ab Author: Ashok Menon Date: Fri Sep 13 18:34:50 2024 +0100 docs(graphql): explain scan limits rationale commit 874a407dbfc4d40d04e8506908ae24e31a649e2a Author: Ashok Menon Date: Fri Sep 13 14:41:14 2024 +0100 docs(graphql): [easy] document transaction, event filters ## Description Document the various filter options for transactions and events (and tweak the docs for object filters slightly). ## Test plan :eyes: commit 0ac25d3b69bd9ed658b48c3df2f02dbdeb9cd0e7 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Fri Sep 13 11:28:40 2024 -0700 Revert "[move] Enable serialization of source maps into json" (#19347) Reverts MystenLabs/sui#18727 commit aaa906f7517519bbb7ac7d9500fe7831d2a6ab1a Author: Ashok Menon Date: Fri Sep 13 19:00:02 2024 +0100 indexer: index tx_affected_addresses (#19355) ## Description Introduce `tx_affected_addresses` table -- a combination of `tx_recipients` and `tx_senders`, to power the sender-or-recipient query in GraphQL. ## Test plan ``` cargo build -p sui-indexer ``` Tests based on reading this field will be included with the PR introducing changes to GraphQL. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: Index the addresses affected by a transaction (either because they are a sender or a recipient of the transaction). 
- [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit dce2e0683c2472a64858d13dc77b82448b4139f1 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Sep 13 10:17:15 2024 -0700 [bridge] add metrics to SuiClient and Monitor (#19352) ## Description as title, boring PR ## Test plan existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 71ac1878d81a9467971c26b5314b917246312fc4 Author: Anastasios Kichidis Date: Fri Sep 13 15:13:06 2024 +0100 [Consensus] subscriber counter to atomically set node status (#19313) ## Description Looking on the connectivity metrics for the subscribed peers I believe it's possible to have some race conditions when nodes quickly connect/disconnect. As the metric is a gauge the last who sets the value "wins", so it's possible that we might have nodes connecting again while the earlier connection has not been dropped yet - which consequently once dropped it will make the peer appear in metrics as disconnected. The PR is refactoring a bit that part and only sets the peer as disconnected when there is no other pending connection. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e5703e337b613a5acc4bcc6c87de89c6d5456235 Author: nikos-kitmeridis <145983019+nikos-kitmeridis@users.noreply.github.com> Date: Fri Sep 13 09:25:28 2024 +0300 Adds configuration for One (#19339) ## Description Adds configuration for One Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 28a0c879003c9731e4bb9597b7cfcbfe26a864cf Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Fri Sep 13 08:52:08 2024 +0700 [Linter] Unnecessary Conditional (#16856) # Description - Added a lint to check for unnecessary conditionals - Triggered when - Each branch is a single value and the values are equal (consider removing the conditional) - Each branch is a single bool, and the bools are not equal (use the condition directly) ## Testing - New tests ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move will now lint against unnecessary conditionals, `if-else` expressions. 
Improve clarity of errors for QuorumDriverError type and its usage in RPCs (#19258)
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e605d229c1347e37e665897facda9eb080ae13ca Author: Yang Hau Date: Fri Sep 13 02:14:20 2024 +0200 fix typos (#18543) commit f909668f0af56ce659f53971d5b602318860ac5c Author: Todd Nowacki Date: Thu Sep 12 16:37:53 2024 -0700 [more] Fix move tests (#19348) ## Description - Fix lint tests ## Test plan - Updated tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fbf20c97f849c2bdc06464c7fdd5564f24978bd6 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Thu Sep 12 16:07:55 2024 -0700 Revert "[move][tracing] Document flag for generating traces from `sui move test`" (#19346) Reverts MystenLabs/sui#19326 commit 1f7b34ecb0daddf744689152d15c43c524179f64 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Sep 12 15:25:32 2024 -0700 [bridge] Some config changes & cleanups (#19303) ## Description 1. add `disable_eth` for bridge indexer so we can run it against mainnet before deploying ethereum contracts 2. make `checkpoints_path` optional when we are not using colocated FNs. and add comments for `CheckpointReader` in ingestion-core 3. some misc but self-descriptive stuff. ## Test plan tested locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
- It assumes an object is fresh if it comes from a function with no reachable UID inputs. Otherwise, the object could be wrapped or coming from a dynamic field
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 363eb019fc813bd4dc32c713f85d06b974bc5cc8 Author: jk jensen Date: Thu Sep 12 15:10:17 2024 -0700 [suiop] add point of contact selection for incident review (#19329) ## Description - Refactor api logic into new module - get all users so we can select them to be points of contact - add multiselect to allow selection of an arbitrary set of users - generalize serde logic for caching api responses (once per day) - improve tagging of channels to link to the actual channel - tag associated poc users for an incident ## Test plan e2e functional (see [example message](https://mysten-labs.slack.com/archives/C03QJ1B6BC0/p1726147679829409)) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ba035ce7bff023129ab4d2b79cf2151ecd07e214 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Fri Sep 13 04:14:45 2024 +0700 [Linter] 'loop' without exit (#16875) # Description - Gives a suspicious lint for loop without break or return. 
sui$ ./scripts/generate_indexer_schema.rs
guarded by a #[cfg(test)] annotation.
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 88c7c85453bff0f41492cf591a52c2781d681b68 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Wed Sep 11 22:41:02 2024 -0700 add deepbook indexer to sui tool image (#19322) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8a96ddb41f19f3b964b65f1fc5393c4226fa4009 Author: Cam Swords Date: Wed Sep 11 19:25:47 2024 -0700 [move][move-2024] Adjust HLIR to allow reuse of labels in different scopes (#19289) ## Description While name resolution and typing ensure uniquely-named labels, they can appear on the right-hand side of an `or`-pattern match, meaning they will be duplicated during match compilation. This currently causes a panic (cc @damirka ), but the revised code allows for block name reuse in different scopes in HLIR by adding them to the map for processing the body and removing them afterwards. This also cleans up a redundant error being generated in typing, which was also being reported in naming. ## Test plan Several new tests, plus a bunch of other tests for match features. --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7fd291a71da6408b582acca3ffca81e1787985f1 Author: Adam Welc Date: Wed Sep 11 16:42:12 2024 -0700 [trace-view] Added support for marking optimized away code lines (#19306) ## Description In Move, there isn't necessarily 1:1 correspondence between source code and bytecode due to compiler optimizations. In the following code example (admittedly somewhat artificial for presentation purposes), there will be no entry for `constant` variable in the bytecode as it will be optimized away via constant propagation: ``` fun hello(param: u64): vector { let mut res = param; let constant = 42; // optimized away if (constant >= 42) { // optimized away and turned into straight line code res = 42; }; vector::singleton(res) } ``` This PR implements a heuristic that will mark source code lines optimized away by the compiler as having grey background. We do this by analyzing source maps for a given file and marking lines that are present in the source map but not in the source file (with some exceptions, notably empty lines, lines starting with `const`, and lines that only contain right brace `}`). At this point, the "optimized away" lines include comments (we can finesse it in the future if need be, but it does in some sense reflect the spirit of this visualization) ## Test plan Tested manually to see if grey lines appear and changes throughout debugging session when going to different files, and that they are reset once debug session is finished. 
commit 601ff73115923accc005d9ee9706b836a91ddd78 Author: Todd Nowacki Date: Wed Sep 11 16:03:42 2024 -0700 [move-compiler] Add sui specific information to TypingProgramInfo (#19287) ## Description - Added a map of all UID holders - Added a map of all transferred objects ## Test plan - Not yet used --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 74d6d564970406e1b3191a07cf207af1ab6b3356 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Sep 11 14:11:40 2024 -0700 [Move] Update tests to use unique addresses for different modules in tests (#19262) ## Description Updates tests so that unique addresses are used on modules that are not part of the same package (needed as part of the larger package loader rewrite). Note that the `0x1`, `0x2` addresses are used by the stdlib (etc.) so I bumped all addresses to start at `0x6`. Tests should largely be unchanged. The one exception is some rewriting in the `move-vm-integration-tests` as there were issues running tests, and the fact the `CompiledModule::serialize` is test-only. Once this lands, I'll rebase and land into the vm_2024 branch. ## Test plan Update existing tests, verify their outputs change as expected. commit 65e9b4a214ba6c94d97f5e5ef6cf8c428a609416 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Sep 11 13:54:56 2024 -0700 [move] Enable serialization of source maps into json (#18727) ## Description Pretty simple -- allows serialization and deserialization of Move source maps into a json format. 
This is to make it more amenable to being utilized by tools written in languages that don't have access to BCS (e.g., for use alongside tracing). ### PR Stack: 1. #18727 **<<<<< You are here** 2. #18729 3. #18730 ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 280bfec2f77fe9aa3e0193eb9e42654269cb4f71 Author: jk jensen Date: Wed Sep 11 12:40:36 2024 -0700 [suiop] add title similarity detection to incident review selection (#19304) ## Description Refactor the filtering code into the review selection module. Use strsim to detect title similarity (only using the first 20 chars of the title for now to keep it simple) Group similar titles and allow treating them as one atomic operation ## Test plan tested locally, grouped successfully. tests pass --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ebe2ae8254c96ed78476d631c81b8a3194a11412 Author: Tony Lee Date: Wed Sep 11 20:35:32 2024 +0200 Deepbook SDK Updates (#19302) ## Description setTreasuryAddress function in SDK Admin createPoolFunction updates Constant Updates ## Test plan How did you test the new or updated feature? 
Testnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 52a3fbab779fd25bb530c836e839b1d04be3b678 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Wed Sep 11 14:24:44 2024 -0400 cherrypick indexer: split object version ingestion for perf (#19104) (#19319) objects_history has been a bottleneck of indexer ingestion / backfill, splitting objects_version and let it run in parallel. this might have been a factor of recent backfill perf regression. --- Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 72c17f45a21e764bbb465cb9b6cbb6e93cbb67ca Author: Xun Li Date: Wed Sep 11 10:21:27 2024 -0700 [Indexer] Minor cleanup on ObjectStatus::WrappedOrDeleted (#19288) ## Description The WrappedOrDeleted variant only needs the version, as all other information is already in the Object struct. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit dec33d0a303a7f83e778fcc5a136ab9776162e68 Author: jk jensen Date: Wed Sep 11 09:44:12 2024 -0700 [deps] bump itertools version (#19307) ## Description Introduces some [breaking changes](https://github.com/rust-itertools/itertools/blob/master/CHANGELOG.md), trying to see if anything yells. ## Test plan Counting on signals for this one. None of the breaking changes listed in the itertools changelog appear anywhere in our codebase based on grepping for them. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
Optimising validator tx finalizer (#19272) ## Description This PR adds an extra check before a validator would attempt to finalize a transaction: if the transaction is already submitted to consensus (i.e. in pending consensus transactions).
Like the following: ``` error[E0433]: failed to resolve: could not find `conversions` in `bls12381` --> sui-execution/v1/sui-move-natives/src/crypto/groth16.rs:205:41 | 205 | > fastcrypto_zkp::bls12381::conversions::SCALAR_SIZE * MAX_PUBLIC_INPUTS | ``` ## Test plan running localnet with ``` cargo build --bin sui RUST_LOG=info target/debug/sui start --force-regenesis --with-faucet ``` and grep logs for JWK pulling for playtron and threedos ok. ``` 2024-09-11T14:51:07.219647Z INFO node{name=k#b3fd5efb..}:jwk_updater_task{epoch=0}: sui_node: Submitting JWK to consensus: JwkId { iss: "https://oauth2.playtron.one", kid: "1fd66a18-7afd-4086-ba57-a8ca9b398406" } 2024-09-11T14:51:07.219736Z INFO node{name=k#b3fd5efb..}:jwk_updater_task{epoch=0}: sui_node: Submitting JWK to consensus: JwkId { iss: "https://oauth2.playtron.one", kid: "7388b63c-ba03-4fe3-85e3-6717f91d55b1" } 2024-09-11T14:51:07.219789Z INFO node{name=k#b3fd5efb..}:jwk_updater_task{epoch=0}: sui_node: Submitting JWK to consensus: JwkId { iss: "https://oauth2.playtron.one", kid: "e55421cb-cdbd-4791-a590-ad8ca798aa89" } ``` for threedos: ``` 2024-09-11T14:52:08.492045Z INFO node{name=k#8dcff6d1..}: sui_core::authority::authority_per_epoch_store: received jwk vote from k#8dcff6d1.. for jwk (JwkId { iss: "https://auth.3dos.io", kid: "6d361dc9637a275eb585a915af26198ff0d97326ca13f4baf0e4805f72f2a9a0" }, JWK { kty: "RSA", e: "AQAB", n: "y_8hHwq7w2yE4968sbQF98iGUhnu0BwyB5khTxVPAcUnMCYdp61zYcRWml2zdY4HAfq-Nnjb_pAli6I66Vpe9IE8Gf8uGRB0oYIo2S6tYMEe0lhRaEDYVbMdQkuKxTIYMNBXSd_kCHKJM1ZUAo7uFoq_bWuzt2hRG2-79z-Ycbiw0wil0rzFHlpNBKsBLKM4GSGUwOejaL2zCiE_rjf77AvOaJLRd4I_DBYG16t8D1BkxbhkcQCmOxYGG0NqjP3z0lz-w1ALqHCNfhzczZOsgaCrbSlcTKcBTq1syAUUhQmounW7nG5clBIfPQRVH7jCoPztiJUZg6Xz1AN6V07xnw", alg: "RS256" }) 2024-09-11T14:52:08.492281Z INFO node{name=k#8dcff6d1..}: sui_core::authority::authority_per_epoch_store: received jwk vote from k#99f25ef6.. 
for jwk (JwkId { iss: "https://auth.3dos.io", kid: "6d361dc9637a275eb585a915af26198ff0d97326ca13f4baf0e4805f72f2a9a0" }, JWK { kty: "RSA", e: "AQAB", n: "y_8hHwq7w2yE4968sbQF98iGUhnu0BwyB5khTxVPAcUnMCYdp61zYcRWml2zdY4HAfq-Nnjb_pAli6I66Vpe9IE8Gf8uGRB0oYIo2S6tYMEe0lhRaEDYVbMdQkuKxTIYMNBXSd_kCHKJM1ZUAo7uFoq_bWuzt2hRG2-79z-Ycbiw0wil0rzFHlpNBKsBLKM4GSGUwOejaL2zCiE_rjf77AvOaJLRd4I_DBYG16t8D1BkxbhkcQCmOxYGG0NqjP3z0lz-w1ALqHCNfhzczZOsgaCrbSlcTKcBTq1syAUUhQmounW7nG5clBIfPQRVH7jCoPztiJUZg6Xz1AN6V07xnw", alg: "RS256" }) 2024-09-11T14:52:08.492310Z INFO node{name=k#8dcff6d1..}: sui_core::authority::authority_per_epoch_store: jwk became active epoch=1 round=16 jwk=(JwkId { iss: "https://auth.3dos.io", kid: "6d361dc9637a275eb585a915af26198ff0d97326ca13f4baf0e4805f72f2a9a0" }, JWK { kty: "RSA", e: "AQAB", n: "y_8hHwq7w2yE4968sbQF98iGUhnu0BwyB5khTxVPAcUnMCYdp61zYcRWml2zdY4HAfq-Nnjb_pAli6I66Vpe9IE8Gf8uGRB0oYIo2S6tYMEe0lhRaEDYVbMdQkuKxTIYMNBXSd_kCHKJM1ZUAo7uFoq_bWuzt2hRG2-79z-Ycbiw0wil0rzFHlpNBKsBLKM4GSGUwOejaL2zCiE_rjf77AvOaJLRd4I_DBYG16t8D1BkxbhkcQCmOxYGG0NqjP3z0lz-w1ALqHCNfhzczZOsgaCrbSlcTKcBTq1syAUUhQmounW7nG5clBIfPQRVH7jCoPztiJUZg6Xz1AN6V07xnw", alg: "RS256" }) 2024-09-11T14:52:08.492464Z INFO node{name=k#8dcff6d1..}: sui_core::authority::authority_per_epoch_store: received jwk vote from k#b3fd5efb.. for jwk (JwkId { iss: "https://auth.3dos.io", kid: "6d361dc9637a275eb585a915af26198ff0d97326ca13f4baf0e4805f72f2a9a0" }, JWK { kty: "RSA", e: "AQAB", n: "y_8hHwq7w2yE4968sbQF98iGUhnu0BwyB5khTxVPAcUnMCYdp61zYcRWml2zdY4HAfq-Nnjb_pAli6I66Vpe9IE8Gf8uGRB0oYIo2S6tYMEe0lhRaEDYVbMdQkuKxTIYMNBXSd_kCHKJM1ZUAo7uFoq_bWuzt2hRG2-79z-Ycbiw0wil0rzFHlpNBKsBLKM4GSGUwOejaL2zCiE_rjf77AvOaJLRd4I_DBYG16t8D1BkxbhkcQCmOxYGG0NqjP3z0lz-w1ALqHCNfhzczZOsgaCrbSlcTKcBTq1syAUUhQmounW7nG5clBIfPQRVH7jCoPztiJUZg6Xz1AN6V07xnw", alg: "RS256" }) ``` ``` ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Joy Wang <108701016+joyqvq@users.noreply.github.com> commit 57d57ea792c49ad184290b069224ab1537b4103c Author: Jonas Lindstrøm Date: Wed Sep 11 16:27:28 2024 +0200 Update fastcrypto pointer and bls12381 scalar length path (#19314) ## Description Update the fastcrypto version. This includes adding Playtron and Threedos as OIDC providers. The path to the BLS12381 scalar size has been updated in fastcrypto and will be updated in here also. The value [is](https://github.com/MystenLabs/fastcrypto/blob/5f2c63266a065996d53f98156f0412782b468597/fastcrypto-zkp/src/bls12381/conversions.rs#L21) [unchanged](https://github.com/MystenLabs/fastcrypto/blob/501225efbb9d069062c0ab47e7ce25a7cd5128c0/fastcrypto/src/groups/bls12381.rs#L57). ## Test plan Unit tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9a275a4bbe9cf4a90e3f218a121d31f95b4b0bad Author: Krešimir Klas Date: Wed Sep 11 15:58:50 2024 +0200 move toolchain: fix multiple issues with bytecode dependencies (#16523) ## Description This PR fixes multiple issues caused by bytecode dependencies not being included in `CompiledPackage`: - multiple functionalities failing due to topo sort in `Modules` panicking on missing dependencies - running `move test` on packages with bytecode deps failing due to them not being included in VM storage - source verification failing because bytecode deps aren't handled - commands such as publish and upgrade failing due to bytecode deps not being referenced in the construction of transaction blocks Summary of changes: - removed `move_bytecode_utils::dependency_graph` module and instead added a `compute_topological_order` method to `Modules`. Replaced all calls to `compute_dependency_graph` with a direct call to `compute_topological_order` (2 in total) - added bytecode deps to VM storage for test runs by loading them from `ResolvedGraph` - include bytecode deps in `sui_move_build::CompiledPackage` and fix various module fetching methods to return modules from bytecode deps also - include bytecode deps in `local_modules` function to fix `LocalDependencyNotFound` errors in source verification This is part of the work to enable compiling against on-chain dependencies https://github.com/MystenLabs/sui/pull/14178. cc @rvantonder @amnn ## Test Plan Added unit tests for `move test` and source verification. --- If your changes are not user-facing and do not break anything, you can skip the following section. Otherwise, please briefly describe what has changed under the Release Notes section. 
### Type of Change (Check all that apply) - [ ] protocol change - [ ] user-visible impact - [ ] breaking change for a client SDKs - [ ] breaking change for FNs (FN binary must upgrade) - [ ] breaking change for validators or node operators (must upgrade binaries) - [ ] breaking change for on-chain data layout - [ ] necessitate either a data wipe or data migration ### Release notes commit bb93869a4c1b83cb52a64019a2ca70e33597eed7 Author: George Danezis <4999882+gdanezis@users.noreply.github.com> Date: Wed Sep 11 12:07:06 2024 +0100 [light-client] Fix light client rpc (#19028) ## Description Modernise the RPC used in the light client: - Use GraphQL to get the end-of-epoch checkpoint. - Use the supported Rust SDK to get objects and other info. - Use a generic object store, by default provided by Mysten, for the full checkpoint data. - Add a cache to package loader since we observed the same package downloaded multiple times. ## Test plan Existing unit tests, and manual CLI invocations. commit 9cd655a99b9d7a36bdde58532060c23c5adb90e1 Author: Ashok Menon Date: Wed Sep 11 11:54:34 2024 +0100 fix(cli): canonicalize keystore path (#19312) ## Description Fix a bug where a relative keystore path was written out to the config upon creation when the config itself was specified at a relative path. The fix is to canonicalize the keystore path before using it, but we need to handle multiple edge cases: - A wallet config created in the current working directory as a relative path (and relative paths in general). - A wallet config created at the very root of the filesystem (although this should never happen, the correct behaviour is not to default to the default config directory here). 
## Test plan ``` sui$ cargo build --bin sui sui$ export SUI=~/sui/target/debug/sui sui$ mv ~/.sui/sui_config/client.yaml{,~} sui$ cd ~ # Create the config at the default location using a relative path sui$ $SUI client --client.config .sui/sui_config/client.yaml gas sui$ cd - sui$ $SUI client gas # Create the config at some other location, using a relative path sui$ mkdir -p /tmp/a sui$ cd /tmp/a a$ $SUI client --client.config ./client.yaml gas a$ cd - sui$ $SUI client --client.config /tmp/a/client.yaml gas # Create the config at some other location, using a relative path # without a root sui$ mkdir -p /tmp/b sui$ cd /tmp/b b$ $SUI client --client.config client.yaml gas b$ cd - sui$ $SUI client --client.config /tmp/b/client.yaml gas ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Fixes a bug where the CLI would write out a config with a relative path for the keystore that would only work if the CLI was subsequently called from the same directory that the config was first created in. - [ ] Rust SDK: - [ ] REST API: commit 3012fa439c9b5cee7e2a1cb94ff6d8b45d200588 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Wed Sep 11 12:27:23 2024 +0300 Version Packages (#19283) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). 
If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/sui@1.9.0 ### Minor Changes - 2c96b06: Adds experimental named packages plugin - 1fd22cc: Require name to register global transaction plugins ## @mysten/create-dapp@0.3.20 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 - @mysten/dapp-kit@0.14.20 ## @mysten/dapp-kit@0.14.20 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 - @mysten/wallet-standard@0.13.4 - @mysten/zksend@0.11.2 ## @mysten/deepbook@0.8.18 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 ## @mysten/deepbook-v3@0.5.1 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 ## @mysten/enoki@0.4.2 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 - @mysten/zklogin@0.7.19 ## @mysten/graphql-transport@0.2.18 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 ## @mysten/kiosk@0.9.18 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 ## @mysten/suins-toolkit@0.5.18 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 ## @mysten/wallet-standard@0.13.4 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 ## @mysten/zklogin@0.7.19 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 ## @mysten/zksend@0.11.2 ### Patch Changes - Updated dependencies [2c96b06] - Updated dependencies [1fd22cc] - @mysten/sui@1.9.0 - @mysten/wallet-standard@0.13.4 Co-authored-by: github-actions[bot] commit 2c96b06fe12572f118de8ef61173292ee37398df Author: Manolis Liolios Date: 
Wed Sep 11 10:50:11 2024 +0300 [SDK] Adds `.move` plugin (#19194) ## Description Introduces the TS plugin that will be used for replacing names with actual addresses. ## Test plan Currently contains unit tests. Once we have a live setup (mainnet / testnet), we'll also setup an actual e2e test (similar to zksend). --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Michael Hayes commit 0fdeb2dd2dee1ff261bdce2212e292157fbcbe05 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Tue Sep 10 22:53:46 2024 -0700 [Move] Box type layout vectors (#19310) ## Description Boxes fields in the `MoveTypeLayout` to make the enum size smaller for type layouts. ## Test plan Added test to check size of the enums + make sure existing tests pass. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [X] Protocol: Increase the maximum type layout size in the VM. Most users should not notice this change. 
- [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit db5ba7aa9564af91de0aa9ad58ac0f2c9f86eac2 Author: Adam Welc Date: Tue Sep 10 16:37:08 2024 -0700 [move-ide] Version bump (#19309) ## Description Bump version to the next one in VScode Marketplace (currently 1.0.11) as plugin can be republished only at a higher version commit b7900fbba6239110c54d01040b8229b6a2e6ac41 Author: Anton Dosta Date: Tue Sep 10 16:23:58 2024 -0700 feat: Cookbook Onboard (AI Assistant) integration (#19266) ## Description Cookbook's Ask Cookbook AI assistant and co-pilot is trained on all existing Sui resources (source code, docs website, etc.) and is available as a standalone modal, embeddable as a button on any page (recommended for technical docs). It answers developer questions about building on Sui, acting as an enhanced and streamlined technical documentation search tool as well as Solidity and Move coding co-pilot. Ask Cookbook AI can also access context from thousands of data sources indexed by Cookbook.dev in addition to Sui-specific data sources, providing the best blockchain developer-focused answers of any chatbot on the market. Cookbook will assist in the tuning and calibration of the AI assistant to ensure the highest answer quality. ![image](https://github.com/user-attachments/assets/d36e4a14-e5e1-476f-9604-9ced0f38aeb9) ### Changes - Added the Ask Cookbook plugin to embed the Cookbook Onboard AI Assistant into the Sui docs, and included it in `docusaurus.config.js`. ### Preview Preview link: https://sui-docs-cookbook.vercel.app/ --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4f683265933ea6ff994fe109b244e8a7306f112c Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Sep 10 14:54:19 2024 -0700 [chore] upgrade express to fix npm audit warning (#19298) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 89224b426815ce30e8dd3ac880749ef94395b117 Author: John Martin Date: Tue Sep 10 14:51:41 2024 -0700 split out push_metrics into mysten-common (#19254) ## Description Follow up to #19077, this shares the `push_metrics` fn between `sui-bridge` & `sui-node`. ## Test plan Tested on a testnet fullnode & bridge validator node --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5b7f117d0e75975fc4c3087bccaa38565bbced92 Author: Adam Welc Date: Tue Sep 10 12:53:34 2024 -0700 [move-ide] Refactored auto-completions implementation to multiple files (#19236) ## Description The auto-completions implementation was becoming unwieldy and this PR splits it up to multiple files in a separate directory. This a pure refactoring - other than some name changes, added comments, and moving code around, no modifications have been made ## Test plan All existing tests must pass commit a9113687c410b55eb010ab13fad703ba4ebf0635 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Sep 10 11:52:31 2024 -0700 [Consensus] add type and interface for transaction voting (#19280) ## Description Add block variant that supports voting. Add method to TransactionVerifier trait, for calling into the voting logic. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 91ee7f048a4b62aa79088c76ff7dfff630786302 Author: Joe Hrbek <123987499+suiwombat@users.noreply.github.com> Date: Tue Sep 10 13:26:45 2024 -0500 [sui-node/logging] (#19301) ## Description move warn to error when sui-node cannot send metrics ## Test Plan it builds --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9adbe04637937183f4c5b74308179b8015cbe28a Author: Eugene Boguslavsky Date: Tue Sep 10 11:01:01 2024 -0700 Fix env variable name (#19300) ## Description Fix env variable name ## Test plan 👀 commit bd44746691dedf2ff6feeae8bb85054734500c79 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Sep 10 09:38:41 2024 -0700 [bridge] add hardware requirements for bridge node (#19292) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a31bee54707900cdbb5b7f098c18ed780dcb1fdd Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Sep 10 09:09:22 2024 -0700 [Consensus] count missing ancestors and blocks per authority (#19293) ## Description Count missing ancestors and new missing blocks per authority. This may help identify badly performing validators faster. ## Test plan CI PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cfe8e53bc1021e9439bef25f309c2e7189a15e0d Author: Joy Wang <108701016+joyqvq@users.noreply.github.com> Date: Tue Sep 10 11:48:03 2024 -0400 chore: update doc for new providers (#19224) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit e1f3607c4c5d0a906161d6cedc388f76c5bcaa88 Author: jk jensen Date: Tue Sep 10 08:42:42 2024 -0700 [suiop] filter incident selection by incidents with status or slack (#19284) channels ## Description Limit the noisy incidents/alerts ## Test plan ran locally successfully --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 127ab5dc1ef3a68ed624f233877632e795568edd Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Tue Sep 10 08:58:52 2024 -0400 [pg indexer] split executors in non-colocated setup (#19277) ## Description conditionally split executors for the PG indexer. This will prevent the object snapshot pipeline from affecting the progress of the primary workflow in non-colocated setups. For colocated setups, we still need to utilize a single executor --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6b37277276b4c347d5dba45711055bb8e533ed45 Author: Jonas Lindstrøm Date: Tue Sep 10 09:03:21 2024 +0200 Assertions for groth16 public inputs parser (#19014) ## Description Adds assertions to the public inputs from bytes function. The inputs are expected to be concatenated which has caused some confusion. These new assertions should give a more meaningful error msg to devs. This also adds a `flatten` function for vectors. ## Test plan Unit tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Fail fast on invalid public inputs to Groth16 zk-proof verification. 
Add a `flatten` Move function which flattens a vector of vectors into a single vector. This introduces a new protocol version 59. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Todd Nowacki commit 055c20ce5515f6f604c3a982d47c10fdf0044965 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Sep 9 20:12:49 2024 -0700 [Consensus] add missing blocks per authority metric (#19286) ## Description This metric might provide us with a better idea on blocks from which authorities are having propagation problems. Also, decrement the synchronizer inflight metric properly. ## Test plan CI PT --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 90750b4343602470a833904957fc4ecaa683d71e Author: Adam Welc Date: Mon Sep 9 17:16:10 2024 -0700 [move-ide] Same named module symbolication fix (#19285) ## Description This PR fixes a recently reported problem: https://github.com/MystenLabs/sui/issues/19275 The issue was that `move-analyzer` was only using numerical addresses to construct strings representing parsing-level and expansion-level modules to be used as map keys. It works well unless someone creates a dependent module with the same name as another module in their project, and packages containing both modules are still unpublished (and thus have the same address). 
This is still disallowed by our build system and would normally be reported by the compiler, but since this compilation problem is non-blocking, `move-analyzer` actually moves to the symbolication phase and gets confused if two modules with the same address/name are present. The fix is to make the name of the package (if available) part of the string representing module map keys. ## Test plan Added a new test replicating originally encountered problem. All new and all tests must pass commit 227ded1eba2e623fe1587ef4984c9c7794ab431e Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Mon Sep 9 18:37:52 2024 -0400 indexer reader: derive dynamic field info (#19099) ## Description title, with this pr and the corresponding pr in GQL, we can remove df_* from objects table except df_kind. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 53e58066170d2472afd26fe7077fe30c8fc0dff2 Author: Bridgerz Date: Mon Sep 9 14:53:44 2024 -0700 Deepbook Indexer (#19023) ## Description Deepbook indexer ## Test plan Will add Unit tests --------- Co-authored-by: Aslan Tashtanov Co-authored-by: 0xaslan <161349919+0xaslan@users.noreply.github.com> Co-authored-by: longbowlu commit 1fd22cc8a5cfc63e5b26e3f18f280b19c76fb0ed Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Sep 9 12:29:21 2024 -0700 Require name to register global transaction plugins (#19260) ## Description Describe the changes or additions included in this PR. 
## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d2577a7cf29785b3134e0595b21a18082b4cb226 Author: Jort Date: Mon Sep 9 12:16:07 2024 -0700 [docs] Include a reference to "The Move Book" in the Write a Move Package page (#19261) ## Description Add a link to the Sui Move book. A link also exists in the move references page, however it is beneficial to have the link in the write a move package page as well. This will help users find the right book for writing move packages for sui and can be a good follow up step to writing a basic move package. ## Test plan Built the docs, checked the link. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6bbb00776f6445c979c219a7de2c1f4f8226d1c3 Author: Eugene Boguslavsky Date: Mon Sep 9 11:23:54 2024 -0700 Add additional release trigger types (#19281) ## Description Add additional release trigger types ## Test plan 👀 commit 4cd523c278bfdbc796b1777c7f54b4a27231a6a7 Author: Brandon Williams Date: Mon Sep 9 11:34:42 2024 -0500 indexer: chunk protocol_config commits Chunk the commits to the protocol_config table in order to account for the fact that we have too many protocol configs to commit to at once now resulting in an `UnableToSendCommand` error otherwise. commit 2f83b1a544f29b029a6b69a03dd318965132aa4b Author: Brandon Williams Date: Mon Sep 9 11:09:16 2024 -0500 chore: remove and ban dependency on libpq commit c6e010f69acafacc1edd29cfe69a577379612dab Author: Brandon Williams Date: Mon Sep 9 11:06:02 2024 -0500 indexer: remove unused macros and blocking connection pool logic commit d51318671e20a17b7610ebc1731f7a39b4993e3d Author: Brandon Williams Date: Mon Sep 9 10:57:44 2024 -0500 indexer: remove blocking pool from PgIndexerStore commit 3adec430f02cdad17b2a114ad2c1775f3046157b Author: Brandon Williams Date: Mon Sep 9 10:36:01 2024 -0500 indexer: use async connection for ingestion_tests commit 6b35b59af829c6db718653f226999e2f98c81064 Author: Brandon Williams Date: Mon Sep 9 10:28:22 2024 -0500 indexer: use async connection for drop_table_partition commit 6424a50a04f44352700310d66a970d23289cc69b Author: Brandon Williams Date: Mon Sep 9 10:16:41 2024 -0500 indexer: use async connection for get_table_partitions commit 0824cf161e4dbec996de896e7dde9bbabd192c43 Author: Brandon Williams Date: Mon Sep 9 10:10:12 2024 -0500 indxer: remove unused execute_in_blocking_worker method commit 4c5429befcd0f73f9f5627805a888ac5b671be5c Author: Brandon Williams Date: Mon Sep 9 10:08:38 2024 -0500 indexer: use async connection for 
prune_cp_tx_table commit e766e9f7e04b45a930863dac4ad821a59b55c8bc Author: Brandon Williams Date: Mon Sep 9 10:06:10 2024 -0500 indexer: use async connection for prune_tx_indices_table commit b4aaf92e9b5d632bc23e6c63f12a360f97dbc6e1 Author: Brandon Williams Date: Mon Sep 9 10:02:19 2024 -0500 indexer: use async connection for prune_event_indices_table commit efb6181cceed07c5cc7dabff852309fd8eee145f Author: Brandon Williams Date: Mon Sep 9 09:58:18 2024 -0500 indexer: use async connection for prune_epochs_table commit ff1819ea60ffdfb70149755828847a7b6c27607d Author: Brandon Williams Date: Mon Sep 9 09:56:15 2024 -0500 indexer: use async connection for prune_checkpoints_table commit e6d91aa6095dfac18bf367d1c2ad210743a2f8e6 Author: Brandon Williams Date: Mon Sep 9 09:53:26 2024 -0500 indexer: remove unused spawn_blocking_task and spawn_task methods commit 9d21974a91119b8d01853460c63ff2a51d6fc715 Author: Brandon Williams Date: Mon Sep 9 09:50:05 2024 -0500 indexer: use async connection for advance_epoch commit e8e4d81e79c86d20e00f6fc20ad15f0988fdcb62 Author: Brandon Williams Date: Mon Sep 9 09:37:45 2024 -0500 indexer: use async connection for persist_tx_indices_chunk commit 0c38b91f603b0b2d69fb9867e55f66626a7c31b2 Author: Brandon Williams Date: Mon Sep 9 09:29:33 2024 -0500 indexer: use async connection for persist_event_indices_chunk commit 1f55dd456d69f53916d32eae171ca35446ccffd1 Author: Brandon Williams Date: Mon Sep 9 09:13:35 2024 -0500 indexer: use async connection for persist_object_mutation_chunk commit 9094b791840bd8d16463ae45c4a7dbffcbcc7ef2 Author: Brandon Williams Date: Mon Sep 9 09:09:46 2024 -0500 indexer: use async connection for get_protocol_version_index_range commit 823f492217b27f7842ddf40fc3eca467c82c4fa6 Author: Brandon Williams Date: Mon Sep 9 09:07:33 2024 -0500 indexer: use async connection for persist_protocol_configs_and_feature_flags commit d41af062592bdd32ce74d64653fa0c5493af81a2 Author: Brandon Williams Date: Mon Sep 9 10:48:55 2024 -0500 
indexer: use async connection for persist_full_objects_history_chunk commit 66f1c2c2d6650e5bf78af0e3a17f4ae8e3541e2f Author: Brandon Williams Date: Mon Sep 9 08:59:52 2024 -0500 indexer: use async connection for persist_checkpoints commit 0e9edcfb31d225f3ce3b3a92d44662c5a5da858e Author: Brandon Williams Date: Fri Sep 6 15:51:33 2024 -0500 indexer: use async connection for persist_epoch commit fb3362c0d595ff315af710a1f0664514e219297a Author: Brandon Williams Date: Fri Sep 6 15:43:53 2024 -0500 indexer: use async connection for persist_packages commit 38fcee941baf687bfb7d43a330f2dd9f2b5cfab5 Author: Brandon Williams Date: Fri Sep 6 15:41:06 2024 -0500 indexer: use async connection for persist_events_chunk commit 103d64a3d14d901d16b14d700a7869bf2fd8719c Author: Brandon Williams Date: Fri Sep 6 15:38:20 2024 -0500 indexer: use async connection for persist_transactions_chunk commit bd74e55fa5307f67a5f61815b383213c9b22d4bb Author: Brandon Williams Date: Fri Sep 6 15:30:47 2024 -0500 indexer: use async connection for persist_objects_history_chunk commit c766f5b69e91e2c4c2c79b76a3e2ec0a28c89f45 Author: Brandon Williams Date: Fri Sep 6 15:25:29 2024 -0500 indexer: use async connection for backfill_objects_snapshot_chunk commit 6ec8b6d02ba01145820aad5f96535d43c4b82a8c Author: Brandon Williams Date: Fri Sep 6 15:21:03 2024 -0500 indexer: use async connection for persist_object_deletion_chunk commit b4de2c5b5457fe184986dbc29b2b7a8e6a16727a Author: Brandon Williams Date: Fri Sep 6 15:15:24 2024 -0500 indexer: use async connection for upload_display commit 6fb2b8fd4276dc36bb7263e2371e2c90c927da5e Author: Brandon Williams Date: Fri Sep 6 15:13:19 2024 -0500 indexer: use async connection for get_network_total_transactions_by_end_of_epoch commit 21225dd36c5c6913985b9e32fbf291cc39a7c789 Author: Brandon Williams Date: Fri Sep 6 15:09:17 2024 -0500 indexer: use async connection for persist_display_updates commit 0f04fe1ae6a2a0787b7b1511ab1c8573db9af7d4 Author: Brandon Williams Date: 
Fri Sep 6 15:08:12 2024 -0500 indexer: add async transaction_with_retry function Introduce an async version of the transactional_blocking_with_retry macro but in function form for improved readability. commit 66df62832ba488b49385649deac8a2a9a1c63eb6 Author: Brandon Williams Date: Thu Sep 5 17:55:36 2024 -0500 indexer: use async connection for get_transaction_range_for_checkpoint commit b996c58d6ab1a642fb02256695c1869bed9329c2 Author: Brandon Williams Date: Thu Sep 5 17:54:19 2024 -0500 indexer: use async connection for get_checkpoint_range_for_epoch commit 15d326ebafe228e3653a75ac6c537b833cb5eded Author: Brandon Williams Date: Thu Sep 5 17:52:55 2024 -0500 indexer: use async connection for get_min_prunable_checkpoint commit 48ab15bd8583204e2e32dd6c870ebcd0d1bcd585 Author: Brandon Williams Date: Thu Sep 5 17:51:02 2024 -0500 indexer: use async connection for get_latest_object_snapshot_checkpoint_sequence_number commit eea8f719981b00df4091ebcd4263d1268329a249 Author: Brandon Williams Date: Thu Sep 5 17:50:07 2024 -0500 indexer: use async connection for get_available_checkpoint_range commit 24259ef17f7c5c48be7f13136d991867ae329a41 Author: Brandon Williams Date: Thu Sep 5 17:48:46 2024 -0500 indexer: use async connection for get_available_epoch_range commit 7825266798acc4e2d8195450e6335a9654618d6b Author: Brandon Williams Date: Thu Sep 5 17:47:26 2024 -0500 indexer: use async connection for get_chain_identifier commit 3cb32486957fe5b4639b631b1d27bed7a14d8b5e Author: Brandon Williams Date: Thu Sep 5 17:44:45 2024 -0500 indexer: use async connection for get_latest_checkpoint_sequence_number commit b032f03d7f94b4386a2e4b9b4c651c1ca952dc9d Author: Brandon Williams Date: Thu Sep 5 17:39:54 2024 -0500 indexer: remove unused get_latest_epoch_id method commit 9c3f35ef62f0164aab3cf5367a3d6c9d8a43c630 Author: Brandon Williams Date: Thu Sep 5 17:34:22 2024 -0500 indexer: move diesel::QueryDsl trait import into methods Move the diesel::QueryDsl trait import into the methods that 
need it in preparation for converting each function to use an async connection. commit 749002fbd1b1b801c73f59d73db8e137c0b5b243 Author: Emma Zhong Date: Mon Sep 9 09:47:57 2024 -0700 [indexer] create obj snapshot type indexes for performance (#19274) ## Description I noticed I forgot to merge the creation of these indices into `main` branch so adding it now. These are results of the object query performance investigation from awhile ago. ## Test plan existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 915deb4bbfaee4e2e0a7dc712cda2042507bbd35 Author: nikos-kitmeridis <145983019+nikos-kitmeridis@users.noreply.github.com> Date: Mon Sep 9 19:40:21 2024 +0300 [Rosetta] Support coin standard (#19018) ## Description Rosetta support for all coins. ## Test plan Unit and integration tests, CB also did a thorough manual integration test on their end. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: nikos-terzo Co-authored-by: patrick commit f9ac5d5b915ec5e1306581df0673ad398c8b5a39 Author: Brandon Williams Date: Mon Sep 9 11:35:35 2024 -0500 rest: avoid panicking when constructing response headers (#19267) commit db8992f14e27c68aeb3e5d50c8c7a7c54f7c098e Author: Ashok Menon Date: Mon Sep 9 16:32:04 2024 +0100 refactor(graphql): Remove references to df_object_id (#19276) ## Description Remove references to the `df_object_id` column from GraphQL codebase, so that we can eventually remove it altogether. ## Test plan ``` sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-e2e-tests --features pg_integration ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e6fbc5dba4066ce642c489e20b03298b9f065896 Author: Andrew Schran Date: Mon Sep 9 16:31:45 2024 +0100 Add support for overriding ProtocolConfig fields by env variable (#19253) For use in local networks, or possibly for emergency response. 
Tested with: `SUI_PROTOCOL_CONFIG_OVERRIDE_ENABLE=1 SUI_PROTOCOL_CONFIG_OVERRIDE_min_checkpoint_interval_ms=1000 RUST_LOG=warn cargo run --bin sui start --no-full-node --force-regenesis` commit 072fcfbe71792906e4c467cf599a50cdc88e6adb Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Sun Sep 8 20:00:00 2024 -0600 [consensus] Switch to certified vote scoring & incrementally score subdags (#18806) After analyzing the results of our Leader Scoring Strategy experimentation. We will be switching from a Vote scoring strategy to a Certified Vote Scoring Strategy. This has shown improvements of about ~90ms for P50 latency with 6K mixed TPS in private-testnet. We can see that this scoring strategy gives us the best distribution of scores, as scores are distributed together across major geographic regions which improves our quorum latency. As part of this change we also moved to an incremental scoring process with a new struct called ScoringSubdag that keeps track of the votes for leaders and relevant stake for eventual reputation score calculation. Also removed the ReputationScoreCalculator & LeaderScoringStrategy components as we are now finalizing on the scoring strategy we will be using. 
Full experiment results : https://www.notion.so/mystenlabs/Leader-Scoring-Strategy-f11bbbd1055e453f9f0f5490544941ed?pvs=4 ## Testing - [x] unit tests & simtests - [ ] run certified vote v2 & v3 with incremental scoring in private-testnet and finalize on one - [ ] run upgrade test from vote scoring to certified vote scoring commit 00e2d712c8ae1608cac4f61c78c645397c23f49c Author: Eugene Boguslavsky Date: Sun Sep 8 17:28:09 2024 -0700 Fix Code Coverage script (#19271) ## Description Fix Code Coverage script due to https://github.com/MystenLabs/sui/commit/89ef73d1409e1bfdba868026890f87f9cfa8f67c#diff-9a4f3e4537ebb7474452d131b0d969d89a51286f4269aac5ef268e712be17268 ## Test plan 👀 commit 2a06af6216333774118217fab789d4bd8c6bac85 Author: Jordan Gensler Date: Sun Sep 8 12:22:54 2024 -0400 Doc fixes (#19270) commit dd951f88d060cb039ef83fa596e6cfd2f5d6eda2 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 7 17:39:26 2024 -0700 [bridge-indexer] revamp task (#19245) ## Description This PR reworks `Tasks`: 1. get rid of trait `Tasks` and create struct `Tasks` instead. 2. add `is_live_task` field to `Task` 3. pass `Task` to several functions instead of its parameters. 4. for ingestion framework, use a custom batch read size for backfill tasks (this significantly improves the data download speed) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2616286c377b61affc9b7bf8ce9f391176f44dbd Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Sat Sep 7 16:13:10 2024 -0700 [bridge-indexer] Integrate progress saving policy (#19244) commit 2e41207ea1b4098f56c11359643dbd1f01c5d628 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Sep 6 23:12:54 2024 -0700 [bridge-indexer] Progress saving policy (#19243) ## Description This PR introduces `ProgressSavingPolicy` to deal with two problems: 1. The current implementation has a bug on Sui side - checkpoints data arrive out-of-order (e.g. checkpoint 10 may be processed earlier than checkpoint 9), so existing `save_process` may cause us to miss blocks. 2. In current implementation we need to write progress to DB for every call to `save_process`. This can be optimized to cache progresses in memory and flush them periodically or conditionally. We add two types of `ProgressSavingPolicy`, `SaveAfterDuration` and `OutOfOrderSaveAfterDuration`: * `SaveAfterDuration` only flushes the progress to DB after a period of time * `OutOfOrderSaveAfterDuration` assumes the data is out of order, and will only write height N when it makes sure everything before N has been received. * ## Test plan unit tests and production deployment. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 62eab6013f8570668557181c6d0a368efc34f168 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Sep 6 22:38:40 2024 -0700 finalize bridge committee on mainnet in protocol version 58 (#19246) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c1384f97e67b805f5a915742892206a65f120b54 Author: Andrew Schran Date: Sat Sep 7 02:31:03 2024 +0100 Add consensus message type for uncertified user transactions (#19264) commit ff93134c48f30cfd3bdf553d45edc2e08ed44ce7 Author: Todd Nowacki Date: Fri Sep 6 16:04:58 2024 -0700 [move-compiler] Small changes to visitors for upcoming improvements (#19255) ## Description - Made all visitors immutable - Added mutable variant for typing, but the lints cant use it - Gave the CFG to the construction of cfgir/absint visitors - helpful for filtering ## Test plan - non-functional --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 53244c1e46c63959b6f9c1685c41622bd709483a Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Fri Sep 6 15:19:12 2024 -0700 [benchmark] Add error types (#19265) Follow up to this [comment](https://github.com/MystenLabs/sui/pull/19235#discussion_r1746372963) commit 81afd44fd360fe6916f9427a20262f3477a65448 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Sep 6 14:43:35 2024 -0700 [bridge-indexer] split out storage component in bridge indexer into its own file (#19238) ## Description This is just moving things around. It makes the next PR smaller. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9cc02b43b4762ddc2017cdaf4ffd8c9033abeb4c Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Sep 6 14:40:44 2024 -0700 [doc] add validator runbook for bridge (#19257) ## Description Add instructions for validators to register bridge metadata. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f811a09a115ec86f2403834ad508d40ea573d3da Author: Brandon Williams Date: Fri Sep 6 13:53:34 2024 -0500 indexer: remove unused spawn_blocking method from IndexerReader commit 1edcf13f386f0d74c2b217e732ec28f2e8804dbe Author: Brandon Williams Date: Fri Sep 6 13:52:40 2024 -0500 indexer: remove blocking pool from IndexerReader commit 94f5cb2e09cf380964ae1dad5d0b35f61edf6ece Author: Brandon Williams Date: Fri Sep 6 11:28:19 2024 -0500 indexer: refactor migration logic to use an async connection commit 91aaa8877143660861e38c825d16c0b79a305ad9 Author: Xun Li Date: Fri Sep 6 14:01:58 2024 -0700 [Indexer] Add full_objects_history table (#19227) ## Description This is a redo of https://github.com/MystenLabs/sui/pull/18994, but instead of merging to parking branch, merge it to main. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5928a0d08d9ac7f83c5689c6094227bcfa8b5cdd Author: Eugene Boguslavsky Date: Fri Sep 6 12:46:01 2024 -0700 Check for PR number before running release notes check (#19256) ## Description Check for PR number before running release notes check ## Test plan 👀 commit 47ad4681b22b045745cd67c8de4562c05a2f28fd Author: jk jensen Date: Fri Sep 6 11:23:42 2024 -0700 [suiop] add date to message output (#19233) ## Description Add date output to make it clear when an incident was closed. Add the slack channel url to short output. 
## Test plan ``` DEBUG=true cargo run -- i r -i --limit 1 Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.54s Running `/Users/jkjensen/mysten/sui/target/debug/suiop i r -i --limit 1` 2024-09-05T20:52:01.898863Z INFO suiop: Debug mode enabled 3511:0d [FIRING:1] disk used % Infrastructure (/dev/md1 mainnet ext4 ord-mnt-rpcbig-03 localhost:9091 node-exporter /opt/sui mainnet pagerduty/nre rpc active) (https://mystenlabs.pagerduty.com/incidents/Q1OS8GCY2HHNEB) > Keep this incident for review? Yes Incidents marked for review: 3511 Here is the message to send in the channel: Hello everyone and happy Thursday! We have selected the following incidents for review: • 3511 09/05/24 [FIRING:1] disk used % Infrastructure (/dev/md1 mainnet ext4 ord-mnt-rpcbig-03 localhost:9091 node-exporter /opt/sui mainnet pagerduty/nre rpc active) and the following incidents have been excluded from review: Please comment in the thread to request an adjustment to the list. > Send this message to the #test-notifications channel? Yes ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c8c0f602a175f9b85fa98c02abdeda44b858768c Author: Brandon Williams Date: Fri Sep 6 13:14:52 2024 -0500 graphql: migrate graphql to use async connections (#19234) commit e22b3656d2ba5fd16ad7034dd79b26e71b4c06d7 Author: Eugene Boguslavsky Date: Fri Sep 6 11:03:01 2024 -0700 Add debug builds back for all platforms (#19250) ## Description Add debug builds back for all platforms ## Test plan https://github.com/MystenLabs/sui/actions/runs/10741384417/job/29791637040 commit bdf306f3582cdbf42694a63454733908458a9a00 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Fri Sep 6 13:49:03 2024 -0400 Version Packages (#19222) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/create-dapp@0.3.19 ### Patch Changes - @mysten/dapp-kit@0.14.19 ## @mysten/dapp-kit@0.14.19 ### Patch Changes - Updated dependencies [22844ae] - @mysten/zksend@0.11.1 ## @mysten/zksend@0.11.1 ### Patch Changes - 22844ae: Add network param to zksend links Co-authored-by: github-actions[bot] commit 13a60fe90abd8d5b3055938deff027f1ecdde719 Author: Brandon Williams Date: Fri Sep 6 11:55:56 2024 -0500 authority_aggregator: always request for events when asked (#19249) Change AuthorityAggregator to always pass through request_events flag, even to the validators we aren't sampling for objects. In addition, up the sample size of validators to 10 and log when we get unlucky and reach quorum without having received input or output objects. 
commit eabd7736c99cd0c5ad672bcacbd49784f9690c37 Author: John Martin Date: Fri Sep 6 08:59:24 2024 -0700 Push metrics bridge (#19077) ## Description This adds the ability for a bridge node operator to push metrics to an external metrics proxy for aggregation. The implementation is mostly copied from https://github.com/MystenLabs/sui/blob/main/crates/sui-node/src/metrics.rs ## Test plan Tested in testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a58ad2e2b2e1067d1acbc6cd841a87aaf623c16c Author: Ashok Menon Date: Fri Sep 6 16:49:12 2024 +0100 fix(indexer): Store JSON-RPC URL in String (#19248) ## Description `Url` elides ports if they are the default port for the URL scheme, and `jsonrpsee` needs the port to be provided explicitly, and never the twain shall meet. ## Test plan Run a local indexer talking to a mainnet DB and a public good mainnet fullnode. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c2b41689c647a25d95f1e014ada4e12527efda7d Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Fri Sep 6 11:28:30 2024 -0400 indexer: upload display to GCS (#19196) ## Description upload display table in csv to GCS for later restoring from there, as it's now possible to restore from formal snapshot. ## Test plan local run and verify that files are on GCS https://console.cloud.google.com/storage/browser/mysten-mainnet-display-table;tab=objects?forceOnBucketsSortingFiltering=true&hl=en&project=fullnode-snapshot-gcs&prefix=&forceOnObjectsSortingFiltering=false ![Screenshot 2024-09-03 at 12 42 29 PM](https://github.com/user-attachments/assets/d1921af2-688c-4c1b-adef-3117bf07cb03) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: need to add a cred before next deployment to enable uploading. - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6f9e7a066ad6ee29cd4860cb9b7b834fef8b762f Author: Ashok Menon Date: Fri Sep 6 15:24:17 2024 +0100 fix(indexer): Leverage index for singleton query (#19247) ## Description Make sure the query to fetch singleton objects in `IndexerReader` can leverage the type index on the `objects` table. 
## Test plan ``` # In one session sui$ cargo run --bin sui --features indexer \ -- start --force-regenesis --with-indexer # In another session sui$ curl -LX POST "http://localhost:9124" \ --header 'Content-Type: application/json' \ --data-raw '{ "jsonrpc": "2.0", "method": "suix_getCoinMetadata", "id": 1, "params": ["0x2::sui::SUI"] }' | jq -C . { "jsonrpc": "2.0", "result": { "decimals": 9, "name": "Sui", "symbol": "SUI", "description": "", "iconUrl": null, "id": "0x46fe2b6c623e02dd06c74552dc9d6234cb46abb747b1e5936f699a4b2b8df2e8" }, "id": 1 } ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 03d1667cde26e433de27fea99c72da667067e080 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Fri Sep 6 08:42:53 2024 -0400 [data ingestion] introduce upper limit for ingestion job (#19225) ## Description Introduces a new parameter: an upper limit for data ingestion. Once the framework reaches this checkpoint, it will gracefully shut down --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 072ebd9e397a506f0dfc1b7bf3242dca6e541ae4 Author: William Smith Date: Thu Sep 5 22:20:39 2024 -0400 [core] Force enable state accumulator v2 (#19231) ## Description We have removed the ability to disable state accumulator v2 previously by ignoring the config parameter (https://github.com/MystenLabs/sui/pull/18786), however if a node that had v2 disabled has been running since before this change without shutting down, the old config would still be stored in memory. This PR forces enabling of v2. Once we have reached the epoch change after all nodes are running this code, we can safely merge https://github.com/MystenLabs/sui/pull/18764. ## Test plan Existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 26afa0c8c2b7d1e2fac85f764e733354b56bf953 Author: Eugene Boguslavsky Date: Thu Sep 5 19:02:00 2024 -0700 Sui v1.34.0 Version Bump (#19241) ## Description Sui v1.34.0 Version Bump ## Test plan 👀 commit 5fb5da2a125644891de948e8a11d80ef572a7cc3 Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Thu Sep 5 18:35:15 2024 -0700 [benchmark] Count tx as success only if effects are ok (#19235) This is more of an issue now that we have congestion control mechanisms in place where transactions can get cancelled after consensus. 
We also have not run our benchmark suite routinely to push these more congested scenarios so we have not run into this issue before. commit 6c732bf42ca374e9f1c4fa1aa9b43e79c463de7b Author: Eugene Boguslavsky Date: Thu Sep 5 18:16:51 2024 -0700 Sui v1.33.0 Bytecode Framework Snapshot (#19240) ## Description Sui v1.33.0 Bytecode Framework Snapshot ## Test plan 👀 commit 0c02e0735a8d6ec26f95f2f5d62d0c29f20b59ba Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Sep 5 18:12:53 2024 -0700 [CI] do not ignore failures from Split Cluster Check in PRs (#19218) ## Description Currently when the check runs in PR CI, the failure is ignored and bisect does not run. So the workflow succeeds when it shouldn't. For example, https://github.com/MystenLabs/sui/actions/runs/10641580676/job/29502904956#step:3:2268 ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c86143bbfecc0a4cdec48e5917c358562f83450e Author: Bridgerz Date: Thu Sep 5 17:52:53 2024 -0700 Add storage gap variable to the CommitteeUpgradeable contract (#19228) ## Description In the case the `CommitteeUpgradeable` contract needs to include more storage variables, a storage gap variable is needed to reserve storage slots so the child contract state is not overwritten. 
## Test plan Contract upgrade unit tests commit 611056816a1e01f482e329b46ac9b3fd8a28a39c Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Sep 5 17:18:15 2024 -0700 [bridge-indexer] batch processing checkpoints and indexing progress metrics (#19179) ## Description This PR does two things: 1. pull checkpoint batches to processed when there are multiple, rather than one-by-one. This improves the efficiency especially for write_process 2. add metrics to check indexing progress. ## Test plan Deployed in production. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fa776a03b4d94079aec74e200d275c340e3325fb Author: Adam Welc Date: Thu Sep 5 17:12:42 2024 -0700 [trace-view] Added support for stepping back through the trace (#19215) ## Description This PR adds the ability to step back through the trace. The main limitation is that stepping back into or over (previously executed) function calls is not supported. We will add this support after support for viewing variables is added as it will require snapshotting variable state (which we do not have at the moment). 
## Test plan Tested manually that: - viewer correctly steps back within the function and from inside the callee to the caller - viewer correctly stops stepping back at the beginning of the trace - viewer correctly stops stepping back upon encountering previously executed function call commit 2b3991a841ce655bd790abfcbbdc5d2f27e14448 Author: Xun Li Date: Thu Sep 5 16:28:12 2024 -0700 [indexer] Compatibility check using migration records (#19156) ## Description Previously we check DB compatibility by making sure that we could make select query on all columns to the DB based on the locally known schema. This doesn't cover all cases, for instance, there could be tables in the DB that does not exist in the local schema. This PR changes how we do the compatibility check by fully leveraging the migration records. It checks that the migration records in the DB must fully match with the locally known one. It also moves the code to sui-indexer crate, so that we could do this check on the startup of both indexer and graphql server. This does require that from now on we fully respect the migration scripts, and don't do adhoc modifications on the existing migration. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 89737f7ca52a6be1047a02a3ffb9ca319098017b Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Sep 5 14:39:23 2024 -0700 [indexer] Objects Snapshot Processor continuously reads from stream (#19232) ## Description In ci, we saw that the `objects_snapshot_processor` stalls if there are unprocessed checkpoint data that we cannot commit unless we have a continuous stream of checkpoints from `[start_cp, max_allowed_cp]`. To address this, the processor should continue to read from stream whether `unprocessed` is empty or not. ## Test plan Ran indexer manually to observe that `objects_snapshot_processor` doesn't stall --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 369d44baab028fd3c685fb95ce4a02196e1324eb Author: Todd Nowacki Date: Thu Sep 5 14:24:02 2024 -0700 [move stdlib] Add `fun bitwise_not` and `macro fun max_value` (#19126) ## Description - Added a bitwise not function, `fun bitwise_not` to `u*` modules - Added a maximum value macro, `macro fun max_value` to `u*` modules ## Test plan - New tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move integer modules now have a `bitwise_not` function and a `max_value` macro function. - [ ] Rust SDK: - [ ] REST API: commit 7d0ebb9d8039cf4a5d592ebe842f49770117ca37 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Sep 5 17:22:47 2024 -0400 indexer: clean up legacy snapshot codes (#19078) ## Description the legacy method of updating `objects_snapshot` is no longer needed, thus this pr cleans it up ## Test plan ci --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3b3603cc73a6d9ba28ed9ba3f4f00e0682b509d2 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Sep 5 17:16:56 2024 -0400 [data ingestion] handle termination in worker pool (#19192) ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 262390b27ca7befe9bd3a537aae6ebef9f569348 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Sep 5 13:41:06 2024 -0700 [rocksdb] add metric for num level 0 files (#19230) ## Description Too many level 0 files can be a trigger to write stall & stop as well ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d275762040247747ef429eda6309ffbef005c28b Author: jk jensen Date: Thu Sep 5 11:45:36 2024 -0700 [suiop] add interactive incident selection (#19186) ## Description Allow interactive incident review selection with suiop ## Test plan Successful e2e test in #test-notifications --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a831de6cd80001dd33e6f7686cb1f29f00dd35b4 Author: Eugene Boguslavsky Date: Thu Sep 5 11:43:11 2024 -0700 Do not build sui in debug mode for macos-arm64 and delete unused software. (#19210) ## Description Do not build sui in debug mode for `macos-arm64` and delete unused software. 
``` Filesystem Size Used Avail Capacity Mounted on /dev/disk3s1s1 295Gi 9.6Gi 17Gi 37% / Filesystem Size Used Avail Capacity Mounted on /dev/disk3s1s1 295Gi 9.6Gi 89Gi 10% / ``` ## Test plan https://github.com/MystenLabs/sui/actions/runs/10725603432/job/29743813335 commit 9a3a08502ba0c840b47295a6cc332c93c74343b1 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Sep 5 10:41:06 2024 -0700 [graphql] Remove usage of legacy objects snapshot processing (#19175) ## Description A separate task is solely responsible for updating `objects_snapshot`, which means the graphql e2e tests, which depend on forcing `objects_snapshot` changes through an update query, also need to obey this. In lieu of the current behavior, we instead configure the snapshot lag per test scenario, and ``` //# advance-clock --duration-ns 1 //# create-checkpoint ``` to get the desired `objects_snapshot` state ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 28feff4c36806869f2d34d86db46536cda107e0c Author: Brandon Williams Date: Thu Sep 5 10:15:44 2024 -0500 indexer: use async connection for SystemPackageTask commit 5a331b4f1f214b9375ce65ce43bed649db68805a Author: Brandon Williams Date: Thu Sep 5 10:04:29 2024 -0500 indexer: use async connection for get_object commit 277479e483fbf23c074d656784db782fd03203a0 Author: Brandon Williams Date: Thu Sep 5 09:57:44 2024 -0500 indexer: remove impl ObjectStore for IndexerReader commit 0348913d6b08660e3027179f6cce4b9d7079f5a1 Author: Brandon Williams Date: Thu Sep 5 09:22:23 2024 -0500 indexer: use async connection for get_object_read commit 8d7bcb2e95d717a06d0f7dfd5b0285ef24ba1645 Author: Brandon Williams Date: Thu Sep 5 09:18:39 2024 -0500 indexer: use async connection for get_owned_objects commit 21a20469be7e5dc2a0008ebaf7b07abd63e0690e Author: Brandon Williams Date: Thu Sep 5 09:15:27 2024 -0500 indexer: use async connection for multi_get_objects commit a0e193ba62eb2f90e44c5beaf877d6a2945e6733 Author: Brandon Williams Date: Thu Sep 5 09:12:27 2024 -0500 indexer: use async connection for query_transaction_blocks commit dc6c4b4569722dde4d4380a6fe3919de6c65ce6d Author: Brandon Williams Date: Thu Sep 5 09:06:39 2024 -0500 indexer: use async connection for query_events commit 47778a2bd281a4aff635f1b51dda3ef49b9dc65e Author: Brandon Williams Date: Thu Sep 5 09:00:21 2024 -0500 indexer: use async connection for get_transaction_events commit 76f45f5c0ea63b5dc0389f54aaeed25fa30fe8e3 Author: Brandon Williams Date: Thu Sep 5 08:56:04 2024 -0500 indexer: use async connection for multi_get_transactions_with_sequence_numbers commit 88ca95177906d5978f48c8aec74732c4cb294a8f Author: Brandon Williams Date: Thu Sep 5 08:51:36 2024 -0500 indexer: use async connection for multi_get_transactions commit c1c0d255baaa8d5959484404d58830d99cdc94cd 
Author: Brandon Williams Date: Thu Sep 5 08:48:41 2024 -0500 indexer: use async connection for get_dynamic_fields commit 4b2777d78560611cd21fa0b53c4a2a45600bb629 Author: Brandon Williams Date: Thu Sep 5 08:41:20 2024 -0500 indexer: use async connection for get_object_refs commit 3a30a4d5c17e1b5f2bfce425a8f5360fefcf2235 Author: Brandon Williams Date: Wed Sep 4 19:37:51 2024 -0500 indexer: use async connection for get_coin_balances commit beec6c0315a6379232af7d97664a281d78edcb62 Author: Brandon Williams Date: Wed Sep 4 19:35:16 2024 -0500 indexer: use async connection for get_owned_coins commit ba937ca3f87905be3c08af1eb675aab0a08de1be Author: Brandon Williams Date: Wed Sep 4 17:11:30 2024 -0500 indexer: use async connection for get_display_object_by_type commit 58eabc52a3fac08c340f5b8840588724fff7c12c Author: Brandon Williams Date: Wed Sep 4 17:05:18 2024 -0500 indexer: use async connection for get_coin_metadata and get_total_supply commit 4eb829150aeef3deb91f4f85d6e8e87faa61918b Author: Brandon Williams Date: Wed Sep 4 16:21:06 2024 -0500 indexer: use async connection for get_checkpoints commit 7b45e7aeb14002e27f6c42cce9c48419bd2c1dcf Author: Brandon Williams Date: Wed Sep 4 16:18:40 2024 -0500 indexer: use async connection for get_latest_checkpoint commit 373c0ab28ce08ee08aa1c94a3422cfca8371c084 Author: Brandon Williams Date: Wed Sep 4 16:14:09 2024 -0500 indexer: use async connection for get_epochs commit 69563730855e2c92c64a191520b6565bbda2eaf0 Author: Brandon Williams Date: Wed Sep 4 16:07:08 2024 -0500 indexer: remove unused get_consistent_read_range method commit fb1fcb0653b04b2307d57df6b4387d87b06265db Author: Brandon Williams Date: Wed Sep 4 16:03:57 2024 -0500 indexer: use async connection for get_epoch_info commit a95f48deb0bb1ba695dc764b88b6b9638164bc9d Author: Brandon Williams Date: Wed Sep 4 15:39:24 2024 -0500 indexer: use async connection for get_checkpoint commit 07248c9f9b092cadf48b626f69cb93979f7efe19 Author: Brandon Williams Date: Wed Sep 4 
15:09:57 2024 -0500 indexer: use async connection for package resolver commit 728accdb857540d1735a85279c2990b700a61c44 Author: Brandon Williams Date: Wed Sep 4 15:09:51 2024 -0500 indexer-writer: instantiate async connection pool commit bd96deea1e0a142b862e96f3c9befb0146ef8670 Author: Brandon Williams Date: Wed Sep 4 13:23:29 2024 -0500 indexer-reader: instantiate async connection pool commit 829d03ad52170dd1d833aeb8369e9c74fd28bc2e Author: Brandon Williams Date: Wed Sep 4 12:36:48 2024 -0500 indexer: perform database reset via async connection commit 0e78656655529c130e6b680e0962ff6204306713 Author: Andrew Schran Date: Thu Sep 5 17:10:00 2024 +0100 Add metric for active time of monitored futures (#19226) commit bb778828e36d53a7d91a27e55109f2f45621badc Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Sep 5 10:43:54 2024 -0400 repair coin index (#19142) temporary PR. This PR introduces a background task to repair the `coin_index` and remove any dangling entries. The PR will be active for one release and will be reverted afterward. The background task works by iterating over a snapshot of the `coin_index`, identifying coins that no longer belong to their respective owners, and populating a list of candidates for removal(some entries might be benign) Once the candidate list is populated, the task makes a second pass over the candidates list. This time it locks the corresponding entries to prevent potential races with concurrent writes. The task then reverifies the eligibility criteria and removes the dangling entries commit 22844ae53bb2f59fdf790fadaa9ad1b7e5a7d94e Author: Jordan Gensler Date: Thu Sep 5 08:36:47 2024 -0400 Update zkSend docs (#19170) commit 2d019963d9bfebfcf09940e623ecafd0b6bcfcb8 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Sep 4 22:24:41 2024 -0700 Revert "Remove RandomnessStateUpdate" from ConsensusTransactionKind (#19217) ## Description It turns out BCS serializes enum purely based on the variant order. 
The discriminant / tag does not matter. https://github.com/diem/bcs/blob/master/README.md#externally-tagged-enumerations Also revert tagging of the enum since it is no longer useful. ## Test plan SUI_PROTOCOL_CONFIG_CHAIN_OVERRIDE=testnet scripts/compatibility/split-cluster-check.sh origin/testnet a6336e6390b31379c7d41ef0d6fba4f966fad00c --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1ebf5f8a395ca00f7cdcfa19e988975d8ccc15a3 Author: Ashok Menon Date: Thu Sep 5 01:04:20 2024 +0100 test(graphql): Remove explicit cleanup_resources (#19201) ## Description These were originally added to try and deal with a deadlock issue, which was due to an issue with `inotify` on macOS, fixed by #19195. These calls are safe to remove (also note that if the test failed, they would never be hit, because the assert would cause a panic before they ran). ## Test plan Rebase on top of #19195, run the following on macOS: ``` sui$ cargo nextest run \ -j 1 -p sui-graphql-rpc \ --test e2e_tests \ --features pg_integration ``` This used to intermittently hang, but now succeeds. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e1abe0963b579d69471ce40bd465d63fb80f0f41 Author: Zhe Wu Date: Wed Sep 4 16:31:30 2024 -0700 Add a metric to track the number of managed files per column family in RocksDb (#19214) We don't add more detailed level to control the cardinality of the metrics. Manually tested in local cluster: Screenshot 2024-09-04 at 3 11 22 PM ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bf303ab96cb7c8baeea1a6a78fdb75e32fb99a6b Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Wed Sep 4 15:59:56 2024 -0400 Version Packages (#19212) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/deepbook-v3@0.5.0 ### Minor Changes - c53baf2: Redeploy packages Co-authored-by: github-actions[bot] commit c53baf24d4759ea88f7f5aaf5a11495047ef058b Author: Tony Lee Date: Wed Sep 4 15:48:55 2024 -0400 Testnet update after campaign (#19147) ## Description Package updates with latest changes ## Test plan How did you test the new or updated feature? Testnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bf8a3ae6387ec3e8dc3abb15d4f4f217bf948130 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Sep 4 12:11:00 2024 -0700 [Narwhal] remove Narwhal crypto usages from Sui (#19205) ## Description This is another step in removing Narwhal integration with Sui. Narwhal crypto types used in Sui are replaced with the corresponding Sui types. There should be no difference in logic or serialized formats. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6a0adb5de7f3fb87d3aa23364f200b542e3e8b87 Author: Sadhan Sood <106645797+sadhansood@users.noreply.github.com> Date: Wed Sep 4 11:51:33 2024 -0700 Delete sst files in key range in rocksdb (#19211) ## Description This PR adds a rocksdb endpoint to delete .sst files in key range in rocksdb which is useful to prune data (in certain scenarios) without compaction. commit a22b71b70d6330f33c165198c194e4b716bc953b Author: Andrew Schran Date: Wed Sep 4 19:51:19 2024 +0100 add monitored scopes to CheckpointExecutor (#19209) commit aa9dd4221020dc59d7576d938d8e2954671927ae Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Wed Sep 4 09:11:37 2024 -0400 [data ingestion] disable inotify for macos (#19195) ## Description prevents local tests from hanging ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e7437194b6e634660b9c27e7a86b2eb6f2d34d29 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Sep 3 21:03:41 2024 -0700 [bridge] enable bridge on mainnet (#19200) ## Description enable bridge creation on mainnet by setting bridge is true. Before this change, the value true for testnet and devnet, but not mainnet. So this only applies to mainnet. ## Test plan ### Mainnet * expect 56, 57, 58 all have bridge = true * [old 57](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_57.snap) v.s. 
[new 58](https://github.com/MystenLabs/sui/blob/bb25a072dac4848337c111d4fefeb0256fa36cd8/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_58.snap): https://www.diffchecker.com/AbcQx7DQ/ * [old 56](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_56.snap) v.s. [new 57](https://github.com/MystenLabs/sui/blob/bb25a072dac4848337c111d4fefeb0256fa36cd8/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_57.snap): https://www.diffchecker.com/E3b9mlHd/, notably random_beacon_reduction_lower_bound: 800 in new 57, which was in old 56 * [old 56](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_56.snap) v.s [new 56](https://github.com/MystenLabs/sui/blob/bb25a072dac4848337c111d4fefeb0256fa36cd8/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_56.snap): https://www.diffchecker.com/EkWIOCeQ/ this reverts [@Andrew Schran](https://mysten-labs.slack.com/team/U03TDESBNR0) ’s change of random_beacon_reduction_lower_bound , now back to 1000. ### Testnet * unlike mainnet, bridge has been set to true months ago, so expect no change for this parameter. * [old 57](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_57.snap) v.s. 
[new 58](https://github.com/MystenLabs/sui/blob/bb25a072dac4848337c111d4fefeb0256fa36cd8/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_58.snap): https://www.diffchecker.com/bc7A5fVn/, no change * [old 56](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_56.snap) vs [new 57](https://github.com/MystenLabs/sui/blob/bb25a072dac4848337c111d4fefeb0256fa36cd8/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_57.snap): https://www.diffchecker.com/XaktzhpC/ no change. notably random_beacon_reduction_lower_bound: 800 in new 57, which was in old 56 * [old 56 ](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_56.snap)v.s [new 56:](https://github.com/MystenLabs/sui/blob/bb25a072dac4848337c111d4fefeb0256fa36cd8/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_56.snap) https://www.diffchecker.com/SeNewiDN/ this reverts [@Andrew Schran](https://mysten-labs.slack.com/team/U03TDESBNR0) ’s change of random_beacon_reduction_lower_bound , now back to 1000. * effectively version 56 is a no-op for testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4b0d129d0548a7e36dc549c499529087c8999a0a Author: Andrew Schran Date: Wed Sep 4 01:30:52 2024 +0100 Add monitored scope for single threaded checkpoint builder (#19197) commit 76995edaa85ea9f41d403da76954a8e047013056 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Sep 3 17:18:25 2024 -0700 [Docs] update references and metrics post Mysticeti launch (#19198) ## Description Checkpoint rate is now ~4/s post Mysticeti launch. Narwhal and Bullshark are no longer running in Sui and references to them need to be migrated to Mysticeti. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit dfe05bf6411c969327945b41103be49d22b2162d Author: Pika Date: Wed Sep 4 07:45:34 2024 +0800 Update swaps.mdx (#19187) fix typo `coin::zer()` --> `coin::zero()` ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 04e6b0f3e4de536e68fe757d628ab66fc30aa582 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Sep 3 16:40:23 2024 -0700 Version Packages (#19168) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/sui@1.8.0 ### Minor Changes - 569511a: Add data to result of executeTransaction methods on Transaction executor classes ## @mysten/zksend@0.11.0 ### Minor Changes - 4bdef4a: Add support for testnet in Stashed and zkSend SDKs. ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 - @mysten/wallet-standard@0.13.3 ## @mysten/create-dapp@0.3.18 ### Patch Changes - Updated dependencies [569511a] - Updated dependencies [012aefe] - @mysten/sui@1.8.0 - @mysten/dapp-kit@0.14.18 ## @mysten/dapp-kit@0.14.18 ### Patch Changes - 012aefe: Support passing network param through to stashed wallet - Updated dependencies [4bdef4a] - Updated dependencies [569511a] - @mysten/zksend@0.11.0 - @mysten/sui@1.8.0 - @mysten/wallet-standard@0.13.3 ## @mysten/deepbook@0.8.17 ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 ## @mysten/deepbook-v3@0.4.3 ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 ## @mysten/enoki@0.4.1 ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 - @mysten/zklogin@0.7.18 ## @mysten/graphql-transport@0.2.17 ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 ## @mysten/kiosk@0.9.17 ### Patch Changes - Updated 
dependencies [569511a] - @mysten/sui@1.8.0 ## @mysten/suins-toolkit@0.5.17 ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 ## @mysten/wallet-standard@0.13.3 ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 ## @mysten/zklogin@0.7.18 ### Patch Changes - Updated dependencies [569511a] - @mysten/sui@1.8.0 Co-authored-by: github-actions[bot] commit 569511aceb344ad31dea30084938bf9ccffa5bc9 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Sep 3 16:20:17 2024 -0700 Add data to result of executeTransaction methods on Transaction execu… (#19202) …tor classes ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f351cba7898f7a18ad9dc5a27d0fbefe6eb7dead Author: Adam Welc Date: Tue Sep 3 16:03:49 2024 -0700 [move-ide] Fixes auto-completion for fields (#19176) ## Description This PR fixes field auto-completion in two ways: - adds support for struct fields (previously only variant fields were being auto-completed) - named fields are now listed in their definition order (which should typically be what a developer wants) - finesses auto-completion formatting (previously all fields where inserted on a single line, and now named fields are inserted on separate lines if there are more than two fields) - adds a no-field auto-completion option (for when a struct or variant is used as a type and not in pack/unpack context) ## Test plan All tests must pass commit e03a8abd7b4a4e3ef22eb405246361349e3cce49 Author: Cam Swords Date: Tue Sep 3 15:58:14 2024 -0700 [move][move-vm] Add more benchmarks, slightly reorganize the code (#18864) ## Description This adds a few more benchmarks to the current VM benchmark suite, plus reorganizes it slightly ## Test plan `cargo bench` in the directory --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8e713e26c9b013955ce19544c345d0626e8fce96 Author: Pankaj Jangid Date: Wed Sep 4 03:51:36 2024 +0530 Fixed incomplete sentence in the docs (#19160) - Follow the instructions here to run your own Sui Full. + Follow the instructions here to run your own Sui Full Node. 
--------- Included some commit chatter likely due to rebase Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 32c7828da902476ff344effe8467b7331bfda508 Author: Pika Date: Wed Sep 4 06:20:09 2024 +0800 fix typo in deepbookv3-sdk.mdx doc (#19181) commit fa7419bfcfeffaa816e3232c5d6af5db67dc7ca8 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Sep 3 15:14:36 2024 -0700 Refactor ConsensusTransactionKind (#19177) ## Description A few small refactors: - Remove the deprecated RandomnessStateUpdate variant - Rename UserTransaction to CertifiedTransaction. UserTransaction will be used for the non-certified variant later. - Tag variants so serialization is unaffected when variants are reordered. Using `repr(u8)` should be compatible with the varint serialization of default tag type. ## Test plan CI. Upgrade tests in simulation and PT. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 87ec25b6563bb38c80d8ea378ca824dd4dae9889 Author: Cam Swords Date: Tue Sep 3 14:03:21 2024 -0700 [move][move-2024] Boolean binop optimizations (#18943) ## Description Optimize boolean binop groups to avoid extra locals during HLIR lowering, helping with metering prices. ## Test plan All tests still pass --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 82bf44e58df032e5a8dd5e452c51a1aa6959f15e Author: Andrew Schran Date: Tue Sep 3 19:24:56 2024 +0100 Migrate users of mysten_metrics Histogram to prometheus Histogram (#19124) mysten_metrics variant is difficult to use in Grafana because its precomputation of percentiles makes it impossible to aggregate streams from multiple hosts in a statistically valid way. This keeps a few of the mysten_metrics Histogram versions around to help with the transition of exsiting users. commit dd8e82ccfc6950831a32d9644518e3e0c83ef1e1 Author: Brandon Williams Date: Mon Sep 2 16:34:51 2024 -0500 graphql: convert remaining tests to use ephemeral postgres db Convert remaining graphql tests to use an ephemeral pstrgres db enabling them all to be run in parallel. After this patch the graphql-test workflow takes ~7 minutes compared to ~19 minutes it used to take. 
commit 12890bd598786d8dd53bc1cd10bb0629e13b5bc8 Author: Brandon Williams Date: Mon Sep 2 16:03:05 2024 -0500 graphql: convert examples-validation_tests to use temporary db commit 4205ea4115d4149b47d9c3cfed61fe839385de56 Author: Brandon Williams Date: Mon Sep 2 15:56:34 2024 -0500 sui-cluster-test: convert tests to use temporary postgres db commit c412c82f07bc7fe6792ae5d0fc22a5acee40b064 Author: Brandon Williams Date: Mon Sep 2 15:03:17 2024 -0500 sui-indexer, sui-graphql-e2e-tests: convert tests to use TempDb commit 347c9da11e0dc0b9a62a7ae4bd2150da5b4fe4e8 Author: Brandon Williams Date: Mon Sep 2 14:59:05 2024 -0500 indexer: introduce TempDb and LocalDatabase Introduce LocalDatabase, a local instance of a postgres server, as well as TempDb, a way to create ephemeral postgres databases for use in testing enviornments. commit f11d10319305621b3a7200166a2db5c181169c76 Author: Brandon Williams Date: Mon Sep 2 14:57:35 2024 -0500 indexer: introduce async postgres connection helpers commit e66bdb5a2e2ec0e935778c76c128a9b881856b86 Author: Ashok Menon Date: Tue Sep 3 18:07:57 2024 +0100 feat(graphql): Remove versioning support (#19191) ## Description Removes support for multiple versions from GraphQL, including: - Version parsing - Routing by version - Associated tests - `ServiceConfig.availableVersions` in schema - `[versions].versions` in TOML ServiceConfig This change also removes the only use of some error APIs which have also been cleaned up. The Service now quotes the same version as `sui-node`, etc, so to prevent churn during tests, it has been mocked to a fixed value. ## Test plan ``` sui-graphql-rpc$ cargo nextest run sui-graphql-e2e-tests$ cargo nextest run --features pg_integration ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Removes support for multiple versions. GraphQL's version now aligns with the `sui-node` version, and it drops support for the `ServiceConfig.availableVersions` query. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit efc47d1ae5b6a9ee2bbf6a535f9478f0dda3808b Author: omahs <73983677+omahs@users.noreply.github.com> Date: Tue Sep 3 18:46:39 2024 +0200 [docs] Fix typos (#19190) [docs] Fix typos commit ceeb4c60687daf7376df8119fb49bc14826f3617 Author: Bridgerz Date: Tue Sep 3 09:40:32 2024 -0700 Reintroduce soldeer dependency management and pin foundry version (#19131) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fdc8325abfd62013c83fdee06034609ae60f4c7c Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Sat Aug 31 04:05:56 2024 -0700 [GraphQL] Add a mutation payload size (#18017) ## Description The mutation payload can be a lot higher than a query payload, due to the possibility of passing a large transaction data (e.g., publishing a package). 
This PR adds a different check for when a mutation is requested, and adds a `max_tx_payload_size` variable that holds the max bytes that can be sent through a GraphQL mutation request. The total sum of `txBytes + signatures` of all GraphQL mutation or `txBytes` in a `dryRunTransactionBlock` query have to be below the `max_tx_payload_size`. The `max_tx_payload_size` is computed based on the `protocol_version -> max_tx_bytes` and a Base64 overhead as follows: `max_tx_bytes * 4 / 3` ## Test plan Added several tests. `cd crates/sui-graphql-rpc` `cargo nextest run --features pg_integration -- test_query test_mutation test_dry_run_transaction test_transaction_dry_run` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Added a `max_tx_payload_size` variable to protect against large transaction queries. The sum of `txBytes + signatures` in all GraphQL mutation `executeTransactionBlock` nodes or `txBytes` in `dryRunTransactionBlock` nodes from a query have to be below the `max_tx_payload_size`. The `max_tx_payload_size` is computed based on the `protocol_version -> max_tx_bytes` and a Base64 overhead as follows: `max_tx_bytes * 4 / 3` Added also a check that the overall query size is not larger than `max_tx_payload_size` + `max_query_payload_size`, where `max_query_payload_size` is the `read` part of the query. - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Ashok Menon commit 44ab1f5e7e9f6bccea1ff244c09888fdd19fd02c Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Aug 30 21:52:07 2024 -0700 [bridge-indexer] change live task start point (#19174) ## Description 1. 
now the live task's starting height is always `get_live_task_starting_checkpoint`. See the comment for more consideration. Previously we used a value in config to determine. 2. add `fn get_live_task_starting_checkpoint` and `fn get_genesis_height` to `DataSource` trait. Therefore each datasource implements their own method to pick these values to determine task ranges, as opposed to we do it on `main.rs` today. 3. clean up `fn build` for `IndexerBuilder` by moving existing parameters to elsewhere. 4. remove unused parameters in indexer config ## Test plan tests and more tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b9dd8837bbb837229cdc9c6e6d1d1e22901c987f Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Aug 30 15:52:35 2024 -0700 [bridge-indexer] fix duplicated task creation (#19171) ## Description In function `tasks()` we return only incomplete tasks. As a result, when the latest backfill tasks are done and ignored, we mistakenly use an intermediate value as the `latest target checkpoint` and use it to fill gaps. This causes duplicated tasks. This PR fixes it: 1. rename `tasks` to `get_ongoing_tasks` for semantics 2. add `get_largest_backfill_task_target_checkpoint` and use that to determine whether there is a gap 3. simplify `update_tasks` for backfill task creation 4. add some utils functions for testing. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c661699d3511facf92e30af288c062d3ba39f210 Author: Brandon Williams Date: Fri Aug 30 16:45:58 2024 -0500 indexer: rework config and cli arguments (#19162) This patch reworks a number of the indexer configs and cli arguments to try and centralize where the configuration for the indexer is loaded. This also cleans up the cli arguments into well defined subcommands instead of using boolean flags for deciding if a jsonrpc or indexer writer service should be started. commit 1f9fbfba5bb728286100a3f9d4d3aa0acb5e9a35 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Aug 30 14:10:02 2024 -0700 [bridge-indexer] some logs and todos (#19154) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f64c6ffe4aba85549eb77dac55dfe8657cbc3216 Author: Eugene Boguslavsky Date: Fri Aug 30 13:38:30 2024 -0700 Move release notes validator into its own workflow (#19148) ## Description Move the release notes validator into its own workflow ## Test plan ``` eugene@eugene-dev ~/code/sui/ (ebmifa/fix_release_notes) $ ./scripts/release_notes.py check 19148 Found issues with release notes in 19148: - 'Protocol' is checked but has no release note. ``` ![Screenshot 2024-08-30 at 9 07 27 AM](https://github.com/user-attachments/assets/7665bad5-4b36-4faf-9e41-8195dc935156) ``` eugene@eugene-dev ~/code/sui/ (ebmifa/fix_release_notes) $ ./scripts/release_notes.py generate releases/sui-v1.31.0-release releases/sui-v1.32.0-release ## Protocol #### Sui Protocol Version in this release: `55` https://github.com/MystenLabs/sui/pull/19031: Enable Move enums in mainnet ## Indexer https://github.com/MystenLabs/sui/pull/18899: .... ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: - [x] Nodes (Validators and Full nodes): and here - [x] Indexer: and here - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 012aefe3c5b9b2bae6c33643932a6bff246c3ad7 Author: Jordan Gensler Date: Fri Aug 30 15:46:26 2024 -0400 Stashed wallet dapp-kit (#19169) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 42dbb6253959e1deda9afe4f34c0d717b9d67ccc Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Aug 30 12:14:33 2024 -0700 [Store] disable write stall on fullnodes perpetual db (#19134) ## Description For fullnodes that do not prune the `perpetual` DB, especially `transactions` and `effects` cfs, they can run into write stalls occasionally that makes the fullnode non operational. Since fullnodes have to accept all writes from checkpoints, throttling writes do not seem to make much sense. Write stalling on validators is left enabled. ## Test plan CI Deploy to a few fullnodes. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ddeffaabe0cf0ced3a235912831c62296c73d434 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Aug 30 19:51:41 2024 +0100 Add decode-key command to http_kv_tool (#18990) e.g. 
`http_kv_tool decode-key -u https://transactions.sui.io/mainnet/cP1pPYHRXroEHhXrsD_uy-kbAcH5lZguUEPfocX0zXsIsR0GAAAAAA/ob` commit 724b54cbb8bcb6046d1ee63ba23823261d594cfb Author: Andrew Schran Date: Fri Aug 30 19:26:03 2024 +0100 reduce minimum random beacon shares to 800 (#19165) commit 346775946303e0e432685a7aebbe4ae108ed5e88 Author: Andrew Schran Date: Fri Aug 30 19:25:21 2024 +0100 Put back HandleConsensusOutput scope removed in PR #19089 (#19166) commit 4bdef4a070b590f366730a8aba77251b9a867331 Author: Jordan Gensler Date: Fri Aug 30 14:13:00 2024 -0400 Add testnet support to stashed (#19167) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 59fe76678b288807f4e54bcd2da13cf777fb0da8 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Fri Aug 30 10:38:05 2024 -0700 [indexer][graphql] Pruner prunes only epoch-partitioned tables (#19164) ## Description Currently, the pruner assumes that all partitioned tables are partitioned on epoch, which is an issue since `objects_version` is not partitioned by epoch. Modify the pruner so that it will filter out non-epoch-partitioned tables, and otherwise do the same thing. Change `EPOCHS_TO_KEEP` from an env variable to a config, so we can pass in test values through the transactional test runner, and a prune.move test to validate that we prune epoch data without any trouble. ## Test plan prune.move --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 131004734a2bdd8bf626eee8e7c9e2147f69eb89 Author: Cam Swords Date: Thu Aug 29 21:22:43 2024 -0700 [move][move-2024] Add match fix for typing around literal switches, plus tests (#19133) ## Description This addresses a bug where `abort` was causing mistyped literal arm binders in match compilation. It also addresses some false-positive dead code complaints that I discovered while fixing the bug up. Longer-term, it would be nice to eliminate temp binders of the form `#%1: _|_ = unreachable` from HLIR generation so that CFGIR can invariantly ensure none exist, catching these sorts of issues, but due to multiple-binding forms `(x, y, z) = (abort 0, abort 1, abort 2)` and the current structure of the pass, that is work left for another day. ## Test plan Several more tests to cover these cases, though still never enough. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 070a2c38134a4ff4afaca150218289a3c8927603 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Aug 30 01:58:44 2024 +0100 Improvements to thread stall monitor (#19151) Use a watchdog thread for monitoring thread stalls. 
This allows us to detect a thread stall while it is still occurring, rather than only after the fact. commit 07f8b22d5615ad2bcb355d32b02317e024987da2 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Aug 30 01:39:57 2024 +0100 Caching resolver for Tonic. (#19152) On some platforms (GKE) the local resolver does not seem to cache aggressively, which results in high variance for requests commit 89f3a3a86719c5f7c64352072cc777f6e7e585b8 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Thu Aug 29 17:09:17 2024 -0700 [execution] Update how verifier constants for signing are set (#19094) ## Description Since verifier constants for limits and metering for signing do not need to be protocol versioned, a previous PR moved some of these constants out of the protocol config. This PR takes that one step further by allowing it to be set by the node config instead of being hardcoded into the binary. The default is that these should remain unset in the node config, but this gives the ability to easily change them later on. ## Test plan Make sure existing tests pass. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 63f7e47af0765ca7c6f318bb03e90c2ca6eb399a Author: Adam Welc Date: Thu Aug 29 17:01:54 2024 -0700 [move-ide] A fix to empty variant autocompletion (#19150) ## Description This fixes a discrepancy in auto-completion behavior between when an identifier starts with a lower-case character and upper-case character. 
Prior to this PR, the in the following code fragment, auto-completion in `foo` would offer `bar` as a suggestion (even though `bar` starts with lower case and `B` is upper case, but that's VSCode not filtering on capital letters): ``` module test::M1 { fun bar() :u64 { 42 } fun foo(): u64 { B } } ``` It would however not offer the same suggestion in the following code fragment: ``` module test::M1 { fun bar() :u64 { 42 } fun foo(): u64 { b } } ``` The reason for it was that IDE annotations related to auto-completion were only inserted in places where (a potentially unresolved) identifier was starting with a capital letter ## Test plan All existing tests (including the modified ones and new ones) must pass commit 04d8f98811e4b225cfecdb16c5ac5ec34fd3842f Author: Adam Welc Date: Thu Aug 29 17:01:39 2024 -0700 [trace-view] Initial version of debug adapter and trace view extension (#19106) ## Description Move VM tracing is currently in development (https://github.com/MystenLabs/sui/pull/18730) and this PR contains the first version of both the Move VM trace viewing VSCode extension and the IDE-independent [Debug Adapter Protocol ](https://microsoft.github.io/debug-adapter-protocol/)(DAP) implementation that is is responsible for actual trace analysis and visualization. Reviewing advice - I would suggest focusing on TypeScript files as the rest is largely configuration and boilerplate to get a TypeScript project (and, by extension, a VSCode project) going. ## Test plan Tested manually by both packaging/installing extension and making sure that the extension itself can be debugged correctly. 
commit a8e969ad338c6c0f26a66eb893eaf18432f08e3c Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Thu Aug 29 22:59:54 2024 +0100 Add caching for HttpKVStore (#18995) commit 223a6220a2ee90b02d23a8171544fee153ed9c5f Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Thu Aug 29 17:48:10 2024 +0100 [Release Notes] Revert support for rebased commits for now (#19149) ## Description as title ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7d4af69c2b5d014cb1acaa48d1e978eab182664d Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Thu Aug 29 11:40:57 2024 -0400 Version Packages (#19145) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.4.2 ### Patch Changes - 339b8eb: Try catch for getOrder function Co-authored-by: github-actions[bot] commit aebab3cf3218a8893417209fecc834c23619e780 Author: Manolis Liolios Date: Thu Aug 29 18:08:21 2024 +0300 [GraphQL][DotMove] Introduces `DotMove` resolution (#18774) ## Description Introduces the logic for querying packages & types by name. Works for both internal & external resolution mode. 
Important: There's not yet an officially supported package for this functionality on mainnet. There'll be a follow-up PR to address this. ## Test plan There are both unit tests on name parsing, and e2e tests for package resolution. I test package upgrades, and resolution both with "latest", as well as on given fixed versions. ``` cargo nextest run --package sui-graphql-rpc --test dot_move_e2e --features pg_integration ``` ## Stack - #18770 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduces `.move` name resolution (internal & external) for GraphQL. Only supported on a non-mainnet environment for the time being. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 339b8eb0f52d178719905b46d4a1166e9533172d Author: Tony Lee Date: Thu Aug 29 10:22:20 2024 -0400 Try Catch in SDK (#19143) ## Description Try catch in SDK ## Test plan How did you test the new or updated feature? Testnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit df958202c8e6d3e759a9591341574d79976d1f5c Author: Thouny <92447129+thounyy@users.noreply.github.com> Date: Thu Aug 29 10:35:25 2024 +0200 feat: add to_string to numbers (#19119) ## Description Adds `to_string` to integer modules (`u8`->`u256`) ## Test plan Features tests. --- ## Release notes * unsigned integers now support `.to_string()` methods, for example `10u8.to_string()` is the same as `b"10".to_string()` - [x] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d02aba1d4d83ab2f19ca4dcd5298e403a482caa8 Author: Manolis Liolios Date: Thu Aug 29 00:37:33 2024 +0300 [GraphQL] Fix more tests failing due to not waiting long enough (#19129) ## Description Tweaked some more tests that would randomly fail because of trying to retrieve data too early. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4ac0fe2c0b947ce2b4a979375759e484e6db03d6 Author: Brandon Williams Date: Wed Aug 28 15:43:40 2024 -0500 chore: update anemo to e609f7697e Update anemo to a version where dns resolution can no longer block the async runtime. 
commit 3a68b7dbf1b39a5f6c722517ff9b76623cc07573 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Aug 28 13:50:56 2024 -0700 [Bench] emit error from shared counter creation failure (#19125) ## Description Otherwise we don't see the error from quorum driver. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 19ba013e7fa57eaf36fa0ab02af13575688eb04b Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Thu Aug 29 03:29:32 2024 +0700 [Linter] Abort and assert should use named constants (#17030) ## Description This lint encourages the use of named constants in abort and assert statements for better code readability. It detects cases where numeric literals are used directly and issues a warning. How it works: When it encounters an abort or assert statement, it examines the argument. If the argument is not a named constant (i.e., it's a literal or some other expression), it issues a warning suggesting the use of a named constant instead. Key Functions: `is_named_constant`: This function is crucial. It determines whether an expression is considered a named constant: It returns true for direct constants. For ExpList, it checks if the second item (index 1) is a constant. This is likely to handle cases where assert might have multiple arguments. `check_named_constant`: This function uses `is_named_constant` to check the expression and issues a warning if needed. 
## Test plan Added more use case including true positive, true negative, false positive, false negative case ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Timothy Zakian commit f9f742a320960c970977212f6abd60fc3bd5984e Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Aug 28 12:08:34 2024 -0700 [rocksdb] add metric for pending compaction bytes and memtable flushes (#19112) ## Description Also, increase the interval where the metric is sampled. In comments, it says reading some metrics may impact live serving traffic. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 89bfdbd4fa6cddbb3f6d74e1d0bd9889a72c47c2 Author: Alexey Date: Thu Aug 29 01:22:50 2024 +0700 my_module.move -> example.move (#19095) fixes typos in first-app guide commit 3f03d1c8c76388a61c28ca69121d38df1f43f1ac Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Wed Aug 28 10:22:53 2024 -0700 [GraphQL/Events] Disable filtering on both `event_type` and `emitting_module` (#18740) commit d81dfa292d332c010e5d4cadb9b6968900482d41 Author: Manolis Liolios Date: Wed Aug 28 20:19:58 2024 +0300 [GraphQL] fix health check acting up on tests (#19121) ## Description Waiting a bit longer (until we've indexed checkpoint 1), to give time to the watermark task to pick up the proper timestamp in tests, otherwise the timestamp has the value of 0 causing a 504. 
https://github.com/MystenLabs/sui/blob/f6dd9a8ef91612f541405d0e2f704b1895abbc37/crates/sui-graphql-rpc/src/server/builder.rs#L658 (`checkpoint_timestamp` being 0 would always cause this to fail as `now` is a system utc timestamp). ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9f6245d947074a835c577bc2c5b08f80aa9e7ef5 Author: Adam Welc Date: Wed Aug 28 10:04:40 2024 -0700 [move-ide] A fix to empty variant autocompletion (#19111) ## Description This PR fixes autocompletion for empty enum variants. Previously the autocompletion would result in: ``` SomeEnum::EmptyVariant{} ``` After the change, autocompletion will result in: ``` SomeEnum::EmptyVariant ``` ## Test plan All tests must pass (the change in the PR is reflected in modified expected test output) commit f6dd9a8ef91612f541405d0e2f704b1895abbc37 Author: Anastasios Kichidis Date: Wed Aug 28 12:31:02 2024 +0100 [Consensus] consider tx processed either via consensus or checkpoint when submit (#18896) ## Description This PR introduces logic to consider a transaction as processed when one of the following conditions is met: 1. It has been sequenced through consensus. 2. It has been executed via a checkpoint (state sync). Under typical circumstances, transactions are expected to be marked as processed primarily through the consensus path. However, the checkpoint execution path proves beneficial when a node faces difficulties syncing with the consensus DAG but can still follow the state sync. 
This approach prevents transactions from remaining as inflight and, more importantly, avoids redundant submission through the consensus path, as these transactions are likely to be obsolete. ## Test plan CI/Private Testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 29896ab0dddacd03a61c35ac5bced54ce969669f Author: Manolis Liolios Date: Wed Aug 28 14:28:17 2024 +0300 [GraphQL] Cache chain identifier & use ticks (#18770) ## Description We cache the chain identifier & use ticks instead of sleep() on the loop (to keep our queries consistently on `self.sleep` duration, instead of `self.sleep + query time`, as suggested by @amnn ! ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8b3f0e8ce4628dbfb906343ec1c06b1d01d271fe Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Aug 27 22:14:50 2024 -0700 Revert "Replace forge install with soldeer (#18930)" (#19115) This reverts commit 1ce8f6bb3b90273820d3e78bf6d61e9928df17f0. ## Description Describe the changes or additions included in this PR. 
## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 292031e819838ba66a173e51160e7cbda78af132 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Aug 27 20:17:58 2024 -0700 [bridge] e2e tests optimizations (#18892) ## Description This PR does a few things to make e2e tests run faster: 1. instead of having every node running the client, only having one of them do so. 2. allow custom number of bridge nodes, and reduce the amount of nodes for some tests 3. reduce the epoch length to 1 slot from 3 slots These jointly reduce the duration of the basic e2e transfer test from 100+s to 70s. ## Test plan existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d43e7cc5125aee3023c3adb104d54c6b38884841 Author: John Martin Date: Tue Aug 27 18:05:07 2024 -0400 [bridge] add network key + /metrics/network_key rest endpoint (#18877) ## Description adds a ed25519 network key to the `BridgeNodePublicMetadata`. 
I'm also adding a new api endpoint, `/metrics/network_key` which returns only the network key, this isn't necessary, as `/ping` already returns this info, I'm open to removing it. ## Test plan tested locally: ``` $ curl localhost:9191/ping {"version":"1.31.0-e244466d3d18","network_key_pair":"nxuXbFTndKX4sbtmbyTBtvb0vFRL+ImjDoZifaWok+8="}% $ curl localhost:9191/metrics/network_key "nxuXbFTndKX4sbtmbyTBtvb0vFRL+ImjDoZifaWok+8="% ``` commit 21ee4bfb72995aae2a3a3868fa0ce92448292875 Author: Xun Li Date: Tue Aug 27 13:19:20 2024 -0700 Fix a few cargo clippy (#19110) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e1b9af47985fba780e78c4315aa7a1119de3f687 Author: Brandon Williams Date: Mon Aug 26 15:40:34 2024 -0500 sui-indexer: remove unused mysql code commit 2ff948dd7ac12d3688e63f87a1d9319f3eefa80a Author: Brandon Williams Date: Mon Aug 26 15:26:35 2024 -0500 sui, cluster-test: remove direct dependency on diesel commit f4f70edc16e3e28a9ba6d2cb2b1cbc3e74ec4258 Author: Brandon Williams Date: Mon Aug 26 10:41:29 2024 -0500 sui-bridge-indexer: convert to using diesel-async commit d8222c1d369c9ce18afcfaebef580d37a88bae44 Author: Brandon Williams Date: Mon Aug 26 10:22:31 2024 -0500 suins-indexer: convert to using diesel-async commit d304a378370e64a52f0a52d2c3b01c89050d90ee Author: Brandon Williams Date: Fri Aug 23 15:37:16 2024 -0500 chore: improve how diesel is depended on improve how diesel is depended on by ensuring that direct uses configure the feature set they need. commit 0bffebae576c63de6af21ff8ba07705275b24d94 Author: Eugene Boguslavsky Date: Mon Aug 26 19:20:36 2024 -0700 Trigger on pre-released (#19107) ## Description Trigger on pre-released ## Test plan 👀 commit 8f214551fb1544386857ab85813154d61d851dc3 Author: Cam Swords Date: Mon Aug 26 15:37:57 2024 -0700 [move][lints] warn on uneeded return (#14332) ## Description Check for and report unnecessary returns. ## Test Plan New test case added --- If your changes are not user-facing and not a breaking change, you can skip the following section. Otherwise, please indicate what changed, and then add to the Release Notes section as highlighted during the release process. 
### Type of Change (Check all that apply) - [ ] protocol change - [ ] user-visible impact - [ ] breaking change for a client SDKs - [ ] breaking change for FNs (FN binary must upgrade) - [ ] breaking change for validators or node operators (must upgrade binaries) - [ ] breaking change for on-chain data layout - [ ] necessitate either a data wipe or data migration ### Release notes commit 7b7ebdc6990a4d26eebe6dece0f83e1cc2db5f3e Author: Eugene Boguslavsky Date: Mon Aug 26 13:18:50 2024 -0700 Add run name for generate release notes workflow (#19105) ## Description Add run name for generate release notes workflow ## Test plan 👀 commit d7c986ca250f619e0e29ee312b64215eedb567f5 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Aug 26 11:50:03 2024 -0700 [Narwhal] remove NarwhalManager, LazyNarwhalClient, TransactionValidator and a few other integrations (#19089) ## Description Remove a few integration points between Narwhal and Sui. Remove some usages of Narwhal types in Sui. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f3f3014d6858b3d06bed8a3d20d3425a8c8a0d59 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Mon Aug 26 12:49:03 2024 -0400 Version Packages (#19103) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). 
If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.4.1 ### Patch Changes - 3221141: Book param function and package upgrade Co-authored-by: github-actions[bot] commit 3221141b8fabac05829de2c9d85f6c2a660c0cd3 Author: Tony Lee Date: Mon Aug 26 11:46:36 2024 -0400 Pool Book Param and package upgrade (#19100) ## Description Pool Book Param function and package upgrade ## Test plan How did you test the new or updated feature? Testnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2008edec2089c4967c14dd2973064611bb7b879b Author: Damir Shamanaev Date: Sun Aug 25 17:52:21 2024 +0300 [framework] Improve option macros to support value w/o drop (#19084) commit a6302388bd77a12c167c5c1f3e87e79d8b137bf7 Author: Arun Koshy <97870774+arun-koshy@users.noreply.github.com> Date: Sat Aug 24 09:18:21 2024 -0700 [consensus] Remove Narwhal assets/benchmark/docker/scripts (#19096) commit db183b668b6ed5553f68785a9403263e84feec79 Author: Todd Nowacki Date: Fri Aug 23 15:20:18 2024 -0700 [sui-framework] Check for SUI in private balance functions (#19088) ## Description - `balance::create_staking_rewards` and `balance::destroy_storage_rebates` are private functions used by validators to deal with SUI staking rewards and storage rebates. - This change is nonfunctional, but should make it more clear within Move to understand their usage ## Test plan - Run tests --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f401919ba05073084c7cf7b67f7352ecdc4b705f Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Aug 23 14:27:46 2024 -0700 [CI] remove narwhal related workflows (#19091) ## Description These workflows will start failing or become no-ops when Narwhal code and integrations are removed. Also, support specifying commits / branches in the main Rust CI, same as that in the Narwhal workflow. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b8264492c95bb44af060ec739cd796836e7d5af2 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Aug 23 14:27:31 2024 -0700 [CI] allow specifying branch / commit in coverage workflow (#19090) ## Description This allows rerunning the workflow with a past commit. ## Test plan https://github.com/MystenLabs/sui/actions/runs/10530504701/job/29180577939#step:3:3 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7b788893df07dcdf02f3d08351fbb3c7c5d662a7 Author: Hipo <0xhipo@gmail.com> Date: Sat Aug 24 04:19:54 2024 +0800 Fix syntax issues in deepbook-v3 doc. (#19082) ## Description Demo code in Deepbook doc is not working properly. Fix some syntax bugs. ## Test plan No test included. Doc fix on code syntax issue. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [√] REST API: commit 89ef73d1409e1bfdba868026890f87f9cfa8f67c Author: Brandon Williams Date: Fri Aug 23 14:31:26 2024 -0500 chore: update rust to 1.80.1 (#19087) commit c724ed0d6dd062e1a0e04e4e33ff7b8f497f5829 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Sat Aug 24 00:49:12 2024 +0700 [Linter] Meaningless Math Operation (#16626) ## Description The lint identifies operations that have no effect on the result, such as multiplying by 0 or 1, adding or subtracting 0, or shifting by 0. These operations are considered redundant and can be simplified to improve code clarity. Main Logic: For binary operations, it checks the operator and the right-hand side operand. It identifies specific patterns of meaningless operations. 
Detected Patterns: The lint checks for the following meaningless operations: Multiplication or division by 0 Multiplication by 1 Addition or subtraction of 0 Left or right shift by 0 ## Test plan Added more use case including true positive, true negative, false positive, false negative case ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move will now lint against unnecessary math operations in many cases. - [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Todd Nowacki commit 5d6e3ca479c47c21efa8873cc59a89976f0d2d7b Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Fri Aug 23 12:20:22 2024 -0400 Version Packages (#19086) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.4.0 ### Minor Changes - adc704a: trade params and account getters Co-authored-by: github-actions[bot] commit 79cbf5c1813bc9060f9bda675695ca3e06a3beea Author: Allen Xu Date: Fri Aug 23 23:42:40 2024 +0800 Update sui_commands.rs. Fixed variable error in printing logs (#19080) Fixed variable error in printing logs ## Description Fixed variable error in printing logs ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit adc704a080000bb5b76820887ab948f2d2717a71 Author: Tony Lee Date: Fri Aug 23 11:29:16 2024 -0400 Deepbook SDK functions (#19085) ## Description Trade params and account added ## Test plan Tested on testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f42a049cdb1fc34711eab5ea1a427473bab502b9 Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Fri Aug 23 08:03:55 2024 +0100 Sui v1.33.0 version bump (#19083) ## Description Sui v1.33.0 version bump commit cc20b13c3207aac775c71609de8d1b16ccbb5833 Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Fri Aug 23 06:00:11 2024 +0100 Sui v1.32 snapshot (#19081) ## Description Sui v1.32 snapshot commit 9726619730412beb6f12301eea58836bd4067b5b Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Fri Aug 23 05:19:17 2024 +0100 [suiop] fix dup alias issue (#19073) ## Description as title ## Test plan tested locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e19a8015fb457e5c7f6b5524bd01fd74a5c0f73a Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Aug 23 01:46:45 2024 +0100 Revert "Migrate users of mysten_metrics Histogram to prometheus Histogram (#19043)" (#19079) This reverts commit ed69f7cce7ac200a849647c5d8efa7113969d44a. Need to revert this as it breaks health checks for services behind LB commit 753e4638290e906ae5a37c214f2589e151e3009a Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Aug 23 01:21:05 2024 +0100 Add ServerTiming headers to fullnode responses (#19063) - Add facility for adding ServerTiming measurements from arbitrary points in the code. - Instrument wait_for_finality and local execution commit c4e4be46815cdef92eda2c138bead8a2d3580b49 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Aug 22 16:48:54 2024 -0700 [bridge] extend add coin e2e test to cover evm side, and a bug fix (#18861) ## Description In this PR we extend e2e test `test_add_new_coins_on_sui` to `test_add_new_coins_on_sui_and_eth`, also testing 1. add new coin on Eth 2. bridge node capture the new token event without restarting Also fixed a bug where bridge node did not listen to move events from treasury.move and limiter.move (caught by the updated e2e tests, apparently) ## Test plan e2e tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3170d58bcd8eb9706cd02d7ebf1bf78b8c337dba Author: Todd Nowacki Date: Thu Aug 22 15:41:06 2024 -0700 [move] Add large enum test (#19075) ## Description - Fix compiler's sui-mode entry rules for enums - Add large enum tests for entry functions ## Test plan - Added tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bd0dc2fb9b74ca3cfdc3922bc63db2fb8d6098dc Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Aug 22 16:42:51 2024 -0400 indexer fix: multiple object mutations in one checkpoint (#18991) ## Description per Xun's report https://linear.app/mysten-labs/issue/DP-43/bug-epochendindexingobjectstore-might-contain-multiple-versions-of ## Test plan CI ideally in the long run we want to have an embedded DB and test it on CI with a test, but we lack that today. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit bacc76c3fe2747d278f7eacb5f3564076f900d18 Author: Andrey Chursin Date: Thu Aug 22 18:05:46 2024 +0000 Rename RocksDB metrics (#19067) Some rocksdb metric names have typos commit 22aa24d27ddbeaea0a3fbcfd6a329c80471418ae Author: Todd Nowacki Date: Thu Aug 22 11:04:16 2024 -0700 [move] Rewrite verifier metering (#19036) ## Description - Added ability cache for verifier ability queries - Redid metering for typing, local safety, and reference safety ## Test plan - calibrated constants --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2969b591b42c46f5bc9a876e6c897b5fd6fa746e Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Aug 22 09:54:45 2024 -0700 [Consensus] panic in tests when transaction exceeds consensus size limit (#19065) ## Description This makes the submission failure more obvious when it is due to exceeding size limit. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ba7e08577ae6d5e51e3c418594a60ed5f09df380 Author: Anastasios Kichidis Date: Thu Aug 22 19:14:47 2024 +0300 [Consensus] enable amnesia recovery & refactor retry approach (#18771) ## Description This PR is enabling amnesia recovery automatically when: * node has not accepted any block yet (nothing in DagState) * consensus authority boot counter is == 0 , which means that consensus has started for the first time while the binary runs. This avoids from running the amnesia recovery during normal epoch change The timeout parameter `sync_last_known_own_block_timeout` is still being used but now it is timing out a whole "iteration" attempt to fetch the last own block. Also, if the `sync_last_known_own_block_timeout` is set to `zero` it is disabling the amnesia recovery mechanism overall which might be desired under some conditions. Also, the synchronizer has been refactored in order to not panic anymore when node doesn't manage to hear back from other peers, but keep trying until it finally does hear back from `f+1` nodes. Liveness is not affected as we should always have `f+1` nodes available before we are able to make any meaningful round advancement. **Note:** The node crash recovery will be successful when both the authority db & consensus db have been wiped out. If only the consensus db is wiped out, although the sync of the last own block will be successful, it's possible to reach the checks in the commit observer here https://github.com/MystenLabs/sui/blob/25d2f3087a3797184d141929a4cf3dfcb244604f/consensus/core/src/commit_observer.rs#L111 as the authority node might have already consumed from previous run committed sub dags. We can relax the rule there but not touching it for now. 
## Test plan * **CI** * **private-testnet**: delete the authority & consensus databases from a node , after it has made some progress for a few minutes, and restart the node. Node gets into recovery mode and asks the peers for last proposed block. Node manages to recover successfully and participate to network --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9141931f11ac715a6ddb60833c573eb0be33b545 Author: Emma Zhong Date: Thu Aug 22 08:46:49 2024 -0700 return 0 for lowest available cp on unpruned fn (#19066) ## Description If the fullnodes is unpruned, then we should return 0 directly for lowest available cp. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f746620d04b7d49be5e2162396fcd3088d029c6a Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Aug 22 05:28:51 2024 -0700 [GraphQL/TransactionBlock] Scan Limits (#18413) ## Description Implement learnings from GraphQL performance benchmarks: - Implement transaction block pagination as a two step process: First fetch the relevant transaction sequence numbers, then fetch their contents. 
- Every "atomic" filter on transaction blocks is served by a single `tx_` table, with two indices on it, both of which are prepped to perform index-only scans. - The primary index is used to apply the filter directly. - The secondary index applies the filter after limiting results to one sender. - Compound filters are served by joining multiple atomic filters together. - The "scan limit" concept is introduced to limit the amount of work done when dealing with compound filters (see below). ### Scan Limits - If a filter is compound, a scan limit must be provided, and controls how many transactions are considered as candidates when building a page of results. - There is an upperbound on the scan limit, currently 100M, which is enough for over a week of transactions at 100TPS. - When scan limits are enabled, pagination behaviour changes: Pages can be returned with fewer results than the page size (including no results), but still have a previous or next page, because there were no valid candidates in the area scanned but there is more room to scan on either side. - The start and end cursor for the page may no longer point to an element in the results, because they point to the first and last candidate transaction. ## Test plan ``` sui$ cargo build -p sui-indexer sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-e2e-tests --features pg_integration ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduce `scanLimit` for paginating `TransactionBlocks`. 
Queries that include multiple complex filters (filters on the function called, affected objects, recipient), need to include a scan limit which controls the number of transactions that are looked at as candidates. - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Ashok Menon commit 3182fe63c5ddbc569d574f7543a4051ec744a7d5 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Aug 21 15:21:31 2024 -0700 [move] Update error behavior on serialization boundaries (#19064) ## Description Update the semantics around serialization errors when computing type layouts to follow the underlying layout errors more closely. ## Test plan Manually tested. commit 9a24f66b339196a59e2816889796bf550c848caf Author: Todd Nowacki Date: Wed Aug 21 15:17:00 2024 -0700 [move] Small lint cleanups (#18878) ## Description - Small changes/rewording to lints - Improved capability freezing lint - Fixed VISIT_TYPES ## Test plan - Updated tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b2a362143c5fd5e470a3dc4a3719a31d46ab6e14 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Wed Aug 21 17:02:13 2024 -0400 Version Packages (#19061) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). 
If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.3.3 ### Patch Changes - ed221a6: Update package address Co-authored-by: github-actions[bot] commit b1976e689cb335c2014693c1aa0b19180dd8da0d Author: Emma Zhong Date: Wed Aug 21 13:34:49 2024 -0700 [indexer] index protocol configs and feature flags (#18450) ## Description This PR adds two tables `protocol_configs` and `feature_flags` that get populated at indexer startup time and every epoch change time if protocol version has changed. ## Test plan Tested locally against devnet. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: adds two new indexer tables that stores protocol configs and features flags of different versions. - [ ] JSON-RPC: - [x] GraphQL: uses the stored data to query for protocol configs instead of native configs stored in the binary. - [ ] CLI: - [ ] Rust SDK: commit b9d696e1f3986197ea4cc9cefc1e22a0eb5053f1 Author: Patrick Kuo Date: Wed Aug 21 19:25:59 2024 +0100 Indexer builder bug fix (#19058) ## Description this PR fixes a bug in the indexer task creation, where it is trying to create the same task upon restart resulting in duplicate key violation in the database. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3db05b0e144fb242bb696ab943dc688a5f1c7966 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Aug 21 16:53:58 2024 +0100 Do not truncate logs when container restarts (#18955) commit ed221a6ba9144fa5739f964674701fd773dddbab Author: Tony Lee Date: Wed Aug 21 11:12:35 2024 -0400 SDK constant update (#19060) ## Description Deepbook SDK constant update ## Test plan How did you test the new or updated feature? Testnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1dd888add82f71810f1ef4992092169f19260c09 Author: Christina <156356273+cratiu222@users.noreply.github.com> Date: Wed Aug 21 17:30:40 2024 +0300 Docs fix typo (#19059) Hello I found and fixed several minor typos. Hope it helps. commit 61dc132b7d627304c1cc65dcb4046f3df7c16154 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Tue Aug 20 22:08:06 2024 -0400 indexer:add experimental warning (#18890) commit c6eb1e73a0f2486ad7a7c884aca8138842822d97 Author: Brandon Williams Date: Tue Aug 20 17:55:16 2024 -0500 rest: cleanup commit 2d7a7aae0cc0c37eef631d4dae2f206c08783641 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Tue Aug 20 14:35:47 2024 -0700 remove hover label (#19053) ## Description https://linear.app/mysten-labs/issue/APPS-246/remove-nft-name-andor-object-id-from-thumbnail-image ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 15a406c583e8ef5eb16c9e5d207664cfc79eb8b6 Author: Brandon Williams Date: Mon Aug 19 09:05:14 2024 -0500 protocol-config: correct doc comment for max_tx_gas parameter commit a2e3e99ae4e49dc688bbd7b1c0e4bad05ba0b189 Author: Brandon Williams Date: Mon Aug 19 08:58:38 2024 -0500 sui-core: remove unnecessary async from a few functions commit 3ab3cef3adc65a272047213c5b005774bad309c8 Author: Brandon Williams Date: Mon Aug 19 08:55:05 2024 -0500 types: add a digest method to TransactionData commit 8c44a8e9c264461317c4d836423734be92332a06 Author: Brandon Williams Date: Mon Aug 19 08:54:00 2024 -0500 types: move TransactionExecutor from sui-rest-api to sui-types Move the TransactionExecutor trait from sui-rest-api to sui-types in order to avoid needing to have sui-core depend on sui-rest-api. 
commit 6166edcf2ba14f8ddb11ba68e2536f9947625bf2 Author: Brandon Williams Date: Mon Aug 19 08:44:25 2024 -0500 types: add From impls for TransactionExpiration and Command commit d41d7bdb1cdca15587ec85d5fc8c4d3eb54f47c0 Author: Brandon Williams Date: Mon Aug 19 08:43:38 2024 -0500 move-core: add Borrow for Identifier commit d955f7e2ac1e505e17476f01c43aecf233d81fa3 Author: Brandon Williams Date: Fri Aug 16 22:22:31 2024 -0500 rest: introduce a structure error type for client commit b454eb967c8d5e36ef2262809e6aad0b51f214cc Author: Brandon Williams Date: Mon Aug 12 09:07:25 2024 -0500 rest: add 410 responses to checkpoint apis commit b6199ed83d9bcdd01a5ed42c3fb3d114c9f38816 Author: Brandon Williams Date: Thu Aug 1 09:00:13 2024 -0500 rest: add Debug impl for Page and ResponseContent commit 824b6b6cb4d754a447e5e4c1914322c35d52ae0a Author: Brandon Williams Date: Thu Aug 1 08:52:42 2024 -0500 read-store: properly return the highest executed checkpoint Fix the implementation of ReadStore::get_latest_checkpoint for RocksDbStore to properly return the highest executed checkpoint instead of the highest certified checkpoint given this api is explicitly used to signal checkpoint availability. commit fa9f0972cfa89bf16565510243051cfc5d5841ba Author: Brandon Williams Date: Wed Jul 31 16:47:02 2024 -0500 rest: redirect /rest -> /v2 One polish item before the rest service can be stabilized is to have route based api versioning. This is done by hosting the main service off of `/v2` and for compatibility for old clients we redirect from the old `/rest` route to the new `/v2` ones. 
commit 728a4d0271fa6db3ca90681d6dda326d64351fc1 Author: Brandon Williams Date: Wed Jul 31 16:08:37 2024 -0500 keys: convert noisy message to doc comment commit 7a030228606e14e3a57f8edf75ffbc6694f3e59d Author: Brandon Williams Date: Wed Jul 31 12:46:38 2024 -0500 cluster: remove unused ws data commit d950fa67a5593596e3e447efd97d050b1b552389 Author: Sadhan Sood <106645797+sadhansood@users.noreply.github.com> Date: Tue Aug 20 13:46:50 2024 -0700 Fix empty result set in wallet monitoring service (#18350) ## Description Handle empty result set. Ran locally, works commit 643b5840c9dc14b549555005b657c4579a7a2bda Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Aug 20 20:08:19 2024 +0000 Version Packages (#19052) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/enoki@0.4.0 ### Minor Changes - f589885: Add sdk methods for managing enoki subnames Co-authored-by: github-actions[bot] commit f5898858eedd547dec1484068b41d409085f48d4 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Aug 20 13:00:10 2024 -0700 [enoki-sdk] Add sdk methods for managing enoki subnames (#18846) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Pavlos Chrysochoidis <10210143+pchrysochoidis@users.noreply.github.com> commit 2cf1eeb35027fd9c1037d26267e6d23ecd9a0e8d Author: Jean-Pierre Smith Date: Tue Aug 20 21:10:01 2024 +0200 feat: allow specifying OTEL service name with OTEL_SERVICE_NAME (#19044) ## Description This changes allows specifying the open-telemetry service name to the `telemetry-subscribers` crate, by setting the `OTEL_SERVICE_NAME` environment variable. This defaults to sui-node, so there is no change in behaviour when unset. As `OTEL_SERVICE_NAME` is not automatically consulted by the otel rust implementation, this uses the environment variable to perform the update in `telemetry-subscribers`. ## Test plan The change is minor, the code compiles, and the environment variable is spelled correctly. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ed69f7cce7ac200a849647c5d8efa7113969d44a Author: Andrew Schran Date: Tue Aug 20 20:00:27 2024 +0100 Migrate users of mysten_metrics Histogram to prometheus Histogram (#19043) mysten_metrics variant is difficult to use in Grafana because its precomputation of percentiles makes it impossible to aggregate streams from multiple hosts in a statistically valid way. 
commit 91bf75e62884d507254f6a7e209d99bbb9c0dafb Author: Brandon Williams Date: Tue Aug 20 12:27:05 2024 -0500 chore: update rust-sdk commit 2682497286249dd495aa15bd526d18eac6213f44 Author: Ashok Menon Date: Tue Aug 20 18:15:55 2024 +0100 fix(graphql): Docs fixes from #17543 (#19046) ## Description Gathering all the suggested docs fixes across the stack in #17543 into one PR. ## Test plan :eyes: ## Stack - #19047 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit dab9bad6187e0ba342aa6fca4586e6f276583db9 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Aug 20 17:27:15 2024 +0100 [Consensus] move transaction count limit to protocol config (#19042) ## Description 1. move transaction count per block limit to protocol config. 2. reduce the transaction bytes per block limit to 512KB, based on the transaction count limit. 3. verify the limits in `BlockVerifier` ## Test plan Unit test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 91f8de171a193b6866e097897ffc518870b65c0b Author: Dario Russi <113150618+dariorussi@users.noreply.github.com> Date: Tue Aug 20 18:19:41 2024 +0200 unit test (#18659) ## Description Unit test infra for the bridge. Not sure why `bridge-env.move` is not showing as a rename. Also I run the latest formatter on the test files and that may add to the change. In any case @patrickkuo @Bridgerz we will talk about this given that is the easiest way to understand what is going on. Overall it is a purely additive change. ## Test plan This is it. Old tests run and new test increase coverage (~90%). We need to write few tests to cover for some assertion and that is coming How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit c68131bc41c3d88a950caa1228693a63ee51d29e Author: Ashok Menon Date: Tue Aug 20 16:41:36 2024 +0100 fix(jsonrpc): Flaky fullnode JSON-RPC test (#19047) `transaction_tests::test_get_fullnode_transaction` included a flaky assertion that if you fetch 5 transactions, then the rest of the transactions, and then fetched the latest 10 transactions, the latter would match up with the suffix of the former two queries concatenated together. This does not work in general (at least not with this test set-up), because of consensus commit transactions. 
I have updated the test to check a similar property: That if you fetch 5 transactions, then the rest, and then fetch the first 10 transactions, then the prefixes match (this tests that transaction order is stable). There are other existing tests for what happens when you query transactions in descending order. This seed previously exhibited the failure, and now it succeeds. ``` sui-json-rpc-tests$ MSIM_TEST_SEED=968774516445189525 cargo simtest -- test_get_fullnode_transaction ``` commit bd887c6bbb4666d36ac0833d09552ad31d2d1f6d Author: Godwin JIbs <126525197+Jibz-Mysten@users.noreply.github.com> Date: Tue Aug 20 10:42:48 2024 -0400 Add expiry time to cached SuiNS name validation (#19048) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7802581c5ace6150c5bb59e501854dc9cb2b423a Author: Ashok Menon Date: Mon Aug 19 11:53:22 2024 +0100 [sui-tool] dump-packages uses GraphQL (#18337) ## Description Replace the original implementation of the dump-packages command (which requires access to an indexer database) with an implementation that reads from a GraphQL service. The former is not readily accessible, but the latter should be. The new tool is also able to run incrementally: Fetching only packages created before a certain checkpoint, or pick up where a previous invocation took off to fetch new packages that were introduced since. ## Test plan Ran a test invocation, on our experimental read replica. 
With a max page size of 200, I was able to fetch 17000 packages (all the packages at the time the read replica was created) in 3 minutes. ## Stack - #17543 - #17692 - #17693 - #17696 - #18287 - #18288 - #18336 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit d068f8f21301767915cd2a2e0fc3598ced0ea63f Author: Ashok Menon Date: Mon Aug 19 11:52:52 2024 +0100 [GraphQL] `generate-config` sub-command (#18336) ## Description Add a command for generating a config TOML file for the GraphQL service with all its parameters set to their default values. (We used to have a similar command for the YAML file which we weren't using, but we still use the TOML file). ## Test plan ``` cargo run --bin sui-graphql-rpc -- generate-config /tmp/config.toml ``` ## Stack - #17543 - #17692 - #17693 - #17696 - #18287 - #18288 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: New sub-command for `sui-graphql-rpc`, `generate-config` for creating a TOML config with all default values set. 
- [ ] CLI: - [ ] Rust SDK: commit b3d3d7ad680fa993c03ec959046632ec0a32500e Author: Ashok Menon Date: Mon Aug 19 11:41:40 2024 +0100 [chore][GraphQL] Declutter schemas (#18288) ## Description Remove `draft_target_schema.graphql` and promote `current_progress_schema.graphql` to be the canonical schema for the service -- move it to the top-level of the `sui-graphql-rpc` crate to make it easier to find. This is to avoid confusion about source of truth for the GraphQL schema. Because the TS SDK references the schema at multiple GraphQL versions, we will need to cherry-pick this change to release branches when it lands. ## Test plan CI ## Stack - #17543 - #17692 - #17693 - #17696 - #18287 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: The schema file has been moved from `crates/sui-graphql-rpc/schemas/current_progress_schema.graphql` to `crates/sui-graphql-rpc/schema.graphql`. - [ ] CLI: - [ ] Rust SDK: commit 4110e6eaae9fcd26d5284b319b2f88dda9aa4984 Author: Ashok Menon Date: Mon Aug 19 11:40:43 2024 +0100 [chore][GraphQL] Clean-up unused commands (#18287) ## Description Remove commands to generate examples, markdown and schema from the main binary as we do not use them: - Instead of generating examples, we have hand-crafted examples in our docs. Removing this code also removes a test that forces regeneration of a markdown file from these docs (which we also were not using). - We also never used the output from the `generate-schema` sub-command, because the schema was always available as a file, or via introspection commands from the running service. 
- Logic for gathering examples to test has been moved into the test file, to avoid including test-only code in the main library. ## Test plan CI ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? ## Stack - #17543 - #17692 - #17693 - #17696 - #17697 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: The GraphQL binary no longer supports generating examples, or exporting its own schema as these commands have been unused for some time. - [ ] CLI: - [ ] Rust SDK: commit c90c28f174288ffec3eaf44e021b2cceddd6a320 Author: Ashok Menon Date: Fri May 10 18:29:57 2024 +0100 [GraphQL/MovePackage] Paginate by version (#17697) ## Description Introduce two new queries: `Query.packageVersions` and `MovePackage.versions` for iterating over all the different versions of a given package. This kind of query is useful for understanding package history. These were introduced as a separate query, instead of having a single query for iterating over packages that could optionally take a checkpoint bounds or version bounds because of how system packages interact with the `packages` table: Because system packages are updated in-place, they only have one row in the `packages` table. This makes sense for paginating packages in bulk (e.g. by checkpoint) where the primary aim is to get a snapshot of the packages available at a certain point in time, but doesn't work for answering package version queries for system packages, and it prevents us from creating a combined query. 
A combined query would also allow someone to create a filter that bounds checkpoints and versions, but doesn't bound the package itself (or would require us to prevent that combination), which is complicated to implement efficiently and not particularly useful. ## Test plan New E2E tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests \ --features pg_integration \ -- packages/versioning ``` & Testing against a read replica to make sure system package tests work well, and performance is reasonable. ## Stack - #17686 - #17687 - #17688 - #17689 - #17691 - #17694 - #17695 - #17542 - #17690 - #17543 - #17692 - #17693 - #17696 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduces `Query.packageVersions` and `MovePackage.versions` for paginating over the versions of a particular package. - [ ] CLI: - [ ] Rust SDK: commit 01bbabe976d61f016d33dcd2375679f3958793eb Author: Ashok Menon Date: Mon Aug 19 11:40:25 2024 +0100 [GraphQL/MovePackage] Paginate by checkpoint (#17696) ## Description Adds a query, `Query.packages` for fetching all packages that were introduced within a given checkpoint range. Useful for fetching package contents in bulk, to do local analyses. ## Test plan New E2E tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests \ --features pg_integration \ -- packages/versioning ``` Also tested for performance against a large read replica (the query planner quotes a high estimate for the query but the actual results do not take very long to run because queries on many sub-partitions are eliminated). 
## Stack - #17686 - #17687 - #17688 - #17689 - #17691 - #17694 - #17695 - #17542 - #17726 - #17543 - #17692 - #17693 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduces `Query.packages` for paginating through all packages (optionally bounding by the checkpoint the package was introduced in). - [ ] CLI: - [ ] Rust SDK: commit e22cd030099cfd86a57e3015b58775558161ed77 Author: Ashok Menon Date: Mon Aug 19 11:38:54 2024 +0100 [GraphQL/MovePackage] Query for latest version (#17693) ## Description Add a new kind of package point look-up to get the latest version of the package at a given ID (or from another `MovePackage`). For system packages, this is analogous to getting the latest version of the object at that ID, but the versions of other packages all exist at different IDs. ## Test plan New transactional tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests \ --features pg_integration \ -- packages/versioning ``` ## Stack - #17686 - #17687 - #17688 - #17689 - #17691 - #17694 - #17695 - #17542 - #17726 - #17543 - #17692 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Add `Query.latestPackage` and `MovePackage.latest` for fetching the latest version of a package. 
- [ ] CLI: - [ ] Rust SDK: commit fd0fe51e5c24f84da6944ee63d13fb579aca2f33 Author: Ashok Menon Date: Mon Aug 19 11:35:30 2024 +0100 [GraphQL/MovePackage] Query by ID and version (#17692) ## Description Implement `Query.package` and `MovePackage.atVersion` to query a package at a specific version, using the new fields added to the `packages` table, exposed via some new data loaders. ## Test plan New transactional tests: ``` sui$ cargo nextest run -p sui-graphql-e2e-tests \ --features pg_integration \ -- packages/versioning ``` ## Stack - #17686 - #17687 - #17688 - #17689 - #17691 - #17694 - #17695 - #17542 - #17726 - #17543 - #17692 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduce `Query.package` and `MovePackage.atVersion` to query packages at specific versions. - [ ] CLI: - [ ] Rust SDK: commit 56a05c87bf795bee57039ec5979c45465ace44dd Author: Ashok Menon Date: Mon May 6 00:57:52 2024 +0100 [GraphQL] Leverage `objects_version` table. (#17543) ## Description Use the `objects_version` table to speed up point look-ups (via data loaders) for historical objects (ID + version), and dynamic fields (object look-up bounding version by parent ID). With this change, the restriction of accessing dynamic fields only within the available range is dropped. ## Test plan ``` sui$ cargo nextest run -p sui-graphql-rpc sui$ cargo nextest run -p sui-graphql-e2e-tests --features pg_integration. ``` Perform a query that involves fetching a large number of dynamic fields, which should now be fast. 
The following example, fetching dynamic fields on a deepbook pool loads 50 dynamic fields in about 5s from cold (which also requires loading packages for resolution), and then 2s from there: ``` query { owner( address: "0x029170bfa0a1677054263424fe4f9960c7cf05d359f6241333994c8830772bdb" ) { dynamicFields { pageInfo { hasNextPage endCursor } nodes { name { type { repr } json } value { ... on MoveValue { type { repr } json } ... on MoveObject { contents { json type { repr } } } } } } } } ``` ## Stack - #17686 - #17687 - #17688 - #17689 - #17691 - #17694 - #17695 - #17542 - #17726 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Dynamic fields can now be looked up on any historical object (not just objects in the available range). - [ ] CLI: - [ ] Rust SDK: commit c26c1217777fe2d16283b5d504bf9966711c79c4 Author: Xun Li Date: Mon Aug 19 22:08:47 2024 -0700 [LocalExecution] Remove local execution loop from FN (#19032) ## Description We are about to deprecate local execution. This PR gets rid of the loop that subscribes effects from quorum driver and execute them locally. This was an optimization and it should be safe to remove at this point. Removing this also allows us to see clear metrics on the number of local execution requests received from the network. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 101221975ac5e3545bec33f02cf9c0a5d5d70668 Author: Eugene Boguslavsky Date: Mon Aug 19 17:05:56 2024 -0700 Release Notes Generator (#19039) ## Description Instead of running`./scripts/release_notes.py generate` locally, we should be doing this via a workflow and also be able to create a new release within the same workflow. ## Test plan https://github.com/MystenLabs/sui/actions/runs/10462421870/job/28972625617 Screenshot 2024-08-19 at 4 31 48 PM commit 66f6d484935110fdf420b4e8b7594a0ec7d7f886 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Mon Aug 19 15:53:24 2024 -0700 [replay] Fix issue when replaying transactions taht query existence of dynamic fields before creating it (#19041) ## Description This fixes a bug in the replay tool where if the transaction checked for the existence of the dynamic field before creating it, we would fail to be able to replay the transaction locally since the replay tool was not properly handling the underlying error. ## Test plan Tested manually on a failing transaction. commit 43c144565114215ea815b6f8626420a6d5624fae Author: Andrey Chursin Date: Mon Aug 19 21:21:05 2024 +0000 Measure RocksDB batch put bytes by cf (#19038) We measure number of bytes by cf for `put`, and measure total written in the batch by DB, but do not have breakdown for batch written bytes by cf. 
This metric adds the breakdown
# Releases ## @mysten/sui@1.7.0 ### Minor Changes - 143cd9d: Add new tx.object methods for defining inputs for well known object ids: - `tx.object.system()`: `0x5` - `tx.object.clock()`: `0x6` - `tx.object.random()`: `0x8` - `tx.object.denyList()`: `0x403` - 4019dd7: Add default budget to transactions executed through the SerialTransactionExecutor class - 4019dd7: Add options argument to executeTransaction methods on transaction executor classes - 00a974d: Add global registry for transaction plugins ### Patch Changes - 4357ac6: Add options argument to verifyTransactionSignature ## @mysten/create-dapp@0.3.17 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 - @mysten/dapp-kit@0.14.17 ## @mysten/dapp-kit@0.14.17 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 - @mysten/wallet-standard@0.13.2 - @mysten/zksend@0.10.6 ## @mysten/deepbook@0.8.16 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 ## @mysten/deepbook-v3@0.3.2 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 ## @mysten/enoki@0.3.17 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 - @mysten/zklogin@0.7.17 ## @mysten/graphql-transport@0.2.16 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] 
- Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 ## @mysten/kiosk@0.9.16 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 ## @mysten/suins-toolkit@0.5.16 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 ## @mysten/wallet-standard@0.13.2 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 ## @mysten/zklogin@0.7.17 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 ## @mysten/zksend@0.10.6 ### Patch Changes - Updated dependencies [143cd9d] - Updated dependencies [4357ac6] - Updated dependencies [4019dd7] - Updated dependencies [4019dd7] - Updated dependencies [00a974d] - @mysten/sui@1.7.0 - @mysten/wallet-standard@0.13.2 Co-authored-by: github-actions[bot] commit 00a974d8cab189c17566d7edb98efd518360cd0c Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Aug 19 12:25:40 2024 -0700 [ts sdk] Add global registry for transaction plugins (#18928) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 143cd9dc29896c1e042c4b5bc2c176fa306c5552 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Aug 19 12:04:28 2024 -0700 [ts sdk] Add new tx.object methods (#18940) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4019dd790d81db0abd2dea6de524a745b9397501 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Aug 19 12:00:44 2024 -0700 [sdk] Add default budget to serial transaction executor (#19013) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4357ac6eda8b6a61d6d401f8768ed77119c3a8c1 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Aug 19 11:19:08 2024 -0700 Add docs on zklogin signature verification and using waitForTransaction (#18982) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cc6540f867b2ad6899e787a4de4c76f36298024a Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Aug 19 11:16:43 2024 -0700 [sdk e2e] fix some more flaky e2e tests (#19022) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 34628d30ced770114e9fc7c4941c5a4320ea3cb8 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Mon Aug 19 13:18:58 2024 -0400 Version Packages (#19033) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.3.1 ### Patch Changes - d70e8ff: Upgrade Package Co-authored-by: github-actions[bot] commit d70e8ff969fce28f5615b3fa09a819b3f2303671 Author: Tony Lee Date: Mon Aug 19 12:49:17 2024 -0400 Upgrade Deepbook SDK Constant (#19027) ## Description Upgrade Deepbook SDK Constant ## Test plan How did you test the new or updated feature? Testnet ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f82f095956a14110a46d49510d83e1b8aded9192 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Mon Aug 19 09:24:19 2024 -0700 [move] Enable enums in mainnet (#19031) ## Description Turns on enums in mainnet. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
Attempts to clear up what is meant by the bullet point under **Benefits**.
It takes a structure that contains some targets to verify and a proof material, and returns Ok() if they verify or an error otherwise. It takes dependencies only on Sui types for the moment. ## Test plan We have new unit tests. commit f8f19a3bc19a5e7191c80e7955b7509bda405e15 Author: Adam Welc Date: Sat Aug 17 10:39:37 2024 -0700 [move-ide] Refactored parsing symbolicator into parsing analysis (#18942) ## Description What the title says - the idea was to do a similar thing we did before for typing symbolicator ## Test plan All existing tests must pass commit 75c5e109555a34618f32265f3fe2edb040abc15a Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Fri Aug 16 16:22:41 2024 -0400 Version Packages (#19015) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.3.0 ### Minor Changes - 36f1c6f: Rounding for numbers, exports update - c51f186: New contract constants Co-authored-by: github-actions[bot] commit 8574a5bd9e666bd9f60f9bc25763460efe0f97c5 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Fri Aug 16 13:52:32 2024 -0400 indexer: DB reset to handle incomplete migration history (#19019) ## Description previous fix did not work well when migration run history in __diesel_schema_migrations table is not complete. 
Deepbook SDK update, floating point rounding and exports
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3316ca4eb1bcd2970da32502a677a5f41e26274f Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Fri Aug 16 09:54:47 2024 -0600 [docs] Limit information for object model (#19010) ## Description In its original form, the Object Model document pointed to a line of code where the ProtocolConfig struct was. Weird, but the code was added to since the link was made so it no longer pointed to the intended spot. Worse, the link never showed what the actual limits were, just the param name and type. These edits fix the code copy plugin to locate Rust structs so the current code can be used in the document instead of creating another soon-to-be outdated link. Also, adds a component to get the protocol configs for each network so the values are available from docs. The Object Model content was also updated to accommodate these features. Result is bottom of this page: https://sui-docs-git-fork-ronny-mysten-docs-87-sui-foundation.vercel.app/concepts/object-model ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c51f186ad5b377d4e69ad43bf66c79bfe682d81b Author: Tony Lee Date: Fri Aug 16 09:10:45 2024 -0400 Deepbook SDK Constants Update (#19007) ## Description Deepbook SDK Constants Update ## Test plan Testnet --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 396ecf1565bf76dce046d6b00d3c0ba0221e0210 Author: Ashok Menon Date: Fri Aug 16 10:37:19 2024 +0100 feat(rust-sdk): Simulate WaitForLocalExecution (#18996) ## Description If transaction execution requires waiting for local execution, then simulate it by polling, to account for the fact that fullnodes will soon start to ignore this parameter. ## Test plan Run the programmable transaction SDK example: ``` sui-sdk$ cargo run --example programmable_transactions_api ``` Run the tic-tac-toe E2E example: ``` examples/tic-tac-toe/cli$ env $(cat testnet.env) \ cargo run -- new $(sui client active-address) | | ---+---+--- | | ---+---+--- | | -> X:
O:
title, this is another attempt at https://github.com/MystenLabs/sui/pull/18899
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Emma Zhong Co-authored-by: Ashok Menon Co-authored-by: wlmyng <127570466+wlmyng@users.noreply.github.com> Co-authored-by: Emma Zhong commit 1c88f3f229748f47675de284ed36060a3feb0ba5 Author: Eugene Boguslavsky Date: Thu Aug 15 14:02:19 2024 -0700 Fail if publish to chocolatey fails (#19006) ## Description Fail if publish to chocolatey fails ## Test plan 👀 commit fea58211135790715c57c61c3f837853e8738538 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Aug 15 14:51:42 2024 -0400 indexer minor: more efficient total tx query (#19004) ## Description title, per a related slack thread ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8a6959c19113266053adc754a0bc4ed503000e11 Author: Patrick Kuo Date: Thu Aug 15 19:28:48 2024 +0100 [bridge indexer] - bridge indexer unit test (#18973) ## Description added unit test for bridge indexer task creations. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2ac77203bd850222f8082490c84def9f2946c915 Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Thu Aug 15 14:22:20 2024 -0400 indexer fix: reset db via reverting migrations (#18993) ## Description the issue was that, prev indexer db reset was done via dropping all tables, which is problematic when we change a PG PROCEDURE parameter, see this slack message. https://mysten-labs.slack.com/archives/C03TCGDF45N/p1723507055114959 this caused issues on CI after merging https://github.com/MystenLabs/sui/pull/18899 and it got reverted, this pr changes it to reverting all migrations and cleans up the table dropping codes ## Test plan locally - reset DB before #18899 - cherry-pick this pr - cherry-pick #18899 run cmd below, which was the cmd on CI that ran into issue ``` DB_POOL_SIZE=10 cargo run --bin sui-indexer -- --db-url "postgres://postgres:postgres@localhost/gegao" --reset-db ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b2108ef219829b573ff1df4e4ea41fdf61776fdd Author: Eugene Boguslavsky Date: Thu Aug 15 11:08:55 2024 -0700 Exclude sui-bridge tests for code coverage (#19002) ## Description Exclude sui-bridge tests for code coverage ## Test plan https://github.com/MystenLabs/sui/actions/runs/10407677341/job/28823468147 commit b4d49448789ae47bc4f3aa17c11bdfa5830452e0 Author: Adam Welc Date: Thu Aug 15 10:44:13 2024 -0700 [move-ide] Fixed a bug preventing pkg build from VSCode (#19003) ## Description This PR fixes a problem recently reported in https://github.com/MystenLabs/sui/issues/18749 and https://github.com/MystenLabs/sui/issues/18983. When creating `git` commands using `Command::new(...).status()` standard input inherited from VSCode was somehow causing the child process to hang, even though the commands created did not in fact require any input. This only happened on Windows and while admittedly I do not understand the root cause (perhaps one of the reviewers can help), it does fix the problem. ## Test plan All existing tests must pass. Additionally, manually verified that the build no longer hangs on Windows. 
commit 65843fd352ef0bb6505d326f84a98e05b42c814d Author: Eugene Boguslavsky Date: Thu Aug 15 09:21:51 2024 -0700 Set `SIMTEST_STATIC_INIT_MOVE` variable for Code Coverage (#18998) ## Description Set `SIMTEST_STATIC_INIT_MOVE` variable for Code Coverage due to https://github.com/MystenLabs/sui/pull/18752 ## Test plan https://github.com/MystenLabs/sui/actions/runs/10406180594/job/28818615076 commit e205aca231ba52b29fb51b282ad850fc63b7444e Author: Brandon Williams Date: Tue Aug 13 13:58:00 2024 -0500 authority_aggregator: sample 5 validators for objects contents commit ecbf4b1a0e3a6be8ae56a3023ba66e81558bbc09 Author: Brandon Williams Date: Wed Aug 14 11:02:07 2024 -0500 chore: update quinn-proto commit 588ea8f803c461e1d9c55db1a3b925f16b367d8d Author: Ashok Menon Date: Wed Aug 14 18:57:17 2024 +0100 feat: [Source Validation] Check linkage (#18964) ## Description Source validation additionally checks that dependencies in the linkage table of the on-chain package match the published dependencies from the source package. Previously it was possible to construct a scenario where source verification would succeed even though a package's representation on-chain did not exactly match, because one of the dependencies mismatched on version but between two versions that were themselves identical. ## Test plan New tests exercising the scenario mentioned above: ``` sui-source-validation$ cargo nextest run -- linkage_differs ``` ## Stack - #18956 - #18959 - #18978 - #18960 - #18962 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
sui$ ~/sui/target/debug/sui move build --dump-bytecode-as-base64
This breaks publishing test packages that depend on upgraded packages. ## Test plan CI -- existing tests using TestTransactionBuilder should still pass. ## Stack - #18956 - #18959 - #18978 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b4459e73dc4c1c23c8fe980d14bb68c697d6e81b Author: Ashok Menon Date: Wed Aug 14 18:38:47 2024 +0100 fix(verify-source): Detect published-at = 0x0 (#18978) ## Description Detect when the `published-at` field in `Move.toml` or `Move.lock` has been explicitly set to `0x0` and treat that as if it was not set at all. This is not commonly done by people, but it happens in our test set-up. This also required converting the field into an `ObjectID` earlier in the pipeline, which introduced some further changes in the codebase. ## Test plan ``` sui-source-validation$ cargo nextest run ``` ## Stack - #18956 - #18959 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Explicitly setting `published-at = "0x0"` is treated as if the `published-at` field was omitted. 
instead of `&TxContext` for better upgradability.
Here's a step-by-step breakdown of how to implement this linter: Implement the main logic : `check_function_parameters`: Iterate through function parameters. `is_immutable_tx_context`: Check if a parameter is an immutable `TxContext`. `is_tx_context_type`: Check if a type is `TxContext`. `is_sui_tx_context`: Verify if the type is from the `sui::tx_context` module. ## Test plan Added more use case including false positive, false negative case ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move will now lint against using `&TxContext` instead of `&mut TxContext` in public functions - [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Todd Nowacki commit c4343a297240fe8c684efd815ffe01d953d6a2d2 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Thu Aug 15 06:01:33 2024 +0700 [Linter] Unnecessary while loop (#16876) # Description This linter encourages replacing `while(true)` loops with the more idiomatic loop construct. Here's a breakdown of how it works: It checks each expression in the AST. If the expression is a While loop, it examines the condition. If the condition is always true (using the `is_condition_always_true` function), it reports a diagnostic suggesting to use loop instead. The `is_condition_always_true` function checks if the condition is a boolean literal with the value true. 
## Test plan Added more use case including false positive, false negative case ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move will now lint against `while (true)`, which should be replaced by `loop` - [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Todd Nowacki commit b490bb1716338a39706635209bfe40809dc4b505 Author: Theodore Chaikalis Date: Thu Aug 15 01:31:13 2024 +0300 Update useSignAndExecuteTransaction.mdx (#18984) `useClient` hook should be renamed to `useSuiClient` commit 55312989030d886e28ecbe1bddaf755b55b86ad3 Author: Theodore Chaikalis Date: Thu Aug 15 01:30:16 2024 +0300 Update import path for @mysten/sui/transactions (#18985) import path is @mysten/sui/transaction**s** commit c743bd983fe19101e9d6bb6d28bf9a250f433123 Author: Eugene Boguslavsky Date: Wed Aug 14 12:28:28 2024 -0700 Fix chocolatey binary path (#18992) ## Description Fix chocolatey binary path ## Test plan 👀 commit 64a5adbc2b5c188838be706fb9b263f8550453cf Author: Cam Swords Date: Wed Aug 14 10:11:41 2024 -0700 [move][move-2024] Fix match compilation field ordering (#18947) ## Description Revise match compilation that ensures misordered fields are sorted when bindings are created. ## Test plan Two new tests to ensure things work correctly. I will add more tests ASAP as well. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 918d82a74cb36185b3602a7bb51a9a113c0a50fe Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Aug 14 17:58:39 2024 +0100 [Consensus] stop commit sync when consensus handler cannot keep up (#18438) ## Description `CommitSyncer` can download more transactions than what consensus handler and execution can process in the same amount of time. This PR adds backpressure to commit syncer based on # of commits not processed by consensus handler. Also, record commit votes from blocks fetched through Synchronizer for completeness. ## Test plan CI Private testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 84908a391478ee745c82e892f2db13ee1f76b121 Author: Xun Li Date: Wed Aug 14 08:55:08 2024 -0700 [indexer] Simplify IndexedObject (#18981) ## Description All the other fields can be derived from Object. Remove them. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 542edc8764cd10dca97f8c0c1806227dcf9b03eb Author: William Smith Date: Wed Aug 14 11:49:23 2024 -0400 [TrafficControl] Handle invalid client sig on rpc node (#18979) commit 9c4652f3e3663e5c9a08605154cdd4a6b17860f7 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Tue Aug 13 18:42:33 2024 -0700 [move] Add further support for enums in Move model (#18980) commit c5766bce0e51044c065b950fb6d1e8d50d94fd0d Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Aug 13 18:56:19 2024 +0000 Version Packages (#18976) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/sui@1.6.0 ### Minor Changes - a3e32fe: `WaitForLocalExecution` now waits using client.waitForTransaction rather than sending requestType to the RPC node. This change will preserve readAfterWrite consistency when local execution is removed from fullnodes, at the cost of more network requests and higher latency. 
## @mysten/create-dapp@0.3.16 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 - @mysten/dapp-kit@0.14.16 ## @mysten/dapp-kit@0.14.16 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 - @mysten/wallet-standard@0.13.1 - @mysten/zksend@0.10.5 ## @mysten/deepbook@0.8.15 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 ## @mysten/deepbook-v3@0.2.1 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 ## @mysten/enoki@0.3.16 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 - @mysten/zklogin@0.7.16 ## @mysten/graphql-transport@0.2.15 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 ## @mysten/kiosk@0.9.15 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 ## @mysten/suins-toolkit@0.5.15 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 ## @mysten/wallet-standard@0.13.1 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 ## @mysten/zklogin@0.7.16 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 ## @mysten/zksend@0.10.5 ### Patch Changes - Updated dependencies [a3e32fe] - @mysten/sui@1.6.0 - @mysten/wallet-standard@0.13.1 Co-authored-by: github-actions[bot] commit a3e32fec739bb02cf3dca1c3510a4179f481bd99 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Aug 13 11:26:46 2024 -0700 [ts sdk] replace WaitForLocalExecution with waitForTransaction (#18929) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit fe922b7022b92c5047a3e05c4d078484571bf643 Author: Eugene Boguslavsky Date: Tue Aug 13 17:24:45 2024 +0100 Revert "[indexer] merge in indexer breaking change park" (#18975) Reverts MystenLabs/sui#18899 commit bb3fc0b87b80e0c9f479cc4d5089e0f243aac582 Author: Zihe Huang Date: Tue Aug 13 08:48:09 2024 -0700 [docs] change custom indexer example Cargo.toml capitalization (#18970) ## Description Code inject link was broken. Renaming to fix it. Also added deprecation warning for event subscription ## Test plan by inspection --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Ronny Roland commit 7f05c707874a271f4b6b5d01f0a460a8fdb064ca Author: Xun Li Date: Tue Aug 13 07:58:33 2024 -0700 Fix fullnode event resolution (#18958) ## Description Introduce a new struct that contains both local package store and newly published packages. This allows us to resolve new events using both. ## Test plan I will add some tests later. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c90056e379e00bc710037bb847793426ef9765ac Author: Patrick Kuo Date: Tue Aug 13 15:46:29 2024 +0100 [bridge-indexer] - simplify bridge generic (#18908) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1913ce6baa4a17adc764690f06e3118d0c964e9c Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Aug 13 07:29:40 2024 +0100 [Consensus] avoid subtracting from current Instant (#18939) ## Description On Windows, `Instant::now()` can be close to 0 and we cannot subtract from that. ## Test plan CI. Verified on a Windows machine. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f644d71e70ee2692a4de34472c9cd7e126116448 Author: Xun Li Date: Mon Aug 12 20:08:46 2024 -0700 [denylist] Fix sign check early return (#18951) ## Description Describe the changes or additions included in this PR. ## Test plan Will add test in a separate PR. 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 77071e27701bb57d0156649104cd3e618cec5b55 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Aug 12 19:35:57 2024 -0700 [multisig toolkit] fix mouse events in account selector (#18969) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 7d30e9475b88fced5f162863ad85db301b76a01b Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Aug 12 19:16:09 2024 -0700 Fix cmdk not rendering account list (#18968) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5ed526aadd78647bd6efa309a9a5055d9555c503 Author: Zihe Huang Date: Mon Aug 12 16:57:00 2024 -0700 [docs] Local ingestion custom indexer docs update and example (#18957) ## Description Added the local ingestion custom indexer example as well as doc improvement ## Test plan Manually --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 7d103a93cd73cd16c6652246a2ec7fd24aa2b06e Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Mon Aug 12 17:43:37 2024 -0600 [docs][site] Landing page links (#18967) ## Description Changing order of landing page links. Testing effects on page views. Reexamining specific links used is beyond scope. Previous image Current image ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f8fa78597a237259e2de8b6e50650ffd5bc4a695 Author: Ashok Menon Date: Tue Aug 13 00:34:40 2024 +0100 fix: [GraphQL/Owner] rootVersion: UInt53 (#18966) ## Description The PR introducing the `rootVersion` parameter to `Query.owner` raced with the PR that introduced `UInt53`. This PR fixes the race by using `UInt53` as the type for `rootVersion`. ## Test plan ``` sui-graphql-rpc$ cargo nextest run ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: `Query.owner`'s `rootVersion` parameter should accept a `UInt53` rather than an `Int`. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 88d4fd454cb3aec7f074f8bb3381b6c4d5fc3ef6 Author: Godwin JIbs <126525197+Jibz-Mysten@users.noreply.github.com> Date: Mon Aug 12 17:09:00 2024 -0500 Fix isValidating state from showing invalid address (#18963) ## Description Prevent showing errors during isValidating. Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit f8176eb5ca1ffaa75a82b729ab3bf911e39810a9 Author: Ashok Menon Date: Mon Aug 12 22:29:58 2024 +0100 chore: [Examples] Remove sui_programmability (#18965) commit fc213865355d4fd172b4998bcad9c254ef3e8b99 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Mon Aug 12 17:08:36 2024 -0400 Version Packages (#18950) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/deepbook-v3@0.2.0 ### Minor Changes - 41361b6: Constants update, manager sdk update ## @mysten/enoki@0.3.15 ### Patch Changes - Updated dependencies [6f79ed9] - @mysten/zklogin@0.7.15 ## @mysten/zklogin@0.7.15 ### Patch Changes - 6f79ed9: Additional check for bigints Co-authored-by: github-actions[bot] commit 212f3de0fc89385c1e01bfcdc18b25b88158c906 Author: Emma Zhong Date: Mon Aug 12 21:06:46 2024 +0100 [indexer] merge in indexer breaking change park (#18899) ## Description Changes in this uhaul PR includes: - Addition of `objects_version` table - Addition of a number of transaction and event indices tables, with the removal of `tx_calls` table. - Enable pruning for the newly added indices tables and make tx sequence number a valid partition range, in addition to cp sequence number. ## Test plan tested locally --- ## Release notes This PR modifies the indexer db schemas for improving GraphQL query performance. Specifically, an `objects_version` table along with various transaction and events lookup tables are added. 
`tx_calls` table is replaced by more fine-grained tables `tx_calls_pkg`, `tx_calls_mod` and `tx_calls_fun`. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [x] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Ashok Menon Co-authored-by: wlmyng <127570466+wlmyng@users.noreply.github.com> commit 41361b6087a59dd4567ccc95ea984e9b7cb1c290 Author: Tony Lee Date: Mon Aug 12 15:49:11 2024 -0400 Deepbook SDK Update (#18954) ## Description Update balance manager and constants in SDK ## Test plan How did you test the new or updated feature? Test in testnet and feedback from integrators ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 42def2d87fa0888d714dc71fdf15adef7923e2a2 Author: Ashok Menon Date: Mon Aug 12 18:22:48 2024 +0100 [Docs] Sui Addresses are usually hex-encoded ## Description Base58 -> hexadecimal ## Test plan :eyes: --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 23e38a22f2db95b111f6234e03ca1dbdcaff8c52 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Mon Aug 12 10:30:39 2024 -0500 [data ingestion daemon] dynamodb progress store: make updates conditional (#18945) commit 6f79ed9749902ea5c126db4c115fab64b9d0de98 Author: Deepak Maram Date: Mon Aug 12 10:49:07 2024 -0400 [zkLogin] Check that bigints fit into the field for extra safety (#16891) ## Description Turns out we don't check anywhere that the inputs fit into the field and neither does the poseidon-lite library (it just truncates elements and fits them into the field). Instrumenting poseidon seems like the simplest way to do this. The other option (which I have not taken) is to instrument as soon as we receive some inputs.. ## Test Plan How did you test the new or updated feature? --- If your changes are not user-facing and do not break anything, you can skip the following section. Otherwise, please briefly describe what has changed under the Release Notes section. 
### Type of Change (Check all that apply) - [ ] protocol change - [ ] user-visible impact - [ ] breaking change for a client SDKs - [ ] breaking change for FNs (FN binary must upgrade) - [ ] breaking change for validators or node operators (must upgrade binaries) - [ ] breaking change for on-chain data layout - [ ] necessitate either a data wipe or data migration ### Release notes commit 3b034c7604e7631474ce00dad967f1633670026f Author: Joy Wang <108701016+joyqvq@users.noreply.github.com> Date: Mon Aug 12 10:42:56 2024 -0400 feat: enable karrier one for testnet/mainnet (#18898) commit c79f53f9391946e83152103cbb90c2065311d0ca Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Fri Aug 9 14:45:17 2024 +0100 [move] Bugfix in Move model for enums (#18938) commit 659b02b71f40f285d3ac6d204ef54c9cc1f5ebc6 Author: Adam Welc Date: Fri Aug 9 12:02:15 2024 +0100 [move-ide] Version number consistent with marketplace (#18937) ## Description I was a bit overeager in bumping up extension version number and this PR brings it back (to `1.0.10`) to be consistent with what should be the next version in the marketplace: ![image](https://github.com/user-attachments/assets/2b8788e9-8d35-431f-a8cc-b6653ae7023e) commit 8e032860f11e69dbd09cfac86e4b289a7646c750 Author: Eugene Boguslavsky Date: Fri Aug 9 10:40:18 2024 +0100 sui v1.32.0 version bump (#18934) ## Description Sui v1.32.0 version bump commit 1ce8f6bb3b90273820d3e78bf6d61e9928df17f0 Author: Bridgerz Date: Fri Aug 9 10:31:05 2024 +0100 Replace forge install with soldeer (#18930) ## Description Replacing the dependency management from manual forge install to use the recommended Soldeer dependency management tool. ## Test plan CI checks will test this update --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0de46e0c0ce3f22e2ff7f3001fccf6fdf1b1dd9e Author: Eugene Boguslavsky Date: Fri Aug 9 09:56:56 2024 +0100 Sui v1.31.0 Bytecode Snapshot (#18933) ## Description Sui v1.31.0 Bytecode Snapshot ## Test plan 👀 commit 3f4cab62afbf86870d37b187a6bfaabf4f33d30e Author: Adam Welc Date: Fri Aug 9 00:44:14 2024 +0100 [move-ide] Added support for use-s auto-completion (#18924) ## Description This PR adds support for `::` auto-completion for `use` statements. It is the last step towards finishing current level of `::` support started in https://github.com/MystenLabs/sui/pull/18778 ## Test plan All new and existing tests must pass commit 3e6540f7b09d14fcb953d0497b44f87fb65a7e76 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Aug 8 14:06:55 2024 -0500 [data ingestion] for lagging secondary only update local progress state (#18927) commit ba9495f13b3ee881fc13beeb727edaef5cead487 Author: William Smith Date: Thu Aug 8 12:14:35 2024 -0400 [snapshots] Add metric for number of local db checkpoints (#18926) ## Description Because we run snapshot upload and garbage collection as a background task but we perform rocksdb checkpointing at end of epoch unconditionally for configured nodes, any failure in either the upload or garbage collection path will lead to an accumulation of old db checkpoints, which will inevitably lead to disk filling and make things more difficult to debug. This PR adds a metric to periodically count the number of rocksdb checkpoints that exist on local disk. Except in rare cases (backfills), this number should generally be lower than 3, so we can add alerting on this for early intervention. 
## Test plan 👀 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6f4864b78b87fc7be3774aab30e85307b2dad696 Author: shio-coder <165585716+shio-coder@users.noreply.github.com> Date: Thu Aug 8 02:14:16 2024 -0700 Enable soft bundle on mainnet (#18876) ## Description This PR adds Protocol Version 54, where soft bundle will be enabled on mainnet. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Enable soft bundle on mainnet. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 58fd1d1d2cadec6fb2cc6b85df61727db56a2ad2 Author: Adam Welc Date: Thu Aug 8 02:00:29 2024 +0100 [move-compiler] Added parser resilience for use declarations (#18879) ## Description This PR adds parsing resilience when parsing use declarations. The idea is to recognize partially parsed statements (examples below and also in the new test) to enable auto-completion in the IDE. ``` use a::m2:: use a::m2::{foo use a::m2::{foo, bar use a::{m2::{foo, bar use a::{m2::{foo, bar}, m3 ``` ## Test plan New and all tests must pass --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Additional compiler errors for incomplete name access chains (such as `use some_pkg::some_module::`) might appear in the compiler output. - [ ] Rust SDK: - [ ] REST API: commit e4c9076d4f9a017aa0f64cb306e6bdeb639e57b0 Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Wed Aug 7 12:13:11 2024 -0500 [data ingestion] ensure current checkpoint watermark is greater or equal to pruning watermark… (#18922) this is required for setups that use colocation setups and have multiple processes/hosts that execute the same workflow with shared progress store commit 5cb3a2e41ea878ae8bdb70be3c6072be23087723 Author: Anastasios Kichidis Date: Wed Aug 7 17:32:25 2024 +0100 [chore] disable flaky test (#18919) ## Description Temporary disabling until further tuned by @williampsmith ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b1ebfff1ed0398c3263c8c62befa3877af2a566e Author: Brandon Williams Date: Wed Aug 7 14:37:17 2024 +0100 serde_json: don't use the arbitrary_precision feature commit af1588f0d3eb2aac1a04f23f35add9fd10184a9b Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Wed Aug 7 09:17:48 2024 -0500 [data ingestion] decrease default queue length for remote reader (#18889) should decrease pressure on remote store. The parameter can be bumped manually for backfill jobs commit 6bc8aeee2160c8a8065be343e0432f161d8c9c49 Author: Bridgerz Date: Wed Aug 7 14:41:01 2024 +0100 Eth Sync rework (#18604) ## Description Update Eth bridge indexer sync and live event subscription. ## Test plan Will include unit tests in a subsequent PR. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Brandon Williams commit e8e6b492a66956ca43e099887e2e94204881cf65 Author: Eugene Boguslavsky Date: Wed Aug 7 12:08:39 2024 +0100 Fix suiop version and include in builds (#18914) ## Description Fix `suiop` version and include in builds ## Test plan 👀 commit 1ce3dda3cadcb40a7ddcb8beef17a85769ca3d14 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Aug 7 09:49:20 2024 +0100 [move] Fixes for enums in Move model + add support in docgen (#18907) ## Description Fixes some issues with enums in Move model creation, and adds support for enums in docgen, and adding doc comments to enum variants. 
## Test plan Added a test for docgen with enums -- this tests both the bugfixes and also the generation of doc comments for enums. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 98603adc68a6363b5dc6ee2fed07025a6282dd3e Author: Eugene Boguslavsky Date: Wed Aug 7 08:59:02 2024 +0100 Revert #18910 (#18913) ## Description Revert #18910 Unbreaking https://github.com/MystenLabs/sui-operations/actions/runs/10278960227/job/28445039800 Will fix later ## Test plan 👀 commit 4c36d71b9f5a199f0607df8913980578b52101d3 Author: Xun Li Date: Wed Aug 7 06:51:36 2024 +0100 Enable tx finalizer on mainnet (#18906) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 226103220755b27dd3798f0193629804e25ae52c Author: Xun Li Date: Wed Aug 7 06:27:54 2024 +0100 Improve tx finalizer simtests (#18903) ## Description Introduce a timing config, and set the values differently for prod vs tests. This allows tests to run much more quickly. Re-enable the simtests in nightly. 
## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 191589271025c31a1a13f8f4cdd457986e845441 Author: Zhe Wu Date: Tue Aug 6 19:36:59 2024 -0700 Turn on shared object congestion control in mainnet (#18902) ## Description Says by the title. Going out in 1.3.1 ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e34d2b9005eacd220aa8fe08a322289d83e2775f Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Aug 6 17:58:40 2024 +0000 Version Packages (#18874) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/deepbook-v3@0.1.0 ### Minor Changes - 05fb3ac: Update deepbook addresses ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 ## @mysten/sui@1.5.0 ### Minor Changes - 0851b31: Deprecate requestType option when executing transactions ### Patch Changes - f37b3c2: Add PerEpochConfig and Cancelled to UnchangedSharedKind - Updated dependencies [f37b3c2] - @mysten/bcs@1.0.4 ## @mysten/wallet-standard@0.13.0 ### Minor Changes - 0851b31: Deprecate requestType option when executing transactions ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 ## @mysten/bcs@1.0.4 ### Patch Changes - f37b3c2: Improve error message when bcs enum contains unknown value ## @mysten/create-dapp@0.3.15 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 - @mysten/dapp-kit@0.14.15 ## @mysten/dapp-kit@0.14.15 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/wallet-standard@0.13.0 - @mysten/sui@1.5.0 - @mysten/zksend@0.10.4 ## @mysten/deepbook@0.8.14 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 ## @mysten/enoki@0.3.14 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 - @mysten/zklogin@0.7.14 ## @mysten/graphql-transport@0.2.14 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 - @mysten/bcs@1.0.4 ## @mysten/kiosk@0.9.14 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 ## @mysten/suins-toolkit@0.5.14 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/sui@1.5.0 ## @mysten/zklogin@0.7.14 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - Updated 
dependencies [f37b3c2] - @mysten/sui@1.5.0 - @mysten/bcs@1.0.4 ## @mysten/zksend@0.10.4 ### Patch Changes - Updated dependencies [0851b31] - Updated dependencies [f37b3c2] - @mysten/wallet-standard@0.13.0 - @mysten/sui@1.5.0 Co-authored-by: github-actions[bot] commit f37b3c21d90d1c5299f0f1bc94af0e4194e82094 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Aug 6 10:46:31 2024 -0700 [ts sdk] Add PerEpochConfig and Cancelled to UnchangedSharedKind (#18909) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit aa54e5a46cbea799205256b9c1d282c8bd5fe137 Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Tue Aug 6 05:16:21 2024 -0700 [suiop] add to internal binaries release list (#18910) ## Description [suiop] add to internal binaries release list commit ce145026587eacce9dfe3a56b45c5f09eac5029a Author: Cam Swords Date: Tue Aug 6 04:01:21 2024 -0700 [move][move-2024] Fix a small bug in how match was handled in parsing (#18880) ## Description This adds `Tok::Match` to the `at_start_of_exp` check in the parser ## Test plan Added tests that failed before the fix. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 429e1260b5a8b9a62b7bb62e81dbc829faa5464e Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Tue Aug 6 01:48:38 2024 -0700 [chore] Update `time` crate to a newer version to avoid compilation errors (#18904) ## Description Update `time` crate to a newer version to avoid compilation errors ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d9dc6ca01e8700d964f45482bf18f863b4fc85fb Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Aug 2 10:11:20 2024 -0700 [Consensus] update a few parameters (#18891) ## Description - With consensus latency consistently < 1s under max TPS, the consensus adapter limit can be significantly reduced even to support high TPS. - Reduce # of transactions that can be included in a block. ## Test plan Private Testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit aae147c62850b7d39f1af487ca32a70614e668de Author: Andrew Schran Date: Fri Aug 2 10:26:51 2024 -0400 Enable random beacon on mainnet (#18888) ## Description Enables the native randomness (random beacon) feature on sui mainnet. ## Test plan Extensive manual and automated testing. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Enables the native randomness (random beacon) feature on Sui Mainnet. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 0d765401a13b79ebfd49e403efe36e711a0b216a Author: jk jensen Date: Fri Aug 2 08:33:55 2024 -0400 [suiop] add aliases for image building (#18819) ## Description make it a bit easier to use ## Test plan works ```bash » suiop ci image list --repo sui-operations --image test Requested list for repo: sui-operations ╭──────┬──────╮ │ name │ tags │ ├──────┼──────┤ ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9702dfb58a123664b38c519cb2fde875ca92fb5b Author: Adam Welc Date: Thu Aug 1 15:03:59 2024 -0700 [move-ide] Added support for name chain completions in attributes (#18866) ## Description This PR adds support for handling name access chains in attributes. The goal here is NOT to add more general support smart auto-completion for attributes, though we can potentially do a lot of interesting things there. Instead, the focus here is to finish `::` auto-completion started in https://github.com/MystenLabs/sui/pull/18778 ## Test plan All new and old tests must pass commit 7efc10bd802040003532f310d475fb49849d55e0 Author: wlmyng <127570466+wlmyng@users.noreply.github.com> Date: Thu Aug 1 13:10:01 2024 -0700 Fix e2e test hanging on local runs (#18887) ## Description When running `cargo nextest run --test-threads 1 --package sui-graphql-rpc --test e2e_tests --test examples_validation_tests --features pg_integration` locally, these tests will hang on the first one. Explicitly sending a cancellation signal and waiting for the indexer and graphql handles to spin down seems to solve the issue. Would be nice to run these in parallel too, but punting them for when we have an embedded db setup. ## Test plan Tests pass locally --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8c453183740d482c5b035e10f90134f011aadd55 Author: Andrew Schran Date: Thu Aug 1 15:52:04 2024 -0400 Add long-epoch, rolling restart simtest (#18875) ## Description This reproduces prior bugs with random beacon. ## Test plan Manual repro testing on old commits. commit 05fb3aca05accd6dd96d1088036bfed557fde056 Author: Tony Lee Date: Thu Aug 1 20:26:16 2024 +0100 SDK Updates (#18871) commit af9a3574e00a4cf396c889b3254422447d365121 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Thu Aug 1 13:09:57 2024 -0600 [docs] DeepBook SDK (#18815) ## Description Adds DeepBook SDK. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8599d66ceb8666da15af0200ff63f0058fcfc4af Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Aug 1 13:04:34 2024 -0500 [data ingestion] update internal state of progress store wrapper (#18883) always update internal state of progress store wrapper. 
It should speed up cleaning up local files in case if the host is slow secondary commit 83863f2c4aba802ab2b102fe3fa19362cbaa320d Author: Brandon Williams Date: Thu Aug 1 11:59:04 2024 -0500 rocksdb: reintroduce workspace-hack scoped to typed-store Reintroduce a workspace-hack package (`typed-store-workspace-hack`) in order to perform feature unification specifically for the third-party dependencies which impact the building of rocksdb, the heaviest weight dependency we currently have. After this patch, no matter where in the workspace you perform a build, rocksdb will only be built a single time. For now the `typed-store-workspace-hack` package is manually maintained and was created with assistance from `cargo hakari` and `cargo tree`. commit 2a25217f7b62210edb962e0e76ebb2d371c3af11 Author: Brandon Williams Date: Thu Aug 1 10:25:50 2024 -0500 rocksdb: access rocksdb through typed-store This patch does some dependency shuffling to force accessing the rocksdb crate via the typed-store crate in order to try and limit how and when librocksdb-sys is built. Before this patch, when building all targets, librocksdb-sys was included in the dependency graph and built twice (with a total of 2005 targets). After this patch librocksdb-sys is included only a single time in the dependency graph and reduces the number of built targets to 1959. commit d3c7e0e09abb112dae21691d67faba73943a4b09 Author: Brandon Williams Date: Thu Aug 1 10:02:13 2024 -0500 narwhal: manually build grpc interface Manually build grpc interface instead of leveraging protoc in order to eliminate needing to build the expensive protoc compiler. commit 44150d4d4786743e0799c6cafd19169d737b90ee Author: mamos-mysten <122397493+mamos-mysten@users.noreply.github.com> Date: Thu Aug 1 09:48:34 2024 -0700 [wallet-ext]: fix: hidden assets button (#18884) ## Description Fixes appearance of hide assets button in the wallet extension ## Test plan How did you test the new or updated feature? 
--- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b4f6f3cfd0e600b6db6b153692e633bcf25ec877 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Thu Aug 1 08:03:31 2024 -0700 [Rust SDK] Handle unwrap in wallet context (#18882) ## Description Handle unwrap in wallet context ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e1540fde9d40af20f67f63fade2b0670f9da8958 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Thu Aug 1 06:32:47 2024 -0700 add blank and rel (#18881) ## Description - Add target and rel ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 1df30b2af1868153dad77fba3a2a43f297bc46a6 Author: devan-ko <143364659+devan-ko@users.noreply.github.com> Date: Thu Aug 1 20:30:05 2024 +0900 feat: enable ambrus aws cognito as zklogin provider (#18867) commit b5ca0ebe9d702ff5b5b17bb17663cc9f20a840c5 Author: Anastasios Kichidis Date: Thu Aug 1 10:55:00 2024 +0100 [Consensus] refactored data remover for store pruning (#18839) ## Description This PR refactors the store pruning for the consensus db. The following have been done: * renamed component from `EpochDataRemover` to `ConsensusStorePruner` as it's more accurate and easier to locate * use the `safe_drop_db` method to ensure more safety/robustness against deletions * added node configuration for the consensus db epoch retention and run interval * made the component attempt to prune old epoch dbs not only during epoch change but periodically as well (configurable) to ensure that there is a retry approach in case of transient failures enhancing the robustness. * more testing ## Test plan CI/private-testnet --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 32347c20c248f6691c6ab89ba53266e2f677ed9b Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Jul 31 22:54:28 2024 -0700 [rocksdb] increase write stopping threshold for L0 files (#18872) ## Description Currently for column families with high write rate, write stalling and stopping can happen after there are 24 pending L0 files, for example in consensus DB during consensus catchup. This hurts the throughput significantly and reduces the stability of the system. The # of L0 files to compact is reduced back to the default (4), to speed up L0 compactions. Also, for DB that optimizes for write throughput, the thresholds to stall and stop writes are further increased. 
Logs observed from one validator: ``` 2024/07/31-03:59:57.695047 2702658 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 24 level-0 files rate 16777216 2024/07/31-04:00:13.393421 2702607 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 24 level-0 files rate 13421772 2024/07/31-04:00:13.393593 2702607 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 24 level-0 files rate 10737417 2024/07/31-04:00:14.418687 2702901 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 25 level-0 files rate 8589933 2024/07/31-04:00:43.754068 2702656 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 25 level-0 files rate 10737416 2024/07/31-04:00:52.471606 2702597 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 25 level-0 files rate 8589932 2024/07/31-04:00:52.471784 2702597 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 25 level-0 files rate 9620723 2024/07/31-04:00:53.677837 2702901 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 26 level-0 files rate 7696578 2024/07/31-04:01:26.237337 2702597 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 26 level-0 files rate 8620167 2024/07/31-04:01:26.237494 2702597 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 26 level-0 files rate 6896133 2024/07/31-04:01:27.389744 2702901 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 27 level-0 files rate 5516906 2024/07/31-04:02:21.401986 2702597 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 27 level-0 files rate 4413524 2024/07/31-04:02:21.402179 2702597 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 27 level-0 files rate 3530819 2024/07/31-04:02:22.441728 2702901 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 28 level-0 files rate 2118491 
2024/07/31-04:03:18.346778 2702614 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 28 level-0 files rate 10066329 2024/07/31-04:03:18.346980 2702614 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 28 level-0 files rate 6039797 2024/07/31-04:03:19.198853 2702901 [WARN] [db/column_family.cc:991] [blocks] Stalling writes because we have 29 level-0 files rate 3623878 ``` There are no logs for stopping writes at 30 level-0 files. ## Test plan CI. Private testnet. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5a0febd2d09cc6a9a42236577e6238ef02bf21e0 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Thu Aug 1 03:55:21 2024 +0700 [Linter] Warn against freezing a type T with a capability-like name (ends in Cap or Capability) and capability-like usage (one or more functions gated with &T inside the package that declares T) (#17273) ## Description The lint identifies function calls that may incorrectly freeze such types, which can lead to design issues. Key Features: 1. Checks for specific freezing functions defined in constants. 2. Uses regex to identify capability-like type names. 3. Reports warnings for potential misuse of freezing on capability-like types. Implementation Details: The lint focuses on public_freeze and freeze functions in the sui::transfer module. It uses a regex pattern to match type names ending with "Cap", "Capability", or similar variations. When a match is found, it reports a warning with a custom diagnostic message. 
# Test plan Added more use case including false positive, false negative case ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: Move will now lint against freezing a potential capability object - [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Todd Nowacki commit 801989d439b052fee233c6291fd6162ef1be4936 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Thu Aug 1 01:37:39 2024 +0700 [Linter] Missing key (#16616) ## Description Adds a new linter rule targeting structs that have an 'id' field of type 'UID' but lack the 'key' ability. Key features of this linter: - Identifies structs with an 'id' field of type 'UID'. - Checks if these structs have the 'key' ability. - Issues a warning if the 'key' ability is missing. The linter specifically looks for structs that represent Sui objects. These are identified by: `has_id_field_of_type_uid`. This function checks for a field named "id" of type `sui::object::UID` ## Test plan Added more use case including false positive, false negative case ## Release notes - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: A new Move lint will warn when a struct has a `id: UID` field, but lacks the `key` ability - [ ] Rust SDK: --------- Co-authored-by: jamedzung Co-authored-by: Todd Nowacki commit 0851b31567e87b4d009fbe832cb09f45a43fabdd Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Wed Jul 31 10:41:18 2024 -0700 [sdk] deprecate requestType option in SDK (#18854) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c9a6d67e47ab5deab86639f8620a981d9e767033 Author: Brandon Williams Date: Wed Jul 31 11:26:39 2024 -0500 jsonrpc: use ExecuteTransactionRequestV3 when executing a transaction This patch deprecates the `WaitForLocalExecution` request type that was previously relied upon to calculate object and balance changes. Instead the new `ExecuteTransactionRequestV3` functionality is used to request output object data directly from the validators so that we no longer need to rely on waiting for local execution. The `WaitForLocalExecution` feature still exists and will still properly wait to return a response to a client until after the transaction is executed locally in order to retain the Read-after-Write semantics that some clients may presently rely on. commit be249a05b5b3cdc6d0bbc1d149a4ea79cba9e5eb Author: Patrick Kuo Date: Wed Jul 31 16:50:30 2024 +0100 [bridge indexer] - indexer refactoring (#18761) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Lu Zhang <8418040+longbowlu@users.noreply.github.com> commit e8ca7ad1ec873307e55b5f65faeed48b107e48d9 Author: Brandon Williams Date: Mon Jul 29 15:33:58 2024 -0500 rest: implement client with support for all existing endpoints commit c1b1e1e74c82b950e8d531f1b84c605d1ea957ca Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Tue Jul 30 17:26:38 2024 -0700 Version Packages (#18865) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. # Releases ## @mysten/sui@1.4.0 ### Minor Changes - 4419234: Add setGasBudgetIfNotSet helper to Transaction class ## @mysten/create-dapp@0.3.14 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 - @mysten/dapp-kit@0.14.14 ## @mysten/dapp-kit@0.14.14 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 - @mysten/wallet-standard@0.12.14 - @mysten/zksend@0.10.3 ## @mysten/deepbook@0.8.13 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 ## @mysten/deepbook-v3@0.0.1 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 ## @mysten/enoki@0.3.13 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 - @mysten/zklogin@0.7.13 ## @mysten/graphql-transport@0.2.13 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 ## @mysten/kiosk@0.9.13 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 ## @mysten/suins-toolkit@0.5.13 ### Patch Changes - Updated dependencies 
[4419234] - @mysten/sui@1.4.0 ## @mysten/wallet-standard@0.12.14 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 ## @mysten/zklogin@0.7.13 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 ## @mysten/zksend@0.10.3 ### Patch Changes - Updated dependencies [4419234] - @mysten/sui@1.4.0 - @mysten/wallet-standard@0.12.14 Co-authored-by: github-actions[bot] commit 0148d85f3b4a0d3bb7ea880c33825cef7fa9dfbe Author: Adam Welc Date: Tue Jul 30 17:08:01 2024 -0700 [move-ide] Added smart auto-completion for colon-colon (#18778) ## Description Currently, this PR implements most of the functionality for `::` auto-completion when the cursor is on one of the identifiers in the name access chains. There are some missing parts here, arguably better split to separate PRs: - handling of access chains in attributes - handling of access chains in `use` statements Also, this PR actually removes `:` completion (auto-completion start when typing the first character after `:`) which is consistent with what rust-analyzer does. While we can implement auto-completion right after `:`, the question is whether we should ## Test plan All new and old tests must pass commit b7b2a760cc7597e481a757fd5e9d4a32df25e166 Author: John Martin Date: Tue Jul 30 15:40:18 2024 -0700 [docs] update snapshot docs based off operator feedback (#18855) ## Description Got some feedback from a validator operator that the docs were a bit confusing, attempting to improve them. --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 82a0a27f95022ffa8c9efa0546410c6ed13d6ed2 Author: shio-coder <165585716+shio-coder@users.noreply.github.com> Date: Tue Jul 30 15:16:44 2024 -0700 Soft bundle basic observability (#18807) ## Description The PR adds several metrics and logging to Soft Bundle. A node operator will have the ability to understand what bundles are submitted, as well as the content. 
This can be particularly useful when tracing down a transaction. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 4419234c8a77a1e22b10b1de0e4c2ed1d8b435ed Author: Tony Lee Date: Tue Jul 30 23:04:21 2024 +0100 Deepbook SDK (#18610) commit 3053ad99496f832d5a13ff51b33be55f3e359052 Author: Ashok Menon Date: Tue Jul 30 21:18:12 2024 +0100 [main][GraphQL] Fix pruning compatibility issues (#18862) ## Description Two further fixes to get GraphQL working with pruned databases. Cherry-pick of #18860 **into main**. ## Test plan Tested by deploying this version of the service against a pruned DB, and ensuring the following query executed successfully: ```graphql query { chainIdentifier protocolConfig(protocolVersion: 10) { featureFlags { key value } } } ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Adds support for running GraphQL against a pruned version of the service. 
- [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit de9d14fa86b1c8c2b450458e5f4aad37fd00b362 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Jul 30 12:54:59 2024 -0700 introduce MeteredEthHttpProvider that meters rpc usage (#18833) ## Description `MeteredEthHttpProvider` counts every eth rpc query and its latency. This will be useful to track the eth rpc usage. ## Test plan unit tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit b3f67dfbd1b367bdb3763add1cdf838af7b92acf Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Jul 30 12:53:52 2024 -0700 [bridge] move add new coin's handling to monitor (#18831) ## Description Today, when receiving an `AddNewToken` event, orchestrator updates the sui type tags map and send to the watch channel that action execution watches. In this PR, we consolidate the handling of this event with the newly added monitor module. In the new approach, orchestrator sends the event to monitor, instead of handling it in place. Also the type tag maps is stored in a `ArcSwap` to allow monitor to change it. Hence we get rid of the watch channel. ## Test plan Added unit tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 447707c01284f450a97817bb6d9735bc5ad8ba28 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Tue Jul 30 12:33:25 2024 -0700 update promo setup (#18858) ## Description - Rounded corners in apps list - Bake in centering and padding for interstitial ![Screenshot 2024-07-30 at 8 37 13 AM](https://github.com/user-attachments/assets/b3cd8f9b-a615-4abc-a49b-db072054b059) ![Screenshot 2024-07-30 at 10 53 57 AM](https://github.com/user-attachments/assets/e7e11c05-f8f7-4455-b491-d63f89b910e6) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8d68ee64149d2b8adf2b0b4f069b6e80c39021f4 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Jul 30 12:06:06 2024 -0700 allow custom validator num in bridge tests (#18835) ## Description As title, this will make some e2e tests easier to write. It's worth noting that surprisingly this does not help with fullnode sync up speed in the test, which is the main source of slowness in bridge e2e tests. ## Test plan existing tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 6140ef850f4caad3a852d265cdc324c4cac4721a Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Jul 30 11:58:09 2024 -0700 [bridge] do not submit tx to sui if paused (#18828) ## Description in `ActionExecutor`, use `bridge_pause_rx` to decide whether to submit tx to Sui. ## Test plan new unit test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 129f420e3114e132a1b4acbd47a422ddf0edd8f0 Author: Andrew Schran Date: Tue Jul 30 12:53:18 2024 -0400 revert ci test log output back to default (#18859) commit 0e3903dd6176f441f39ce88c0af7db298a188383 Author: Brandon Williams Date: Tue Jul 30 10:45:25 2024 -0500 simtest: fix config-patch commit 75a729d5ad901b9776d34ca985eab8f6a5a435db Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Jul 30 08:00:01 2024 -0700 Remove wait timeout from LazyMysticetiClient (#18853) ## Description Getting a consensus client only fails when the validator has not finished initializing consensus. This issue can and should be detected inside consensus instead of through client. Also, clear consensus client at the end of epochs. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 33c65ab633d3dee3144bb7d2a3450835d406a348 Author: Emma Zhong Date: Tue Jul 30 05:48:31 2024 -0700 [gql][indexer] index chain identifier into its own table (#18825) ## Description This PR puts chain identifier into its own table so that queries of chain identifier does not depend on the existence of checkpoint 0 in the db, which may be pruned away. ## Test plan Tested against devnet locally and added a gql e2e test. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Added a way to always have `chainIdentifier` query return the correct chain ID even when pruning is enabled. - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Co-authored-by: Ashok Menon commit 5056e4f192f6b57f2ed507a6a292a0d85c66a47b Author: Rijnard van Tonder Date: Tue Jul 30 09:09:47 2024 +0200 docs: update outdated mention on Move.toml (#18837) ## Description Outdated doc mention as of address management: > The publish process replaces the `0x0` address with an actual on-chain address. ## Test plan N/A --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 17454e6449157be9abb208e35e42403b04831bed Author: veth <162897869+VitalikButerinEth@users.noreply.github.com> Date: Tue Jul 30 12:12:45 2024 +0800 chore: fix some comments (#17992) ## Description fix some comments ## Test plan No need. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: Signed-off-by: VitalikButerinEth commit 90809ef227f9848df8597292f761d08e89a29c39 Author: plam-ml <127577476+plam-ml@users.noreply.github.com> Date: Mon Jul 29 16:03:21 2024 -0700 update wallet connect logo (#18821) ## Description https://linear.app/mysten-labs/issue/APPS-65/update-wallet-logo ![Screenshot 2024-07-26 at 1 09 36 PM](https://github.com/user-attachments/assets/47fc5d1d-1b20-4be4-8406-2c875fff70b3) ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 14f02fb4f322a0247f13922ffa00de1c932f49c1 Author: Todd Nowacki Date: Mon Jul 29 15:19:03 2024 -0700 [move] Mark sui::math as deprecated (#18849) ## Description - Adds the `deprecated` annotation to `sui::math` ## Test plan - Ran tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [X] CLI: The Move module `sui::math` has been deprecated. The individual integer modules, e.g. `std::u64`, should be used instead. - [ ] Rust SDK: - [ ] REST API: commit 982024ab1793c98c87330b1dc50d737141494479 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Jul 29 15:05:18 2024 -0700 Fix edge-case in assert which can be triggered from jsonrpc (#18843) See comments in code for explanation. I haven't been able to repro this in a test, but I'm pretty confident about the source of the bug. commit 0512a87dfac161044b2833878e2ad0a7a8103583 Author: Todd Nowacki Date: Mon Jul 29 11:09:05 2024 -0700 [move] fixed find_index (#18842) ## Description - Fixed find_index not being by-ref ## Test plan - new tests that force no `copy` possible --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9edb72257d9a54cc84a6d3fd2b26cc28fb579338 Author: Joy Wang <108701016+joyqvq@users.noreply.github.com> Date: Mon Jul 29 12:37:28 2024 -0400 fix: handle nondeterminism in test (#18841) ## Description my guess is the modified signature byte is already 0 and does not need modification. fix here is to deterministically flip the byte. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit eb55624e7a5decda7f4946fe86d2b96b3aed6fb1 Author: Todd Nowacki Date: Sun Jul 28 13:56:41 2024 -0700 [move] Copy Sui's Move stdlib into external-crates. Bump the default package version to 2024 beta. (#18827) ## Description - Copied Sui's stdlib over to external-crates - Changed the default edition for package-less files in the compiler to 2024 beta. ## Test plan - Ran tests, updated where necessary --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 25017335f15a4b5bda591b78364faf50d1ffe40a Author: Rijnard van Tonder Date: Fri Jul 26 17:33:14 2024 -0700 move: dump-bytecode-as-base64 uses Move.lock addresses (#18794) ## Description `sui move build` does not ordinarily require a network connection because it doesn't need to know about a chain's ID. The exception is when `--dump-bytecode-as-base64` is specified: In this case, we should resolve the correct addresses for the respective chain (e.g., testnet, mainnet) from the `Move.lock` under automated address management. Two options to fix `sui move build --dump-bytecode-as-base64` to work with automated addresses / by resolving from the `Move.lock`: 1. Require an extra `--chain-id` flag on `sui move build`, which a user must specify along with `--dump-bytecode-as-base64`. E.g., ``` sui move build --dump-bytecode-as-base64 --chain-id "$(sui client chain-identifier)" ``` OR 2. Require a network connection _only when_ `--dump-bytecode-as-base64` is set and and resolve the chain-id without requiring a flag. This PR opts for **(2)**, but it is trivial to change the implementation and test to do **(1)** instead. **(1)** should come with an accompanying doc change though. Context: encountered when running, e.g., ``` execSync(`${SUI} move build --dump-bytecode-as-base64 --path ${packagePath}` ``` ## Test plan Added test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Fixed an issue where `--dump-bytecode-as-base64` did not work as expected if [package addresses are automatically managed](https://docs.sui.io/concepts/sui-move-concepts/packages/automated-address-management#adopting-automated-address-management-for-published-packages). - [ ] Rust SDK: - [ ] REST API: commit db844c3aeaf4378eeb003177c0d3973145608051 Author: Joe Hrbek <123987499+suiwombat@users.noreply.github.com> Date: Fri Jul 26 17:28:31 2024 -0500 [docker/tidb-indexer] (#18824) ## Description add dockerfile for tidb indexer ## Test Plan tsia --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2f48f32149a25923acadab6d5234a5e566165d94 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Fri Jul 26 15:26:27 2024 -0700 [Consensus] bind to localhost if requested (#18823) ## Description This avoids external network on local cluster tests. ## Test plan `cargo nextest run -p sui-swarm` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 344117ced2a10643d4a2cb8eb4f72e9adde1bd36 Author: John Martin Date: Fri Jul 26 15:06:29 2024 -0700 log the file that we fail to fetch (#18822) commit 50a0d38d6af54fbd0d4364d2fec12412ca681e60 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Jul 26 14:36:37 2024 -0700 [bridge] let monitor handle emergecny op (#18791) ## Description as title. Note the actual usage of the watch channel hasn't been wired up in ActionExecutor. It will be in the next PR. ## Test plan unit tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 93caf44bab65241f3417db15135ba5919a45f6a7 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Jul 26 13:53:07 2024 -0700 [bridge] let monitor handle blocklist event (#18792) ## Description as title ## Test plan added unit tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3dd9ddd81f2ecba0b700fc19e18b80f20663f488 Author: Ashok Menon Date: Fri Jul 26 21:39:53 2024 +0100 [Move/Examples] Switch to datatest (#18813) ## Description Use `datatest-stable` to find all the Move examples we might want to build and test, instead of stashing this away in a rust test. ## Test plan ``` sui$ cargo nextest run -p sui-framework-tests --test move_tests ``` + CI Closes #18802 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit eca7e4551560670a62b331b54b463e145d820532 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Jul 26 12:54:44 2024 -0700 [bridge] add BridgeMonitor and handle url change event (#18790) ## Description This PR adds `BridgeMonitor` which receives all `SuiBridgeEvent` (and probably `EthBridgeEvent` as well soon) and handles them accordingly. In this PR we add the handling for `CommitteeMemberUrlUpdateEvent` by retrieving the latest committee onchain and swap it in. ## Test plan added unit tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 82c3ec2a859bc8bbfceb761783868ae9b9c61619 Author: Brandon Williams Date: Fri Jul 26 14:03:42 2024 -0500 cargo-deny: update deny.toml to clear out warnings commit 83b9cd6097f028262189e4f3adefaf88ce69d815 Author: Brandon Williams Date: Fri Jul 26 13:08:41 2024 -0500 suiop: fix test to use axum v0.7 commit e3de95064e9afa3c603721c667e9bdaf8474a533 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Fri Jul 26 17:19:37 2024 +0000 Version Packages (#18814) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/create-dapp@0.3.13 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 - @mysten/dapp-kit@0.14.13 ## @mysten/dapp-kit@0.14.13 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 - @mysten/wallet-standard@0.12.13 - @mysten/zksend@0.10.2 ## @mysten/deepbook@0.8.12 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 ## @mysten/enoki@0.3.12 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 - @mysten/zklogin@0.7.12 ## @mysten/graphql-transport@0.2.12 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 ## @mysten/kiosk@0.9.12 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 ## @mysten/suins-toolkit@0.5.12 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 ## @mysten/sui@1.3.1 ### Patch Changes - a45f461: Shared objects passed to MakeMoveVec, MergeCoins, and SplitCoin are now marked as mutable ## @mysten/wallet-standard@0.12.13 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 ## @mysten/zklogin@0.7.12 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 ## @mysten/zksend@0.10.2 ### Patch Changes - Updated dependencies [a45f461] - @mysten/sui@1.3.1 - @mysten/wallet-standard@0.12.13 Co-authored-by: github-actions[bot] commit c1a75c997fcbff71a09e4156b27b2d3ee2ef58df Author: shangchenglumetro Date: Sat Jul 27 02:07:25 2024 +0900 chore: fix some comments (#18803) ## Description fix some comments ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a45f461a10c54b1688d1a874e7a16129d110d5b0 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Fri Jul 26 09:57:51 2024 -0700 [sdk] mark shared inputs passed to makeMoveVec, mergeCoins, and split… (#18812) …Coins as mutable ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5b8b0505cd36da65fad25181a2c311dac74da61b Author: Andrew Schran Date: Fri Jul 26 11:33:28 2024 -0400 Corrected filter for `validator_tx_finalizer_e2e_tests` (#18811) commit 07446bac38a8a3892633069e9a1deb683052d42e Author: Brandon Williams Date: Wed Jul 24 09:54:42 2024 -0500 chore: update hyper et al Update a number of http related libraries, including but not limited to: - hyper - rustls - axum commit e8b092e3fbd6f4ce8fdb82359e60e206c86c2d83 Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Thu Jul 25 21:06:17 2024 -0700 Sui Version Bump v1.31 (#18810) ## Description Sui Version Bump v1.31 commit 41df25dd623f76844d64f64670b8ff8b87291f6c Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Thu Jul 25 20:09:06 2024 -0700 Sui v1.30.0 snapshot (#18809) ## Description Sui version bump v1.30.0 commit e2aab492da818c5960796919850a6768a0ebcd3c Author: Cam Swords Date: Thu Jul 25 16:49:39 2024 -0700 [move][ide] Change parsing / expansion to 
report IDE suggestions for missing types (#18744) ## Description This modifies the parser to build `UnresolvedError` for types that don't parse, and then uses those during expansion to provide IDE alias information at those locations. ## Test plan New IDE tests, plus everything else still works as expected. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ec7527c73816979e3423ee8f38ec57090d1579d4 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Jul 25 14:34:21 2024 -0700 [bridge cli] print data in json and print more data (#18742) ## Description 1. now you can use `--network testnet` to avoid having to provide proxy address 2. add more stuff to print functions, including nonces 3. print everything in json, for easier piped processing See offline nodes: ``` sui-bridge-cli view-sui-bridge --sui-rpc-url https://rpc.testnet.sui.io:443 --ping --hex | jq '.result.committee[] | select(.status == "offline")' ``` See eth nonce: ``` sui-bridge-cli view-eth-bridge --network testnet --eth-rpc-url https://ethereum-sepolia-rpc.publicnode.com ``` See sui nonce: ``` sui-bridge-cli view-sui-bridge --sui-rpc-url https://rpc.testnet.sui.io:443 --ping --hex | jq '.result.nonces' ``` ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d1efa1c976c49db00a47cadd08c1b17d97bdb7fd Author: Anastasios Kichidis Date: Thu Jul 25 22:29:22 2024 +0100 [Consensus] refine logging & small timeout changes (#18795) ## Description * Increase timeout when waiting for consensus to start from `30s -> 60s`. Came across cases where commit recovery was taking a bit longer and might make sense to have a bit more tolerance. * Add more logs in consensus components and switch them to `info` - it's ok since those should be printed once per epoch * Make the consensus protocol choice log a `debug` . The method `get_consensus_protocol_in_epoch` is now being used from `get_max_accumulated_txn_cost_per_object_in_commit` which is called on every sequenced transaction leading to a very spammy log output: Screenshot 2024-07-25 at 11 37 09 ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit a4812b9c5c1a477d845ab692d2f0a87829ef76be Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Thu Jul 25 15:57:50 2024 -0500 [data ingestion] decrease batch size for analytics (#18804) commit 69a9e6b17dc38a8f054dfc691cd4341243aae161 Author: Brandon Williams Date: Thu Jul 25 14:39:14 2024 -0500 chore: update anemo and quinn (#18800) commit aa291ae4b71d765beea93d2200ebcaabf35194ad Author: Andrew Schran Date: Thu Jul 25 14:13:46 2024 -0400 Skip validator_tx_finalizer_e2e_tests in simtest-run.sh (#18801) Until timeout issues can be debugged. commit 2c3c77490029c0ce05180be8e6fcc69fc5b03d9d Author: Carry Wang Date: Fri Jul 26 00:32:12 2024 +0800 [Docs] Update utils.mdx (#18782) ## Description According to the function definition, the Docs description is backwards function `fromB64` receive `base64 string` return `Uint8Array` But in the description of the Docs it is the opposite : >`fromB64`: Serializes a Uint8Array to a base64 string image image ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c93dc8381cd572b6217260136e9d39bd4ce5cffa Author: Elias Rad <146735585+nnsW3@users.noreply.github.com> Date: Thu Jul 25 19:27:53 2024 +0300 docs: fix spelling issues (#18799) Fix typos in docs. 
commit 4e307674cf29944fca10a7b728e0c149e4eaf860 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Jul 25 09:22:37 2024 -0700 [Consensus] increase commit syncer fetch timeouts (#18793) ## Description Validators with limited network need larger timeout for the requests to succeed. In future, we will add support for partial results. ## Test plan n/a --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 255d868bfbe4776e1e99e59dfd207c50291a8485 Author: Brandon Williams Date: Thu Jul 25 10:10:42 2024 -0500 nextest: extend slow timeout to 5min commit 4092dbd56827a1d4419fbee31672a3dfbe9d9c91 Author: Brandon Williams Date: Thu Jul 25 09:10:59 2024 -0500 chore: update reqwest to 0.12 commit 3aa1f2eb98587d3e299b1561a0b2a324822dce82 Author: Brandon Williams Date: Thu Jul 25 08:47:27 2024 -0500 mysten-network: remove unused uds support commit c345bc340666cc4fd0dceed72128df2bae3ec409 Author: Brandon Williams Date: Thu Jul 25 08:45:50 2024 -0500 chore: update msim commit c7de614e83d27ec916b72c62ba2fd0200813bb57 Author: William Smith Date: Wed Jul 24 18:39:41 2024 -0400 [StateAccumulator] Remove v2 disable functionality from node config (#18786) ## Description Before removing v1, we need to ensure that no one has v2 force disabled, as it would otherwise lead to a fork during upgrade due to forcing v2 to be enabled mid-epoch. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit edac0d98153c6a0f6ebcbcd65024f71de698e37d Author: Andrew Schran Date: Wed Jul 24 17:28:20 2024 -0400 Revert "Enable random beacon on mainnet" (#18788) Reverts MystenLabs/sui#18756 commit 37109ef173af4580e5cad5a5ee88ded68ec8fed9 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Jul 24 13:54:32 2024 -0700 [sui-execution] First step towards allowing versioning move-vm-types (#18775) ## Description First step towards moving `move-vm-types` to being execution versioned. ## Test plan Existing CI tests. commit dd56ccb447bf84fec2b5aca530acd550d13c2168 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Wed Jul 24 13:40:18 2024 -0700 [bridge] store bridge authority aggregator in ArcSwap (#18780) ## Description As title. In follow up PRs, we will introduce monitoring component to swap the aggregator when there are changes to committee members (e.g. url change) ## Test plan existing tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5849d3f65c04d6c512b159e9839b70876a16ebdd Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Jul 24 13:15:16 2024 -0700 [Consensus] make config optional (#18784) ## Description 1. 
Make consensus parameters optional in node config, since it has not been populated before. 2. Make consensus_config::Parameters::db_path non optional. It is always set from the node config after integration with Sui. If empty path causes problem, it needs to be detected from Sui or within consensus. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 5935eabf0e499c4d3d3bae4463a4ba1938db3cf4 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Jul 24 12:37:31 2024 -0700 Transaction must be written before executed_effects_digests (#18785) This fixes a very unlikely race where: - notify_read_executed_effects is called - the effects are available and are returned immediately - transaction itself has not yet been written to pending_transaction_writes - reader thread tries to read the tx and asserts that it exists commit 5d2506cb2886c9a11879447db04f068654d3e85d Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Wed Jul 24 12:26:37 2024 -0700 [sui-tool] Add support for config objects to the sui-tool replay command (#18743) ## Description This adds support for (manually) supplying the config objects to use when replaying a transaction. These must be provided for both state dumps and network-based replays. We will refuse to replay a transaction that has a deny-list related error (global pause, or address denial) unless configs are supplied. We will insert the latest version of the deny-list though if you don't supply it and we can see you will need it (due to a deny-list error). 
In this case we just pick the most recent version of the deny list as the version doesn't matter, and this will always not be pruned. This is only plumbed into the `sui-tool` replay command as I would prefer to not expose this ugly of an interface to the CLI, and instead support this in the future there once we have a better solution for getting these objects (e.g., with graphql). But happy to add it in if we feel it's necessary. ## Test plan Tested it on devnet: You can try it out on any transaction from here: https://explorer.polymedia.app/address/0xb685a6c1516b640646bc534239e5522482cc11af4e15a4cf7054b7314bb2a8d3?network=devnet ``` # global pause replay sui-tool replay --rpc https://rpc.devnet.sui.io:443 tx --tx-digest 9mXi3JSKYg9UeQbpq2YGgqByua6efn4cQUqgVyFFLmy4 --config-objects 0xb7ff3d164ce1490466b56ae1743796016f318e0afd62ab84168fdc2ba5a881fe 10 0x7cd46c8fb69f11bae382be7984cec1063dfa8a2de826fcc92f9e28a2b17cd8e3 13 0x2379389dd2b0fa44c1cbeb93c737f96674e7c352add67ec902eb58512e41334c 12 ## Address denial replay sui-tool replay --rpc https://rpc.devnet.sui.io:443 tx --tx-digest Af84RAY1Pf6GaAG9qXhHvTbcSGt7HM8E3zeGhWV5FUdt --config-objects 0xb7ff3d164ce1490466b56ae1743796016f318e0afd62ab84168fdc2ba5a881fe 10 0x7cd46c8fb69f11bae382be7984cec1063dfa8a2de826fcc92f9e28a2b17cd8e3 13 0x2379389dd2b0fa44c1cbeb93c737f96674e7c352add67ec902eb58512e41334c 12 ``` commit 21aeeae879548b5a6974021d178b91548aada216 Author: Tom Cat <48447545+tx-tomcat@users.noreply.github.com> Date: Thu Jul 25 01:52:45 2024 +0700 [Linter] Refactor: Optimize Linter Configuration and Visitor Generation (#18755) ## Description Function Updates: Updated the known_filters function to use the LinterDiagnosticCategory::Style enum variant. Simplified the linter_visitors function by combining the None and Default cases into a single match arm, reducing redundancy. Naming Consistency: Ensured consistent naming conventions throughout the code. 
Removed Unused Code: Eliminated any unused code or imports to improve code cleanliness. ## Test plan Existing linter unit tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ed7987c3ec308b4380632fe1552ee95692eb2e1e Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Wed Jul 24 12:05:29 2024 -0600 [docs] Retrieving edits (#18783) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9d2292ead2e6a3ecbb27efc134a7b40373ce2985 Author: Damir Shamanaev Date: Wed Jul 24 19:48:52 2024 +0200 [framework] vector macros wave 2 (#18702) commit 51b3d50a06b55fb8cacbcfd000fcda8549cdf7d6 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Wed Jul 24 10:01:06 2024 -0600 [docs] Updates to wallet standard (#18765) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: Alex Tsiliris commit d4fa9125f483dee93cb625d2c4529998e855b750 Author: Andrew Schran Date: Wed Jul 24 11:30:55 2024 -0400 Enable random beacon on mainnet (#18756) ## Description Enables the native randomness (random beacon) feature on sui mainnet. ## Test plan Extensive manual and automated testing. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [x] Protocol: Enables the native randomness (random beacon) feature on sui mainnet. - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 25d2f3087a3797184d141929a4cf3dfcb244604f Author: Zhe Wu Date: Tue Jul 23 23:48:37 2024 -0700 Customize static initialize move dir in simtest (#18752) ## Description So that other projects can choose their own move package for this static initialization. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e95007c54f534ce375abc0aac7cc02551f496f00 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Jul 23 22:03:26 2024 -0700 Integrate consensus Parameters into NodeConfig (#18767) ## Description This allows setting consensus Parameters via NodeConfig. Also, do some minor cleanups. But the bulk of the cleanup will happen when we stop supporting Narwhal. ## Test plan CI. Private testnet. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ce374dff6cc0d246245a95d853b5edd3eb27c942 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Jul 23 20:05:33 2024 -0700 Ignore RUSTSEC-2024-0358 (#18779) ## Description This specific vulnerability does not seem to impact Sui. Upgrading the affected package involves upgrading dependencies which will take more time. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 8169f0529f477c52a2565404e406a70ce3aefa8e Author: Zihe Huang Date: Tue Jul 23 16:20:15 2024 -0700 [docs] add custom indexer example to doc page (#18711) ## Description Linking custom indexer example to doc page ## Test plan manually --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit f72859afcf0fb35085ae4475b03c0b96c4e36e76 Author: kyoshisuki <143475866+kyoshisuki@users.noreply.github.com> Date: Tue Jul 23 15:50:33 2024 -0600 Update SECURITY.md (#14675) Fixed a typo in SECURITY.md commit a345ba497b14882c6e4cb242799742c6d18bf171 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Jul 23 13:26:34 2024 -0700 Enable writeback cache in antithesis (#18776) commit eda1abffb11c924479ebdaa0d58d9362d8ad8d8e Author: Cam Swords Date: Tue Jul 23 13:12:33 2024 -0700 [move][move-2024] Add macro visibility error tests (#18741) ## Description Add tests I wrote but forgot to include on #18735 😭 ## Test plan New tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 46f24c321f3dda0f411dd2a8745fa153c7f32b30 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Tue Jul 23 11:26:11 2024 -0600 [docs] Content updates and style changes (#18745) ## Description Addresses requests made by aslan. Adds a style guide entry for using code in headings. Previous opinions were 50/50 and he tipped the scale. Original request was to remove the bullets in `swap_exact_quote_for_base` to make it the same as the previous description. But the suggestion is to keep the bullets to break up the content and make the object list easier to read. Also breaks up the last paragraph on Design because it looked weird. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 21d2a340859353732e314adcc597ec4123d03dda Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Tue Jul 23 09:00:35 2024 -0700 Refactor consensus output to write to an intermediate store before going to the db (#18447) This PR should have no behavior changes. It introduces a new struct, `ConsensusCommitOutput`, which stores all writes generated while processing a consensus commit, prior to writing them to the db. This is the first stage of caching/quarantining consensus-specific epoch-db state. 
The next step will be to hold `ConsensusCommitOutput` structs in memory until the checkpoints created for the commit have been certified. This will also require reading from `ConsensusCommitOutput` (or more likely, a broader caching struct which holds information from a set of `ConsensusCommitOutput` objects), since required information will not always be available from the db. commit 7aea45687c04e00194742f41ec806bd3b2ed93a5 Author: Brandon Williams Date: Wed Jul 17 11:45:27 2024 -0500 rest: define openapi schema for transaction execution commit 5dc8975e4a829b32b95b8c74ffea630b10188686 Author: Brandon Williams Date: Wed Jul 17 10:30:29 2024 -0500 rest: define openapi schema for committee endpoints commit 80814b4fb9234b6b5b4fa6e1805e849cf8d0e744 Author: Brandon Williams Date: Wed Jul 17 10:26:33 2024 -0500 rest: define openapi schema for system endpoints commit 6845eea9ed086bc8f7a5f7de2f9a2e364ad52ec1 Author: Brandon Williams Date: Wed Jul 17 10:07:34 2024 -0500 rest: define openapi schema for get_coin_info endpoint commit 663da05417d78b033a5adfe5992cfaebf22437a0 Author: Brandon Williams Date: Wed Jul 17 10:06:35 2024 -0500 rest: add 404 response type to object endpoints commit bc456c8819e834acd97e8f62c9b708500d07e865 Author: Brandon Williams Date: Wed Jul 17 09:52:57 2024 -0500 rest: fix TransactionResponse serialization format commit 36fa74f916eeb1602c71c3f3938d3ebd06a7f66c Author: Brandon Williams Date: Wed Jul 17 09:51:16 2024 -0500 chore: update sui-rust-sdk with hash feature enabled commit 808410fae71e35d545e3322e1eb680c57315fba8 Author: Brandon Williams Date: Wed Jul 17 09:34:28 2024 -0500 rest: define openapi schema for checkpoint endpoints commit 730f2c63338484e881ae9372fb2821948059a620 Author: Brandon Williams Date: Wed Jul 17 09:21:08 2024 -0500 rest: define openapi schema for health check endpoint commit 5d47ed35044ff5b8bb76628c73aafc933846ba16 Author: Brandon Williams Date: Tue Jul 16 13:50:21 2024 -0500 rest: correct list_account_objects schema commit 
179b98931a5428dee2106307b6806434138496ee Author: Brandon Williams Date: Tue Jul 16 13:49:54 2024 -0500 rest: correct get_node_info schema commit c0672be34952f1b80327a9917a5fe0d1b9c655df Author: Brandon Williams Date: Tue Jul 16 13:48:15 2024 -0500 rest: add a JsonSchema for U64 commit e1418dfe2a5246c1dcc16590982d34ee870400d5 Author: Brandon Williams Date: Tue Jul 16 13:47:44 2024 -0500 rest: properly return preferred Accept format commit 8ca9eae404b956bca9820bedab0ae2558eabb554 Author: Stefanos Pleros <36567567+StefPler@users.noreply.github.com> Date: Tue Jul 23 17:44:07 2024 +0300 [Docs - NFT Rental] Updates broken link (#18769) ## Description Updates a broken link in the [related links](https://docs.sui.io/guides/developer/nft/nft-rental#related-links) section ## Test plan Run locally the doc site and tested that the link has been successfully updated. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 36f6a8652baf57e1ec3172109ccf01c337d01d6d Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Tue Jul 23 10:38:24 2024 -0400 indexer: move partition dropping to pruner (#18762) ## Description split epoch advance and partition dropping, and move partition dropping to pruner so that all pruning tasks are in sync and tracked by pruner watermark table. ## Test plan local run and verify that - partition dropping still works - pruning are indeed in sync and accurately tracked by `pruner_watermark_cp` table --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit c24115c282a53601dfd2778a491841cd649fc079 Author: Todd Nowacki Date: Mon Jul 22 18:58:02 2024 -0700 [move-2024] Fixed issue with macro fun declarations causing circular dependencies (#18747) ## Description - Macro signatures were erroneously being included in dependency checking ## Test plan - Added a new test and macro --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 907e571805570bbaf5dda22884c071cfe1f5edf3 Author: techdebt-99 <150741822+techdebt-99@users.noreply.github.com> Date: Mon Jul 22 18:55:44 2024 -0600 Update zklogin.mdx (#18766) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit ef4639cc7095820e7ff8471bf77fa2a46a81a719 Author: Andrew Schran Date: Mon Jul 22 15:18:23 2024 -0400 add randomness workload to sui-benchmark (#18758) commit ff941c13f7dafe3394191309d6b32c3d498deacf Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Jul 22 10:29:09 2024 -0700 [DB] increase parallelism per db from 4 to 8, and allow env var override (#18748) ## Description If a DB has a large number of column families, and each of them receives a large amount of writes, there will be a large number of memtable flushes and compactions that need to be processed. Only allowing 4 concurrent processing per DB seems not enough, as observed on some column families stopping writes because of too many memtables pending to flush. The default is increased to 8. Allow the value to be overridden by env var, so operators can increase / decrease the value on their own. Remove the `set_max_background_jobs()` call since its effect is overwritten by the subsequent `increase_parallelism()` call. ## Test plan Private testnet. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cb0417438e9b343faecdbac059a5873d7e313424 Author: Anastasios Kichidis Date: Mon Jul 22 13:33:53 2024 +0100 [Consensus 2.0] Recover from amnesia - part 2 (#18190) ## Description The second part of the recovering from amnesia feature. 
This PR is implementing the logic to fetch the last own block from the peer network in the synchronizer. It is also adding several tests to confirm the behaviour correctness. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 62019420588d8b7c9fcc9c6a34edc200e57193b8 Author: Anastasios Kichidis Date: Mon Jul 22 12:24:23 2024 +0100 [Consensus] reduce connection shutdown grace period (#18687) ## Description Currently in testnet we seem to frequently hit the `5 second` connection shutdown timeout which seems to stall even more the epoch change. We can lower the grace period (similar to what used in Anemo - 1 second) . ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 7bc276d534c6c758ac2cfefe96431c2b1318ca01 Author: benr-ml <112846738+benr-ml@users.noreply.github.com> Date: Sun Jul 21 07:42:05 2024 +0300 [beacon] Print a fixed error message instead of a panic (#18751) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit d42ec4894125ff947cc122cd1125b3519b467e19 Author: William Smith Date: Fri Jul 19 22:14:53 2024 -0400 [TrafficControl] Support multi-hop forwarding (#18699) ## Description Sometimes there can be more than one proxy between the client and the server. In such cases, the `x-forwarded-for` header is appended with the full path of IPs, ostensibly with the client IP being the first. For this reason, we need to fix the parsing to accommodate a list of IPs. However, we additionally have the problem of knowing which IP in the list is the client IP. We cannot assume the first IP in the list is the client IP, as a malicious client could attempt to spoof by writing a junk value in `x-forwarded-for` in the request, to which the internal LBs would append. There are two possible solutions to this: a) define a custom header and configure our gcs LBS to write to this and overwrite any existing value for that header. b) add configuration such that the node operator defines the number of intermediate load balancers (`num_hops`) it runs. In such a case, for a `num_hops = N`, the client IP should always be the `N`th **to last**. To illustrate (note that for N proxies, only the IP addresses of proxy 1, ..., N-1 will be contained in the header, as proxy N is directly connected to the server and does not write its own IP): ``` [ , , <-- we want this <1>, ..., ] ``` The first solution would require extra configuration on their infra by the node operator, and would be specific to gcs (we'd have to confirm that something similar is supported in other cloud hosting services). 
So this PR implements option (2). ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 4f5dc220e852820a03d8e3d63ba2dc7a4b68edc3 Author: Adam Welc Date: Fri Jul 19 19:03:49 2024 -0700 [move-compiler] Added parsing resilience for name access chains (#18706) ## Description This adds parsing resilience for name access chains needed for IDE-level auto-completion ## Test plan All old and new tests must pass --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Additional compiler errors for incomplete name access chains (e.g., `some_pkg::some_module::`) may appear in the compiler output - [ ] Rust SDK: commit c16607953ba0f369d2929ec3a080d0596aa2c879 Author: Xun Li Date: Fri Jul 19 17:30:14 2024 -0700 [tx-finalizer] Cap validator wait time (#18739) ## Description This PR adds a cap to how long a validator can wait before waking up. It makes it slightly easier to reason about the max amount of time these kinds of threads will stay alive. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e23823d9334f209050e65a01f833b1b07db99c4a Author: Xun Li Date: Fri Jul 19 16:44:40 2024 -0700 Only spawn validator tx finalizer for newly signed tx (#18746) ## Description This PR moves the validator tx finalizer into AuthorityState, since there we have the most accurate information on whether a transaction is newly signed. Only spawn a thread if it's newly signed. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e967cb7abe709d6b98e8c92b9ee460e8b6d17e26 Author: Tim Zakian <2895723+tzakian@users.noreply.github.com> Date: Fri Jul 19 16:42:07 2024 -0700 [move] Add printout of input commands in transactional test runner (#18601) ## Description Adds a printout of the command run in the expected files for the Move transactional test runner, and also adds a way for custom subcommands to change this (and this is then used for PTBs in transactional tests). The bottom commit contains the actual changes, and the top commit contains all of the changes to expected value files. ## Test plan Update existing tests with the new output. 
commit 05cc24936158e0cc10191baeb39008cf4eb8cacd Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Jul 19 15:50:05 2024 -0700 [bridge] add url update event in rust (#18734) ## Description as title. ## Test plan unit test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 3781c364f972e4b16a301127bc587f873f647861 Author: Nikhil-Mysten <128089541+Nikhil-Mysten@users.noreply.github.com> Date: Fri Jul 19 18:27:58 2024 -0400 [wallet]: add token metadata overrides (#18726) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit e7d196eb3c342af2d0efc6f7dc7d5698bce434fb Author: Ge Gao <106119108+gegaowp@users.noreply.github.com> Date: Fri Jul 19 16:47:06 2024 -0400 indexer pruner: add pruner and prune unpartitioned tables (#18495) ## Description a separate pruner that use `epochs` table and `cp_tx` table to track progress of pruning and handle disaster recovery; and `checkpoints` table as available range data source. 
## Test plan local run and verify via local client - progress tracking via epoch, cp, and tx ![epoch_progress](https://github.com/MystenLabs/sui/assets/106119108/f628c7b5-add7-4198-94e3-f2542dfd7890) ![tx_cp_progress](https://github.com/MystenLabs/sui/assets/106119108/4395ff7d-b79b-4a6d-aec6-243ac5c86c22) - verify on checkpoints & tx_ tables - make sure that the checkpoints table is indeed latest cp_tx cp + 1 ![check_cp](https://github.com/MystenLabs/sui/assets/106119108/13de7129-16b6-44e9-a922-c487100cb152) - make sure that all cp < min_tx in cp_tx have been pruned ![check_tx](https://github.com/MystenLabs/sui/assets/106119108/0708e667-135a-44be-be77-b1eb667ba640) --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 090bec4cd3733ad0e8bb835e260cc731c7c90003 Author: Cam Swords Date: Fri Jul 19 12:40:21 2024 -0700 [move] Update external workflow to check formatting for _all_ packages (#18738) ## Description This updates the external workflow to check all crates. It appears that, without this, `cargo` will not check crates with `publish = false` by default. ## Test plan This updates tests to ensure formatting is applied uniformly in the external crates, plus a few commits explicitly tested behavior. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 77bae85153529082e6c3e73b8dd55f3f5daa8bea Author: Andrew Schran Date: Fri Jul 19 15:28:31 2024 -0400 Ignore randomness RPCs from byzantine peers (#18690) commit d0e250aa5eb8b191fcb8004600607a0c5e3c2cae Author: Xun Li Date: Fri Jul 19 11:11:49 2024 -0700 Increase timeout (#18736) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit cd69f3cd643530d065a521d6dd9de5c7ffd4d4f6 Author: Cam Swords Date: Fri Jul 19 11:06:11 2024 -0700 [move][move-2024] Revise some visibility errors, add macro information (#18735) ## Description This adds information about macro expansion to visibility errors, when appropriate. It also slightly reforms how visibility errors for data structures are reported to unify reporting them and function visibility errors. ## Test plan Updated tests, plus some new ones. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 031f6b8a02bd776b0f9f414a4f5526ee71190fe0 Author: Eugene Boguslavsky Date: Fri Jul 19 11:00:08 2024 -0700 Update suiop-cli-binaries workflow (#18732) ## Description Update dispatch event-type to be more specific. See https://github.com/MystenLabs/sui-operations/pull/3967 ## Test plan 👀 commit 3e0a4ec98cf04239c10fdb91991f958e1412c8a4 Author: benr-ml <112846738+benr-ml@users.noreply.github.com> Date: Fri Jul 19 20:13:26 2024 +0300 [beacon] Remove dkg v0 (#18560) ## Description Remove the support of dkg v0 which is no longer used in any network ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 7187fc18171ed0f22848f36b9ae6d0f290cf2250 Author: Cam Swords Date: Thu Jul 18 22:48:56 2024 -0700 [move][ide] Revise IDE path information to always include all possible names. (#18733) ## Description This revises IDE path information to include _all_ available information, not just information based on the position of the name. ## Test plan A new test, plus updated other tests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 92d112757e99088135fa87851785214273431bfb Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Jul 18 20:17:16 2024 -0700 [Metric Checker] Take specific percentiles for range queries (#18728) ## Description This allows the following: 1. Check p95 or even p99 in the time range for p50 latency and TPS. 2. Check p50 in the time range for p95 latency. Config will be modified separately. ## Test plan Nightly run --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 2c1733108b5f5e9ffd85769ed39d3571ea324b38 Author: Cam Swords Date: Thu Jul 18 19:39:26 2024 -0700 [move][move-2024] Revised index syntax typing to avoid crash (#18696) ## Description This fixes an issue reported around crashing when there are errors in index calls that then chain to method calls. ## Test plan Repros added to the compiler suite --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 1e65d62f745102f353c22183efe8ecceb8c62efd Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Jul 18 19:36:03 2024 -0700 Monitor thread stalls in sui node (#18700) ## Description Assuming when tokio scheduling stalls temporarily, tasks are not woken up after sleeps. A dedicated task can be used to monitor this issue. This mechanism will stop reporting when the system is completely frozen, which has been observed once before. This issue will require a separate std::thread to monitor, which can be added if needed in future. ## Test plan TODO --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 4dffddbfb51b0aaa70dd35d06fdaec6e38557b29 Author: Zhe Wu Date: Thu Jul 18 18:02:40 2024 -0700 Create a separate congestion control consensus commit limit for Mysticeti (#18648) ## Description Since mysticeti has different commit rate than Narwhal, we want to use different transaction count limit for shared object congestion control in consensus commit. Therefore, I created a different config for mysticeti, and the consensus handler will choose to use different limit based on the current consensus algorithm. We also turn on shared object congestion control on testnet. ## Test plan simtest updated cluster testing --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Eugene Boguslavsky commit b6775051d8bd8218a60ba74ce533350703e203c1 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Thu Jul 18 15:49:11 2024 -0700 Tweak antithesis configs (#18725) ## Description - Reduce # of commands in each batch payment txn and its TPS to 1. - Reduce TPS of shared and owned obj txns to 1 as well, since sui surfer is already running. - Update logging config for consensus. ## Test plan https://github.com/MystenLabs/sui-operations/actions/runs/9997864674 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 72bc96f5265288a3b4a7ae5425b260ab5041bcbb Author: Zihe Huang Date: Thu Jul 18 15:22:32 2024 -0700 [docs] update sui move cli options (#18724) ## Description Update the Sui Move CLI docs to have the latest CLI options ## Test plan manual --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 5e3f38cfce10138ff3d1e3c72d392890b5bbda2d Author: Xun Li Date: Thu Jul 18 11:55:05 2024 -0700 [tx-finalizer] Add validator leader delay (#18672) ## Description Add incremental delays for validators so that they wake up at different times for efficiency. ## Test plan Added a test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 61b00f5bac218d4ffb6c618b0371b3028a2d0106 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Jul 18 11:47:24 2024 -0700 [bridge] add metrics for SignerWithCache (#18685) ## Description as title. `SignerWithCache` today already guarantees single processing of one request (if they are received in a short period of time and not evicted in cache). In this PR we add hit and miss metrics for us to understand how well it works. https://github.com/MystenLabs/sui/blob/main/crates/sui-bridge/src/server/handler.rs#L152 ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit cc64c377a6653287dbf9d20bca253d13bae3f65d Author: Xun Li Date: Thu Jul 18 11:13:24 2024 -0700 Integrate validator tx finalizer (#18681) ## Description This PR integrates the validator tx finalizer with the validator service, which is then invoked after handling a transaction. A node config is used to control whether an instance of the service is created when the service starts. It is enabled on devnet and testnet. We will enable for mainnet later when we feel confident. ## Test plan Added e2e simtests. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 10beaec2e58d8071e623f694b9dbf46e0a0bcf2b Author: Calvin Li Date: Fri Jul 19 01:16:31 2024 +0800 Update sui-bridging.mdx for the correct anchor link (#17633) ## Description In line 7: Sui supports bridging through .... and [Wormhole Portal Bridge](#wormhole-portal-bridge). Updating line 27 to make the anchor link match with line 7, or else the section jump from line 7 would fail. ## Test plan N/A --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 6a5e72e60cc33954010f7f402ddc35c129e1546a Author: benr-ml <112846738+benr-ml@users.noreply.github.com> Date: Thu Jul 18 19:59:55 2024 +0300 Update fastcrypto (#18722) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 9cdfc6d64af5a6788f51abd994848a0a41af1bef Author: Ashok Menon Date: Thu Jul 18 17:17:02 2024 +0100 [Resolver/Inputs] Support layout inference for overlapping inputs (#18720) ## Description From the perspective of input layout calculation, it's fine for a pure input to be referred to multiple times in a PTB, as long as those usages are consistent (all refer to those pure bytes as being the same type). ## Test plan New unit test: ``` sui-package-resolver$ cargo nextest run ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [x] JSON-RPC: Bugfix for displaying PTBs where a pure input has been used multiple times. 
- [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 66b19f7d84794010b673ea662a2ac43cdf7d1e0e Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Jul 17 21:27:30 2024 -0700 Remove deleted protocol fields from older snapshots to avoid spurious failures (#18715) - Remove bad changes from cargo insta - Remove deleted protocol fields from older snapshots to avoid spurious failures commit 9417279fa92a43435b4de1e60974c1e1ab388b33 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Wed Jul 17 17:01:00 2024 -0700 Compute median instead of average for range queries in metrics checker (#18708) ## Description This should tolerate temporary spikes better. If we want to check to ensure there are no spikes, we can create a new query type in future. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: - [ ] REST API: commit 30e3c5fba13f9c0e238ab58acb2ad704dc3665db Author: Ashok Menon Date: Wed Jul 17 23:41:55 2024 +0100 [Disassembler] Fix string contraction logic ## Description Always pick whole UTF8 characters. ## Test plan Tested against a package containing a UTF8 constant. commit 3959d9af51172824b0e4f20802c71e416596c7df Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Jul 17 13:59:36 2024 -0700 Include digest of ProtocolConfig when voting for a version (#18675) Add AuthorityCapabilitiesV2, which includes the digest of the protocol config for each version. 
This makes it impossible for the committee to approve a protocol upgrade if (due to a release mistake) there is disagreement about what is in the config. commit 6754bf86ee6b8a25dd4b3b08ac140f69fe95830a Author: Andrew Schran Date: Wed Jul 17 16:58:08 2024 -0400 disable RSU timeout panic for non-debug builds (#18705) commit 77929e73c34d6256c3cd26b79f0afdd2eb92d857 Author: Eugene Boguslavsky Date: Wed Jul 17 13:38:15 2024 -0700 Add REST API section to release notes (#18707) ## Description Add REST API section to release notes ## Test plan 👀 commit 0ff46cd6fd532fa4e3383e09e52e654334f91166 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Jul 17 11:31:05 2024 -0700 Record batch writes and inserts that are slower than 1s (#18698) We have seen situations where rocksdb throttling briefly causes all threads to block while attempting to write to a throttled table. However, it is not clear that very rare events such as these will show up in histograms. By tracking them in counters we can see definitively how often and when these events occur. commit 6e79fda57ac18a4c42d4324afaf54d2e2842356b Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Wed Jul 17 11:29:41 2024 -0700 Improve tracing for TransactionOrchestrator, QD, and AuthAgg (#18689) tracing for an execute transaction attempt now looks like: image commit ddf42d3d61e3f5ef1dfcb1f6c4d13c8708c8eb23 Author: sui-merge-bot[bot] <114704316+sui-merge-bot[bot]@users.noreply.github.com> Date: Wed Jul 17 18:23:08 2024 +0000 Version Packages (#18555) This PR was opened by the [Changesets release](https://github.com/changesets/action) GitHub action. When you're ready to do a release, you can merge this and publish to npm yourself or [setup this action to publish automatically](https://github.com/changesets/action#with-publishing). If you're not ready to do a release yet, that's fine, whenever you add more changesets to main, this PR will be updated. 
# Releases ## @mysten/sui@1.3.0 ### Minor Changes - 086b2bc: Add waitForLastTransaction methods to all executor classes - cdedf69: Add Argument helpers for constructing transaction arguments without a Transaction instance - beed646: Add tx.pure.vector and tx.pure.option methods ### Patch Changes - 7fc464a: Remove unique symbols from types to improve compatability between version - 0fb0628: Mark subscription methods as deprecated. - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [0f27a97] - @mysten/bcs@1.0.3 ## @mysten/bcs@1.0.3 ### Patch Changes - 7fc464a: Remove unique symbols from types to improve compatability between version - 0f27a97: Update dependencies ## @mysten/create-dapp@0.3.12 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 - @mysten/dapp-kit@0.14.12 ## @mysten/dapp-kit@0.14.12 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 - @mysten/wallet-standard@0.12.12 - @mysten/zksend@0.10.1 ## @mysten/deepbook@0.8.11 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 ## @mysten/enoki@0.3.11 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 - @mysten/zklogin@0.7.11 ## 
@mysten/graphql-transport@0.2.11 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 - @mysten/bcs@1.0.3 ## @mysten/kiosk@0.9.11 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 ## @mysten/ledgerjs-hw-app-sui@0.4.1 ### Patch Changes - 0f27a97: Update dependencies ## @mysten/suins-toolkit@0.5.11 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 ## @mysten/wallet-standard@0.12.12 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 ## @mysten/zklogin@0.7.11 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 - @mysten/bcs@1.0.3 ## @mysten/zksend@0.10.1 ### Patch Changes - 0f27a97: Update dependencies - Updated dependencies [7fc464a] - Updated dependencies [086b2bc] - Updated dependencies [0fb0628] - Updated dependencies [cdedf69] - Updated dependencies [0f27a97] - Updated dependencies [beed646] - @mysten/sui@1.3.0 - @mysten/wallet-standard@0.12.12 Co-authored-by: github-actions[bot] commit 
beed646993dc128ab5eb45eba7b3ade08e323a5f Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Wed Jul 17 11:07:27 2024 -0700 add tx.pure.vector and tx.pure.option (#18561) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 14ea5c4b6b5aec1955b4956b326ba5da177d3aa5 Author: Zhe Wu Date: Wed Jul 17 11:03:24 2024 -0700 Enable consensus commit prologue V3 on mainnet (#18691) ## Description To be out in 1.30 ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 086b2bc40bca2f59f3a09cd08654cd1693ba0910 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Wed Jul 17 10:03:17 2024 -0700 Add waitForLastTransaction to all executor classes (#18697) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit cdedf69b76d173c8534c575b89fabcabb279ec3f Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Wed Jul 17 10:03:00 2024 -0700 [ts sdk] Add Arguments export for constructing Transaction arguments … (#18540) …without a Transaction instance ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit ccbb3c2dc38aa8cfa59c3c63f8aa33e44418ef28 Author: Zoe Braiterman Date: Wed Jul 17 12:56:49 2024 -0400 Grammar fix on the Wallet Standard page (#18703) ## Description Fix grammar on a doc page. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit c718a3687587b992642d9219d4b9f88cc276761f Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Wed Jul 17 09:44:21 2024 -0700 remove sui-common which was accidentally added (#18686) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit d019f53f01d9f7d04cae02bc8574ec12c2033d2c Author: Patrick Kuo Date: Wed Jul 17 16:20:04 2024 +0100 [bridge indexer] - record bridge txn errors (#18510) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit afc4197ed0fbf3538345bf5a0183e3266205942c Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Tue Jul 16 18:08:17 2024 -0700 [DB] disable write stalling and stopping due to pending compaction bytes for index (#18093) ## Description Index db cfs can become large and require compactions. 
Write throttling based on compaction bytes can happen after initial ingestion of data, and is hard to investigate. Disable write throttling for index cfs. ## Test plan Deploy to mainnet fullnode. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 2a55a068d89851214016d28f64efd33b24fb1401 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Tue Jul 16 17:13:54 2024 -0700 Add tic-tac-toe example to workspace (#18616) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 5ec703d6bf9a8731e3f0ebb3d984f4520e1e3ed9 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Tue Jul 16 16:13:28 2024 -0700 change protocol version 54 to 53 (#18695) ## Description the next version should be 53 instead of 54. Also update MAX_VERSION ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 0252a76d7d574318b19146da66fa34c42828a0de Author: Zhe Wu Date: Tue Jul 16 15:20:52 2024 -0700 Add a metric to track per object cost in consensus commit for congestion control (#18676) ## Description Also some chore to add txn execution status in workloads. ## Test plan Unit test --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 39a4eb01b675c11040551cdc8f79345a8426e2bc Author: Cam Swords Date: Tue Jul 16 14:38:01 2024 -0700 [move][move-ide] Colon/Alias Autocompletion (#18108) ## Description This records autocomplete information for alias resolution positions. ## Test plan Updated test expectations --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 4ba06ed7463743fa46f6b5879b7cd1f4b76da2dc Author: Ashok Menon Date: Tue Jul 16 17:02:40 2024 +0100 [GraphQL/Limits] Reimplement QueryLimitsChecker (#18666) ## Description Rewriting query limits checker to land a number of improvements and fixes: - Avoid issues with overflows by counting down from a predefined budget, rather than counting up to the limit and protecting multiplications using `checked_mul`. - Improve detection of paginated fields: - Previously we treated all connections-related fields as appearing as many times as the page size (including the field that introduced the connection, and the `pageInfo` field). This over-approximated the output size by a large margin. The new approach counts exactly the number of nodes in the output: The connection's root field, and any non-`edges` or `nodes` field will not get multiplied by the page size. - The checker now also detects connections-related fields even if they are obscured by fragment or inline fragment spreads. - Tighter `__schema` query detection: Previously we would skip requests that started with a `__schema` introspection query. Now it's required to be the only operation in the request (not just the first). - Fix metrics collection after limits are hit: Previously, if a limit was hit, we would not observe validation-related metrics in prometheus. Now we will always record such metrics, and if a limit has been hit, it will register as being "at" the limit. ## Test plan ``` sui-graphql-e2e-tests$ cargo nextest run --features pg_integration -- limits/ ``` ## Stack - #18660 - #18661 - #18662 - #18663 - #18664 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Output node estimation has been made more accurate -- the estimate should now track the theoretical max number of nodes on the JSON `data` output. - [ ] CLI: - [ ] Rust SDK: commit e397c2f23a0498446032b0fa5cd6f7572f928bc5 Author: Ashok Menon Date: Tue Jul 16 15:00:02 2024 +0100 [GraphQL/Limits] Separate out directive checks (#18664) ## Description Trying to do the directive checks at the same time as the query checks complicates both implementations. Split out the directive check into its own extension. Also fix the directive checks not looking at directives on variable definitions. ## Test plan ``` sui-graphql-e2e-tests$ cargo nextest run \ --features pg_integration \ -- limits/directives.move ``` ## Stack - #18660 - #18661 - #18662 - #18663 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: The service now detects unsupported directives on query variable definitions. - [ ] CLI: - [ ] Rust SDK: commit 6756fa78b101c6a1501093c255f2b5d535949fb1 Author: Ashok Menon Date: Tue Jul 16 13:33:29 2024 +0100 [chore][GraphQL/Limits] Standardise query_limits_checker file order (#18663) ## Description This is a file order change only, with no other meaningful changes. 
It standardises the order to: - Constants - Types - Impls - Trait Impls - Free functions ## Test plan CI ## Stack - #18660 - #18661 - #18662 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 0ab05e42b7a40a11eb653c7741d04ce371224016 Author: Ashok Menon Date: Tue Jul 16 13:13:09 2024 +0100 [chore][GraphQL/Limits] Clean up headers (#18662) ## Description Two header related clean-ups: - The version header should no longer be part of the CORS configuration, because we don't expect versions to be configured by (request) header. - The `ShowUsage` type doesn't need to implement `Header` because we only care about comparing its name. ## Test plan CI ## Stack - #18660 - #18661 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: `x-sui-rpc-version` is no longer an accepted request header, as versions are now selected by modifying the path. - [ ] CLI: - [ ] Rust SDK: commit 77dc88eb2824fd5700c2548da2c8e98d7e95efff Author: Ashok Menon Date: Tue Jul 16 13:13:01 2024 +0100 [GraphQL/Limits] BUGFIX: directives test error type (#18661) ## Description Passing an unsupported directive should be a user input error, not an internal error. 
## Test plan ``` sui-graphql-e2e-tests$ cargo nextest run --features pg_integration -- limits/directives ``` ## Stack - #18660 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Passing an unsupported directive to the service will be treated as a `BAD_USER_INPUT` rather than an `INTERNAL_SERVER_ERROR`. - [ ] CLI: - [ ] Rust SDK: commit 3731f2887c9619751677255ec1325a5467b341b7 Author: Ashok Menon Date: Sat Jul 13 14:52:49 2024 +0100 [chore][GraphQL/Limits] Separate QueryLimitChecker extension/factory (#18660) ## Description Split up the extension factory from the extension itself, similar to what we did for `Timeout` before. This avoids the confusion of the single type being created with defaulted fields to act as the factory and then creating new versions of itself to act as the extension. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit cb2f63e6f31cbc37a68447bc991f6198d6f2ed07 Author: Xun Li Date: Tue Jul 16 09:14:04 2024 -0700 Fix flaky validator_tx_finalizer tests (#18688) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. 
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit c11b6173cc738c106d9d11e70f80fc07d119aaad Author: Daniel Leavitt <71237296+dantheman8300@users.noreply.github.com> Date: Tue Jul 16 08:14:41 2024 -0700 [docs] Update coin flip app example (#18623) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit bd45b28d68f4f7ce5780e3b74a9f3906e031d719 Author: Anastasios Kichidis Date: Tue Jul 16 13:48:00 2024 +0100 [Consensus] disable scheduled synchronizer when commit lag (#18485) ## Description The PR disables the scheduled synchronizer when we detect local commit to lag compared to the quorum index, same as we do during block processing. It has to be noted that we do not disable the live synchronizer as this will normally be taken care of from the `authority_service` block processing path. 
With this change we'll avoid issues that have been observed during crash recovery (or even lagging nodes) where some blocks are received (until the commit voter finally gathers a quorum and cut off the block processing path) and trigger the block synchronization attempting to complete the causal history for the received blocks leading to a big queue of suspended & missing blocks. ## Test plan CI/PT Testing on PT environment, on the first screenshot we can see the number of pending suspended blocks in block_manager when the synchronization is not disabled during crash recovery - which is constantly increasing. On the second screenshot we see that number does not increase while node recovers and remains constant: Screenshot 2024-07-04 at 17 56 39 Screenshot 2024-07-04 at 18 05 31 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit adaffe37f3fff7ed82cfbf5dc7cdf0b9a0833b2c Author: Alexandros Tzimas Date: Tue Jul 16 09:30:10 2024 +0300 Deprecate Sui_Owned_Object_Pools (#18657) commit 7fc464ae14e66db5da110da53e8f308ba17a937f Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Jul 15 21:17:07 2024 -0700 [ts sdk] Make symbol types not unique between versions (#18639) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit fedd75a7c3aa4ced553283e4dd6d9f5d7cf1cfa2 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Jul 15 21:02:25 2024 -0700 Make EpochFlag values explicit to avoid mistakes when cherry picking (#18683) This is intended to prevent mistakes such as the one being fixed by #18682 commit bf94b2fb37ee212f2a7c1185df5464082e9944c8 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Mon Jul 15 17:01:26 2024 -0700 Re-order flags to match order in v1.28 (#18682) commit 33baaa761f30cd96c26f24f5851f2227e866e3c1 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Mon Jul 15 15:58:20 2024 -0700 Add more addresses checks for validator tool (#18570) ## Description as title and self-descriptive. ## Test plan tested locally. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 0984a6bcde7b675f65b03659d21b3365566fcdd8 Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Jul 15 15:43:21 2024 -0700 Add metrics for batch verify and overload (#18680) ## Description Add a metric for time consumed during transaction verifications in Mysticeti. Add a metric for the specific overloaded component when transactions are rejected for signing / submission. 
## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 98b5d236f1226f8057f279050aee00f0e9f2098b Author: phoenix <51927076+phoenix-o@users.noreply.github.com> Date: Mon Jul 15 17:01:35 2024 -0500 [data ingestion] hybrid remote setup (#18677) adding support for hybrid setup, where indexer fetches checkpoint from a remote FN and falls back to a remote bucket if the FN's already pruned commit 6514dce4083bfc6de633902b5a80b0717d3f7209 Author: hayes-mysten <135670682+hayes-mysten@users.noreply.github.com> Date: Mon Jul 15 14:29:21 2024 -0700 avoid rebuilding sui cli during tests (#18678) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 77b6e6ec861786cb7857661c198614003441cb00 Author: Adam Welc Date: Mon Jul 15 14:11:47 2024 -0700 [move-ide] Added inlay type hints for unpacks (#18650) ## Description With this PR inlay type hints work for variant unpacks (they already worked for struct unpacks): ![image](https://github.com/user-attachments/assets/5bef6232-62c3-407a-8bcb-89504efb4a40) This includes nested patterns: ![image](https://github.com/user-attachments/assets/581af7ae-9471-426c-9db5-5376674f4539) This PR also changes the way that types are displayed for type hints - the fully qualified name seemed too long (particularly for functions with multiple parameters on the same line) and not really necessary (or redundant even) since you can hover over the hint to see the full type. ## Test plan All new and existing tests must pass commit 96a222b2261ccec16882cdc6fe0af689e8b7a113 Author: Andrew Schran Date: Mon Jul 15 16:14:23 2024 -0400 Cancel randomness-using tx on DKG failure instead of ignoring (#18511) commit 15acca373f829709a4e81206ae62be14688af534 Author: jk jensen Date: Mon Jul 15 11:54:58 2024 -0700 [suiop][incidents] add --with-priority flag (#18673) ## Description enable limiting incident results to those with a priority set via the `--with-priority`/`-p` flag. ## Test plan ``` » cargo run --bin suiop -- i r -p Finished dev [unoptimized + debuginfo] target(s) in 0.43s Running `/Users/jordankylejensen/mysten/sui/target/debug/suiop i r -p` 2433: (2d) P0 dummy incident 3135: (2d) P2 [FIRING:2] incident for something 3134: (2d) P2 [FIRING:2] redacted ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 14a139210664be00d96dee2580da514a57805727 Author: Andrew Schran Date: Mon Jul 15 14:16:52 2024 -0400 Add randomness-related admin handlers for disaster recovery use (#18500) commit 8aa6ba9cc4433eecc99acf6f335c989cdc78fee6 Author: Xun Li Date: Mon Jul 15 10:55:47 2024 -0700 Store AuthorityAggregator in sui-node (#18647) ## Description This PR moves the construction of AuthorityAggregator to sui-node and stores a reference there. This will make sure that all the metrics are ever only instantiated once (to avoid double register problem). It will also allow us to use AuthorityAggregator later in ValidatorTxFinalizer. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 7c143d566312377f0127b2712fb439f4805d066f Author: mwtian <81660174+mwtian@users.noreply.github.com> Date: Mon Jul 15 10:46:59 2024 -0700 Cleanup a few Narwhal references (#18671) ## Description Cleanup a few references to Narwhal. Remove consensus protocol choice from `ConsensusConfig`. This field is unused now. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. 
For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 6954c2048395ac5ab31ca00f478bd9d304944cc0 Author: pei-mysten <147538877+pei-mysten@users.noreply.github.com> Date: Mon Jul 15 10:03:51 2024 -0700 [script] no need for GITHUB_TOKEN because sui is a public repo (#18544) ## Description Remove the Auth header because `sui` is a public repo — no need for GITHUB_TOKEN ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit ad5d7b9d8ef143c171f6e344bf4330892217e36f Author: Zhe Wu Date: Mon Jul 15 08:27:40 2024 -0700 Update graphql e2e test to protocol version 51 (#18653) ## Description I wanted to update it to version 49, since there is a change related to consensus commit prologue V3. But since testnet now uses 51, I chose to use it here as well. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit a5a4ab37710120a31665448e2da6c1344ead1ea1 Author: Xun Li Date: Mon Jul 15 07:47:59 2024 -0700 [AuthAgg] Use epoch start state to construct AuthorityAggregator (#18649) ## Description This PR makes two changes: 1. The primary change is to be able to construct AuthorityAggregator from epoch start state, instead of reading system state from the store. This is much safer and sync way to do it in prod. 2. The secondary change is to extend AuthorityAggregatorBuilder to be able to build more aggregators, to simplify some of the code. ## Test plan CI --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 5c3ab8c170479145487ab1fd95caa3aa94b253bd Author: Joy Wang <108701016+joyqvq@users.noreply.github.com> Date: Mon Jul 15 10:17:00 2024 -0400 chore: add a new cognito tenant (#18635) ## Description This enables a new aws tenant for devnet for zklogin ## Test plan ``` # this runs a localnet from epoch 0 cargo build --bin sui RUST_LOG=info target/debug/sui start --force-regenesis --with-faucet # in different tab, this composes an auth url. this creates a deterministic way of getting JWT token with a deterministic nonce and ephemeral key with max epoch 10. Terminate this command. 
target/debug/sui keytool zk-login-sign-and-execute-tx --max-epoch 10 --network localnet --fixed Visit URL (AWS - Ambrus): https://ambrus.auth.us-east-1.amazoncognito.com/login?response_type=token&client_id=t1eouauaitlirg57nove8kvj8&redirect_uri=https://api.ambrus.studio/callback&nonce=hTPpgF7XAKbW37rEUS6pEVZqmoI # once you obtain the JWT token from redirect URL after id_token=xxxx (do not include the access_token), paste it to the following command (no need to change other params, this assumes you are using the fixed ephemeral key, the max epoch fixed at 10) target/debug/sui keytool zk-login-enter-token --parsed-token eyJraWQiOiJWSEFcL3ZZMWQyaDdYMzNEcFo2WkJEUmZuQ1NcL09JZ2lWN3RvQ2R4eUVVRFk9IiwiYWxnIjoiUlMyNTYifQ.eyJhdF9oYXNoIjoiZktkZ21iXzNOMW1KTGlWNFByRG9IUSIsInN1YiI6Ijc0YjhmNGI4LTYwNTEtNzAwNC0wOGUxLTNkNTQxOTE1MzExOCIsImlzcyI6Imh0dHBzOlwvXC9jb2duaXRvLWlkcC51cy1lYXN0LTEuYW1hem9uYXdzLmNvbVwvdXMtZWFzdC0xX3FQc1p4WXFkOCIsImNvZ25pdG86dXNlcm5hbWUiOiJnaGZqc2tkIiwibm9uY2UiOiJoVFBwZ0Y3WEFLYlczN3JFVVM2cEVWWnFtb0kiLCJhdWQiOiJ0MWVvdWF1YWl0bGlyZzU3bm92ZThrdmo4IiwiZXZlbnRfaWQiOiI4NmFkNGZhOC1kN2U0LTQ1ZGUtOTI5My1mMWE0YjAzYzkxNTciLCJ0b2tlbl91c2UiOiJpZCIsImF1dGhfdGltZSI6MTcyMDc5MTMzNiwiZXhwIjoxNzIwNzk0OTM2LCJpYXQiOjE3MjA3OTEzMzYsImp0aSI6ImEzM2U1OWM3LTA5ODAtNDk5ZC04YzQzLWEzZDY4NzM0YzI4MCIsImVtYWlsIjoibHVjaWVuQGFtYnJ1cy5zdHVkaW8ifQ.WmcTipaovAmGh9_095RbMZmiQom-rAeboxfWvQz9y5ym-wwMSCL63uwihrLtE1JVzOS_8Qk1dkTm_AoRSd4zIGxSCOUA3bHC3ekqOS5_McIfHfp6V0dLK67KmofKB7HzPFFY8tRh20jpVwcxVBpeuTojs3KFUJUIBFwwxe-pMz8--r62yzplv067sHL9UtoJ86KInQtJCceyk-EepuHisx7dFdspcyue2GpSvTCLYnkyjIJE5T6RUhldfAQOK0d6WNiBMq8MQgbsz8dOhpSWmZk-wa7uftMVZ0IG22MHqaqdaZxCwmmKbMTb3ACMG0dooVkAiclz8hxmP5IUHD4RcQ --max-epoch 10 --jwt-randomness 100681567828351849884072155819400689117 --kp-bigint 84029355920633174015103288781128426107680789454168570548782290541079926444544 --ephemeral-key-identifier 0xcc2196ee1fa156836daf9bb021d88d648a0023fa387e695d3701667a634a331f --network localnet ``` --- ## Release notes Check each box that your 
changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit bbbd374604f0981714d56bade7507143befd6d5a Author: UnMaykr <98741738+unmaykr-aftermath@users.noreply.github.com> Date: Mon Jul 15 18:40:58 2024 +0700 feat: add optional `root_version` to `Query.owner` (#18486) ## Description This adds an optional `root_version` argument to `Query.owner` as discussed in PR #17934. In summary: ``` `root_version` represents the version of the root object in some nested chain of dynamic fields. It allows historical queries for the case of wrapped objects, which don't have a version. For example, if querying the dynamic fields of a table wrapped in a parent object, passing the parent object's version here will ensure we get the dynamic fields' state at the moment that parent's version was created. If `root_version` is left null, the dynamic fields will be from a consistent snapshot of the Sui state at the latest checkpoint known to the GraphQL RPC. ``` ## Test plan Introduced new E2E tests: ``` sui-graphql-e2e-tests$ cargo nextest run --features pg_integration ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduces an optional `rootVersion` parameter to `Query.owner`. 
This can be used to do versioned lookups when reading dynamic fields rooted on a wrapped object or another dynamic object field. - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Ashok Menon commit ffd3231924e27f4099fefe380ef48b65885c0aac Author: Brandon Williams Date: Thu Jul 11 15:01:02 2024 -0500 chore: update fastcrypto and serde_with commit c2ef24af9af2692f1cedb87fb45e24ad60e3230d Author: Ashok Menon Date: Sat Jul 13 14:44:58 2024 +0100 [GraphQL][RFC] Introduce UInt53 scalar (#18552) ## Description Whilst working on #18337, I noticed that we were over-using the `Int` scalar -- using it to represent values that could exceed 2^31 - 1 -- when the GraphQL spec states that `Int` must be a 32-bit signed integer. We made this decision at the time (a) because `async-graphql` allowed converting `u64`s to `Int` and we were primarily concerned with the fact that although JSON doesn't specify a precision for its numeric types, JS (among other languages), assumes it is an IEEE double-precision floating point number, so can only represent integral values precisely below 2^53. `cynic` (a Rust GraphQL client library) is (correctly) stricter, however, and maps an `Int` to an `i32`, always. There may be other similarly strict client libraries for other languages. This PR introduces a new scalar, `UInt`, that maps to a JSON number literal, just like `Int`, but allows us to ascribe our own meaning (in this case, it will be an unsigned number, and it can be as large as 2^53). This scalar has been used in many cases where we had previously used `Int`: sequence numbers, counts of objects, checkpoints, transactions, etc. While other uses continue to use `Int` (pagination limits, service limits, values bounded by the number of validators). 
In some cases, we have switched from `BigInt` to using this scalar notably: - the db cost estimate, which was previously a `BigInt` because we were unsure of its scale, but in hindsight, after benchmarking, it is unlikely that we would want to set a limit greater than 2^31 - 1. - the number of checkpoints in an epoch, as the number of transactions in an epoch (a number that is guaranteed to be greater) is being represented using an `Int` at the moment (and soon a `UInt53`). This will be a breaking change, so will only go out with the new major version. Hopefully, this change will be minimal as the format of this scalar over the wire is the same as for `Int`, but it will require existing clients to register a new scalar in most cases. ## Test plan Existing tests: ``` sui-graphql-rpc$ cargo nextest run sui-graphql-e2e-tests$ cargo nextest run --features pg_integration ``` --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [x] GraphQL: Introduces a new scalar -- `UInt53` -- to represent unsigned 53 bit integer values. Some uses of `Int` in the existing schema have been replaced with `UInt53`. All clients will need to register the new scalar and clients for statically typed languages will also need to use a wider (e.g. 64 bit), unsigned type to hold the value. - [ ] CLI: - [ ] Rust SDK: commit 883b4c017e3f30c6dcdb63a982a533c7fb08e678 Author: Cam Swords Date: Fri Jul 12 17:57:50 2024 -0700 [move][move-2024] Extract macros from precompiled programs for expansion. (#18643) ## Description This extracts macros from the precompiled program so that macro expansion works for them, too. 
## Test plan Updated a test to use `vector`'s `do!`. It failed before the fix, and works now. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 0144a2448216a6a856831898f1f4de24e9d1c216 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Jul 12 14:39:46 2024 -0700 Fix build (#18645) commit 2f71d9d79b78627901cafe51dfaffbecafab3dce Author: Ashok Menon Date: Fri Jul 12 22:20:13 2024 +0100 [Examples/Move] Delete `sui_programmability` (#18612) ## Description Remove the `sui_programmability` folder as all examples have been ported and modernised to `examples/move`, or elsewhere. ## Test plan CI ## Stack - #18525 - #18526 - #18557 - #18558 - #18595 - #18609 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Ronny Roland commit d43a7addde2f2f538bd2558361fa0e1aad5de7e8 Author: Ashok Menon Date: Fri Jul 12 22:16:39 2024 +0100 [Examples/Move] Port remaining Crypto Sui Programmability Examples (#18609) ## Description Port over the following examples from sui_programmability/examples: - crypto/sources/ecdsa.move - crypto/sources/groth16.move - ~games/sources/drand_lib.move~ - ~games/sources/drand_based_lottery.move~ - ~games/sources/drand_based_scratch_card.move~ - games/sources/vdf_based_lottery.move Modernising and cleaning them up in the process: - Applying wrapping consistently at 100 characters, and cleaning up comments. - Removing unnecessary use of `entry` functions, including returning values instead of transfering to sender in some cases. - Using receiver functions where possible. - Standardising file order and adding titles for sections. - Standardising use of doc comments vs regular comments. - Using clever errors. This marks the final set of examples to be moved out of sui-programmability, which will then be deleted. ## Test plan ``` sui-framework-tests$ cargo nextest run -- run_examples_move_unit_tests ``` ## Stack - #18525 - #18526 - #18557 - #18558 - #18595 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Ronny Roland commit aae6f5c37672095fb6e7ddc59683a52a92ff4f98 Author: Ashok Menon Date: Fri Jul 12 22:16:30 2024 +0100 [Examples] Remove references to `sui_programmability` in codebase (#18595) ## Description Replace all references to Move packages in `sui_programmability` with equivalents in `./examples/move`, in preparation for deleting the programmability directory. In the process, I also: - removed the tic-tac-toe example from the Sui SDK, as it has been replaced by a more full-featured E2E example. - ported some modernised versions of the `basics` packages into the new `examples/move/basics` for use in tests. ## Test plan CI and, ``` sui$ cargo simtest ``` ## Stack - #18525 - #18526 - #18557 - #18558 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Ronny Roland commit b6feedeb8a71e118498e8e9626073912f7c8b282 Author: Ashok Menon Date: Fri Jul 12 21:53:18 2024 +0100 [Examples/Docs] Tic-tac-toe (#18558) ## Description A README for the tic-tac-toe app example, derived from: https://github.com/MystenLabs/multisig_tic-tac-toe/blob/main/README.md And updates to the docs for the tic-tac-toe app example to reflect changes to modernise the guide and bring all its source code into the `sui` mono-repo. This is the last major example that lived in the `sui_programmability` directory. 
## Test plan :eyes: ## Stack - #18525 - #18526 - #18557 --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: Ronny Roland commit 389c3dc450f89fa2feea063dd6e0c7fc870bc498 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Fri Jul 12 13:48:31 2024 -0700 [codecov-ci] Disable debug symbols (#18641) ## Description Disable debug symbols when running `cargo llvm-cov` as it seems the CI run is OOM-ing when building `sui-e2e-tests`. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 260f1c867bbedee3fb51b1ad3b223f340be8aa26 Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Jul 12 13:11:15 2024 -0700 Move SupportedProtocolVersions to sui-types, rename AuthorityCapabilities -> AuthorityCapabilitiesV1 (#18583) Quick refactoring to prepare for adding AuthorityCapabilitiesV2, which will include the digest of ProtocolConfig for each supported version. 
commit 8e103afedf82493fe06eb30f52d28bbf25804f9f Author: Xun Li Date: Fri Jul 12 13:10:12 2024 -0700 [RFC][core] Add validator tx finalizer (#18542) ## Description The ValidatorTxFinalizer gets called whenever a transaction is signed on a validator. It would then sleep for a min, wake up and check if the tx has already been executed, if not, use authority aggregator to finalize it. ## Test plan Added unit tests to cover various cases: 1. Basic flow 2. Epoch change 3. Tx already executed --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 98a2d8d9c0eafce40072b0e162633e402786084e Author: Mark Logan <103447440+mystenmark@users.noreply.github.com> Date: Fri Jul 12 12:48:43 2024 -0700 Add check that all executed transactions are checkpointed (and vice versa) (#18622) commit 6b425c9718c9989c686c0c18368b411718136a07 Author: stefan-mysten <135084671+stefan-mysten@users.noreply.github.com> Date: Fri Jul 12 11:30:15 2024 -0700 [CLI] Make service hosts configurable in `sui start` (#18607) ## Description Make indexer, GraphQL, faucet hosts configurable. When using Docker, due to the way it sets up a network interface for the container, the services need to bind to 0.0.0.0 to be able to be accessed from outside the container. This PR enables configurable hosts for these three services via optional flags: - `--indexer-host 0.0.0.0` - `--faucet-host 0.0.0.0` - `--graphql-host 0.0.0.0` If no host flag is provided, it will use the default `0.0.0.0` one. 
In addition, I found a bug where if the `default_value` is not provided, calling `unwrap_or_default` will return value 0 (if that field is an int). For example, if we call `--with-graphql`, the indexer port would have been set to 0 because the `default_missing_value` is only set when the flag is passed, but not when it is not passed, which is the case here due `with-indexer` being implicit enabled. This supersedes #14701 and should close #14701. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [x] CLI: Changed `sui start` to allow configurable hosts for indexer (`--indexer-host `), GraphQL (`--graphql-host `), and faucet (`--faucet-host`) services. This will enable to use `sui start` from a Docker container. By default, all services start with `0.0.0.0` host. - [ ] Rust SDK: commit 3c9b596e039f5bc35c7f38006c6363c551a57314 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Fri Jul 12 11:02:15 2024 -0700 [bridge] use protocol parameter to determine when to finalize bridge committee `bridge_should_try_to_finalize_committee` (#18019) ## Description Updated: we decided to introduce a protocol parameter to determine when bridge committee happens. This gives us more flexibility to include validators. As a result, we don't need the original change of threshold any more. Together with the new parameter, the old default value 7500 still acts as the minimal voting power threshold ------------------ Old description: Previously the 75% minimal stake is too low to include most validators. This PR changes it to 90%. 
I have two commits in this PR: * the first commit sets the value by distinguish chain id - if it's testnet, then use 7500, otherwise 9000. This is safe because on mainnet we haven't enabled registration yet. * the second commit uses a different approach with protocol config. Basically it adds a `minimal_threshold_for_bridge_committee` field and is set to 90% after the added version. In this way we don't need to differentiate chain ids. It's safe for testnet because this value won't be needed post committee initialization. I like the 2nd commit better because the code is cleaner. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 9a6a6841029a9aaaaedb475fa996ddf12c9e938a Author: Andrew Schran Date: Fri Jul 12 13:34:01 2024 -0400 Send full sig if possible during randomness sig exchange (#18565) commit 67b18a5481ab4879af4abf54d28c98496af38dc5 Author: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> Date: Fri Jul 12 09:58:54 2024 -0600 [docs][site] Fixes small screen nav (#18546) ## Description Fixes broken nav on small screens. Jira [DOCS-383](https://mysten.atlassian.net/browse/DOCS-383) for more info. Also applies tailwind to some styles, which should eventually be used exclusively. [docs/site/src/components/GetStartedLink/index.tsx](https://github.com/MystenLabs/sui/pull/18546/files#diff-ed1e3e4e3d31a916f33152c76fae5faaa7fad41c38d7436f441e0d977f33ac72): Removed the logic that controls the display of the Get Started link on the home page to its own component. 
This should make changing the location the link navigates to easier and to change the logic behind visible/hidden and other updates. Should make updates easier because only need to change the component reference of the docusaurus component instead of the related logic. [docs/site/src/components/ThemeToggle/index.tsx](https://github.com/MystenLabs/sui/pull/18546/files#diff-3951ee509e2800c54d91ed3424884e59ce6fd2dd8a8d2e71963223728cdad832): Removed the logic for showing or hiding the default theme switcher from the default docusaurus component. Moved to its own component to better support docusaurus updates in the future. Also makes replacing the default switcher in the future easier. deleted components from `theme/Navbar`: These components were swizzled from docusaurus theme but only added a style in some cases and didn't change anything in others. The Navbar component was ejected with all its children even though it didn't need to be. The Navbar component (and its children) are labeled as `not safe` for swizzling. This means that updates to docusaurus can break any changes made to these components, so should only be ejected and changed when necessary and requires checking for updates. [docs/site/src/theme/Navbar/index.js](https://github.com/MystenLabs/sui/pull/18546/files#diff-a62ab467816eb68ede0469822b5ed6637ff9af29ab3c5cb3831ccf1b321d5db4): This component was ejected from Docusaurus 2.x. The changes here are just what docusaurus updated the component to. And now that I type this, I realize that I can just delete it so that the default component gets used instead of the custom one. This means only the default component `Navbar/Content` is changed by just adding references to the two components mentioned previously (as opposed to the 5 or 6 components that were ejected originally). Styles: Styles were changed where necessary. If a style was updated, then it was also updated to use tailwind syntax. 
Two styles were used to remove the need for swizzling default docusaurus components (which, as mentioned, comes at the cost of maintenance).
If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: Screenshot shows the current broken link/url: ![Screenshot 2024-07-12 003147](https://github.com/user-attachments/assets/d7cbf4ba-b54b-47d3-b568-f170b39f7f5c) commit 02599ed5c4e03845ca3ea06bc0a9ded10fc1aa52 Author: Rijnard van Tonder Date: Thu Jul 11 22:24:08 2024 -0700 sdk test setup: fix racy access on tmp config file (#18628) ## Description Some test setups call [`[setup(), setup()]`](https://github.com/MystenLabs/sui/blob/9c588e14f284a8feb10f29791f32ad6bb3f36ae4/sdk/typescript/test/e2e/coin-with-balance.test.ts#L22) which can cause a race on accessing the tmp `config.yaml` (which happens in the same tmp directory). This change ensures `setup()` is creates `config.yaml` in unique directories. This issue is probably what caused the flakiness in [CI](https://github.com/MystenLabs/sui/actions/runs/9897466376/job/27342099106#step:10:2279), note the: ``` Cannot open wallet config file at "/tmp/client.yaml ``` ## Test plan Tested locally that running the test creates separate directories and passes. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit f7a6a6c4b16e5a8c6544831935e7b13c55b35bd0 Author: Lu Zhang <8418040+longbowlu@users.noreply.github.com> Date: Thu Jul 11 22:11:21 2024 -0700 [bridge] support rank validators by voting power when requesting signatures (#18298) ## Description In this PR we introduce a way to order validators to request signatures according to their voting power. This is useful for signatures to be verified on ethereum because the # of signatures do impact the gas cost there significantly. ## Test plan deploying on a testnet bridge node to test. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit c108fd84af22410f6de69e36dc55a657cd682fa7 Author: Xun Li Date: Thu Jul 11 21:04:04 2024 -0700 [test] Simplify transaction orchestrator tests (#18632) ## Description There is already a orchestrator, no need to create new one in the test. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit 8cc4a8dcb3b2df8e59b34add5772a1c5cf28f79e Author: Todd Nowacki Date: Thu Jul 11 19:25:28 2024 -0700 [deny list v2] Added additional transactional tests (#18600) ## Description - Added transactional tests around specific underlying config behavior - Cannot yet add config specific tests as those APIs are not `public` ## Test plan - New tests --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --------- Co-authored-by: ronny-mysten <118224482+ronny-mysten@users.noreply.github.com> commit 5f7b89026c28d554e743f4df9e4ac96446b5ea04 Author: Eugene Boguslavsky Date: Thu Jul 11 19:13:40 2024 -0700 Sui v1.30.0 Version Bump (#18631) ## Description Sui v1.30.0 Version Bump ## Test plan 👀 commit e48c9c4095607e98432ab556804c4f67feab7835 Author: John Naulty Jr Date: Thu Jul 11 19:02:19 2024 -0700 build statically-linked sui-node for testing on glibc-based hosts (#18611) ## Description Update Deterministic Sui Build to compile a statically-linked binary. StageX uses `musl` for compiling `sui-node`. In order for this to run the `sui-node` binary directly on glibc-based linux macines, it must be statically linked. ## Test plan Cherry-pick commit to `testnet` branch and test on a private testnet. 
* build docker image * extract `sui-node` binary * confirm statically linked (run `file` on mac, `ldd` on linux) * upload to baremetal test server + run * check build is still deterministic from github action build, linux machine, macOS machine --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit a093c4a9662e20de994c246c9c6293e594b24e09 Author: Xun Li Date: Thu Jul 11 18:31:28 2024 -0700 Make sure AuthAgg construction cannot fail (#18617) ## Description Describe the changes or additions included in this PR. ## Test plan How did you test the new or updated feature? --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. 
- [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] Indexer: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: commit cf6d11e6474caf0b92b68a81eb12d2b5322a69cf Author: Xun Li Date: Thu Jul 11 11:18:50 2024 -0700 Refactor CommitteeWithNetworkMetadata --- .github/actions/diffs/action.yml | 5 + .github/workflows/ide-tests.yml | 91 + .vscode/extensions.json | 3 +- Cargo.lock | 1221 ++++++-- Cargo.toml | 34 +- .../connections/ContentScriptConnection.ts | 3 + .../dapp-interface/WalletStandardInterface.ts | 15 + .../src/shared/analytics/ampli/index.ts | 45 + apps/wallet/src/ui/app/pages/swap/index.tsx | 11 + consensus/core/Cargo.toml | 1 + consensus/core/src/core.rs | 12 + consensus/core/src/metrics.rs | 7 + consensus/core/src/network/tonic_network.rs | 45 +- consensus/core/src/network/tonic_tls.rs | 58 +- consensus/core/src/threshold_clock.rs | 2 +- crates/mysten-common/src/logging.rs | 5 +- crates/mysten-metrics/src/lib.rs | 12 +- crates/mysten-network/Cargo.toml | 3 + crates/mysten-network/src/client.rs | 87 +- crates/mysten-network/src/config.rs | 17 +- crates/mysten-network/src/server.rs | 227 +- crates/mysten-util-mem/Cargo.toml | 2 +- crates/simulacrum/src/lib.rs | 6 + .../tests/programmable/split_coins.exp | 2 +- crates/sui-archival/Cargo.toml | 4 +- .../sui-benchmark/src/drivers/bench_driver.rs | 90 +- crates/sui-benchmark/src/drivers/mod.rs | 8 + crates/sui-benchmark/src/lib.rs | 5 +- crates/sui-benchmark/src/options.rs | 7 + .../src/workloads/adversarial.rs | 6 +- .../src/workloads/batch_payment.rs | 6 +- .../sui-benchmark/src/workloads/delegation.rs | 6 +- .../src/workloads/expected_failure.rs | 265 ++ crates/sui-benchmark/src/workloads/mod.rs | 1 + crates/sui-benchmark/src/workloads/payload.rs | 5 +- .../sui-benchmark/src/workloads/randomness.rs | 5 +- .../src/workloads/shared_counter.rs | 7 +- .../src/workloads/shared_object_deletion.rs | 7 +- .../src/workloads/transfer_object.rs | 6 +- .../sui-benchmark/src/workloads/workload.rs | 57 + 
.../src/workloads/workload_configuration.rs | 170 +- crates/sui-benchmark/tests/simtest.rs | 177 +- crates/sui-bridge-cli/src/lib.rs | 23 +- crates/sui-bridge-cli/src/main.rs | 9 +- crates/sui-bridge-indexer/Cargo.toml | 5 +- crates/sui-bridge-indexer/README.md | 41 + crates/sui-bridge-indexer/src/config.rs | 3 - crates/sui-bridge-indexer/src/lib.rs | 174 +- crates/sui-bridge-indexer/src/main.rs | 158 +- crates/sui-bridge-indexer/src/storage.rs | 4 +- .../sui-bridge-indexer/tests/indexer_tests.rs | 174 ++ crates/sui-bridge-watchdog/Cargo.toml | 18 - .../sui-bridge-watchdog/eth_bridge_status.rs | 58 + .../sui-bridge-watchdog/eth_vault_balance.rs | 75 + crates/sui-bridge-watchdog/lib.rs | 62 + crates/sui-bridge-watchdog/metrics.rs | 41 + .../sui-bridge-watchdog/sui_bridge_status.rs | 48 + crates/sui-bridge/Cargo.toml | 7 +- crates/sui-bridge/src/action_executor.rs | 6 +- .../src/client/bridge_authority_aggregator.rs | 165 +- crates/sui-bridge/src/client/bridge_client.rs | 15 +- crates/sui-bridge/src/config.rs | 13 + crates/sui-bridge/src/e2e_tests/basic.rs | 373 +-- crates/sui-bridge/src/e2e_tests/complex.rs | 9 +- crates/sui-bridge/src/e2e_tests/mod.rs | 2 + crates/sui-bridge/src/e2e_tests/test_utils.rs | 367 ++- crates/sui-bridge/src/eth_client.rs | 4 + crates/sui-bridge/src/lib.rs | 7 +- crates/sui-bridge/src/metrics.rs | 17 + crates/sui-bridge/src/monitor.rs | 42 +- crates/sui-bridge/src/node.rs | 109 +- crates/sui-bridge/src/server/mod.rs | 2 +- .../sui_bridge_watchdog/eth_bridge_status.rs | 58 + .../sui_bridge_watchdog/eth_vault_balance.rs | 75 + .../src/sui_bridge_watchdog/metrics.rs | 52 + .../sui-bridge/src/sui_bridge_watchdog/mod.rs | 62 + .../sui_bridge_watchdog/sui_bridge_status.rs | 48 + .../src/sui_bridge_watchdog/total_supplies.rs | 65 + crates/sui-bridge/src/types.rs | 17 - crates/sui-bridge/src/utils.rs | 36 +- crates/sui-cluster-test/src/cluster.rs | 3 + crates/sui-config/src/local_ip_utils.rs | 11 +- crates/sui-config/src/node.rs | 9 +- 
crates/sui-core/Cargo.toml | 1 + crates/sui-core/src/authority.rs | 171 +- .../authority/authority_per_epoch_store.rs | 545 ++-- .../shared_object_congestion_tracker.rs | 509 +++- crates/sui-core/src/authority_client.rs | 37 +- crates/sui-core/src/authority_server.rs | 11 +- crates/sui-core/src/checkpoints/mod.rs | 68 +- crates/sui-core/src/consensus_handler.rs | 161 +- .../consensus_types/consensus_output_api.rs | 11 +- crates/sui-core/src/epoch/randomness.rs | 6 +- crates/sui-core/src/execution_cache.rs | 10 +- .../src/execution_cache/writeback_cache.rs | 34 +- crates/sui-core/src/execution_driver.rs | 27 +- crates/sui-core/src/generate_format.rs | 2 +- .../sui-core/src/post_consensus_tx_reorder.rs | 2 +- crates/sui-core/src/safe_client.rs | 13 +- .../sui-core/src/transaction_input_loader.rs | 35 +- crates/sui-core/src/transaction_manager.rs | 26 +- .../src/unit_tests/authority_tests.rs | 41 +- .../unit_tests/congestion_control_tests.rs | 17 +- .../src/unit_tests/consensus_tests.rs | 62 +- .../unit_tests/move_package_upgrade_tests.rs | 199 +- .../sui-core/src/unit_tests/server_tests.rs | 17 +- .../src/unit_tests/transaction_tests.rs | 108 +- crates/sui-data-ingestion-core/src/reader.rs | 40 +- crates/sui-data-ingestion-core/src/tests.rs | 2 +- crates/sui-data-ingestion/Cargo.toml | 1 + crates/sui-data-ingestion/src/main.rs | 16 + .../down.sql | 7 + .../up.sql | 30 +- crates/sui-deepbook-indexer/src/models.rs | 55 +- crates/sui-deepbook-indexer/src/schema.rs | 58 +- crates/sui-deepbook-indexer/src/server.rs | 134 +- .../src/sui_deepbook_indexer.rs | 86 +- crates/sui-deepbook-indexer/src/types.rs | 60 +- crates/sui-e2e-tests/Cargo.toml | 3 +- .../sui-e2e-tests/tests/passkey_e2e_tests.rs | 28 +- .../sui-e2e-tests/tests/rest/checkpoints.rs | 182 ++ crates/sui-e2e-tests/tests/rest/committee.rs | 65 + .../tests/{rest.rs => rest/execute.rs} | 0 crates/sui-e2e-tests/tests/rest/main.rs | 27 + crates/sui-e2e-tests/tests/rest/objects.rs | 79 + 
crates/sui-e2e-tests/tests/rest/resolve.rs | 437 +++ .../sui-e2e-tests/tests/rest/transactions.rs | 118 + .../tests/traffic_control_tests.rs | 57 + crates/sui-faucet/Cargo.toml | 1 + crates/sui-faucet/src/main.rs | 7 + crates/sui-faucet/src/metrics.rs | 52 +- crates/sui-faucet/src/metrics_layer.rs | 77 +- crates/sui-field-count-derive/Cargo.toml | 14 + crates/sui-field-count-derive/src/lib.rs | 29 + crates/sui-field-count-main/Cargo.toml | 7 + crates/sui-field-count-main/src/lib.rs | 6 + crates/sui-field-count/Cargo.toml | 11 + crates/sui-field-count/src/lib.rs | 5 + ...000000000000000000000000000000000000000001 | Bin 0 -> 14381 bytes ...000000000000000000000000000000000000000002 | Bin 0 -> 66985 bytes ...000000000000000000000000000000000000000003 | Bin 0 -> 44530 bytes ...00000000000000000000000000000000000000000b | Bin 0 -> 19826 bytes ...00000000000000000000000000000000000000dee9 | Bin 0 -> 33346 bytes ...000000000000000000000000000000000000000001 | Bin 0 -> 15602 bytes ...000000000000000000000000000000000000000002 | Bin 0 -> 66985 bytes ...000000000000000000000000000000000000000003 | Bin 0 -> 44530 bytes ...00000000000000000000000000000000000000000b | Bin 0 -> 19826 bytes ...00000000000000000000000000000000000000dee9 | Bin 0 -> 33346 bytes ...000000000000000000000000000000000000000001 | Bin 0 -> 15634 bytes ...000000000000000000000000000000000000000002 | Bin 0 -> 67504 bytes ...000000000000000000000000000000000000000003 | Bin 0 -> 44514 bytes ...00000000000000000000000000000000000000000b | Bin 0 -> 19826 bytes ...00000000000000000000000000000000000000dee9 | Bin 0 -> 33346 bytes crates/sui-framework-snapshot/manifest.json | 30 + .../sui-framework/docs/move-stdlib/vector.md | 4 +- .../docs/sui-framework/bls12381.md | 117 + .../docs/sui-framework/group_ops.md | 96 + .../docs/sui-framework/vec_map.md | 4 +- .../docs/sui-framework/vec_set.md | 2 +- .../docs/sui-system/stake_subsidy.md | 27 +- .../docs/sui-system/sui_system_state_inner.md | 27 +- 
.../docs/sui-system/validator.md | 5 +- .../docs/sui-system/voting_power.md | 2 +- .../packages/move-stdlib/sources/vector.move | 8 +- .../sources/crypto/bls12381.move | 24 + .../sources/crypto/group_ops.move | 11 + .../sui-framework/sources/vec_map.move | 4 +- .../sui-framework/sources/vec_set.move | 2 +- .../tests/crypto/bls12381_tests.move | 64 + .../sui-system/sources/stake_subsidy.move | 7 +- .../sui-system/sources/sui_system.move | 11 + .../sources/sui_system_state_inner.move | 37 +- .../sui-system/sources/validator.move | 5 +- .../sui-system/sources/voting_power.move | 2 +- .../tests/rewards_distribution_tests.move | 137 + .../sui-system/tests/sui_system_tests.move | 11 +- .../packages_compiled/move-stdlib | Bin 15569 -> 15601 bytes .../packages_compiled/sui-framework | Bin 66920 -> 67439 bytes .../packages_compiled/sui-system | Bin 44241 -> 44417 bytes crates/sui-framework/published_api.txt | 27 + .../tests/stable/packages/types.exp | 2 +- .../sui-graphql-rpc/src/test_infra/cluster.rs | 4 + .../types/move_registry/named_move_package.rs | 2 +- crates/sui-indexer-alt/Cargo.toml | 47 + crates/sui-indexer-alt/diesel.toml | 6 + crates/sui-indexer-alt/generate_schema.sh | 77 + .../down.sql | 6 + .../up.sql | 36 + .../2024-10-14-123213_checkpoints/down.sql | 1 + .../2024-10-14-123213_checkpoints/up.sql | 6 + .../2024-10-15-143704_objects/down.sql | 1 + .../2024-10-15-143704_objects/up.sql | 7 + .../2024-10-15-170316_transactions/down.sql | 1 + .../2024-10-15-170316_transactions/up.sql | 12 + .../down.sql | 1 + .../up.sql | 13 + .../down.sql | 1 + .../up.sql | 6 + .../2024-10-16-225607_watermarks/down.sql | 1 + .../2024-10-16-225607_watermarks/up.sql | 38 + .../2024-10-19-113135_ev_indices/down.sql | 2 + .../2024-10-19-113135_ev_indices/up.sql | 56 + .../2024-10-27-150938_sum_obj_types/down.sql | 1 + .../2024-10-27-150938_sum_obj_types/up.sql | 62 + .../down.sql | 1 + .../up.sql | 20 + .../2024-10-30-142219_obj_versions/down.sql | 1 + 
.../2024-10-30-142219_obj_versions/up.sql | 14 + .../2024-10-30-214852_wal_obj_types/down.sql | 1 + .../2024-10-30-214852_wal_obj_types/up.sql | 76 + .../down.sql | 1 + .../up.sql | 49 + crates/sui-indexer-alt/schema.patch | 7 + crates/sui-indexer-alt/src/args.rs | 37 + crates/sui-indexer-alt/src/db.rs | 158 ++ .../src/handlers/ev_emit_mod.rs | 62 + .../src/handlers/ev_struct_inst.rs | 66 + .../src/handlers/kv_checkpoints.rs | 43 + .../src/handlers/kv_objects.rs | 69 + .../src/handlers/kv_transactions.rs | 72 + crates/sui-indexer-alt/src/handlers/mod.rs | 15 + .../src/handlers/obj_versions.rs | 62 + .../src/handlers/sum_coin_balances.rs | 188 ++ .../src/handlers/sum_obj_types.rs | 184 ++ .../src/handlers/tx_affected_objects.rs | 65 + .../src/handlers/tx_balance_changes.rs | 105 + .../src/handlers/wal_coin_balances.rs | 59 + .../src/handlers/wal_obj_types.rs | 62 + .../src/ingestion/broadcaster.rs | 103 + .../sui-indexer-alt/src/ingestion/client.rs | 155 + crates/sui-indexer-alt/src/ingestion/error.rs | 25 + .../src/ingestion/local_client.rs | 65 + crates/sui-indexer-alt/src/ingestion/mod.rs | 432 +++ .../src/ingestion/regulator.rs | 257 ++ .../src/ingestion/remote_client.rs | 292 ++ .../src/ingestion/test_utils.rs | 56 + crates/sui-indexer-alt/src/lib.rs | 274 ++ crates/sui-indexer-alt/src/main.rs | 63 + crates/sui-indexer-alt/src/metrics.rs | 592 ++++ .../sui-indexer-alt/src/models/checkpoints.rs | 26 + crates/sui-indexer-alt/src/models/events.rs | 41 + crates/sui-indexer-alt/src/models/mod.rs | 8 + crates/sui-indexer-alt/src/models/objects.rs | 148 + .../src/models/transactions.rs | 69 + .../sui-indexer-alt/src/models/watermarks.rs | 117 + .../src/pipeline/concurrent/collector.rs | 181 ++ .../src/pipeline/concurrent/committer.rs | 182 ++ .../src/pipeline/concurrent/mod.rs | 153 + .../src/pipeline/concurrent/watermark.rs | 278 ++ crates/sui-indexer-alt/src/pipeline/mod.rs | 152 + .../sui-indexer-alt/src/pipeline/processor.rs | 128 + 
.../src/pipeline/sequential/committer.rs | 408 +++ .../src/pipeline/sequential/mod.rs | 114 + crates/sui-indexer-alt/src/schema.rs | 152 + crates/sui-indexer-alt/src/task.rs | 43 + .../src/indexer_builder.rs | 5 + crates/sui-indexer/Cargo.toml | 4 +- .../down.sql | 6 + .../up.sql | 6 + .../down.sql | 18 + .../up.sql | 2 + crates/sui-indexer/src/apis/read_api.rs | 6 +- .../ingestion_backfills/digest_task.rs | 26 + .../ingestion_backfills/mod.rs | 1 + .../src/backfill/backfill_instances/mod.rs | 8 + crates/sui-indexer/src/backfill/mod.rs | 1 + crates/sui-indexer/src/benchmark.rs | 130 + crates/sui-indexer/src/config.rs | 58 + crates/sui-indexer/src/db.rs | 25 +- .../src/handlers/checkpoint_handler.rs | 95 +- crates/sui-indexer/src/handlers/committer.rs | 55 +- crates/sui-indexer/src/handlers/mod.rs | 26 +- .../src/handlers/objects_snapshot_handler.rs | 18 +- crates/sui-indexer/src/indexer.rs | 28 +- crates/sui-indexer/src/lib.rs | 1 + crates/sui-indexer/src/main.rs | 19 +- crates/sui-indexer/src/models/epoch.rs | 81 +- crates/sui-indexer/src/schema.rs | 17 - .../sui-indexer/src/store/pg_indexer_store.rs | 9 +- crates/sui-indexer/src/test_utils.rs | 47 +- crates/sui-indexer/tests/ingestion_tests.rs | 70 +- crates/sui-indexer/tests/json_rpc_tests.rs | 243 ++ .../tests/move_test_code/Move.toml | 10 + .../tests/move_test_code/sources/events.move | 26 + crates/sui-json-rpc-api/src/lib.rs | 1 + .../sui-json-rpc-types/src/sui_transaction.rs | 4 +- crates/sui-json-rpc/src/balance_changes.rs | 2 +- crates/sui-json-rpc/src/coin_api.rs | 14 +- crates/sui-json-rpc/src/lib.rs | 4 +- crates/sui-json/Cargo.toml | 2 +- crates/sui-kvstore/Cargo.toml | 25 + crates/sui-kvstore/src/bigtable/README.md | 16 + crates/sui-kvstore/src/bigtable/client.rs | 463 +++ crates/sui-kvstore/src/bigtable/init.sh | 20 + crates/sui-kvstore/src/bigtable/mod.rs | 6 + crates/sui-kvstore/src/bigtable/proto.rs | 14 + .../src/bigtable/proto/google.api.rs | 1591 +++++++++++ 
.../src/bigtable/proto/google.bigtable.v2.rs | 1734 ++++++++++++ .../sui-kvstore/src/bigtable/proto/google.pem | 1128 ++++++++ .../src/bigtable/proto/google.rpc.rs | 24 + crates/sui-kvstore/src/bigtable/worker.rs | 39 + crates/sui-kvstore/src/lib.rs | 57 + crates/sui-kvstore/src/main.rs | 35 + crates/sui-move/src/build.rs | 7 +- crates/sui-move/src/main.rs | 4 +- crates/sui-mvr-indexer/Cargo.toml | 89 + crates/sui-mvr-indexer/README.md | 27 + crates/sui-mvr-indexer/diesel.toml | 8 + .../down.sql | 6 + .../up.sql | 36 + .../pg/2023-08-19-044020_events/down.sql | 2 + .../pg/2023-08-19-044020_events/up.sql | 26 + .../pg/2023-08-19-044023_objects/down.sql | 3 + .../pg/2023-08-19-044023_objects/up.sql | 95 + .../2023-08-19-044026_transactions/down.sql | 3 + .../pg/2023-08-19-044026_transactions/up.sql | 23 + .../pg/2023-08-19-044044_checkpoints/down.sql | 3 + .../pg/2023-08-19-044044_checkpoints/up.sql | 36 + .../pg/2023-08-19-044052_epochs/down.sql | 2 + .../pg/2023-08-19-044052_epochs/up.sql | 47 + .../pg/2023-08-19-060729_packages/down.sql | 2 + .../pg/2023-08-19-060729_packages/up.sql | 14 + .../pg/2023-10-06-204335_tx_indices/down.sql | 9 + .../pg/2023-10-06-204335_tx_indices/up.sql | 67 + .../pg/2023-10-07-160139_display/down.sql | 2 + .../pg/2023-10-07-160139_display/up.sql | 7 + .../down.sql | 2 + .../up.sql | 17 + .../pg/2024-05-05-155158_obj_indices/down.sql | 1 + .../pg/2024-05-05-155158_obj_indices/up.sql | 31 + .../2024-06-14-045801_event_indices/down.sql | 7 + .../pg/2024-06-14-045801_event_indices/up.sql | 74 + .../down.sql | 2 + .../2024-07-13-003534_chain_identifier/up.sql | 6 + .../down.sql | 1 + .../up.sql | 10 + .../down.sql | 15 + .../2024-09-10-195655_drop-df-columns/up.sql | 15 + .../pg/2024-09-12-150939_tx_affected/down.sql | 1 + .../pg/2024-09-12-150939_tx_affected/up.sql | 9 + .../pg/2024-09-12-213234_watermarks/down.sql | 1 + .../pg/2024-09-12-213234_watermarks/up.sql | 34 + .../down.sql | 1 + .../up.sql | 1 + .../down.sql | 1 + 
.../2024-09-19-011238_raw_checkpoints/up.sql | 6 + .../down.sql | 1 + .../up.sql | 9 + .../down.sql | 1 + .../up.sql | 1 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../metadata.toml | 1 + .../up.sql | 3 + .../down.sql | 1 + .../2024-09-30-153705_add_event_sender/up.sql | 1 + .../down.sql | 7 + .../up.sql | 18 + .../down.sql | 1 + .../up.sql | 1 + .../down.sql | 6 + .../up.sql | 6 + .../down.sql | 18 + .../up.sql | 2 + crates/sui-mvr-indexer/src/apis/coin_api.rs | 153 + .../sui-mvr-indexer/src/apis/extended_api.rs | 83 + .../src/apis/governance_api.rs | 295 ++ .../sui-mvr-indexer/src/apis/indexer_api.rs | 428 +++ crates/sui-mvr-indexer/src/apis/mod.rs | 20 + crates/sui-mvr-indexer/src/apis/move_utils.rs | 143 + crates/sui-mvr-indexer/src/apis/read_api.rs | 305 ++ .../src/apis/transaction_builder_api.rs | 70 + crates/sui-mvr-indexer/src/apis/write_api.rs | 90 + .../ingestion_backfills/digest_task.rs | 26 + .../ingestion_backfill_task.rs | 98 + .../ingestion_backfills/mod.rs | 18 + .../ingestion_backfills/raw_checkpoints.rs | 34 + .../tx_affected_objects.rs | 48 + .../src/backfill/backfill_instances/mod.rs 
| 55 + .../backfill_instances/sql_backfill.rs | 36 + .../sql_backfills/event_sender.sh | 6 + .../sql_backfills/full_objects_history.sh | 6 + .../sql_backfills/tx_affected_addresses.sh | 7 + .../system_state_summary_json.rs | 56 + .../src/backfill/backfill_runner.rs | 94 + .../src/backfill/backfill_task.rs | 12 + crates/sui-mvr-indexer/src/backfill/mod.rs | 35 + crates/sui-mvr-indexer/src/benchmark.rs | 130 + crates/sui-mvr-indexer/src/config.rs | 633 +++++ crates/sui-mvr-indexer/src/database.rs | 161 ++ crates/sui-mvr-indexer/src/db.rs | 395 +++ crates/sui-mvr-indexer/src/errors.rs | 172 ++ .../src/handlers/checkpoint_handler.rs | 653 +++++ .../sui-mvr-indexer/src/handlers/committer.rs | 281 ++ crates/sui-mvr-indexer/src/handlers/mod.rs | 316 +++ .../src/handlers/objects_snapshot_handler.rs | 139 + crates/sui-mvr-indexer/src/handlers/pruner.rs | 288 ++ .../src/handlers/tx_processor.rs | 223 ++ crates/sui-mvr-indexer/src/indexer.rs | 214 ++ crates/sui-mvr-indexer/src/indexer_reader.rs | 1511 ++++++++++ crates/sui-mvr-indexer/src/lib.rs | 97 + crates/sui-mvr-indexer/src/main.rs | 117 + crates/sui-mvr-indexer/src/metrics.rs | 813 ++++++ .../sui-mvr-indexer/src/models/checkpoints.rs | 186 ++ crates/sui-mvr-indexer/src/models/display.rs | 35 + crates/sui-mvr-indexer/src/models/epoch.rs | 278 ++ .../src/models/event_indices.rs | 145 + crates/sui-mvr-indexer/src/models/events.rs | 156 ++ crates/sui-mvr-indexer/src/models/mod.rs | 15 + .../sui-mvr-indexer/src/models/obj_indices.rs | 16 + crates/sui-mvr-indexer/src/models/objects.rs | 579 ++++ crates/sui-mvr-indexer/src/models/packages.rs | 29 + .../src/models/raw_checkpoints.rs | 26 + .../src/models/transactions.rs | 353 +++ .../sui-mvr-indexer/src/models/tx_indices.rs | 225 ++ .../sui-mvr-indexer/src/models/watermarks.rs | 76 + .../sui-mvr-indexer/src/restorer/archives.rs | 60 + .../src/restorer/formal_snapshot.rs | 283 ++ crates/sui-mvr-indexer/src/restorer/mod.rs | 5 + crates/sui-mvr-indexer/src/schema.patch | 7 + 
crates/sui-mvr-indexer/src/schema.rs | 404 +++ .../src/store/indexer_store.rs | 140 + crates/sui-mvr-indexer/src/store/mod.rs | 93 + .../src/store/package_resolver.rs | 58 + .../src/store/pg_indexer_store.rs | 2495 +++++++++++++++++ .../src/store/pg_partition_manager.rs | 224 ++ crates/sui-mvr-indexer/src/store/query.rs | 329 +++ .../src/system_package_task.rs | 66 + crates/sui-mvr-indexer/src/tempdb.rs | 343 +++ crates/sui-mvr-indexer/src/test_utils.rs | 341 +++ crates/sui-mvr-indexer/src/types.rs | 671 +++++ .../sui-mvr-indexer/tests/ingestion_tests.rs | 242 ++ .../sui-mvr-indexer/tests/json_rpc_tests.rs | 243 ++ .../tests/move_test_code/Move.toml | 10 + .../tests/move_test_code/sources/events.move | 26 + .../sui-mvr-indexer/tests/read_api_tests.rs | 50 + crates/sui-network/build.rs | 9 - crates/sui-network/src/discovery/builder.rs | 2 +- crates/sui-network/src/discovery/mod.rs | 162 +- crates/sui-network/src/discovery/server.rs | 23 +- crates/sui-network/src/discovery/tests.rs | 59 +- crates/sui-node/src/lib.rs | 45 +- crates/sui-node/src/main.rs | 4 +- crates/sui-open-rpc/Cargo.toml | 2 +- crates/sui-open-rpc/spec/openrpc.json | 18 +- crates/sui-protocol-config/src/lib.rs | 107 +- ...ocol_config__test__Mainnet_version_66.snap | 329 +++ ...ocol_config__test__Mainnet_version_67.snap | 330 +++ ...ocol_config__test__Mainnet_version_68.snap | 339 +++ ...ocol_config__test__Testnet_version_66.snap | 330 +++ ...ocol_config__test__Testnet_version_67.snap | 330 +++ ...ocol_config__test__Testnet_version_68.snap | 339 +++ ...sui_protocol_config__test__version_66.snap | 340 +++ ...sui_protocol_config__test__version_67.snap | 340 +++ ...sui_protocol_config__test__version_68.snap | 350 +++ crates/sui-proxy/src/peers.rs | 75 +- crates/sui-replay/src/data_fetcher.rs | 4 +- crates/sui-replay/src/replay.rs | 70 +- crates/sui-rest-api/Cargo.toml | 2 + crates/sui-rest-api/openapi/openapi.json | 351 ++- crates/sui-rest-api/proto/rest.proto | 156 ++ crates/sui-rest-api/src/accept.rs 
| 38 + crates/sui-rest-api/src/checkpoints.rs | 369 ++- crates/sui-rest-api/src/client/mod.rs | 36 +- crates/sui-rest-api/src/client/sdk.rs | 262 +- crates/sui-rest-api/src/committee.rs | 25 +- crates/sui-rest-api/src/lib.rs | 45 +- crates/sui-rest-api/src/objects.rs | 51 +- crates/sui-rest-api/src/openapi.rs | 4 + .../src/proto/generated/sui.rest.rs | 215 ++ crates/sui-rest-api/src/proto/mod.rs | 1196 ++++++++ crates/sui-rest-api/src/response.rs | 142 +- .../src/transactions/execution.rs | 57 +- crates/sui-rest-api/src/transactions/mod.rs | 51 +- .../src/transactions/resolve/mod.rs | 28 +- crates/sui-rest-api/tests/bootstrap.rs | 56 + .../src/mock_storage.rs | 20 +- crates/sui-storage/Cargo.toml | 4 +- ...ests__genesis_config_snapshot_matches.snap | 3 +- ..._populated_genesis_snapshot_matches-2.snap | 31 +- crates/sui-swarm/Cargo.toml | 1 + crates/sui-swarm/src/memory/node.rs | 8 +- crates/sui-synthetic-ingestion/Cargo.toml | 18 + .../sui-synthetic-ingestion/src/benchmark.rs | 160 ++ crates/sui-synthetic-ingestion/src/lib.rs | 29 + .../src/synthetic_ingestion.rs | 56 + .../src/tps_tracker.rs | 80 + crates/sui-tls/src/lib.rs | 43 +- crates/sui-tls/src/verifier.rs | 20 +- crates/sui-tool/Cargo.toml | 1 + crates/sui-tool/src/lib.rs | 12 +- .../sui-transactional-test-runner/src/args.rs | 9 +- .../sui-transactional-test-runner/src/lib.rs | 7 +- .../parser.rs | 2 +- .../token.rs | 2 +- .../src/test_adapter.rs | 5 +- crates/sui-types/Cargo.toml | 6 +- crates/sui-types/src/coin.rs | 21 +- crates/sui-types/src/committee.rs | 11 +- crates/sui-types/src/error.rs | 5 +- .../sui-types/src/executable_transaction.rs | 17 +- crates/sui-types/src/lib.rs | 12 +- crates/sui-types/src/message_envelope.rs | 11 +- .../sui-types/src/mock_checkpoint_builder.rs | 11 + crates/sui-types/src/move_package.rs | 4 + crates/sui-types/src/passkey_authenticator.rs | 52 +- crates/sui-types/src/storage/mod.rs | 2 +- .../epoch_start_sui_system_state.rs | 1 + .../simtest_sui_system_state_inner.rs | 3 
+ .../sui_system_state_inner_v1.rs | 1 + .../sui_system_state_inner_v2.rs | 1 + .../sui_system_state_summary.rs | 5 + .../unit_tests/passkey_authenticator_test.rs | 115 +- crates/sui/Cargo.toml | 14 +- crates/sui/src/client_commands.rs | 107 +- crates/sui/src/client_ptb/ast.rs | 5 +- crates/sui/src/client_ptb/builder.rs | 4 +- crates/sui/src/client_ptb/parser.rs | 6 +- crates/sui/src/client_ptb/ptb.rs | 5 + ...nt_ptb__parser__tests__parse_commands.snap | 27 + ...ent_ptb__parser__tests__parse_publish.snap | 2 + crates/sui/src/displays/dev_inspect.rs | 46 + crates/sui/src/displays/mod.rs | 1 + crates/sui/src/keytool.rs | 22 +- crates/sui/src/sui_commands.rs | 2 + .../declarations_missing_v1/Move.toml | 6 + .../declarations_missing_v1/sources/enum.move | 10 + .../declarations_missing_v1/sources/func.move | 9 + .../sources/struct.move | 10 + .../declarations_missing_v2/Move.toml | 6 + .../declarations_missing_v2/sources/enum.move | 7 + .../declarations_missing_v2/sources/func.move | 7 + .../sources/struct.move | 7 + crates/sui/src/unit_tests/profiler_tests.rs | 16 +- ...atibility_tests__declarations_missing.snap | 36 + .../unit_tests/upgrade_compatibility_tests.rs | 39 +- crates/sui/src/upgrade_compatibility.rs | 258 +- crates/sui/tests/cli_tests.rs | 4 + crates/suiop-cli/Cargo.toml | 1 + crates/suiop-cli/src/cli/env/mod.rs | 72 + crates/suiop-cli/src/cli/incidents/mod.rs | 3 +- crates/suiop-cli/src/cli/incidents/notion.rs | 9 +- crates/suiop-cli/src/cli/incidents/pd/mod.rs | 16 +- crates/suiop-cli/src/cli/mod.rs | 2 + crates/suiop-cli/src/cli/pulumi/init.rs | 62 +- crates/suiop-cli/src/cli/pulumi/mod.rs | 17 +- crates/suiop-cli/src/cli/slack/mod.rs | 8 +- crates/suiop-cli/src/main.rs | 29 +- crates/test-cluster/Cargo.toml | 3 + crates/test-cluster/src/indexer_util.rs | 84 + crates/test-cluster/src/lib.rs | 71 +- .../test-cluster/src/test_indexer_handle.rs | 88 + crates/x/src/lint.rs | 3 +- dapps/multisig-toolkit/package.json | 2 + .../src/routes/offline-signer.tsx | 
29 +- docker/sui-graphql-rpc-staging/build.sh | 36 + docker/sui-indexer-alt/Dockerfile | 37 + docker/sui-indexer-alt/build.sh | 25 + docker/sui-mvr-indexer/Dockerfile | 37 + docker/sui-mvr-indexer/build.sh | 25 + docs/content/concepts/sui-bridge.mdx | 87 + .../getting-started/local-network.mdx | 2 +- docs/content/references/cli.mdx | 4 +- docs/content/references/cli/client.mdx | 4 +- docs/content/sidebars/concepts.js | 1 + docs/content/standards/deepbook.mdx | 10 +- .../standards/deepbookv3/query-the-pool.mdx | 6 +- .../src/components/API/api-ref/compnav.js | 29 + .../src/components/API/api-ref/components.js | 326 +++ .../site/src/components/API/api-ref/refnav.js | 3 +- .../site/src/components/API/api-ref/result.js | 1 - docs/site/src/components/API/index.js | 10 +- docs/site/src/pages/index.js | 11 - examples/custom-indexer/rust/Cargo.toml | 2 +- examples/custom-indexer/rust/local_reader.rs | 2 +- examples/custom-indexer/rust/remote_reader.rs | 2 +- external-crates/move/Cargo.lock | 1 - external-crates/move/Cargo.toml | 1 - .../src/analysis/parsing_analysis.rs | 6 +- .../src/analysis/typing_analysis.rs | 8 +- .../move/crates/move-analyzer/src/analyzer.rs | 12 +- .../move-analyzer/src/completions/mod.rs | 10 +- .../move/crates/move-analyzer/src/symbols.rs | 433 ++- .../move-analyzer/tests/ide_testsuite.rs | 20 +- .../move-analyzer/trace-adapter/.mocharc.yaml | 3 + .../trace-adapter/package-lock.json | 549 ++++ .../move-analyzer/trace-adapter/package.json | 7 +- .../trace-adapter/src/adapter.ts | 112 +- .../trace-adapter/src/runtime.ts | 431 ++- .../trace-adapter/src/source_map_utils.ts | 6 +- .../trace-adapter/src/trace_utils.ts | 345 ++- .../trace-adapter/tests/.gitignore | 6 + .../tests/breakpoints_line/Move.toml | 10 + .../breakpoints_line/bytecode_modules/m.mv | Bin 0 -> 369 bytes .../build/breakpoints_line/source_maps/m.json | 1 + .../build/breakpoints_line/sources/m.move | 30 + .../tests/breakpoints_line/sources/m.move | 30 + 
.../tests/breakpoints_line/test.exp | 63 + .../tests/breakpoints_line/trace.spec.js | 27 + .../traces/breakpoints_line__m__test.json | 1 + .../trace-adapter/tests/compound/Move.toml | 10 + .../dependencies/MoveStdlib/vector.mv | Bin 0 -> 1123 bytes .../build/compound/bytecode_modules/m.mv | Bin 0 -> 721 bytes .../dependencies/MoveStdlib/vector.json | 1 + .../build/compound/source_maps/m.json | 1 + .../dependencies/MoveStdlib/vector.move | 364 +++ .../compound/build/compound/sources/m.move | 54 + .../tests/compound/sources/m.move | 54 + .../trace-adapter/tests/compound/test.exp | 55 + .../tests/compound/trace.spec.js | 23 + .../compound/traces/compound__m__test.json | 1 + .../trace-adapter/tests/global_loc/Move.toml | 10 + .../build/global_loc/bytecode_modules/m.mv | Bin 0 -> 520 bytes .../dependencies/MoveStdlib/bcs.json | 1 + .../source_maps/dependencies/Sui/object.json | 1 + .../dependencies/Sui/tx_context.json | 1 + .../build/global_loc/source_maps/m.json | 1 + .../sources/dependencies/MoveStdlib/bcs.move | 11 + .../sources/dependencies/Sui/object.move | 233 ++ .../sources/dependencies/Sui/tx_context.move | 141 + .../build/global_loc/sources/m.move | 22 + .../tests/global_loc/sources/m.move | 22 + .../trace-adapter/tests/global_loc/test.exp | 45 + .../tests/global_loc/trace.spec.js | 10 + .../traces/global_loc__m__test.json | 1 + .../trace-adapter/tests/native_fun/Move.toml | 10 + .../build/native_fun/bytecode_modules/m.mv | Bin 0 -> 369 bytes .../dependencies/MoveStdlib/ascii.json | 1 + .../dependencies/MoveStdlib/string.json | 1 + .../dependencies/MoveStdlib/vector.json | 1 + .../build/native_fun/source_maps/m.json | 1 + .../dependencies/MoveStdlib/ascii.move | 166 ++ .../dependencies/MoveStdlib/string.move | 137 + .../dependencies/MoveStdlib/vector.move | 364 +++ .../build/native_fun/sources/m.move | 14 + .../tests/native_fun/sources/m.move | 14 + .../trace-adapter/tests/native_fun/test.exp | 6 + .../tests/native_fun/trace.spec.js | 8 + 
.../traces/native_fun__m__test.json | 1 + .../trace-adapter/tests/references/Move.toml | 10 + .../dependencies/MoveStdlib/vector.mv | Bin 0 -> 1123 bytes .../build/references/bytecode_modules/m.mv | Bin 0 -> 612 bytes .../dependencies/MoveStdlib/vector.json | 1 + .../build/references/source_maps/m.json | 1 + .../dependencies/MoveStdlib/vector.move | 364 +++ .../build/references/sources/m.move | 45 + .../tests/references/sources/m.move | 45 + .../trace-adapter/tests/references/test.exp | 90 + .../tests/references/trace.spec.js | 21 + .../traces/references__m__test.json | 1 + .../tests/references_deep/Move.toml | 10 + .../dependencies/MoveStdlib/vector.mv | Bin 0 -> 1123 bytes .../references_deep/bytecode_modules/m.mv | Bin 0 -> 504 bytes .../dependencies/MoveStdlib/vector.json | 1 + .../build/references_deep/source_maps/m.json | 1 + .../dependencies/MoveStdlib/vector.move | 364 +++ .../build/references_deep/sources/m.move | 34 + .../tests/references_deep/sources/m.move | 34 + .../tests/references_deep/test.exp | 74 + .../tests/references_deep/trace.spec.js | 17 + .../traces/references_deep__m__test.json | 1 + .../trace-adapter/tests/run_spec.js | 51 + .../trace-adapter/tests/shadowing/Move.toml | 10 + .../build/shadowing/bytecode_modules/m.mv | Bin 0 -> 352 bytes .../build/shadowing/source_maps/m.json | 1 + .../shadowing/build/shadowing/sources/m.move | 26 + .../tests/shadowing/sources/m.move | 26 + .../trace-adapter/tests/shadowing/test.exp | 63 + .../tests/shadowing/trace.spec.js | 25 + .../shadowing/traces/shadowing__m__test.json | 1 + .../trace-adapter/tests/stepping/Move.toml | 10 + .../build/stepping/bytecode_modules/m.mv | Bin 0 -> 256 bytes .../build/stepping/source_maps/m.json | 1 + .../stepping/build/stepping/sources/m.move | 16 + .../tests/stepping/sources/m.move | 16 + .../trace-adapter/tests/stepping/test.exp | 20 + .../tests/stepping/trace.spec.js | 14 + .../stepping/traces/stepping__m__test.json | 1 + .../tests/stepping_call/Move.toml | 10 + 
.../build/stepping_call/bytecode_modules/m.mv | Bin 0 -> 321 bytes .../build/stepping_call/source_maps/m.json | 1 + .../build/stepping_call/sources/m.move | 26 + .../tests/stepping_call/sources/m.move | 26 + .../tests/stepping_call/test.exp | 58 + .../tests/stepping_call/trace.spec.js | 27 + .../traces/stepping_call__m__test.json | 1 + .../move-analyzer/trace-debug/package.json | 1 + .../src/compatibility_mode.rs | 4 +- .../move/crates/move-cli/Cargo.toml | 2 +- .../move/crates/move-cli/src/base/test.rs | 2 +- .../move/crates/move-cli/src/sandbox/cli.rs | 14 +- .../move-cli/src/sandbox/commands/run.rs | 2 +- .../move-cli/tests/tracing_testsuite.rs | 2 +- .../move-command-line-common/Cargo.toml | 7 - .../move-command-line-common/src/lib.rs | 4 - .../derived_line_number_raw_abort.exp | 11 + .../derived_line_number_raw_abort.move | 11 + .../macro_call_line_number_abort.exp | 21 + .../macro_call_line_number_abort.move | 27 + .../crates/move-compiler/src/cfgir/ast.rs | 2 +- .../move-compiler/src/cfgir/borrows/mod.rs | 17 +- .../move-compiler/src/cfgir/liveness/mod.rs | 28 +- .../move-compiler/src/cfgir/locals/mod.rs | 62 +- .../crates/move-compiler/src/cfgir/mod.rs | 25 +- .../move-compiler/src/cfgir/optimize/mod.rs | 2 +- .../move-compiler/src/cfgir/translate.rs | 97 +- .../crates/move-compiler/src/cfgir/visitor.rs | 94 +- .../src/command_line/compiler.rs | 40 +- .../move-compiler/src/diagnostics/codes.rs | 63 - .../move-compiler/src/diagnostics/mod.rs | 250 +- .../src/diagnostics/warning_filters.rs | 442 +++ .../crates/move-compiler/src/editions/mod.rs | 4 +- .../crates/move-compiler/src/expansion/ast.rs | 21 +- .../src/expansion/name_validation.rs | 48 +- .../src/expansion/path_expander.rs | 89 +- .../src/expansion/primitive_definers.rs | 38 +- .../move-compiler/src/expansion/translate.rs | 283 +- .../move/crates/move-compiler/src/hlir/ast.rs | 2 +- .../src/hlir/detect_dead_code.rs | 101 +- .../src/hlir/match_compilation.rs | 6 +- 
.../move-compiler/src/hlir/translate.rs | 141 +- .../src/linters/abort_constant.rs | 34 +- .../src/linters/constant_naming.rs | 34 +- .../src/linters/loop_without_exit.rs | 32 +- .../src/linters/meaningless_math_operation.rs | 37 +- .../crates/move-compiler/src/linters/mod.rs | 16 +- .../src/linters/redundant_ref_deref.rs | 54 +- .../src/linters/self_assignment.rs | 33 +- .../src/linters/unnecessary_conditional.rs | 36 +- .../src/linters/unnecessary_unit.rs | 100 + .../src/linters/unnecessary_while_loop.rs | 32 +- .../src/linters/unneeded_return.rs | 42 +- .../crates/move-compiler/src/naming/ast.rs | 12 +- .../move-compiler/src/naming/fake_natives.rs | 6 +- .../src/naming/resolve_use_funs.rs | 66 +- .../src/naming/syntax_methods.rs | 52 +- .../move-compiler/src/naming/translate.rs | 335 ++- .../crates/move-compiler/src/parser/ast.rs | 9 +- .../crates/move-compiler/src/parser/lexer.rs | 8 +- .../crates/move-compiler/src/parser/mod.rs | 9 +- .../crates/move-compiler/src/parser/syntax.rs | 58 +- .../parser/verification_attribute_filter.rs | 13 +- .../crates/move-compiler/src/shared/ide.rs | 2 +- .../move-compiler/src/shared/matching.rs | 4 +- .../crates/move-compiler/src/shared/mod.rs | 307 +- .../move-compiler/src/sui_mode/id_leak.rs | 5 +- .../crates/move-compiler/src/sui_mode/info.rs | 5 +- .../src/sui_mode/linters/coin_field.rs | 60 +- .../sui_mode/linters/collection_equality.rs | 40 +- .../sui_mode/linters/custom_state_change.rs | 3 +- .../src/sui_mode/linters/freeze_wrapped.rs | 33 +- .../sui_mode/linters/freezing_capability.rs | 46 +- .../src/sui_mode/linters/missing_key.rs | 39 +- .../move-compiler/src/sui_mode/linters/mod.rs | 2 +- .../sui_mode/linters/public_mut_tx_context.rs | 48 +- .../src/sui_mode/linters/public_random.rs | 35 +- .../src/sui_mode/linters/self_transfer.rs | 2 - .../src/sui_mode/linters/share_owned.rs | 3 +- .../move-compiler/src/sui_mode/typing.rs | 80 +- .../move-compiler/src/to_bytecode/context.rs | 4 +- .../src/to_bytecode/translate.rs 
| 8 +- .../crates/move-compiler/src/typing/ast.rs | 34 +- .../crates/move-compiler/src/typing/core.rs | 92 +- .../src/typing/dependency_ordering.rs | 18 +- .../src/typing/deprecation_warnings.rs | 23 +- .../crates/move-compiler/src/typing/expand.rs | 20 +- .../src/typing/infinite_instantiations.rs | 12 +- .../move-compiler/src/typing/macro_expand.rs | 27 +- .../src/typing/match_analysis.rs | 39 +- .../src/typing/match_compilation.rs | 4 +- .../src/typing/recursive_datatypes.rs | 6 +- .../src/typing/syntax_methods.rs | 18 +- .../move-compiler/src/typing/translate.rs | 226 +- .../move-compiler/src/typing/visitor.rs | 134 +- .../src/unit_test/filter_test_members.rs | 10 +- .../src/unit_test/plan_builder.rs | 101 +- ...alse_negative_unnecessary_conditional.move | 6 +- .../linter/suppress_unnecessary_unit.move | 12 + .../true_negative_unnecessary_unit.move | 12 + .../linter/true_positive_unnecessary_unit.exp | 151 + .../true_positive_unnecessary_unit.move | 52 + .../matching/inferred_int_complex.move | 14 + .../matching/inferred_int_mut_ref_type.move | 5 + .../matching/inferred_int_ref_type.move | 5 + .../matching/inferred_int_subject.move | 5 + ..._exp_associativity_else_after_if_block.exp | 24 +- ...ntrol_exp_associativity_typing_invalid.exp | 6 +- .../parsing/clever_errors_raw_abort.move | 40 + .../parsing/expr_abort_missing_value.move | 5 + ..._exp_associativity_else_after_if_block.exp | 24 +- ...ntrol_exp_associativity_typing_invalid.exp | 6 +- .../parser/expr_abort_missing_value.exp | 13 +- .../parser/expr_abort_missing_value.move | 4 +- .../commands/abort_negative_stack_size.exp | 9 - .../commands/abort_negative_stack_size.move | 7 - .../tests/move_check/typing/if_no_else.exp | 34 + .../tests/move_check/typing/if_no_else.move | 14 + .../tests/move_check_testsuite.rs | 1 + .../tests/sui_mode/linter/coin_field.exp | 14 +- .../src/annotated_extractor.rs | 334 +++ .../move-core-types/src/annotated_value.rs | 37 + .../move-core-types/src/annotated_visitor.rs | 25 +- 
.../move-core-types/src/language_storage.rs | 13 +- .../move/crates/move-core-types/src/lib.rs | 3 +- .../move/crates/move-core-types/src/parser.rs | 632 ----- .../src/parsing}/address.rs | 13 +- .../crates/move-core-types/src/parsing/mod.rs | 10 + .../src/parsing}/parser.rs | 333 +-- .../src/parsing}/types.rs | 14 +- .../src/parsing}/values.rs | 10 +- .../move/crates/move-core-types/src/u256.rs | 12 + .../src/unit_tests/extractor_test.rs | 852 ++++++ .../move-core-types/src/unit_tests/mod.rs | 2 + .../src/unit_tests/parsing_test.rs | 678 +++++ .../src/unit_tests/visitor_test.rs | 541 ++-- .../move/crates/move-model/src/lib.rs | 2 +- .../move/crates/move-model/src/model.rs | 3 +- .../move/crates/move-model/tests/testsuite.rs | 2 +- .../src/resolution/resolution_graph.rs | 2 +- .../tests/testsuite.rs | 2 +- .../move/crates/move-stdlib/src/lib.rs | 6 +- .../src/framework.rs | 10 +- .../src/tasks.rs | 8 +- .../src/vm_test_harness.rs | 5 +- .../move/crates/move-unit-test/Cargo.toml | 2 +- .../move/crates/move-unit-test/src/lib.rs | 2 +- .../crates/move-unit-test/src/test_runner.rs | 2 +- .../move/crates/move-vm-config/Cargo.toml | 2 +- .../move/crates/move-vm-config/src/runtime.rs | 10 +- .../move-vm-integration-tests/Cargo.toml | 11 +- .../src/tests/instantiation_tests.rs | 6 +- .../move/crates/move-vm-profiler/Cargo.toml | 2 +- .../move/crates/move-vm-profiler/src/lib.rs | 60 +- .../move/crates/move-vm-runtime/Cargo.toml | 8 +- .../move/crates/move-vm-runtime/src/lib.rs | 2 +- .../move/crates/move-vm-runtime/src/loader.rs | 2 +- .../crates/move-vm-runtime/src/runtime.rs | 2 +- .../crates/move-vm-runtime/src/session.rs | 6 +- .../crates/move-vm-runtime/src/tracing.rs | 24 +- .../move-vm-runtime/src/tracing2/mod.rs | 4 +- .../move/crates/move-vm-test-utils/Cargo.toml | 2 +- .../v0/crates/move-vm-runtime/Cargo.toml | 6 +- .../crates/move-vm-runtime/src/interpreter.rs | 6 +- .../v1/crates/move-vm-runtime/Cargo.toml | 6 +- .../crates/move-vm-runtime/src/interpreter.rs | 6 
+- .../v1/crates/move-vm-runtime/src/runtime.rs | 2 +- .../v1/crates/move-vm-runtime/src/session.rs | 2 +- .../v2/crates/move-vm-runtime/Cargo.toml | 6 +- .../crates/move-vm-runtime/src/interpreter.rs | 6 +- .../v2/crates/move-vm-runtime/src/runtime.rs | 2 +- .../v2/crates/move-vm-runtime/src/session.rs | 2 +- external-crates/tests.sh | 2 +- .../tests/consensus_integration_tests.rs | 238 -- .../primary/tests/causal_completion_tests.rs | 161 -- .../tests/nodes_bootstrapping_tests.rs | 299 -- narwhal/test-utils/src/cluster.rs | 1 + narwhal/worker/src/lib.rs | 1 - narwhal/worker/src/tests/worker_tests.rs | 371 --- narwhal/worker/src/transactions_server.rs | 206 -- narwhal/worker/src/worker.rs | 22 +- pnpm-lock.yaml | 754 ++--- pnpm-workspace.yaml | 1 + sdk/build-scripts/src/utils/buildPackage.ts | 2 + sdk/create-dapp/CHANGELOG.md | 8 + sdk/create-dapp/package.json | 2 +- sdk/dapp-kit/CHANGELOG.md | 9 + sdk/dapp-kit/package.json | 2 +- sdk/deepbook-v3/CHANGELOG.md | 7 + sdk/deepbook-v3/package.json | 2 +- sdk/deepbook/CHANGELOG.md | 7 + sdk/deepbook/package.json | 2 +- sdk/enoki/CHANGELOG.md | 8 + sdk/enoki/package.json | 2 +- sdk/graphql-transport/CHANGELOG.md | 7 + sdk/graphql-transport/package.json | 2 +- sdk/kiosk/CHANGELOG.md | 8 + sdk/kiosk/package.json | 2 +- sdk/kiosk/src/client/kiosk-client.ts | 4 +- sdk/kms/.env.example | 4 + sdk/kms/CHANGELOG.md | 15 + sdk/kms/README.md | 75 + sdk/kms/aws/package.json | 6 + sdk/kms/package.json | 59 + sdk/kms/src/aws/aws-client.ts | 131 + sdk/kms/src/aws/aws-kms-signer.ts | 147 + sdk/kms/src/aws/index.ts | 9 + sdk/kms/src/aws/utils.ts | 65 + sdk/kms/tests/e2e-aws-kms.test.ts | 46 + sdk/kms/tsconfig.esm.json | 7 + sdk/kms/tsconfig.json | 11 + sdk/kms/vitest.config.ts | 19 + sdk/suins-toolkit/CHANGELOG.md | 7 + sdk/suins-toolkit/package.json | 2 +- sdk/typescript/CHANGELOG.md | 7 + sdk/typescript/package.json | 2 +- sdk/typescript/src/client/http-transport.ts | 1 + sdk/typescript/src/version.ts | 4 +- 
.../test/unit/client/http-transport.test.ts | 1 + sdk/wallet-standard/CHANGELOG.md | 7 + sdk/wallet-standard/package.json | 2 +- sdk/zklogin/CHANGELOG.md | 7 + sdk/zklogin/package.json | 2 +- sdk/zksend/CHANGELOG.md | 8 + sdk/zksend/package.json | 2 +- sui-execution/Cargo.toml | 24 +- sui-execution/latest/sui-adapter/Cargo.toml | 10 +- .../latest/sui-adapter/src/adapter.rs | 6 +- .../src/programmable_transactions/context.rs | 2 +- .../programmable_transactions/execution.rs | 28 +- .../sui-move-natives/src/crypto/group_ops.rs | 164 ++ .../latest/sui-move-natives/src/lib.rs | 25 + sui-execution/v0/sui-adapter/Cargo.toml | 10 +- sui-execution/v0/sui-adapter/src/adapter.rs | 6 +- .../src/programmable_transactions/context.rs | 2 +- sui-execution/v1/sui-adapter/Cargo.toml | 10 +- sui-execution/v1/sui-adapter/src/adapter.rs | 6 +- .../src/programmable_transactions/context.rs | 2 +- sui-execution/v2/sui-adapter/Cargo.toml | 10 +- sui-execution/v2/sui-adapter/src/adapter.rs | 6 +- .../src/programmable_transactions/context.rs | 2 +- turbo.json | 3 +- 965 files changed, 57258 insertions(+), 8845 deletions(-) create mode 100644 .github/workflows/ide-tests.yml create mode 100644 crates/sui-benchmark/src/workloads/expected_failure.rs create mode 100644 crates/sui-bridge-indexer/README.md create mode 100644 crates/sui-bridge-indexer/tests/indexer_tests.rs delete mode 100644 crates/sui-bridge-watchdog/Cargo.toml create mode 100644 crates/sui-bridge-watchdog/eth_bridge_status.rs create mode 100644 crates/sui-bridge-watchdog/eth_vault_balance.rs create mode 100644 crates/sui-bridge-watchdog/lib.rs create mode 100644 crates/sui-bridge-watchdog/metrics.rs create mode 100644 crates/sui-bridge-watchdog/sui_bridge_status.rs create mode 100644 crates/sui-bridge/src/sui_bridge_watchdog/eth_bridge_status.rs create mode 100644 crates/sui-bridge/src/sui_bridge_watchdog/eth_vault_balance.rs create mode 100644 crates/sui-bridge/src/sui_bridge_watchdog/metrics.rs create mode 100644 
crates/sui-bridge/src/sui_bridge_watchdog/mod.rs create mode 100644 crates/sui-bridge/src/sui_bridge_watchdog/sui_bridge_status.rs create mode 100644 crates/sui-bridge/src/sui_bridge_watchdog/total_supplies.rs create mode 100644 crates/sui-e2e-tests/tests/rest/checkpoints.rs create mode 100644 crates/sui-e2e-tests/tests/rest/committee.rs rename crates/sui-e2e-tests/tests/{rest.rs => rest/execute.rs} (100%) create mode 100644 crates/sui-e2e-tests/tests/rest/main.rs create mode 100644 crates/sui-e2e-tests/tests/rest/objects.rs create mode 100644 crates/sui-e2e-tests/tests/rest/resolve.rs create mode 100644 crates/sui-e2e-tests/tests/rest/transactions.rs create mode 100644 crates/sui-field-count-derive/Cargo.toml create mode 100644 crates/sui-field-count-derive/src/lib.rs create mode 100644 crates/sui-field-count-main/Cargo.toml create mode 100644 crates/sui-field-count-main/src/lib.rs create mode 100644 crates/sui-field-count/Cargo.toml create mode 100644 crates/sui-field-count/src/lib.rs create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000001 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000002 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000003 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000000b create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000dee9 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000001 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000002 create mode 100644 
crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000003 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000000b create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000dee9 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000001 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000002 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000003 create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000000b create mode 100644 crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000dee9 create mode 100644 crates/sui-indexer-alt/Cargo.toml create mode 100644 crates/sui-indexer-alt/diesel.toml create mode 100755 crates/sui-indexer-alt/generate_schema.sh create mode 100644 crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/down.sql create mode 100644 crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/up.sql create mode 100644 
crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/up.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/down.sql create mode 100644 crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/up.sql create mode 100644 crates/sui-indexer-alt/schema.patch create mode 100644 crates/sui-indexer-alt/src/args.rs create mode 100644 crates/sui-indexer-alt/src/db.rs create mode 100644 crates/sui-indexer-alt/src/handlers/ev_emit_mod.rs create mode 100644 crates/sui-indexer-alt/src/handlers/ev_struct_inst.rs create mode 
100644 crates/sui-indexer-alt/src/handlers/kv_checkpoints.rs create mode 100644 crates/sui-indexer-alt/src/handlers/kv_objects.rs create mode 100644 crates/sui-indexer-alt/src/handlers/kv_transactions.rs create mode 100644 crates/sui-indexer-alt/src/handlers/mod.rs create mode 100644 crates/sui-indexer-alt/src/handlers/obj_versions.rs create mode 100644 crates/sui-indexer-alt/src/handlers/sum_coin_balances.rs create mode 100644 crates/sui-indexer-alt/src/handlers/sum_obj_types.rs create mode 100644 crates/sui-indexer-alt/src/handlers/tx_affected_objects.rs create mode 100644 crates/sui-indexer-alt/src/handlers/tx_balance_changes.rs create mode 100644 crates/sui-indexer-alt/src/handlers/wal_coin_balances.rs create mode 100644 crates/sui-indexer-alt/src/handlers/wal_obj_types.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/broadcaster.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/client.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/error.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/local_client.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/mod.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/regulator.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/remote_client.rs create mode 100644 crates/sui-indexer-alt/src/ingestion/test_utils.rs create mode 100644 crates/sui-indexer-alt/src/lib.rs create mode 100644 crates/sui-indexer-alt/src/main.rs create mode 100644 crates/sui-indexer-alt/src/metrics.rs create mode 100644 crates/sui-indexer-alt/src/models/checkpoints.rs create mode 100644 crates/sui-indexer-alt/src/models/events.rs create mode 100644 crates/sui-indexer-alt/src/models/mod.rs create mode 100644 crates/sui-indexer-alt/src/models/objects.rs create mode 100644 crates/sui-indexer-alt/src/models/transactions.rs create mode 100644 crates/sui-indexer-alt/src/models/watermarks.rs create mode 100644 crates/sui-indexer-alt/src/pipeline/concurrent/collector.rs create mode 100644 
crates/sui-indexer-alt/src/pipeline/concurrent/committer.rs create mode 100644 crates/sui-indexer-alt/src/pipeline/concurrent/mod.rs create mode 100644 crates/sui-indexer-alt/src/pipeline/concurrent/watermark.rs create mode 100644 crates/sui-indexer-alt/src/pipeline/mod.rs create mode 100644 crates/sui-indexer-alt/src/pipeline/processor.rs create mode 100644 crates/sui-indexer-alt/src/pipeline/sequential/committer.rs create mode 100644 crates/sui-indexer-alt/src/pipeline/sequential/mod.rs create mode 100644 crates/sui-indexer-alt/src/schema.rs create mode 100644 crates/sui-indexer-alt/src/task.rs create mode 100644 crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql create mode 100644 crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql create mode 100644 crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql create mode 100644 crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql create mode 100644 crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs create mode 100644 crates/sui-indexer/src/benchmark.rs create mode 100644 crates/sui-indexer/tests/json_rpc_tests.rs create mode 100644 crates/sui-indexer/tests/move_test_code/Move.toml create mode 100644 crates/sui-indexer/tests/move_test_code/sources/events.move create mode 100644 crates/sui-kvstore/Cargo.toml create mode 100644 crates/sui-kvstore/src/bigtable/README.md create mode 100644 crates/sui-kvstore/src/bigtable/client.rs create mode 100755 crates/sui-kvstore/src/bigtable/init.sh create mode 100644 crates/sui-kvstore/src/bigtable/mod.rs create mode 100644 crates/sui-kvstore/src/bigtable/proto.rs create mode 100644 crates/sui-kvstore/src/bigtable/proto/google.api.rs create mode 100644 crates/sui-kvstore/src/bigtable/proto/google.bigtable.v2.rs create mode 100644 crates/sui-kvstore/src/bigtable/proto/google.pem create mode 
100644 crates/sui-kvstore/src/bigtable/proto/google.rpc.rs create mode 100644 crates/sui-kvstore/src/bigtable/worker.rs create mode 100644 crates/sui-kvstore/src/lib.rs create mode 100644 crates/sui-kvstore/src/main.rs create mode 100644 crates/sui-mvr-indexer/Cargo.toml create mode 100644 crates/sui-mvr-indexer/README.md create mode 100644 crates/sui-mvr-indexer/diesel.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/down.sql create mode 100644 
crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/down.sql create mode 100644 
crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/up.sql create mode 100644 
crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/metadata.toml create mode 100644 
crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/metadata.toml create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/metadata.toml create 
mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql create mode 100644 crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql create mode 100644 crates/sui-mvr-indexer/src/apis/coin_api.rs create mode 100644 crates/sui-mvr-indexer/src/apis/extended_api.rs create mode 100644 crates/sui-mvr-indexer/src/apis/governance_api.rs create mode 100644 crates/sui-mvr-indexer/src/apis/indexer_api.rs create mode 100644 crates/sui-mvr-indexer/src/apis/mod.rs create mode 100644 crates/sui-mvr-indexer/src/apis/move_utils.rs create mode 100644 crates/sui-mvr-indexer/src/apis/read_api.rs create mode 100644 crates/sui-mvr-indexer/src/apis/transaction_builder_api.rs create mode 100644 crates/sui-mvr-indexer/src/apis/write_api.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs create mode 100644 
crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/ingestion_backfill_task.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/raw_checkpoints.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/tx_affected_objects.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/mod.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfill.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/event_sender.sh create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/full_objects_history.sh create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/tx_affected_addresses.sh create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_instances/system_state_summary_json.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_runner.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/backfill_task.rs create mode 100644 crates/sui-mvr-indexer/src/backfill/mod.rs create mode 100644 crates/sui-mvr-indexer/src/benchmark.rs create mode 100644 crates/sui-mvr-indexer/src/config.rs create mode 100644 crates/sui-mvr-indexer/src/database.rs create mode 100644 crates/sui-mvr-indexer/src/db.rs create mode 100644 crates/sui-mvr-indexer/src/errors.rs create mode 100644 crates/sui-mvr-indexer/src/handlers/checkpoint_handler.rs create mode 100644 crates/sui-mvr-indexer/src/handlers/committer.rs create mode 100644 crates/sui-mvr-indexer/src/handlers/mod.rs create mode 100644 crates/sui-mvr-indexer/src/handlers/objects_snapshot_handler.rs create mode 100644 crates/sui-mvr-indexer/src/handlers/pruner.rs create mode 100644 crates/sui-mvr-indexer/src/handlers/tx_processor.rs create mode 100644 
crates/sui-mvr-indexer/src/indexer.rs create mode 100644 crates/sui-mvr-indexer/src/indexer_reader.rs create mode 100644 crates/sui-mvr-indexer/src/lib.rs create mode 100644 crates/sui-mvr-indexer/src/main.rs create mode 100644 crates/sui-mvr-indexer/src/metrics.rs create mode 100644 crates/sui-mvr-indexer/src/models/checkpoints.rs create mode 100644 crates/sui-mvr-indexer/src/models/display.rs create mode 100644 crates/sui-mvr-indexer/src/models/epoch.rs create mode 100644 crates/sui-mvr-indexer/src/models/event_indices.rs create mode 100644 crates/sui-mvr-indexer/src/models/events.rs create mode 100644 crates/sui-mvr-indexer/src/models/mod.rs create mode 100644 crates/sui-mvr-indexer/src/models/obj_indices.rs create mode 100644 crates/sui-mvr-indexer/src/models/objects.rs create mode 100644 crates/sui-mvr-indexer/src/models/packages.rs create mode 100644 crates/sui-mvr-indexer/src/models/raw_checkpoints.rs create mode 100644 crates/sui-mvr-indexer/src/models/transactions.rs create mode 100644 crates/sui-mvr-indexer/src/models/tx_indices.rs create mode 100644 crates/sui-mvr-indexer/src/models/watermarks.rs create mode 100644 crates/sui-mvr-indexer/src/restorer/archives.rs create mode 100644 crates/sui-mvr-indexer/src/restorer/formal_snapshot.rs create mode 100644 crates/sui-mvr-indexer/src/restorer/mod.rs create mode 100644 crates/sui-mvr-indexer/src/schema.patch create mode 100644 crates/sui-mvr-indexer/src/schema.rs create mode 100644 crates/sui-mvr-indexer/src/store/indexer_store.rs create mode 100644 crates/sui-mvr-indexer/src/store/mod.rs create mode 100644 crates/sui-mvr-indexer/src/store/package_resolver.rs create mode 100644 crates/sui-mvr-indexer/src/store/pg_indexer_store.rs create mode 100644 crates/sui-mvr-indexer/src/store/pg_partition_manager.rs create mode 100644 crates/sui-mvr-indexer/src/store/query.rs create mode 100644 crates/sui-mvr-indexer/src/system_package_task.rs create mode 100644 crates/sui-mvr-indexer/src/tempdb.rs create mode 100644 
crates/sui-mvr-indexer/src/test_utils.rs create mode 100644 crates/sui-mvr-indexer/src/types.rs create mode 100644 crates/sui-mvr-indexer/tests/ingestion_tests.rs create mode 100644 crates/sui-mvr-indexer/tests/json_rpc_tests.rs create mode 100644 crates/sui-mvr-indexer/tests/move_test_code/Move.toml create mode 100644 crates/sui-mvr-indexer/tests/move_test_code/sources/events.move create mode 100644 crates/sui-mvr-indexer/tests/read_api_tests.rs create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_66.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_67.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_68.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_66.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_67.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_68.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_66.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_67.snap create mode 100644 crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_68.snap create mode 100644 crates/sui-rest-api/proto/rest.proto create mode 100644 crates/sui-rest-api/src/proto/generated/sui.rest.rs create mode 100644 crates/sui-rest-api/src/proto/mod.rs create mode 100644 crates/sui-rest-api/tests/bootstrap.rs create mode 100644 crates/sui-synthetic-ingestion/Cargo.toml create mode 100644 crates/sui-synthetic-ingestion/src/benchmark.rs create mode 100644 crates/sui-synthetic-ingestion/src/lib.rs create mode 100644 crates/sui-synthetic-ingestion/src/synthetic_ingestion.rs create mode 100644 
crates/sui-synthetic-ingestion/src/tps_tracker.rs create mode 100644 crates/sui/src/displays/dev_inspect.rs create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/Move.toml create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/enum.move create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/func.move create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/struct.move create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/Move.toml create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/func.move create mode 100644 crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move create mode 100644 crates/sui/src/unit_tests/snapshots/sui__upgrade_compatibility__upgrade_compatibility_tests__declarations_missing.snap create mode 100644 crates/suiop-cli/src/cli/env/mod.rs create mode 100644 crates/test-cluster/src/indexer_util.rs create mode 100644 crates/test-cluster/src/test_indexer_handle.rs create mode 100755 docker/sui-graphql-rpc-staging/build.sh create mode 100644 docker/sui-indexer-alt/Dockerfile create mode 100644 docker/sui-indexer-alt/build.sh create mode 100644 docker/sui-mvr-indexer/Dockerfile create mode 100755 docker/sui-mvr-indexer/build.sh create mode 100644 docs/content/concepts/sui-bridge.mdx create mode 100644 docs/site/src/components/API/api-ref/compnav.js create mode 100644 docs/site/src/components/API/api-ref/components.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/.mocharc.yaml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/.gitignore create mode 100644 
external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/test.exp create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/traces/breakpoints_line__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/dependencies/MoveStdlib/vector.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/dependencies/MoveStdlib/vector.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/dependencies/MoveStdlib/vector.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/m.move create mode 100644 
external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/test.exp create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/traces/compound__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/MoveStdlib/bcs.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/object.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/tx_context.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/MoveStdlib/bcs.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/object.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/tx_context.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/sources/m.move create mode 100644 
external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/test.exp create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/traces/global_loc__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/ascii.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/string.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/vector.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/ascii.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/string.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/vector.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/test.exp create mode 100644 
external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/traces/native_fun__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/dependencies/MoveStdlib/vector.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/dependencies/MoveStdlib/vector.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/dependencies/MoveStdlib/vector.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/test.exp create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references/traces/references__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/dependencies/MoveStdlib/vector.mv create mode 100644 
external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/dependencies/MoveStdlib/vector.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/dependencies/MoveStdlib/vector.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/test.exp create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/traces/references_deep__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/run_spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/test.exp create mode 100644 
external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/traces/shadowing__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/test.exp create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/trace.spec.js create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/traces/stepping__m__test.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/Move.toml create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/bytecode_modules/m.mv create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/source_maps/m.json create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/sources/m.move create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/test.exp create mode 100644 external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/trace.spec.js create mode 100644 
external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/traces/stepping_call__m__test.json create mode 100644 external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.exp create mode 100644 external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.move create mode 100644 external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.exp create mode 100644 external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.move create mode 100644 external-crates/move/crates/move-compiler/src/diagnostics/warning_filters.rs create mode 100644 external-crates/move/crates/move-compiler/src/linters/unnecessary_unit.rs create mode 100644 external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_unit.move create mode 100644 external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_unit.move create mode 100644 external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.exp create mode 100644 external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.move create mode 100644 external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_complex.move create mode 100644 external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_mut_ref_type.move create mode 100644 external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_ref_type.move create mode 100644 external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_subject.move create mode 100644 external-crates/move/crates/move-compiler/tests/move_2024/parsing/clever_errors_raw_abort.move create mode 100644 external-crates/move/crates/move-compiler/tests/move_2024/parsing/expr_abort_missing_value.move delete mode 100644 
external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.exp delete mode 100644 external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move create mode 100644 external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.exp create mode 100644 external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.move create mode 100644 external-crates/move/crates/move-core-types/src/annotated_extractor.rs delete mode 100644 external-crates/move/crates/move-core-types/src/parser.rs rename external-crates/move/crates/{move-command-line-common/src => move-core-types/src/parsing}/address.rs (93%) create mode 100644 external-crates/move/crates/move-core-types/src/parsing/mod.rs rename external-crates/move/crates/{move-command-line-common/src => move-core-types/src/parsing}/parser.rs (60%) rename external-crates/move/crates/{move-command-line-common/src => move-core-types/src/parsing}/types.rs (94%) rename external-crates/move/crates/{move-command-line-common/src => move-core-types/src/parsing}/values.rs (99%) create mode 100644 external-crates/move/crates/move-core-types/src/unit_tests/extractor_test.rs create mode 100644 external-crates/move/crates/move-core-types/src/unit_tests/parsing_test.rs delete mode 100644 narwhal/executor/tests/consensus_integration_tests.rs delete mode 100644 narwhal/primary/tests/causal_completion_tests.rs delete mode 100644 narwhal/primary/tests/nodes_bootstrapping_tests.rs delete mode 100644 narwhal/worker/src/tests/worker_tests.rs delete mode 100644 narwhal/worker/src/transactions_server.rs create mode 100644 sdk/kms/.env.example create mode 100644 sdk/kms/CHANGELOG.md create mode 100644 sdk/kms/README.md create mode 100644 sdk/kms/aws/package.json create mode 100644 sdk/kms/package.json create mode 100644 sdk/kms/src/aws/aws-client.ts create mode 100644 sdk/kms/src/aws/aws-kms-signer.ts create 
mode 100644 sdk/kms/src/aws/index.ts create mode 100644 sdk/kms/src/aws/utils.ts create mode 100644 sdk/kms/tests/e2e-aws-kms.test.ts create mode 100644 sdk/kms/tsconfig.esm.json create mode 100644 sdk/kms/tsconfig.json create mode 100644 sdk/kms/vitest.config.ts diff --git a/.github/actions/diffs/action.yml b/.github/actions/diffs/action.yml index 37f2ae16d5bfb..8a97000610847 100644 --- a/.github/actions/diffs/action.yml +++ b/.github/actions/diffs/action.yml @@ -22,6 +22,9 @@ outputs: isMoveAutoFormatter: description: True when changes happened in MoveAutoFormatter code value: "${{ steps.diff.outputs.isMoveAutoFormatter }}" + isMoveAnalyzerTraceAdapter: + description: True when changes happened in Trace Adapter + value: "${{ steps.diff.outputs.isMoveAnalyzerTraceAdapter }}" isExamples: description: True when changes happened in examples/ directory value: "${{ steps.diff.outputs.isExamples }}" @@ -79,5 +82,7 @@ runs: - 'sui-execution/**' isMoveAutoFormatter: - 'external-crates/move/crates/move-analyzer/prettier-plugin/**' + isMoveAnalyzerTraceAdapter: + - 'external-crates/move/crates/move-analyzer/trace-adapter/**' isExamples: - 'examples/**' diff --git a/.github/workflows/ide-tests.yml b/.github/workflows/ide-tests.yml new file mode 100644 index 0000000000000..ecb0e2a7435e8 --- /dev/null +++ b/.github/workflows/ide-tests.yml @@ -0,0 +1,91 @@ +name: IDE Tests + +on: + push: + branches: main + pull_request: + types: [ opened, synchronize, reopened, ready_for_review ] + workflow_dispatch: + inputs: + sui_repo_ref: + description: "Branch / commit to test" + type: string + required: false + default: '' + +jobs: + diff: + runs-on: [ubuntu-latest] + outputs: + isMoveAutoFormatter: ${{ steps.diff.outputs.isMoveAutoFormatter }} + isMoveAnalyzerTraceAdapter: ${{ steps.diff.outputs.isMoveAnalyzerTraceAdapter }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # Pin v4.1.1 + - name: Detect Changes + uses: './.github/actions/diffs' + id: diff + + 
move-auto-formatter-ci-test: + name: Move Auto-formatter Test + needs: diff + if: needs.diff.outputs.isMoveAutoFormatter == 'true' + runs-on: [ ubuntu-latest ] + + steps: + - name: Checkout + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # Pin v4.1.1 + with: + ref: ${{ github.event.inputs.sui_repo_ref || github.ref }} + + - name: pnpm setup + uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # pin@v3.0.0 + with: + version: 9.1.1 + + - name: Setup Node + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # pin@v4.0.2 + with: + node-version: '20' + cache: 'pnpm' + + - name: Install dependencies + working-directory: ./external-crates/move/crates/move-analyzer/prettier-plugin + run: npm install && npm i web-tree-sitter + + - name: Run npm test + working-directory: ./external-crates/move/crates/move-analyzer/prettier-plugin + shell: bash + run: npm run test + + move-analyzer-trace-adapter-ci-test: + name: Trace Adapter Test + needs: diff + if: needs.diff.outputs.isMoveAnalyzerTraceAdapter == 'true' + runs-on: [ ubuntu-latest ] + + steps: + - name: Checkout + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # Pin v4.1.1 + with: + ref: ${{ github.event.inputs.sui_repo_ref || github.ref }} + + - name: pnpm setup + uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # pin@v3.0.0 + with: + version: 9.1.1 + + - name: Setup Node + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # pin@v4.0.2 + with: + node-version: '20' + cache: 'pnpm' + + - name: Install dependencies + working-directory: ./external-crates/move/crates/move-analyzer/trace-adapter + shell: bash + run: npm install + + - name: Run npm test + working-directory: ./external-crates/move/crates/move-analyzer/trace-adapter + shell: bash + run: npm run test diff --git a/.vscode/extensions.json b/.vscode/extensions.json index f29818dd52d36..79feead4aa559 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,6 
+1,7 @@ { "recommendations": [ - "move.move-analyzer", + "mysten.move", + "damirka.move-syntax", "rust-lang.rust-analyzer", "esbenp.prettier-vscode", "ms-playwright.playwright", diff --git a/Cargo.lock b/Cargo.lock index 34011f5351fe9..bc21f489583af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -182,7 +182,7 @@ dependencies = [ "rand 0.8.5", "rcgen", "ring 0.17.8", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-webpki 0.102.8", "serde", "serde_json", @@ -217,7 +217,7 @@ dependencies = [ "prettyplease 0.2.25", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -262,9 +262,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -277,52 +277,52 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" 
dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" dependencies = [ "backtrace", ] [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] @@ -1002,7 +1002,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -1016,6 +1016,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "assert_cmd" version = "2.0.16" @@ -1089,8 +1099,8 @@ checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task 4.7.1", "concurrent-queue", - "fastrand", - "futures-lite", + "fastrand 2.2.0", + "futures-lite 2.5.0", "slab", ] @@ -1105,7 +1115,7 @@ dependencies = [ "async-io", "async-lock 3.4.0", "blocking", - "futures-lite", + "futures-lite 2.5.0", "once_cell", ] @@ 
-1180,7 +1190,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "strum 0.25.0", - "syn 2.0.85", + "syn 2.0.87", "thiserror", ] @@ -1210,15 +1220,15 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock 3.4.0", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite", + "futures-lite 2.5.0", "parking", "polling", "rustix", @@ -1255,7 +1265,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1273,7 +1283,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 2.5.0", "gloo-timers 0.3.0", "kv-log-macro", "log", @@ -1304,7 +1314,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1326,7 +1336,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1399,7 +1409,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1432,7 +1442,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand", + "fastrand 2.2.0", "hex", "http 0.2.12", "hyper 0.14.31", @@ -1452,7 +1462,7 @@ checksum = "70a66ac8ef5fa9cf01c2d999f39d16812e90ec1467bd382cbbb74ba23ea86201" dependencies = [ "aws-smithy-async", "aws-smithy-types", - "fastrand", + "fastrand 2.2.0", "tokio", "tracing", "zeroize", @@ -1492,7 +1502,7 @@ 
dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "percent-encoding", "tracing", @@ -1517,7 +1527,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "regex", "tokio-stream", @@ -1543,7 +1553,7 @@ dependencies = [ "aws-smithy-types", "aws-smithy-xml", "aws-types", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "regex", "tokio-stream", @@ -1695,7 +1705,7 @@ dependencies = [ "aws-smithy-http-tower", "aws-smithy-types", "bytes", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.31", @@ -1789,7 +1799,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "bytes", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "http-body 0.4.6", "once_cell", @@ -1988,7 +1998,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2005,7 +2015,7 @@ dependencies = [ "hyper 1.5.0", "hyper-util", "pin-project-lite", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pemfile 2.2.0", "rustls-pki-types", "tokio", @@ -2223,9 +2233,9 @@ dependencies = [ [[package]] name = "bigdecimal" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" +checksum = "8f850665a0385e070b64c38d2354e6c104c8479c59868d1e48a0c13ee2c7a1c1" dependencies = [ "autocfg", "libm", @@ -2242,7 +2252,7 @@ checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" [[package]] name = "bin-version" -version = "1.36.1" +version = "1.37.1" dependencies = [ "const-str", "git-version", @@ -2275,7 +2285,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2501,7 +2511,7 @@ dependencies = [ "async-channel 2.3.1", "async-task 
4.7.1", "futures-io", - "futures-lite", + "futures-lite 2.5.0", "piper", ] @@ -2895,9 +2905,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.31" +version = "1.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" dependencies = [ "jobserver", "libc", @@ -2929,6 +2939,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -3057,7 +3073,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3185,9 +3201,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "colored" @@ -3277,7 +3293,7 @@ dependencies = [ "quinn-proto", "rand 0.8.5", "rstest", - "rustls 0.23.15", + "rustls 0.23.16", "serde", "shared-crypto", "strum_macros 0.24.3", @@ -3294,6 +3310,7 @@ dependencies = [ "tokio-util 0.7.12", "tonic 0.12.3", "tonic-build 0.12.3", + "tonic-rustls", "tower 0.4.13", "tower-http", "tracing", @@ -3944,7 +3961,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3968,7 +3985,7 @@ checksum = "478c02b53607e3f21c374f024c2cfc2154e554905bba478e8e09409f10ce3726" 
dependencies = [ "cynic-proc-macros", "ref-cast", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "static_assertions", @@ -3989,7 +4006,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.10.0", - "syn 2.0.85", + "syn 2.0.87", "thiserror", ] @@ -4013,7 +4030,7 @@ dependencies = [ "cynic-codegen", "darling 0.20.10", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4085,7 +4102,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.11.1", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4118,7 +4135,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4336,6 +4353,25 @@ dependencies = [ "walkdir", ] +[[package]] +name = "deadpool" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" +dependencies = [ + "async-trait", + "deadpool-runtime", + "num_cpus", + "retain_mut", + "tokio", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" + [[package]] name = "debug-ignore" version = "1.0.5" @@ -4421,13 +4457,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4471,7 +4507,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "rustc_version", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4491,7 +4527,7 @@ checksum = 
"cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4527,9 +4563,9 @@ dependencies = [ [[package]] name = "diesel-async" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb799bb6f8ca6a794462125d7b8983b0c86e6c93a33a9c55934a4a5de4409d3" +checksum = "4c5c6ec8d5c7b8444d19a47161797cbe361e0fb1ee40c6a8124ec915b64a4125" dependencies = [ "async-trait", "bb8", @@ -4550,7 +4586,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4570,7 +4606,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4719,7 +4755,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4792,7 +4828,7 @@ dependencies = [ "optfield", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4824,7 +4860,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5041,7 +5077,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5060,7 +5096,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5267,7 +5303,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "serde_json", - "syn 2.0.85", + "syn 2.0.87", "toml 0.8.19", "walkdir", ] @@ -5285,7 +5321,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "serde_json", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5311,7 +5347,7 @@ dependencies = [ "serde", 
"serde_json", "strum 0.26.3", - "syn 2.0.85", + "syn 2.0.87", "tempfile", "thiserror", "tiny-keccak", @@ -5547,7 +5583,7 @@ dependencies = [ [[package]] name = "fastcrypto" version = "0.1.8" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "aes", "aes-gcm", @@ -5601,7 +5637,7 @@ dependencies = [ [[package]] name = "fastcrypto-derive" version = "0.1.3" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "quote 1.0.37", "syn 1.0.109", @@ -5610,7 +5646,7 @@ dependencies = [ [[package]] name = "fastcrypto-tbls" version = "0.1.0" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "bcs", "digest 0.10.7", @@ -5629,7 +5665,7 @@ dependencies = [ [[package]] name = "fastcrypto-vdf" version = "0.1.0" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "bcs", "fastcrypto", @@ -5646,7 +5682,7 @@ dependencies = [ [[package]] name = "fastcrypto-zkp" version = "0.1.3" -source = 
"git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "ark-bn254", "ark-ec", @@ -5667,7 +5703,7 @@ dependencies = [ "num-bigint 0.4.6", "once_cell", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "schemars", "serde", "serde_json", @@ -5676,9 +5712,18 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.1" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fd-lock" @@ -6002,11 +6047,26 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.3.0" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ - "fastrand", + "fastrand 2.2.0", "futures-core", "futures-io", "parking", @@ -6031,7 +6091,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" 
dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -6108,7 +6168,7 @@ dependencies = [ "hyper 0.14.31", "hyper-rustls 0.25.0", "log", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "thiserror", @@ -6119,6 +6179,33 @@ dependencies = [ "yup-oauth2", ] +[[package]] +name = "gcp_auth" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf67f30198e045a039264c01fb44659ce82402d7771c50938beb41a5ac87733" +dependencies = [ + "async-trait", + "base64 0.22.1", + "bytes", + "chrono", + "home", + "http 1.1.0", + "http-body-util", + "hyper 1.5.0", + "hyper-rustls 0.27.3", + "hyper-util", + "ring 0.17.8", + "rustls-pemfile 2.2.0", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-futures", + "url", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -6193,7 +6280,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -6456,9 +6543,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" dependencies = [ "allocator-api2", "equivalent", @@ -6671,6 +6758,27 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08a397c49fec283e3d6211adbe480be95aae5f304cfb923e9970e08956d5168a" +[[package]] +name = "http-types" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" +dependencies = [ + "anyhow", + "async-channel 1.9.0", + "base64 0.13.1", + "futures-lite 1.13.0", + "http 0.2.12", + "infer", + 
"pin-project-lite", + "rand 0.7.3", + "serde", + "serde_json", + "serde_qs", + "serde_urlencoded", + "url", +] + [[package]] name = "httparse" version = "1.9.5" @@ -6793,7 +6901,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "hyper-util", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -6816,9 +6924,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper 1.5.0", "hyper-util", @@ -6829,9 +6937,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -6882,6 +6990,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + 
"icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "id-arena" version = "2.2.1" @@ -6904,6 +7130,27 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + [[package]] name = "if_chain" version = "1.0.2" @@ -7024,7 +7271,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", "serde", ] @@ -7041,6 +7288,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "infer" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" + [[package]] name = "inotify" version = "0.9.6" @@ -7089,9 +7342,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.40.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60" +checksum = "7e9ffc4d4892617c50a928c52b2961cb5174b6fc6ebf252b2fac9d21955c48b8" dependencies = [ "console", "lazy_static", @@ -7122,9 +7375,9 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "io-extras" -version = "0.18.2" +version = "0.18.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c9f046b9af244f13b3bd939f55d16830ac3a201e8a9ba9661bfcb03e2be72b9b" +checksum = "7d45fd7584f9b67ac37bc041212d06bfac0700b36456b05890d36a3b626260eb" dependencies = [ "io-lifetimes", "windows-sys 0.52.0", @@ -7677,9 +7930,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -7693,9 +7946,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libquickjs-sys" @@ -7779,6 +8032,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "lock_api" version = "0.4.12" @@ -7820,7 +8079,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "regex-syntax 0.8.5", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -7856,7 +8115,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -8160,7 +8419,7 @@ checksum = "dcf09caffaac8068c346b6df2a7fc27a177fd20b39421a39ce0a211bde679a6c" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -8507,7 +8766,6 @@ 
dependencies = [ "hex", "move-binary-format", "move-core-types", - "num-bigint 0.4.6", "once_cell", "serde", "sha2 0.9.9", @@ -8748,7 +9006,7 @@ version = "0.1.0" dependencies = [ "enum-compat-util", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -9177,7 +9435,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -9202,7 +9460,7 @@ dependencies = [ "mysten-metrics", "parking_lot 0.12.3", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "snap", "sui-tls", "sui-types", @@ -9235,11 +9493,13 @@ name = "mysten-network" version = "0.2.0" dependencies = [ "anemo", + "async-stream", "bcs", "bytes", "eyre", "futures", "http 1.1.0", + "hyper-rustls 0.27.3", "hyper-util", "multiaddr", "once_cell", @@ -9247,6 +9507,7 @@ dependencies = [ "serde", "snap", "tokio", + "tokio-rustls 0.26.0", "tokio-stream", "tonic 0.12.3", "tonic-health", @@ -9296,7 +9557,7 @@ version = "0.1.0" dependencies = [ "proc-macro2 1.0.89", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -9445,7 +9706,7 @@ dependencies = [ "pretty_assertions", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde-reflection", "serde_yaml 0.8.26", "sui-keys", @@ -9499,7 +9760,7 @@ dependencies = [ "prometheus", "proptest", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "sui-macros", "sui-protocol-config", "tap", @@ -9639,7 +9900,7 @@ dependencies = [ "narwhal-types", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "sui-protocol-config", "tap", "telemetry-subscribers", @@ -10064,7 +10325,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10076,7 +10337,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10145,7 +10406,7 @@ dependencies = [ "percent-encoding", "quick-xml", "rand 
0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "ring 0.17.8", "rustls-pemfile 2.2.0", "serde", @@ -10344,7 +10605,7 @@ checksum = "fa59f025cde9c698fcb4fcb3533db4621795374065bee908215263488f2d2a1d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10394,7 +10655,7 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10408,7 +10669,7 @@ dependencies = [ "proc-macro2 1.0.89", "proc-macro2-diagnostics", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10670,7 +10931,7 @@ checksum = "f14d42b14749cc7927add34a9932b3b3cc5349a633384850baa67183061439dd" dependencies = [ "ciborium", "coset", - "idna", + "idna 0.5.0", "passkey-authenticator", "passkey-types", "public-suffix", @@ -10850,7 +11111,7 @@ dependencies = [ "pest_meta", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10934,7 +11195,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10972,7 +11233,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10994,7 +11255,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand", + "fastrand 2.2.0", "futures-io", ] @@ -11077,9 +11338,9 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.3" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", @@ -11257,7 +11518,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2 1.0.89", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11378,7 +11639,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "version_check", "yansi 1.0.1", ] @@ -11415,7 +11676,7 @@ checksum = "0fcebfa99f03ae51220778316b37d24981e36322c82c24848f48c5bd0f64cbdb" dependencies = [ "enum-as-inner", "mime", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "time", "url", @@ -11532,7 +11793,7 @@ dependencies = [ "prost 0.13.3", "prost-types 0.13.3", "regex", - "syn 2.0.85", + "syn 2.0.87", "tempfile", ] @@ -11559,7 +11820,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11572,7 +11833,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11679,7 +11940,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "socket2 0.5.7", "thiserror", "tokio", @@ -11696,7 +11957,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "slab", "thiserror", "tinyvec", @@ -11705,10 +11966,11 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2 0.5.7", @@ -11895,7 +12157,7 @@ checksum = "a25d631e41bfb5fdcde1d4e2215f62f7f0afa3ff11e26563765bd6ea1d229aeb" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", 
] [[package]] @@ -11962,7 +12224,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11980,9 +12242,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -12071,9 +12333,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "async-compression", "base64 0.22.1", @@ -12096,7 +12358,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-native-certs 0.8.0", "rustls-pemfile 2.2.0", "rustls-pki-types", @@ -12126,7 +12388,7 @@ dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "thiserror", "tower-service", @@ -12146,7 +12408,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "parking_lot 0.11.2", - "reqwest 0.12.8", + "reqwest 0.12.9", "reqwest-middleware", "retry-policies", "tokio", @@ -12154,6 +12416,12 @@ dependencies = [ "wasm-timer", ] +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + [[package]] name = "retry-policies" version = "0.3.0" @@ -12600,9 +12868,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.39" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ "bitflags 2.6.0", "errno", @@ -12653,9 +12921,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -12847,7 +13115,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -12881,7 +13149,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "serde_derive_internals", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13027,9 +13295,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -13076,13 +13344,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13093,7 +13361,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] 
[[package]] @@ -13119,6 +13387,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_qs" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" +dependencies = [ + "percent-encoding", + "serde", + "thiserror", +] + [[package]] name = "serde_repr" version = "0.1.19" @@ -13127,7 +13406,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13203,7 +13482,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13215,7 +13494,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13594,7 +13873,7 @@ dependencies = [ "log", "object_store 0.10.2", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "reqwest-middleware", "reqwest-retry", "serde", @@ -13882,7 +14161,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "rustversion", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13895,7 +14174,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "rustversion", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13931,7 +14210,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "sui" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anemo", "anyhow", @@ -13944,6 +14223,7 @@ dependencies = [ "bip32 0.4.0", "camino", "clap", + "codespan-reporting", "colored", "csv", "datatest-stable", @@ -13961,9 +14241,12 @@ dependencies = [ "miette", "move-analyzer", "move-binary-format", + "move-bytecode-source-map", "move-bytecode-verifier-meter", "move-command-line-common", + "move-compiler", "move-core-types", + "move-ir-types", "move-package", "move-vm-config", "move-vm-profiler", @@ -13972,7 +14255,7 @@ dependencies = [ 
"prometheus", "rand 0.8.5", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "rusoto_core", "rusoto_kms", "rustyline", @@ -14145,7 +14428,7 @@ dependencies = [ [[package]] name = "sui-analytics-indexer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "arrow 52.2.0", @@ -14197,7 +14480,7 @@ dependencies = [ [[package]] name = "sui-analytics-indexer-derive" -version = "1.36.1" +version = "1.37.1" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", @@ -14206,7 +14489,7 @@ dependencies = [ [[package]] name = "sui-archival" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "byteorder", @@ -14266,7 +14549,7 @@ dependencies = [ "narwhal-config", "prettytable-rs", "prometheus-parse", - "reqwest 0.12.8", + "reqwest 0.12.9", "russh", "russh-keys", "serde", @@ -14331,7 +14614,7 @@ dependencies = [ [[package]] name = "sui-bridge" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "arc-swap", @@ -14356,7 +14639,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -14381,7 +14664,7 @@ dependencies = [ [[package]] name = "sui-bridge-cli" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "clap", @@ -14389,7 +14672,7 @@ dependencies = [ "fastcrypto", "futures", "move-core-types", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -14417,6 +14700,7 @@ dependencies = [ "clap", "diesel", "diesel-async", + "diesel_migrations", "ethers", "futures", "hex-literal 0.3.4", @@ -14428,9 +14712,9 @@ dependencies = [ "serde_yaml 0.8.26", "strum_macros 0.24.3", "sui-bridge", - "sui-bridge-watchdog", "sui-config", "sui-data-ingestion-core", + "sui-indexer", "sui-indexer-builder", "sui-json-rpc-types", "sui-sdk", @@ -14444,24 +14728,9 @@ dependencies = [ "tracing", ] -[[package]] -name = "sui-bridge-watchdog" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "ethers", - 
"futures", - "mysten-metrics", - "prometheus", - "sui-bridge", - "tokio", - "tracing", -] - [[package]] name = "sui-cluster-test" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -14473,7 +14742,7 @@ dependencies = [ "move-core-types", "prometheus", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde_json", "shared-crypto", "sui-config", @@ -14517,7 +14786,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_with 3.11.0", "serde_yaml 0.8.26", @@ -14586,7 +14855,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "rayon", - "reqwest 0.12.8", + "reqwest 0.12.9", "roaring", "rstest", "scopeguard", @@ -14614,6 +14883,7 @@ dependencies = [ "sui-storage", "sui-swarm-config", "sui-test-transaction-builder", + "sui-tls", "sui-transaction-checks", "sui-types", "tap", @@ -14654,7 +14924,7 @@ dependencies = [ [[package]] name = "sui-data-ingestion" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -14677,6 +14947,7 @@ dependencies = [ "serde_yaml 0.8.26", "sui-archival", "sui-data-ingestion-core", + "sui-kvstore", "sui-storage", "sui-types", "telemetry-subscribers", @@ -14750,7 +15021,7 @@ dependencies = [ [[package]] name = "sui-e2e-tests" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "assert_cmd", @@ -14775,12 +15046,13 @@ dependencies = [ "passkey-client", "passkey-types", "prometheus", + "prost 0.13.3", "rand 0.8.5", + "reqwest 0.12.9", "serde", "serde_json", "shared-crypto", "sui", - "sui-bridge", "sui-config", "sui-core", "sui-framework", @@ -14868,12 +15140,13 @@ dependencies = [ [[package]] name = "sui-faucet" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-recursion", "async-trait", "axum 0.7.7", + "bin-version", "clap", "eyre", "futures", @@ -14906,16 +15179,36 @@ dependencies = [ ] [[package]] -name = "sui-framework" -version = "0.1.0" +name = "sui-field-count" +version = "1.37.1" 
dependencies = [ - "anyhow", - "bcs", - "move-binary-format", - "move-compiler", - "move-core-types", - "move-package", - "once_cell", + "sui-field-count-derive", + "sui-field-count-main", +] + +[[package]] +name = "sui-field-count-derive" +version = "1.37.1" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "sui-field-count-main" +version = "1.37.1" + +[[package]] +name = "sui-framework" +version = "0.1.0" +dependencies = [ + "anyhow", + "bcs", + "move-binary-format", + "move-compiler", + "move-core-types", + "move-package", + "once_cell", "regex", "serde", "sui-config", @@ -14927,7 +15220,7 @@ dependencies = [ [[package]] name = "sui-framework-snapshot" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "bcs", @@ -14991,7 +15284,7 @@ dependencies = [ [[package]] name = "sui-graphql-config" -version = "1.36.1" +version = "1.37.1" dependencies = [ "quote 1.0.37", "syn 1.0.109", @@ -15011,7 +15304,7 @@ dependencies = [ [[package]] name = "sui-graphql-rpc" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-graphql", @@ -15051,7 +15344,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -15095,7 +15388,7 @@ dependencies = [ "async-graphql", "axum 0.7.7", "hyper 1.5.0", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde_json", "sui-graphql-rpc-headers", "thiserror", @@ -15110,7 +15403,7 @@ dependencies = [ [[package]] name = "sui-indexer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -15167,6 +15460,8 @@ dependencies = [ "sui-sdk", "sui-snapshot", "sui-storage", + "sui-swarm-config", + "sui-synthetic-ingestion", "sui-test-transaction-builder", "sui-transaction-builder", "sui-types", @@ -15183,6 +15478,41 @@ dependencies = [ "url", ] +[[package]] +name = "sui-indexer-alt" +version = "1.37.1" +dependencies = [ + "anyhow", + "async-trait", + "axum 0.7.7", + "backoff", + "bb8", + 
"bcs", + "chrono", + "clap", + "diesel", + "diesel-async", + "diesel_migrations", + "futures", + "mysten-metrics", + "prometheus", + "rand 0.8.5", + "reqwest 0.12.9", + "serde", + "sui-field-count", + "sui-storage", + "sui-types", + "telemetry-subscribers", + "tempfile", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util 0.7.12", + "tracing", + "url", + "wiremock", +] + [[package]] name = "sui-indexer-builder" version = "0.1.0" @@ -15310,7 +15640,7 @@ dependencies = [ "move-package", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "sui-config", "sui-core", "sui-json", @@ -15382,9 +15712,31 @@ dependencies = [ "tiny-bip39", ] +[[package]] +name = "sui-kvstore" +version = "1.37.1" +dependencies = [ + "anyhow", + "async-trait", + "base64 0.21.7", + "bcs", + "gcp_auth", + "http 1.1.0", + "prometheus", + "prost 0.13.3", + "prost-types 0.13.3", + "serde", + "sui-data-ingestion-core", + "sui-types", + "telemetry-subscribers", + "tokio", + "tonic 0.12.3", + "tracing", +] + [[package]] name = "sui-light-client" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -15396,7 +15748,7 @@ dependencies = [ "move-binary-format", "move-core-types", "object_store 0.10.2", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_yaml 0.8.26", @@ -15422,7 +15774,7 @@ dependencies = [ [[package]] name = "sui-metric-checker" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "backoff", @@ -15432,7 +15784,7 @@ dependencies = [ "humantime", "once_cell", "prometheus-http-query", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_yaml 0.9.34+deprecated", "strum_macros 0.24.3", @@ -15443,7 +15795,7 @@ dependencies = [ [[package]] name = "sui-move" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "assert_cmd", @@ -15485,7 +15837,7 @@ dependencies = [ [[package]] name = "sui-move-build" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "datatest-stable", @@ -15509,7 
+15861,7 @@ dependencies = [ [[package]] name = "sui-move-lsp" -version = "1.36.1" +version = "1.37.1" dependencies = [ "bin-version", "clap", @@ -15599,6 +15951,83 @@ dependencies = [ "tracing", ] +[[package]] +name = "sui-mvr-indexer" +version = "1.37.1" +dependencies = [ + "anyhow", + "async-trait", + "axum 0.7.7", + "backoff", + "bb8", + "bcs", + "bytes", + "cached", + "chrono", + "clap", + "criterion", + "csv", + "dashmap", + "diesel", + "diesel-async", + "diesel_migrations", + "fastcrypto", + "futures", + "hex", + "indicatif", + "itertools 0.13.0", + "jsonrpsee", + "move-binary-format", + "move-bytecode-utils", + "move-core-types", + "mysten-metrics", + "ntest", + "object_store 0.10.2", + "prometheus", + "rand 0.8.5", + "rayon", + "regex", + "serde", + "serde_json", + "serde_with 3.11.0", + "simulacrum", + "strum 0.24.1", + "strum_macros 0.24.3", + "sui-archival", + "sui-config", + "sui-core", + "sui-data-ingestion-core", + "sui-json", + "sui-json-rpc", + "sui-json-rpc-api", + "sui-json-rpc-types", + "sui-keys", + "sui-move-build", + "sui-open-rpc", + "sui-package-resolver", + "sui-protocol-config", + "sui-rest-api", + "sui-sdk", + "sui-snapshot", + "sui-storage", + "sui-swarm-config", + "sui-synthetic-ingestion", + "sui-test-transaction-builder", + "sui-transaction-builder", + "sui-types", + "tap", + "telemetry-subscribers", + "tempfile", + "test-cluster", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util 0.7.12", + "toml 0.7.8", + "tracing", + "url", +] + [[package]] name = "sui-network" version = "0.0.0" @@ -15641,7 +16070,7 @@ dependencies = [ [[package]] name = "sui-node" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anemo", "anemo-tower", @@ -15665,7 +16094,7 @@ dependencies = [ "narwhal-network", "parking_lot 0.12.3", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "sui-archival", "sui-config", @@ -15693,7 +16122,7 @@ dependencies = [ [[package]] name = "sui-open-rpc" -version = "1.36.1" +version = "1.37.1" 
dependencies = [ "anyhow", "bcs", @@ -15729,7 +16158,7 @@ dependencies = [ [[package]] name = "sui-oracle" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "bcs", @@ -15741,7 +16170,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "shared-crypto", @@ -15759,7 +16188,7 @@ dependencies = [ [[package]] name = "sui-package-dump" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "bcs", @@ -15767,7 +16196,7 @@ dependencies = [ "cynic-codegen", "fastcrypto", "move-core-types", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "sui-types", @@ -15776,7 +16205,7 @@ dependencies = [ [[package]] name = "sui-package-management" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "move-core-types", @@ -15821,7 +16250,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "sui-enum-compat-util", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -15875,8 +16304,8 @@ dependencies = [ "prost-build 0.13.3", "protobuf", "rand 0.8.5", - "reqwest 0.12.8", - "rustls 0.23.15", + "reqwest 0.12.9", + "rustls 0.23.16", "rustls-pemfile 2.2.0", "serde", "serde_json", @@ -15959,8 +16388,10 @@ dependencies = [ "mysten-network", "openapiv3", "prometheus", + "prost 0.13.3", + "prost-build 0.13.3", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "schemars", "serde", "serde_json", @@ -15977,7 +16408,7 @@ dependencies = [ [[package]] name = "sui-rosetta" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -15996,7 +16427,7 @@ dependencies = [ "once_cell", "quick-js", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "shared-crypto", @@ -16022,7 +16453,7 @@ dependencies = [ [[package]] name = "sui-rpc-loadgen" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -16051,7 +16482,7 @@ dependencies = [ [[package]] name = "sui-sdk" -version = "1.36.1" +version = 
"1.37.1" dependencies = [ "anyhow", "async-recursion", @@ -16067,7 +16498,7 @@ dependencies = [ "jsonrpsee", "move-core-types", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -16107,7 +16538,7 @@ dependencies = [ [[package]] name = "sui-security-watchdog" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "arrow-array 52.2.0", @@ -16118,7 +16549,7 @@ dependencies = [ "lexical-util", "mysten-metrics", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "snowflake-api", @@ -16154,7 +16585,7 @@ dependencies = [ [[package]] name = "sui-single-node-benchmark" -version = "1.36.1" +version = "1.37.1" dependencies = [ "async-trait", "bcs", @@ -16217,7 +16648,7 @@ dependencies = [ [[package]] name = "sui-source-validation" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "colored", @@ -16265,7 +16696,7 @@ dependencies = [ "move-symbol-pool", "mysten-metrics", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "sui", "sui-json-rpc-types", @@ -16321,7 +16752,7 @@ dependencies = [ "percent-encoding", "pretty_assertions", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "sui-config", @@ -16343,7 +16774,7 @@ dependencies = [ [[package]] name = "sui-surfer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "async-trait", "bcs", @@ -16385,6 +16816,7 @@ dependencies = [ "sui-protocol-config", "sui-simulator", "sui-swarm-config", + "sui-tls", "sui-types", "tap", "telemetry-subscribers", @@ -16423,11 +16855,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "sui-synthetic-ingestion" +version = "0.0.0" +dependencies = [ + "async-trait", + "simulacrum", + "sui-test-transaction-builder", + "sui-types", + "tempfile", + "tokio", + "tracing", +] + [[package]] name = "sui-telemetry" version = "0.1.0" dependencies = [ - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "sui-core", "tracing", @@ -16448,7 +16893,7 @@ dependencies = [ 
[[package]] name = "sui-test-validator" -version = "1.36.1" +version = "1.37.1" [[package]] name = "sui-tls" @@ -16463,8 +16908,8 @@ dependencies = [ "pkcs8 0.9.0", "rand 0.8.5", "rcgen", - "reqwest 0.12.8", - "rustls 0.23.15", + "reqwest 0.12.9", + "rustls 0.23.16", "rustls-webpki 0.102.8", "tokio", "tokio-rustls 0.26.0", @@ -16474,7 +16919,7 @@ dependencies = [ [[package]] name = "sui-tool" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anemo", "anemo-cli", @@ -16509,6 +16954,7 @@ dependencies = [ "sui-sdk", "sui-snapshot", "sui-storage", + "sui-tls", "sui-types", "telemetry-subscribers", "tempfile", @@ -16756,7 +17202,7 @@ dependencies = [ [[package]] name = "suins-indexer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -16776,7 +17222,7 @@ dependencies = [ "object_store 0.10.2", "prometheus", "rand 0.8.5", - "rustls 0.23.15", + "rustls 0.23.16", "serde", "serde_json", "serde_yaml 0.8.26", @@ -16796,7 +17242,7 @@ dependencies = [ [[package]] name = "suiop-cli" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "axum 0.7.7", @@ -16818,7 +17264,7 @@ dependencies = [ "prettytable-rs", "rand 0.8.5", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "semver", "serde", "serde_json", @@ -16879,9 +17325,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "3d4d73159efebfb389d819fd479afb2dbd57dcb3e3f4b7fcfa0e675f5a46c1cb" dependencies = [ "debugid", "memmap2", @@ -16891,9 +17337,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "a767859f6549c665011970874c3f541838b4835d5aaaa493d3ee383918be9f10" 
dependencies = [ "cpp_demangle 0.4.4", "rustc-demangle", @@ -16924,9 +17370,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", @@ -16960,6 +17406,17 @@ dependencies = [ "unicode-xid 0.2.6", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "sysinfo" version = "0.27.8" @@ -17060,9 +17517,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" +checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" dependencies = [ "filetime", "libc", @@ -17118,12 +17575,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -17240,6 +17697,7 @@ dependencies = [ "sui-config", "sui-core", "sui-framework", + "sui-indexer", "sui-json-rpc", "sui-json-rpc-api", "sui-json-rpc-types", @@ -17253,7 +17711,9 @@ dependencies = [ "sui-swarm-config", "sui-test-transaction-builder", "sui-types", + "tempfile", "tokio", + 
"tokio-util 0.7.12", "tracing", ] @@ -17295,7 +17755,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "subprocess", - "syn 2.0.85", + "syn 2.0.87", "test-fuzz-internal", "toolchain_find", ] @@ -17327,22 +17787,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17436,6 +17896,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -17514,7 +17984,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17524,7 +17994,7 @@ source = "git+https://github.com/mystenmark/tokio-madsim-fork.git?rev=d46208cb11 dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17560,7 +18030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04fb792ccd6bbcd4bba408eb8a292f70fc4a3589e5d793626f45190e6454b6ab" dependencies = [ "ring 0.17.8", - "rustls 0.23.15", + "rustls 0.23.16", "tokio", 
"tokio-postgres", "tokio-rustls 0.26.0", @@ -17616,7 +18086,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -17867,13 +18337,15 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.5.0", - "hyper-timeout 0.5.1", + "hyper-timeout 0.5.2", "hyper-util", "percent-encoding", "pin-project", "prost 0.13.3", + "rustls-pemfile 2.2.0", "socket2 0.5.7", "tokio", + "tokio-rustls 0.26.0", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -17905,7 +18377,7 @@ dependencies = [ "prost-build 0.13.3", "prost-types 0.13.3", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17921,6 +18393,33 @@ dependencies = [ "tonic 0.12.3", ] +[[package]] +name = "tonic-rustls" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "803689f99cfc6de9c3b27aa86bf98553754c72c53b715913f1c14dcd3c030f77" +dependencies = [ + "async-stream", + "bytes", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.5.0", + "hyper-timeout 0.5.2", + "hyper-util", + "pin-project", + "socket2 0.5.7", + "tokio", + "tokio-rustls 0.26.0", + "tokio-stream", + "tonic 0.12.3", + "tower 0.5.1", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "toolchain_find" version = "0.3.0" @@ -17963,9 +18462,12 @@ checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ "futures-core", "futures-util", + "indexmap 2.6.0", "pin-project-lite", + "slab", "sync_wrapper 0.1.2", "tokio", + "tokio-util 0.7.12", "tower-layer", "tower-service", "tracing", @@ -18046,7 +18548,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -18325,7 +18827,7 @@ 
dependencies = [ "quote 1.0.37", "regex", "regex-syntax 0.7.5", - "syn 2.0.85", + "syn 2.0.87", "zstd-sys", ] @@ -18337,9 +18839,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typeshare" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f17399b76c2e743d58eac0635d7686e9c00f48cd4776f00695d9882a7d3187" +checksum = "19be0f411120091e76e13e5a0186d8e2bcc3e7e244afdb70152197f1a8486ceb" dependencies = [ "chrono", "serde", @@ -18354,7 +18856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a615d6c2764852a2e88a4f16e9ce1ea49bb776b5872956309e170d63a042a34f" dependencies = [ "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -18494,7 +18996,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "url", "webpki-roots 0.26.6", @@ -18502,12 +19004,12 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", - "idna", + "idna 1.0.3", "percent-encoding", "serde", ] @@ -18524,6 +19026,18 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = 
"0.2.2" @@ -18636,6 +19150,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" + [[package]] name = "walkdir" version = "2.5.0" @@ -18721,7 +19241,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -18755,7 +19275,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -18787,9 +19307,9 @@ dependencies = [ [[package]] name = "wasm-streams" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -18926,7 +19446,7 @@ dependencies = [ "anyhow", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wasmtime-component-util", "wasmtime-wit-bindgen", "wit-parser", @@ -19094,7 +19614,7 @@ checksum = "5399c175ddba4a471b9da45105dea3493059d52b2d54860eadb0df04c813948d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -19305,7 +19825,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "shellexpand 2.1.2", - "syn 2.0.85", + "syn 2.0.87", "witx", ] @@ -19317,7 +19837,7 @@ checksum = "93e43fc332703d1ec3aa86a5ce8bb49e6b95b6c617b90e726d3e70a0f70f48a5" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wiggle-generate", ] @@ -19593,6 +20113,28 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "wiremock" +version = "0.5.22" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "13a3a53eaf34f390dd30d7b1b078287dd05df2aa2e21a589ccb80f5c7253c2e9" +dependencies = [ + "assert-json-diff", + "async-trait", + "base64 0.21.7", + "deadpool", + "futures", + "futures-timer", + "http-types", + "hyper 0.14.31", + "log", + "once_cell", + "regex", + "serde", + "serde_json", + "tokio", +] + [[package]] name = "wit-parser" version = "0.13.2" @@ -19622,6 +20164,18 @@ dependencies = [ "wast 35.0.2", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -19658,7 +20212,7 @@ dependencies = [ [[package]] name = "x" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "camino", @@ -19717,9 +20271,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" +checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" [[package]] name = "xmlparser" @@ -19759,6 +20313,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", + 
"synstructure 0.13.1", +] + [[package]] name = "yup-oauth2" version = "8.3.2" @@ -19804,7 +20382,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", + "synstructure 0.13.1", ] [[package]] @@ -19824,7 +20423,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 67cc9896cda22..ed066f8183cd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,7 +93,6 @@ members = [ "crates/sui-bridge", "crates/sui-bridge-cli", "crates/sui-bridge-indexer", - "crates/sui-bridge-watchdog", "crates/sui-cluster-test", "crates/sui-config", "crates/sui-core", @@ -104,6 +103,9 @@ members = [ "crates/sui-e2e-tests", "crates/sui-enum-compat-util", 
"crates/sui-faucet", + "crates/sui-field-count", + "crates/sui-field-count-derive", + "crates/sui-field-count-main", "crates/sui-framework", "crates/sui-framework-snapshot", "crates/sui-framework-tests", @@ -114,6 +116,7 @@ members = [ "crates/sui-graphql-rpc-client", "crates/sui-graphql-rpc-headers", "crates/sui-indexer", + "crates/sui-indexer-alt", "crates/sui-indexer-builder", "crates/sui-json", "crates/sui-json-rpc", @@ -121,12 +124,14 @@ members = [ "crates/sui-json-rpc-tests", "crates/sui-json-rpc-types", "crates/sui-keys", + "crates/sui-kvstore", "crates/sui-light-client", "crates/sui-macros", "crates/sui-metric-checker", "crates/sui-move", "crates/sui-move-build", "crates/sui-move-lsp", + "crates/sui-mvr-indexer", "crates/sui-network", "crates/sui-node", "crates/sui-open-rpc", @@ -154,6 +159,7 @@ members = [ "crates/sui-surfer", "crates/sui-swarm", "crates/sui-swarm-config", + "crates/sui-synthetic-ingestion", "crates/sui-telemetry", "crates/sui-test-transaction-builder", "crates/sui-test-validator", @@ -203,7 +209,7 @@ members = [ [workspace.package] # This version string will be inherited by sui-core, sui-faucet, sui-node, sui-tools, sui-sdk, sui-move-build, and sui crates. 
-version = "1.36.1" +version = "1.37.1" [profile.release] # debug = 1 means line charts only, which is minimum needed for good stack traces @@ -256,6 +262,7 @@ async-graphql = "=7.0.1" async-graphql-axum = "=7.0.1" async-graphql-value = "=7.0.1" async-recursion = "1.0.4" +async-stream = "0.3.6" async-trait = "0.1.61" atomic_float = "0.1" aws-config = "0.56" @@ -301,6 +308,7 @@ camino = "1.1.1" cfg-if = "1.0.0" chrono = { version = "0.4.26", features = ["clock", "serde"] } clap = { version = "4.4", features = ["derive", "wrap_help"] } +codespan-reporting = "0.11.1" collectable = "0.0.2" colored = "2.0.0" color-eyre = "0.6.2" @@ -344,6 +352,7 @@ futures-core = "0.3.21" git-version = "0.3.5" glob = "0.3.1" governor = "0.6.0" +gcp_auth = "0.12.3" hashbrown = "0.12" hdrhistogram = "7.5.1" hex = "0.4.3" @@ -418,6 +427,7 @@ proptest = "1.1.0" proptest-derive = "0.3.0" prost = "0.13" prost-build = "0.13" +prost-types = "0.13.1" protobuf = { version = "2.28", features = ["with-bytes"] } quinn-proto = "0.11.7" quote = "1.0.23" @@ -496,12 +506,10 @@ tokio-stream = { version = "0.1.14", features = ["sync", "net"] } tokio-util = "0.7.10" toml = { version = "0.7.4", features = ["preserve_order"] } toml_edit = { version = "0.19.10" } -# NOTE: do not enable the `tls` feature on tonic. It will break custom TLS handling -# for self signed certificates. Unit tests under consensus/core and other integration -# tests will fail. 
tonic = { version = "0.12", features = ["transport"] } tonic-build = { version = "0.12", features = ["prost", "transport"] } tonic-health = "0.12" +tonic-rustls = "0.1.0" tower = { version = "0.4.12", features = [ "full", "util", @@ -540,6 +548,7 @@ webpki = { version = "0.102", package = "rustls-webpki", features = [ "alloc", "std", ] } +wiremock = "0.5" x509-parser = "0.14.0" zstd = "0.12.3" zeroize = "1.6.0" @@ -579,10 +588,10 @@ mamoru-sui-types = { git = "https://github.com/Mamoru-Foundation/mamoru-core", r #mamoru-sniffer = { path = "../mamoru-core/mamoru-sniffer" } #mamoru-sui-types = { path = "../mamoru-core/blockchain-types/mamoru-sui-types" } -fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e" } -fastcrypto-tbls = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e" } -fastcrypto-zkp = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e", package = "fastcrypto-zkp" } -fastcrypto-vdf = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e", features = [ +fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898" } +fastcrypto-tbls = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898" } +fastcrypto-zkp = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898", package = "fastcrypto-zkp" } +fastcrypto-vdf = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898", features = [ "experimental", ] } passkey-types = { version = "0.2.0" } @@ -621,7 +630,6 @@ sui-archival = { path = "crates/sui-archival" } sui-authority-aggregation = { path = "crates/sui-authority-aggregation" } sui-benchmark = { path = "crates/sui-benchmark" } sui-bridge = { path = "crates/sui-bridge" } 
-sui-bridge-watchdog = { path = "crates/sui-bridge-watchdog" } sui-cluster-test = { path = "crates/sui-cluster-test" } sui-config = { path = "crates/sui-config" } sui-core = { path = "crates/sui-core" } @@ -631,6 +639,9 @@ sui-data-ingestion-core = { path = "crates/sui-data-ingestion-core" } sui-e2e-tests = { path = "crates/sui-e2e-tests" } sui-enum-compat-util = { path = "crates/sui-enum-compat-util" } sui-faucet = { path = "crates/sui-faucet" } +sui-field-count = { path = "crates/sui-field-count" } +sui-field-count-main = { path = "crates/sui-field-count-main" } +sui-field-count-derive = { path = "crates/sui-field-count-derive" } sui-framework = { path = "crates/sui-framework" } sui-framework-snapshot = { path = "crates/sui-framework-snapshot" } sui-framework-tests = { path = "crates/sui-framework-tests" } @@ -646,11 +657,13 @@ sui-json-rpc = { path = "crates/sui-json-rpc" } sui-json-rpc-api = { path = "crates/sui-json-rpc-api" } sui-json-rpc-types = { path = "crates/sui-json-rpc-types" } sui-keys = { path = "crates/sui-keys" } +sui-kvstore = {path = "crates/sui-kvstore"} sui-macros = { path = "crates/sui-macros" } sui-metric-checker = { path = "crates/sui-metric-checker" } sui-move = { path = "crates/sui-move" } sui-move-build = { path = "crates/sui-move-build" } sui-move-lsp = { path = "crates/sui-move-lsp" } +sui-mvr-indexer = { path = "crates/sui-mvr-indexer" } sui-network = { path = "crates/sui-network" } sui-node = { path = "crates/sui-node" } sui-open-rpc = { path = "crates/sui-open-rpc" } @@ -674,6 +687,7 @@ sui-storage = { path = "crates/sui-storage" } sui-surfer = { path = "crates/sui-surfer" } sui-swarm = { path = "crates/sui-swarm" } sui-swarm-config = { path = "crates/sui-swarm-config" } +sui-synthetic-ingestion = { path = "crates/sui-synthetic-ingestion" } sui-telemetry = { path = "crates/sui-telemetry" } sui-test-transaction-builder = { path = "crates/sui-test-transaction-builder" } sui-test-validator = { path = "crates/sui-test-validator" } diff 
--git a/apps/wallet/src/background/connections/ContentScriptConnection.ts b/apps/wallet/src/background/connections/ContentScriptConnection.ts index 4eb296c0a428d..1b72a231cb2d1 100644 --- a/apps/wallet/src/background/connections/ContentScriptConnection.ts +++ b/apps/wallet/src/background/connections/ContentScriptConnection.ts @@ -25,6 +25,7 @@ import { import Permissions from '_src/background/Permissions'; import Transactions from '_src/background/Transactions'; import { FEATURES, growthbook } from '_src/shared/experimentation/features'; +import { isDisconnectApp } from '_src/shared/messaging/messages/payloads/permissions/DisconnectApp'; import { isQredoConnectPayload } from '_src/shared/messaging/messages/payloads/QredoConnect'; import { isSignMessageRequest, @@ -151,6 +152,8 @@ export class ContentScriptConnection extends Connection { throw new Error('This feature is not implemented yet.'); } await requestUserApproval(payload.args, this, msg); + } else if (isDisconnectApp(payload)) { + await Permissions.delete(this.origin); } else { throw new Error(`Unknown message, ${JSON.stringify(msg.payload)}`); } diff --git a/apps/wallet/src/dapp-interface/WalletStandardInterface.ts b/apps/wallet/src/dapp-interface/WalletStandardInterface.ts index 389dcc6dccb75..7350a5b88f151 100644 --- a/apps/wallet/src/dapp-interface/WalletStandardInterface.ts +++ b/apps/wallet/src/dapp-interface/WalletStandardInterface.ts @@ -22,6 +22,7 @@ import type { } from '_payloads/transactions'; import { API_ENV } from '_src/shared/api-env'; import type { NetworkEnvType } from '_src/shared/api-env'; +import { type DisconnectApp } from '_src/shared/messaging/messages/payloads/permissions/DisconnectApp'; import { isQredoConnectPayload, type QredoConnectPayload, @@ -40,6 +41,8 @@ import { SUI_TESTNET_CHAIN, type StandardConnectFeature, type StandardConnectMethod, + type StandardDisconnectFeature, + type StandardDisconnectMethod, type StandardEventsFeature, type StandardEventsListeners, type 
StandardEventsOnMethod, @@ -119,6 +122,7 @@ export class SuiWallet implements Wallet { get features(): StandardConnectFeature & StandardEventsFeature & + StandardDisconnectFeature & SuiFeatures & QredoConnectFeature { return { @@ -130,6 +134,10 @@ export class SuiWallet implements Wallet { version: '1.0.0', on: this.#on, }, + 'standard:disconnect': { + version: '1.0.0', + disconnect: this.#disconnect, + }, 'sui:signTransactionBlock': { version: '1.0.0', signTransactionBlock: this.#signTransactionBlock, @@ -244,6 +252,13 @@ export class SuiWallet implements Wallet { return { accounts: this.accounts }; }; + #disconnect: StandardDisconnectMethod = async () => { + this.#send({ + type: 'disconnect-app', + origin: '', // origin is auto-discovered for wallet's disconnect. + }); + }; + #signTransactionBlock: SuiSignTransactionBlockMethod = async ({ transactionBlock, account, diff --git a/apps/wallet/src/shared/analytics/ampli/index.ts b/apps/wallet/src/shared/analytics/ampli/index.ts index 2c52d691daa87..03db6920b3800 100644 --- a/apps/wallet/src/shared/analytics/ampli/index.ts +++ b/apps/wallet/src/shared/analytics/ampli/index.ts @@ -231,6 +231,18 @@ export interface ClickedSwapCoinProperties { totalBalance?: number; } +export interface ClickedTokenClaimsBannerProperties { + /** + * A generic name property that can be used across events. + */ + name: string; + /** + * The ID of an object on Sui. + */ + objectId: string; + objectType: string; +} + export interface ClickedUnstakeSuiProperties { /** * The amount of SUI staked. @@ -514,6 +526,10 @@ export interface SwappedCoinProperties { */ estimatedReturnBalance: number; fromCoinType: string; + /** + * swap provider name + */ + provider?: string; toCoinType: string; /** * The total balance of the selected coin that the user has. 
@@ -537,6 +553,10 @@ export interface SwappedCoinFailedProperties { */ estimatedReturnBalance: number; fromCoinType: string; + /** + * swap provider name + */ + provider?: string; toCoinType: string; /** * The total balance of the selected coin that the user has. @@ -691,6 +711,14 @@ export class ClickedSwapCoin implements BaseEvent { } } +export class ClickedTokenClaimsBanner implements BaseEvent { + event_type = 'clicked token claims banner'; + + constructor(public event_properties: ClickedTokenClaimsBannerProperties) { + this.event_properties = event_properties; + } +} + export class ClickedUnstakeSui implements BaseEvent { event_type = 'clicked unstake SUI'; @@ -1300,6 +1328,23 @@ export class Ampli { return this.track(new ClickedSwapCoin(properties), options); } + /** + * clicked token claims banner + * + * [View in Tracking Plan](https://data.amplitude.com/mystenlabs/Sui%20Wallet/events/main/latest/clicked%20token%20claims%20banner) + * + * Event has no description in tracking plan. + * + * @param properties The event's properties (e.g. name) + * @param options Amplitude event options. 
+ */ + clickedTokenClaimsBanner( + properties: ClickedTokenClaimsBannerProperties, + options?: EventOptions, + ) { + return this.track(new ClickedTokenClaimsBanner(properties), options); + } + /** * clicked unstake SUI * diff --git a/apps/wallet/src/ui/app/pages/swap/index.tsx b/apps/wallet/src/ui/app/pages/swap/index.tsx index 93396613d06ab..a6b040f4d96f9 100644 --- a/apps/wallet/src/ui/app/pages/swap/index.tsx +++ b/apps/wallet/src/ui/app/pages/swap/index.tsx @@ -222,6 +222,7 @@ export function SwapPage() { toCoinType: toCoinType || '', totalBalance: Number(amount), estimatedReturnBalance: inputAmountInUSD || 0, + provider: swapData?.provider, }); const receiptUrl = `/receipt?txdigest=${encodeURIComponent( @@ -229,6 +230,16 @@ export function SwapPage() { )}&from=transactions`; return navigate(receiptUrl); }, + onError: (error) => { + ampli.swappedCoinFailed({ + estimatedReturnBalance: Number(swapData?.formattedToAmount || 0), + fromCoinType: fromCoinType!, + toCoinType: toCoinType!, + totalBalance: Number(amount || 0), + errorMessage: error.message, + provider: swapData?.provider, + }); + }, }); const handleOnsubmit: SubmitHandler = (formData) => { diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 2cb277834c9b2..6d4756ac4075f 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -57,6 +57,7 @@ tower.workspace = true tower-http.workspace = true tracing.workspace = true typed-store.workspace = true +tonic-rustls.workspace = true [dev-dependencies] rstest.workspace = true diff --git a/consensus/core/src/core.rs b/consensus/core/src/core.rs index ab9569243cef8..a1ff274e6722c 100644 --- a/consensus/core/src/core.rs +++ b/consensus/core/src/core.rs @@ -478,6 +478,18 @@ impl Core { // Ensure the new block and its ancestors are persisted, before broadcasting it. 
self.dag_state.write().flush(); + let current_proposal_duration = Duration::from_millis(verified_block.timestamp_ms()); + let previous_proposal_duration = Duration::from_millis(self.last_proposed_timestamp_ms()); + self.context + .metrics + .node_metrics + .block_proposal_interval + .observe( + current_proposal_duration + .saturating_sub(previous_proposal_duration) + .as_secs_f64(), + ); + // Update internal state. self.last_proposed_block = verified_block.clone(); diff --git a/consensus/core/src/metrics.rs b/consensus/core/src/metrics.rs index a9f0ad175e848..11d303f3cf65a 100644 --- a/consensus/core/src/metrics.rs +++ b/consensus/core/src/metrics.rs @@ -105,6 +105,7 @@ pub(crate) struct NodeMetrics { pub(crate) proposed_block_ancestors_depth: HistogramVec, pub(crate) highest_verified_authority_round: IntGaugeVec, pub(crate) lowest_verified_authority_round: IntGaugeVec, + pub(crate) block_proposal_interval: Histogram, pub(crate) block_proposal_leader_wait_ms: IntCounterVec, pub(crate) block_proposal_leader_wait_count: IntCounterVec, pub(crate) block_timestamp_drift_wait_ms: IntCounterVec, @@ -235,6 +236,12 @@ impl NodeMetrics { &["authority"], registry, ).unwrap(), + block_proposal_interval: register_histogram_with_registry!( + "block_proposal_interval", + "Intervals (in secs) between block proposals.", + FINE_GRAINED_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), block_proposal_leader_wait_ms: register_int_counter_vec_with_registry!( "block_proposal_leader_wait_ms", "Total time in ms spent waiting for a leader when proposing blocks.", diff --git a/consensus/core/src/network/tonic_network.rs b/consensus/core/src/network/tonic_network.rs index 185f59786b163..59a29a102ff90 100644 --- a/consensus/core/src/network/tonic_network.rs +++ b/consensus/core/src/network/tonic_network.rs @@ -24,6 +24,7 @@ use mysten_network::{ Multiaddr, }; use parking_lot::RwLock; +use sui_tls::AllowPublicKeys; use tokio::{ pin, task::JoinSet, @@ -44,7 +45,6 @@ use super::{ 
consensus_service_client::ConsensusServiceClient, consensus_service_server::ConsensusService, }, - tonic_tls::create_rustls_client_config, BlockStream, NetworkClient, NetworkManager, NetworkService, }; use crate::{ @@ -54,7 +54,7 @@ use crate::{ error::{ConsensusError, ConsensusResult}, network::{ tonic_gen::consensus_service_server::ConsensusServiceServer, - tonic_tls::create_rustls_server_config, + tonic_tls::certificate_server_name, }, CommitIndex, Round, }; @@ -339,7 +339,7 @@ impl NetworkClient for TonicClient { // Tonic channel wrapped with layers. type Channel = mysten_network::callback::Callback< tower_http::trace::Trace< - tonic::transport::Channel, + tonic_rustls::Channel, tower_http::classify::SharedClassifier, >, MetricsCallbackMaker, @@ -381,7 +381,17 @@ impl ChannelPool { let address = format!("https://{address}"); let config = &self.context.parameters.tonic; let buffer_size = config.connection_buffer_size; - let endpoint = tonic::transport::Channel::from_shared(address.clone()) + let client_tls_config = sui_tls::create_rustls_client_config( + self.context + .committee + .authority(peer) + .network_key + .clone() + .into_inner(), + certificate_server_name(&self.context), + Some(network_keypair.private_key().into_inner()), + ); + let endpoint = tonic_rustls::Channel::from_shared(address.clone()) .unwrap() .connect_timeout(timeout) .initial_connection_window_size(Some(buffer_size as u32)) @@ -391,22 +401,14 @@ impl ChannelPool { .http2_keep_alive_interval(config.keepalive_interval) // tcp keepalive is probably unnecessary and is unsupported by msim. 
.user_agent("mysticeti") + .unwrap() + .tls_config(client_tls_config) .unwrap(); - let client_tls_config = create_rustls_client_config(&self.context, network_keypair, peer); - let https_connector = hyper_rustls::HttpsConnectorBuilder::new() - .with_tls_config(client_tls_config) - .https_only() - .enable_http2() - .build(); - let deadline = tokio::time::Instant::now() + timeout; let channel = loop { trace!("Connecting to endpoint at {address}"); - match endpoint - .connect_with_connector(https_connector.clone()) - .await - { + match endpoint.connect().await { Ok(channel) => break channel, Err(e) => { warn!("Failed to connect to endpoint at {address}: {e:?}"); @@ -735,8 +737,17 @@ impl NetworkManager for TonicManager { Arc::new(builder) }; - let tls_server_config = - create_rustls_server_config(&self.context, self.network_keypair.clone()); + let tls_server_config = sui_tls::create_rustls_server_config( + self.network_keypair.clone().private_key().into_inner(), + certificate_server_name(&self.context), + AllowPublicKeys::new( + self.context + .committee + .authorities() + .map(|(_i, a)| a.network_key.clone().into_inner()) + .collect(), + ), + ); let tls_acceptor = TlsAcceptor::from(Arc::new(tls_server_config)); // Create listener to incoming connections. 
diff --git a/consensus/core/src/network/tonic_tls.rs b/consensus/core/src/network/tonic_tls.rs index 13377934e3b18..6e7ff630115ec 100644 --- a/consensus/core/src/network/tonic_tls.rs +++ b/consensus/core/src/network/tonic_tls.rs @@ -2,63 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::context::Context; -use consensus_config::{AuthorityIndex, NetworkKeyPair}; -use sui_tls::AllowPublicKeys; -use tokio_rustls::rustls::{ClientConfig, ServerConfig}; -pub(crate) fn create_rustls_server_config( - context: &Context, - network_keypair: NetworkKeyPair, -) -> ServerConfig { - let allower = AllowPublicKeys::new( - context - .committee - .authorities() - .map(|(_i, a)| a.network_key.clone().into_inner()) - .collect(), - ); - let verifier = sui_tls::ClientCertVerifier::new(allower, certificate_server_name(context)); - // TODO: refactor to use key bytes - let self_signed_cert = sui_tls::SelfSignedCertificate::new( - network_keypair.private_key().into_inner(), - &certificate_server_name(context), - ); - let tls_cert = self_signed_cert.rustls_certificate(); - let tls_private_key = self_signed_cert.rustls_private_key(); - let mut tls_config = verifier - .rustls_server_config(vec![tls_cert], tls_private_key) - .unwrap_or_else(|e| panic!("Failed to create TLS server config: {:?}", e)); - tls_config.alpn_protocols = vec![b"h2".to_vec()]; - tls_config -} - -pub(crate) fn create_rustls_client_config( - context: &Context, - network_keypair: NetworkKeyPair, - target: AuthorityIndex, -) -> ClientConfig { - let target_public_key = context - .committee - .authority(target) - .network_key - .clone() - .into_inner(); - let self_signed_cert = sui_tls::SelfSignedCertificate::new( - network_keypair.private_key().into_inner(), - &certificate_server_name(context), - ); - let tls_cert = self_signed_cert.rustls_certificate(); - let tls_private_key = self_signed_cert.rustls_private_key(); - let mut tls_config = - sui_tls::ServerCertVerifier::new(target_public_key, 
certificate_server_name(context)) - .rustls_client_config(vec![tls_cert], tls_private_key) - .unwrap_or_else(|e| panic!("Failed to create TLS client config: {:?}", e)); - // ServerCertVerifier sets alpn for completeness, but alpn cannot be predefined when - // using HttpsConnector from hyper-rustls, as in TonicManager. - tls_config.alpn_protocols = vec![]; - tls_config -} - -fn certificate_server_name(context: &Context) -> String { +pub(crate) fn certificate_server_name(context: &Context) -> String { format!("consensus_epoch_{}", context.committee.epoch()) } diff --git a/consensus/core/src/threshold_clock.rs b/consensus/core/src/threshold_clock.rs index 8786383acbb92..ef8ca8b752973 100644 --- a/consensus/core/src/threshold_clock.rs +++ b/consensus/core/src/threshold_clock.rs @@ -38,7 +38,7 @@ impl ThresholdClock { (self.round > previous_round).then_some(self.round) } - pub(crate) fn add_block(&mut self, block: BlockRef) { + fn add_block(&mut self, block: BlockRef) { match block.round.cmp(&self.round) { // Blocks with round less then what we currently build are irrelevant here Ordering::Less => {} diff --git a/crates/mysten-common/src/logging.rs b/crates/mysten-common/src/logging.rs index 8ba327026953b..3cd5c9ad0a371 100644 --- a/crates/mysten-common/src/logging.rs +++ b/crates/mysten-common/src/logging.rs @@ -15,8 +15,11 @@ macro_rules! 
debug_fatal { if cfg!(debug_assertions) { $crate::fatal!($($arg)*); } else { - // TODO: Export invariant metric for alerting tracing::error!(debug_fatal = true, $($arg)*); + let location = concat!(file!(), ':', line!()); + if let Some(metrics) = mysten_metrics::get_metrics() { + metrics.system_invariant_violations.with_label_values(&[location]).inc(); + } } }}; } diff --git a/crates/mysten-metrics/src/lib.rs b/crates/mysten-metrics/src/lib.rs index 3fb40de20573a..8cf2310fce3e0 100644 --- a/crates/mysten-metrics/src/lib.rs +++ b/crates/mysten-metrics/src/lib.rs @@ -15,8 +15,9 @@ use std::time::Instant; use once_cell::sync::OnceCell; use prometheus::{ - register_histogram_with_registry, register_int_gauge_vec_with_registry, Histogram, IntGaugeVec, - Registry, TextEncoder, + register_histogram_with_registry, register_int_counter_vec_with_registry, + register_int_gauge_vec_with_registry, Histogram, IntCounterVec, IntGaugeVec, Registry, + TextEncoder, }; use tap::TapFallible; use tracing::{warn, Span}; @@ -69,6 +70,7 @@ pub struct Metrics { pub scope_duration_ns: IntGaugeVec, pub scope_entrance: IntGaugeVec, pub thread_stall_duration_sec: Histogram, + pub system_invariant_violations: IntCounterVec, } impl Metrics { @@ -143,6 +145,12 @@ impl Metrics { registry, ) .unwrap(), + system_invariant_violations: register_int_counter_vec_with_registry!( + "system_invariant_violations", + "Number of system invariant violations", + &["name"], + registry, + ).unwrap(), } } } diff --git a/crates/mysten-network/Cargo.toml b/crates/mysten-network/Cargo.toml index 3fb61694e170f..18426cb914806 100644 --- a/crates/mysten-network/Cargo.toml +++ b/crates/mysten-network/Cargo.toml @@ -9,6 +9,7 @@ publish = false [dependencies] anemo.workspace = true +async-stream.workspace = true bcs.workspace = true bytes.workspace = true eyre.workspace = true @@ -18,8 +19,10 @@ multiaddr.workspace = true serde.workspace = true once_cell.workspace = true snap.workspace = true +hyper-rustls.workspace = true 
hyper-util.workspace = true tokio = { workspace = true, features = ["sync", "rt", "macros"] } +tokio-rustls.workspace = true tokio-stream.workspace = true tonic.workspace = true tonic-health.workspace = true diff --git a/crates/mysten-network/src/client.rs b/crates/mysten-network/src/client.rs index f0c188f54f21c..8cb508c798431 100644 --- a/crates/mysten-network/src/client.rs +++ b/crates/mysten-network/src/client.rs @@ -21,53 +21,67 @@ use std::{ vec, }; use tokio::task::JoinHandle; +use tokio_rustls::rustls::ClientConfig; use tonic::transport::{Channel, Endpoint, Uri}; use tower::Service; use tracing::{info, trace}; -pub async fn connect(address: &Multiaddr) -> Result { - let channel = endpoint_from_multiaddr(address)?.connect().await?; +pub async fn connect(address: &Multiaddr, tls_config: Option) -> Result { + let channel = endpoint_from_multiaddr(address, tls_config)? + .connect() + .await?; Ok(channel) } -pub fn connect_lazy(address: &Multiaddr) -> Result { - let channel = endpoint_from_multiaddr(address)?.connect_lazy(); +pub fn connect_lazy(address: &Multiaddr, tls_config: Option) -> Result { + let channel = endpoint_from_multiaddr(address, tls_config)?.connect_lazy(); Ok(channel) } -pub(crate) async fn connect_with_config(address: &Multiaddr, config: &Config) -> Result { - let channel = endpoint_from_multiaddr(address)? +pub(crate) async fn connect_with_config( + address: &Multiaddr, + tls_config: Option, + config: &Config, +) -> Result { + let channel = endpoint_from_multiaddr(address, tls_config)? .apply_config(config) .connect() .await?; Ok(channel) } -pub(crate) fn connect_lazy_with_config(address: &Multiaddr, config: &Config) -> Result { - let channel = endpoint_from_multiaddr(address)? +pub(crate) fn connect_lazy_with_config( + address: &Multiaddr, + tls_config: Option, + config: &Config, +) -> Result { + let channel = endpoint_from_multiaddr(address, tls_config)? 
.apply_config(config) .connect_lazy(); Ok(channel) } -fn endpoint_from_multiaddr(addr: &Multiaddr) -> Result { +fn endpoint_from_multiaddr( + addr: &Multiaddr, + tls_config: Option, +) -> Result { let mut iter = addr.iter(); let channel = match iter.next().ok_or_else(|| eyre!("address is empty"))? { Protocol::Dns(_) => { let (dns_name, tcp_port, http_or_https) = parse_dns(addr)?; let uri = format!("{http_or_https}://{dns_name}:{tcp_port}"); - MyEndpoint::try_from_uri(uri)? + MyEndpoint::try_from_uri(uri, tls_config)? } Protocol::Ip4(_) => { let (socket_addr, http_or_https) = parse_ip4(addr)?; let uri = format!("{http_or_https}://{socket_addr}"); - MyEndpoint::try_from_uri(uri)? + MyEndpoint::try_from_uri(uri, tls_config)? } Protocol::Ip6(_) => { let (socket_addr, http_or_https) = parse_ip6(addr)?; let uri = format!("{http_or_https}://{socket_addr}"); - MyEndpoint::try_from_uri(uri)? + MyEndpoint::try_from_uri(uri, tls_config)? } unsupported => return Err(eyre!("unsupported protocol {unsupported}")), }; @@ -77,21 +91,25 @@ fn endpoint_from_multiaddr(addr: &Multiaddr) -> Result { struct MyEndpoint { endpoint: Endpoint, + tls_config: Option, } static DISABLE_CACHING_RESOLVER: OnceCell = OnceCell::new(); impl MyEndpoint { - fn new(endpoint: Endpoint) -> Self { - Self { endpoint } + fn new(endpoint: Endpoint, tls_config: Option) -> Self { + Self { + endpoint, + tls_config, + } } - fn try_from_uri(uri: String) -> Result { + fn try_from_uri(uri: String, tls_config: Option) -> Result { let uri: Uri = uri .parse() .with_context(|| format!("unable to create Uri from '{uri}'"))?; let endpoint = Endpoint::from(uri); - Ok(Self::new(endpoint)) + Ok(Self::new(endpoint, tls_config)) } fn apply_config(mut self, config: &Config) -> Self { @@ -107,7 +125,17 @@ impl MyEndpoint { }); if disable_caching_resolver { - self.endpoint.connect_lazy() + if let Some(tls_config) = self.tls_config { + self.endpoint.connect_with_connector_lazy( + hyper_rustls::HttpsConnectorBuilder::new() + 
.with_tls_config(tls_config) + .https_only() + .enable_http2() + .build(), + ) + } else { + self.endpoint.connect_lazy() + } } else { let mut http = HttpConnector::new_with_resolver(CachingResolver::new()); http.enforce_http(false); @@ -115,12 +143,33 @@ impl MyEndpoint { http.set_keepalive(None); http.set_connect_timeout(None); - self.endpoint.connect_with_connector_lazy(http) + if let Some(tls_config) = self.tls_config { + let https = hyper_rustls::HttpsConnectorBuilder::new() + .with_tls_config(tls_config) + .https_only() + .enable_http1() + .wrap_connector(http); + self.endpoint.connect_with_connector_lazy(https) + } else { + self.endpoint.connect_with_connector_lazy(http) + } } } async fn connect(self) -> Result { - self.endpoint.connect().await.map_err(Into::into) + if let Some(tls_config) = self.tls_config { + let https_connector = hyper_rustls::HttpsConnectorBuilder::new() + .with_tls_config(tls_config) + .https_only() + .enable_http2() + .build(); + self.endpoint + .connect_with_connector(https_connector) + .await + .map_err(Into::into) + } else { + self.endpoint.connect().await.map_err(Into::into) + } } } diff --git a/crates/mysten-network/src/config.rs b/crates/mysten-network/src/config.rs index 1e59dbe75bcf8..eab88a024ec41 100644 --- a/crates/mysten-network/src/config.rs +++ b/crates/mysten-network/src/config.rs @@ -9,6 +9,7 @@ use crate::{ use eyre::Result; use serde::{Deserialize, Serialize}; use std::time::Duration; +use tokio_rustls::rustls::ClientConfig; use tonic::transport::Channel; #[derive(Debug, Default, Deserialize, Serialize)] @@ -90,11 +91,19 @@ impl Config { ServerBuilder::from_config(self, metrics_provider) } - pub async fn connect(&self, addr: &Multiaddr) -> Result { - connect_with_config(addr, self).await + pub async fn connect( + &self, + addr: &Multiaddr, + tls_config: Option, + ) -> Result { + connect_with_config(addr, tls_config, self).await } - pub fn connect_lazy(&self, addr: &Multiaddr) -> Result { - 
connect_lazy_with_config(addr, self) + pub fn connect_lazy( + &self, + addr: &Multiaddr, + tls_config: Option, + ) -> Result { + connect_lazy_with_config(addr, tls_config, self) } } diff --git a/crates/mysten-network/src/server.rs b/crates/mysten-network/src/server.rs index 4bac6fe61ae52..8d3986c6fd205 100644 --- a/crates/mysten-network/src/server.rs +++ b/crates/mysten-network/src/server.rs @@ -9,11 +9,15 @@ use crate::{ multiaddr::{parse_dns, parse_ip4, parse_ip6, Multiaddr, Protocol}, }; use eyre::{eyre, Result}; -use futures::FutureExt; +use futures::{FutureExt, Stream}; +use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use std::{convert::Infallible, net::SocketAddr}; -use tokio::net::{TcpListener, ToSocketAddrs}; -use tokio_stream::wrappers::TcpListenerStream; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::{TcpListener, TcpStream, ToSocketAddrs}; +use tokio_rustls::rustls::ServerConfig; +use tokio_rustls::{server::TlsStream, TlsAcceptor}; use tonic::codegen::http::HeaderValue; use tonic::{ body::BoxBody, @@ -35,6 +39,7 @@ use tower_http::classify::{GrpcErrorsAsFailures, SharedClassifier}; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::set_header::SetRequestHeaderLayer; use tower_http::trace::{DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, TraceLayer}; +use tracing::debug; pub struct ServerBuilder { router: Router>, @@ -155,46 +160,48 @@ impl ServerBuilder { self } - pub async fn bind(self, addr: &Multiaddr) -> Result { + pub async fn bind(self, addr: &Multiaddr, tls_config: Option) -> Result { let mut iter = addr.iter(); let (tx_cancellation, rx_cancellation) = tokio::sync::oneshot::channel(); let rx_cancellation = rx_cancellation.map(|_| ()); - let (local_addr, server): (Multiaddr, BoxFuture<(), tonic::transport::Error>) = - match iter.next().ok_or_else(|| eyre!("malformed addr"))? 
{ - Protocol::Dns(_) => { - let (dns_name, tcp_port, _http_or_https) = parse_dns(addr)?; - let (local_addr, incoming) = - tcp_listener_and_update_multiaddr(addr, (dns_name.as_ref(), tcp_port)) - .await?; - let server = Box::pin( - self.router - .serve_with_incoming_shutdown(incoming, rx_cancellation), - ); - (local_addr, server) - } - Protocol::Ip4(_) => { - let (socket_addr, _http_or_https) = parse_ip4(addr)?; - let (local_addr, incoming) = - tcp_listener_and_update_multiaddr(addr, socket_addr).await?; - let server = Box::pin( - self.router - .serve_with_incoming_shutdown(incoming, rx_cancellation), - ); - (local_addr, server) - } - Protocol::Ip6(_) => { - let (socket_addr, _http_or_https) = parse_ip6(addr)?; - let (local_addr, incoming) = - tcp_listener_and_update_multiaddr(addr, socket_addr).await?; - let server = Box::pin( - self.router - .serve_with_incoming_shutdown(incoming, rx_cancellation), - ); - (local_addr, server) - } - unsupported => return Err(eyre!("unsupported protocol {unsupported}")), - }; + let (local_addr, server): (Multiaddr, BoxFuture<(), tonic::transport::Error>) = match iter + .next() + .ok_or_else(|| eyre!("malformed addr"))? 
+ { + Protocol::Dns(_) => { + let (dns_name, tcp_port, _http_or_https) = parse_dns(addr)?; + let (local_addr, incoming) = + listen_and_update_multiaddr(addr, (dns_name.to_string(), tcp_port), tls_config) + .await?; + let server = Box::pin( + self.router + .serve_with_incoming_shutdown(incoming, rx_cancellation), + ); + (local_addr, server) + } + Protocol::Ip4(_) => { + let (socket_addr, _http_or_https) = parse_ip4(addr)?; + let (local_addr, incoming) = + listen_and_update_multiaddr(addr, socket_addr, tls_config).await?; + let server = Box::pin( + self.router + .serve_with_incoming_shutdown(incoming, rx_cancellation), + ); + (local_addr, server) + } + Protocol::Ip6(_) => { + let (socket_addr, _http_or_https) = parse_ip6(addr)?; + let (local_addr, incoming) = + listen_and_update_multiaddr(addr, socket_addr, tls_config).await?; + let server = Box::pin( + self.router + .serve_with_incoming_shutdown(incoming, rx_cancellation), + ); + (local_addr, server) + } + unsupported => return Err(eyre!("unsupported protocol {unsupported}")), + }; Ok(Server { server, @@ -205,22 +212,134 @@ impl ServerBuilder { } } -async fn tcp_listener_and_update_multiaddr( +async fn listen_and_update_multiaddr( address: &Multiaddr, socket_addr: T, -) -> Result<(Multiaddr, TcpListenerStream)> { - let (local_addr, incoming) = tcp_listener(socket_addr).await?; + tls_config: Option, +) -> Result<( + Multiaddr, + impl Stream>, +)> { + let listener = TcpListener::bind(socket_addr).await?; + let local_addr = listener.local_addr()?; let local_addr = update_tcp_port_in_multiaddr(address, local_addr.port()); - Ok((local_addr, incoming)) + + let tls_acceptor = tls_config.map(|tls_config| TlsAcceptor::from(Arc::new(tls_config))); + let incoming = TcpOrTlsListener::new(listener, tls_acceptor); + let stream = async_stream::stream! 
{ + loop { + yield incoming.accept().await; + } + }; + + Ok((local_addr, stream)) } -async fn tcp_listener(address: T) -> Result<(SocketAddr, TcpListenerStream)> { - let listener = TcpListener::bind(address).await?; - let local_addr = listener.local_addr()?; - let incoming = TcpListenerStream::new(listener); - Ok((local_addr, incoming)) +pub struct TcpOrTlsListener { + listener: TcpListener, + tls_acceptor: Option, } +impl TcpOrTlsListener { + fn new(listener: TcpListener, tls_acceptor: Option) -> Self { + Self { + listener, + tls_acceptor, + } + } + + async fn accept(&self) -> std::io::Result { + let (stream, addr) = self.listener.accept().await?; + if self.tls_acceptor.is_none() { + return Ok(TcpOrTlsStream::Tcp(stream, addr)); + } + + // Determine whether new connection is TLS. + let mut buf = [0; 1]; + stream.peek(&mut buf).await?; + if buf[0] == 0x16 { + // First byte of a TLS handshake is 0x16. + debug!("accepting TLS connection from {addr:?}"); + let stream = self.tls_acceptor.as_ref().unwrap().accept(stream).await?; + Ok(TcpOrTlsStream::Tls(stream, addr)) + } else { + debug!("accepting TCP connection from {addr:?}"); + Ok(TcpOrTlsStream::Tcp(stream, addr)) + } + } +} + +pub enum TcpOrTlsStream { + Tcp(TcpStream, SocketAddr), + Tls(TlsStream, SocketAddr), +} + +impl AsyncRead for TcpOrTlsStream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut tokio::io::ReadBuf, + ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_read(cx, buf), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_read(cx, buf), + } + } +} + +impl AsyncWrite for TcpOrTlsStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_write(cx, buf), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_write(cx, buf), + } + } + + fn poll_flush( + self: Pin<&mut Self>, + cx: &mut Context<'_>, 
+ ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_flush(cx), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_flush(cx), + } + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_shutdown(cx), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_shutdown(cx), + } + } +} + +impl tonic::transport::server::Connected for TcpOrTlsStream { + type ConnectInfo = tonic::transport::server::TcpConnectInfo; + + fn connect_info(&self) -> Self::ConnectInfo { + match self { + TcpOrTlsStream::Tcp(stream, addr) => Self::ConnectInfo { + local_addr: stream.local_addr().ok(), + remote_addr: Some(*addr), + }, + TcpOrTlsStream::Tls(stream, addr) => Self::ConnectInfo { + local_addr: stream.get_ref().0.local_addr().ok(), + remote_addr: Some(*addr), + }, + } + } +} + +/// TLS server name to use for the public Sui validator interface. +pub const SUI_TLS_SERVER_NAME: &str = "sui"; + pub struct Server { server: BoxFuture<(), tonic::transport::Error>, cancel_handle: Option>, @@ -318,14 +437,14 @@ mod test { let mut server = config .server_builder_with_metrics(metrics.clone()) - .bind(&address) + .bind(&address, None) .await .unwrap(); let address = server.local_addr().to_owned(); let cancel_handle = server.take_cancel_handle().unwrap(); let server_handle = tokio::spawn(server.serve()); - let channel = config.connect(&address).await.unwrap(); + let channel = config.connect(&address, None).await.unwrap(); let mut client = HealthClient::new(channel); client @@ -381,14 +500,14 @@ mod test { let mut server = config .server_builder_with_metrics(metrics.clone()) - .bind(&address) + .bind(&address, None) .await .unwrap(); let address = server.local_addr().to_owned(); let cancel_handle = server.take_cancel_handle().unwrap(); let server_handle = tokio::spawn(server.serve()); - let channel = 
config.connect(&address).await.unwrap(); + let channel = config.connect(&address, None).await.unwrap(); let mut client = HealthClient::new(channel); // Call the healthcheck for a service that doesn't exist @@ -408,11 +527,11 @@ mod test { async fn test_multiaddr(address: Multiaddr) { let config = Config::new(); - let mut server = config.server_builder().bind(&address).await.unwrap(); + let mut server = config.server_builder().bind(&address, None).await.unwrap(); let address = server.local_addr().to_owned(); let cancel_handle = server.take_cancel_handle().unwrap(); let server_handle = tokio::spawn(server.serve()); - let channel = config.connect(&address).await.unwrap(); + let channel = config.connect(&address, None).await.unwrap(); let mut client = HealthClient::new(channel); client diff --git a/crates/mysten-util-mem/Cargo.toml b/crates/mysten-util-mem/Cargo.toml index 0c5b923205e31..4cd5f54afe08d 100644 --- a/crates/mysten-util-mem/Cargo.toml +++ b/crates/mysten-util-mem/Cargo.toml @@ -14,7 +14,7 @@ cfg-if.workspace = true hashbrown = { workspace = true, optional = true } mysten-util-mem-derive.workspace = true impl-trait-for-tuples.workspace = true -fastcrypto = { workspace = true } +fastcrypto.workspace = true fastcrypto-tbls.workspace = true indexmap.workspace = true roaring.workspace = true diff --git a/crates/simulacrum/src/lib.rs b/crates/simulacrum/src/lib.rs index 32ffab8beaca6..9e8024ac8469c 100644 --- a/crates/simulacrum/src/lib.rs +++ b/crates/simulacrum/src/lib.rs @@ -391,6 +391,12 @@ impl Simulacrum { .unwrap(); } + pub fn override_last_checkpoint_number(&mut self, number: CheckpointSequenceNumber) { + let committee = CommitteeWithKeys::new(&self.keystore, self.epoch_state.committee()); + self.checkpoint_builder + .override_last_checkpoint_number(number, &committee); + } + fn process_data_ingestion( &self, checkpoint: VerifiedCheckpoint, diff --git a/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp 
b/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp index 220540ce069e5..a14e830ed4f04 100644 --- a/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp +++ b/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp @@ -7,7 +7,7 @@ task 1, lines 9-22: //# publish created: object(1,0) mutated: object(0,2) -gas summary: computation_cost: 1000000, storage_cost: 5677200, storage_rebate: 0, non_refundable_storage_fee: 0 +gas summary: computation_cost: 1000000, storage_cost: 5563200, storage_rebate: 0, non_refundable_storage_fee: 0 task 2, lines 24-28: //# programmable --sender A --inputs 100000 @A diff --git a/crates/sui-archival/Cargo.toml b/crates/sui-archival/Cargo.toml index d96dba9edb39d..728fc4f98d343 100644 --- a/crates/sui-archival/Cargo.toml +++ b/crates/sui-archival/Cargo.toml @@ -19,7 +19,7 @@ rand.workspace = true object_store.workspace = true prometheus.workspace = true sui-config.workspace = true -sui-types = { workspace = true, features = ["test-utils"]} +sui-types = { workspace = true, features = ["test-utils"] } sui-storage.workspace = true fastcrypto = { workspace = true, features = ["copy_key"] } tokio = { workspace = true, features = ["full"] } @@ -34,7 +34,7 @@ move-core-types.workspace = true move-package.workspace = true tokio = { workspace = true, features = ["test-util"] } ed25519-consensus.workspace = true -fastcrypto = { workspace = true } +fastcrypto.workspace = true sui-swarm-config.workspace = true sui-macros.workspace = true diff --git a/crates/sui-benchmark/src/drivers/bench_driver.rs b/crates/sui-benchmark/src/drivers/bench_driver.rs index ed11e81d42390..148d79f51ae6a 100644 --- a/crates/sui-benchmark/src/drivers/bench_driver.rs +++ b/crates/sui-benchmark/src/drivers/bench_driver.rs @@ -49,6 +49,7 @@ pub struct BenchMetrics { pub benchmark_duration: IntGauge, pub num_success: IntCounterVec, pub num_error: IntCounterVec, + pub num_expected_error: IntCounterVec, pub 
num_submitted: IntCounterVec, pub num_in_flight: GaugeVec, pub latency_s: HistogramVec, @@ -79,6 +80,13 @@ impl BenchMetrics { registry, ) .unwrap(), + num_expected_error: register_int_counter_vec_with_registry!( + "num_expected_error", + "Total number of transaction errors that were expected", + &["workload"], + registry, + ) + .unwrap(), num_success_cmds: register_int_counter_vec_with_registry!( "num_success_cmds", "Total number of commands success", @@ -373,6 +381,7 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { duration: Duration::ZERO, num_error_txes: 0, num_success_txes: 0, + num_expected_error_txes: 0, num_success_cmds: 0, total_gas_used: 0, latency_ms: HistogramWrapper { @@ -407,6 +416,7 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { let mut total_cps: f32 = 0.0; let mut num_success_txes: u64 = 0; let mut num_error_txes: u64 = 0; + let mut num_expected_error_txes: u64 = 0; let mut num_success_cmds = 0; let mut latency_histogram = hdrhistogram::Histogram::::new_with_max(120_000, 3).unwrap(); @@ -426,6 +436,7 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { total_cps += v.bench_stats.num_success_cmds as f32 / duration; num_success_txes += v.bench_stats.num_success_txes; num_error_txes += v.bench_stats.num_error_txes; + num_expected_error_txes += v.bench_stats.num_expected_error_txes; num_success_cmds += v.bench_stats.num_success_cmds; num_no_gas += v.num_no_gas; num_submitted += v.num_submitted; @@ -442,7 +453,24 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { }; counter += 1; if counter % num_workers == 0 { - stat = format!("TPS = {}, CPS = {}, latency_ms(min/p50/p99/max) = {}/{}/{}/{}, num_success_tx = {}, num_error_tx = {}, num_success_cmds = {}, no_gas = {}, submitted = {}, in_flight = {}", total_qps, total_cps, latency_histogram.min(), latency_histogram.value_at_quantile(0.5), latency_histogram.value_at_quantile(0.99), latency_histogram.max(), num_success_txes, num_error_txes, 
num_success_cmds, num_no_gas, num_submitted, num_in_flight); + stat = format!( + "TPS = {}, CPS = {}, latency_ms(min/p50/p99/max) = {}/{}/{}/{}, \ + num_success_tx = {}, num_error_tx = {}, num_expected_error_tx = {}, \ + num_success_cmds = {}, no_gas = {}, submitted = {}, in_flight = {}", + total_qps, + total_cps, + latency_histogram.min(), + latency_histogram.value_at_quantile(0.5), + latency_histogram.value_at_quantile(0.99), + latency_histogram.max(), + num_success_txes, + num_error_txes, + num_expected_error_txes, + num_success_cmds, + num_no_gas, + num_submitted, + num_in_flight + ); if show_progress { eprintln!("{}", stat); } @@ -681,6 +709,7 @@ async fn run_bench_worker( let request_delay_micros = 1_000_000 / worker.target_qps; let mut num_success_txes = 0; let mut num_error_txes = 0; + let mut num_expected_error_txes = 0; let mut num_success_cmds = 0; let mut num_no_gas = 0; let mut num_in_flight: u64 = 0; @@ -708,6 +737,7 @@ async fn run_bench_worker( -> NextOp { match result { Ok(effects) => { + assert!(payload.get_failure_type().is_none()); let latency = start.elapsed(); let time_from_start = total_benchmark_start_time.elapsed(); @@ -767,27 +797,38 @@ async fn run_bench_worker( } Err(err) => { error!("{}", err); - if err - .downcast::() - .and_then(|err| { - if matches!( - err, - QuorumDriverError::NonRecoverableTransactionError { .. } - ) { - Err(err.into()) + match payload.get_failure_type() { + Some(_) => { + metrics_cloned + .num_expected_error + .with_label_values(&[&payload.to_string()]) + .inc(); + NextOp::Retry(Box::new((transaction, payload))) + } + None => { + if err + .downcast::() + .and_then(|err| { + if matches!( + err, + QuorumDriverError::NonRecoverableTransactionError { .. 
} + ) { + Err(err.into()) + } else { + Ok(()) + } + }) + .is_err() + { + NextOp::Failure } else { - Ok(()) + metrics_cloned + .num_error + .with_label_values(&[&payload.to_string(), "rpc"]) + .inc(); + NextOp::Retry(Box::new((transaction, payload))) } - }) - .is_err() - { - NextOp::Failure - } else { - metrics_cloned - .num_error - .with_label_values(&[&payload.to_string(), "rpc"]) - .inc(); - NextOp::Retry(Box::new((transaction, payload))) + } } } } @@ -841,6 +882,7 @@ async fn run_bench_worker( bench_stats: BenchmarkStats { duration:stat_start_time.elapsed(), num_error_txes, + num_expected_error_txes, num_success_txes, num_success_cmds, latency_ms:HistogramWrapper{ @@ -855,6 +897,7 @@ async fn run_bench_worker( } num_success_txes = 0; num_error_txes = 0; + num_expected_error_txes = 0; num_success_cmds = 0; num_no_gas = 0; num_submitted = 0; @@ -874,7 +917,11 @@ async fn run_bench_worker( if let Some(b) = retry_queue.pop_front() { let tx = b.0; let payload = b.1; - num_error_txes += 1; + if payload.get_failure_type().is_some() { + num_expected_error_txes += 1; + } else { + num_error_txes += 1; + } num_submitted += 1; metrics_cloned.num_submitted.with_label_values(&[&payload.to_string()]).inc(); // TODO: clone committee for each request is not ideal. 
@@ -958,6 +1005,7 @@ async fn run_bench_worker( bench_stats: BenchmarkStats { duration: stat_start_time.elapsed(), num_error_txes, + num_expected_error_txes, num_success_txes, num_success_cmds, total_gas_used: worker_gas_used, diff --git a/crates/sui-benchmark/src/drivers/mod.rs b/crates/sui-benchmark/src/drivers/mod.rs index 9a245f5742907..8f29631de5d6d 100644 --- a/crates/sui-benchmark/src/drivers/mod.rs +++ b/crates/sui-benchmark/src/drivers/mod.rs @@ -124,6 +124,8 @@ pub struct BenchmarkStats { pub duration: Duration, /// Number of transactions that ended in an error pub num_error_txes: u64, + /// Number of transactions that ended in an error but were expected + pub num_expected_error_txes: u64, /// Number of transactions that were executed successfully pub num_success_txes: u64, /// Total number of commands in transactions that executed successfully @@ -137,6 +139,7 @@ impl BenchmarkStats { pub fn update(&mut self, duration: Duration, sample_stat: &BenchmarkStats) { self.duration = duration; self.num_error_txes += sample_stat.num_error_txes; + self.num_expected_error_txes += sample_stat.num_expected_error_txes; self.num_success_txes += sample_stat.num_success_txes; self.num_success_cmds += sample_stat.num_success_cmds; self.total_gas_used += sample_stat.total_gas_used; @@ -155,6 +158,7 @@ impl BenchmarkStats { "tps", "cps", "error%", + "expected error%", "latency (min)", "latency (p50)", "latency (p99)", @@ -169,6 +173,10 @@ impl BenchmarkStats { (100 * self.num_error_txes) as f32 / (self.num_error_txes + self.num_success_txes) as f32, )); + row.add_cell(Cell::new( + (100 * self.num_expected_error_txes) as f32 + / (self.num_expected_error_txes + self.num_success_txes) as f32, + )); row.add_cell(Cell::new(self.latency_ms.histogram.min())); row.add_cell(Cell::new(self.latency_ms.histogram.value_at_quantile(0.5))); row.add_cell(Cell::new(self.latency_ms.histogram.value_at_quantile(0.99))); diff --git a/crates/sui-benchmark/src/lib.rs 
b/crates/sui-benchmark/src/lib.rs index b440e5ffb4062..67f9d6a57db27 100644 --- a/crates/sui-benchmark/src/lib.rs +++ b/crates/sui-benchmark/src/lib.rs @@ -742,10 +742,9 @@ impl ValidatorProxy for FullNodeProxy { .await { Ok(resp) => { - let effects = ExecutionEffects::SuiTransactionBlockEffects( + return Ok(ExecutionEffects::SuiTransactionBlockEffects( resp.effects.expect("effects field should not be None"), - ); - return Ok(effects); + )); } Err(err) => { error!( diff --git a/crates/sui-benchmark/src/options.rs b/crates/sui-benchmark/src/options.rs index f924aa54f110a..4bab34fd41930 100644 --- a/crates/sui-benchmark/src/options.rs +++ b/crates/sui-benchmark/src/options.rs @@ -181,6 +181,9 @@ pub enum RunSpec { // relative weight of randomness transactions in the benchmark workload #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [0])] randomness: Vec, + // relative weight of expected failure transactions in the benchmark workload + #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [0])] + expected_failure: Vec, // --- workload-specific options --- (TODO: use subcommands or similar) // 100 for max hotness i.e all requests target @@ -210,6 +213,10 @@ pub enum RunSpec { // Default is (0-0.5) implying random load at 50% load. See `AdversarialPayloadType` enum for `adversarial_type` #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = ["0-1.0".to_string()])] adversarial_cfg: Vec, + // type of expected failure transactions in the benchmark workload. 
+ // See `ExpectedFailureType` enum for `expected_failure_type` + #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [0])] + expected_failure_type: Vec, // --- generic options --- // Target qps diff --git a/crates/sui-benchmark/src/workloads/adversarial.rs b/crates/sui-benchmark/src/workloads/adversarial.rs index f4d871d79bf40..eeb82dd150aea 100644 --- a/crates/sui-benchmark/src/workloads/adversarial.rs +++ b/crates/sui-benchmark/src/workloads/adversarial.rs @@ -10,7 +10,7 @@ use crate::in_memory_wallet::move_call_pt_impl; use crate::in_memory_wallet::InMemoryWallet; use crate::system_state_observer::{SystemState, SystemStateObserver}; use crate::workloads::payload::Payload; -use crate::workloads::{Gas, GasCoinConfig}; +use crate::workloads::{workload::ExpectedFailureType, Gas, GasCoinConfig}; use crate::ProgrammableTransactionBuilder; use crate::{convert_move_call_args, BenchMoveCallArg, ExecutionEffects, ValidatorProxy}; use anyhow::anyhow; @@ -189,6 +189,10 @@ impl Payload for AdversarialTestPayload { .expect("Protocol config not in system state"), ) } + + fn get_failure_type(&self) -> Option { + None + } } impl AdversarialTestPayload { diff --git a/crates/sui-benchmark/src/workloads/batch_payment.rs b/crates/sui-benchmark/src/workloads/batch_payment.rs index 771dee09c0d88..94e58da2ae484 100644 --- a/crates/sui-benchmark/src/workloads/batch_payment.rs +++ b/crates/sui-benchmark/src/workloads/batch_payment.rs @@ -5,7 +5,7 @@ use crate::drivers::Interval; use crate::in_memory_wallet::InMemoryWallet; use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; -use crate::workloads::workload::{Workload, STORAGE_COST_PER_COIN}; +use crate::workloads::workload::{ExpectedFailureType, Workload, STORAGE_COST_PER_COIN}; use crate::workloads::workload::{WorkloadBuilder, ESTIMATED_COMPUTATION_COST}; use crate::workloads::{Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams}; use crate::{ExecutionEffects, 
ValidatorProxy}; @@ -116,6 +116,10 @@ impl Payload for BatchPaymentTestPayload { gas_budget, ) } + + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/delegation.rs b/crates/sui-benchmark/src/workloads/delegation.rs index 8f55acf807b23..24f21a120f12c 100644 --- a/crates/sui-benchmark/src/workloads/delegation.rs +++ b/crates/sui-benchmark/src/workloads/delegation.rs @@ -4,7 +4,7 @@ use crate::drivers::Interval; use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; -use crate::workloads::workload::{Workload, WorkloadBuilder}; +use crate::workloads::workload::{ExpectedFailureType, Workload, WorkloadBuilder}; use crate::workloads::workload::{ ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COIN, }; @@ -80,6 +80,10 @@ impl Payload for DelegationTestPayload { ), } } + + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/expected_failure.rs b/crates/sui-benchmark/src/workloads/expected_failure.rs new file mode 100644 index 0000000000000..ae200ccbe759b --- /dev/null +++ b/crates/sui-benchmark/src/workloads/expected_failure.rs @@ -0,0 +1,265 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::drivers::Interval; +use crate::system_state_observer::SystemStateObserver; +use crate::workloads::payload::Payload; +use crate::workloads::workload::{ + ExpectedFailureType, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COIN, +}; +use crate::workloads::{Gas, GasCoinConfig, Workload, WorkloadBuilderInfo, WorkloadParams}; +use crate::ExecutionEffects; +use crate::ValidatorProxy; +use async_trait::async_trait; +use rand::seq::IteratorRandom; +use std::collections::HashMap; +use std::fmt; +use std::sync::Arc; +use sui_core::test_utils::make_transfer_object_transaction; +use sui_types::base_types::SuiAddress; +use sui_types::crypto::{AccountKeyPair, Ed25519SuiSignature}; +use sui_types::signature::GenericSignature; +use sui_types::{base_types::ObjectRef, crypto::get_key_pair, transaction::Transaction}; +use tracing::debug; + +#[derive(Debug, Clone)] +pub struct ExpectedFailurePayload { + failure_type: ExpectedFailureType, + transfer_object: ObjectRef, + transfer_from: SuiAddress, + transfer_to: SuiAddress, + gas: Vec, + system_state_observer: Arc, +} + +#[derive(Debug, Clone)] +pub struct ExpectedFailurePayloadCfg { + pub failure_type: ExpectedFailureType, +} + +impl Copy for ExpectedFailurePayloadCfg {} + +impl ExpectedFailurePayload { + fn create_failing_transaction(&self, mut tx: Transaction) -> Transaction { + match self.failure_type { + ExpectedFailureType::InvalidSignature => { + let signatures = tx.tx_signatures_mut_for_testing(); + signatures.pop(); + signatures.push(GenericSignature::Signature( + sui_types::crypto::Signature::Ed25519SuiSignature( + Ed25519SuiSignature::default(), + ), + )); + tx + } + ExpectedFailureType::Random => unreachable!(), + } + } +} + +impl Payload for ExpectedFailurePayload { + fn make_new_payload(&mut self, _effects: &ExecutionEffects) { + // This should never be called, as an expected failure payload + // should fail (thereby having no 
effects) and be retried. Note + // that since these are failures rather than Move level errors, + // no gas should be consumed, nor any objects mutated. + unreachable!() + } + + fn make_transaction(&mut self) -> Transaction { + let (gas_obj, _, keypair) = self.gas.iter().find(|x| x.1 == self.transfer_from).unwrap(); + let tx = make_transfer_object_transaction( + self.transfer_object, + *gas_obj, + self.transfer_from, + keypair, + self.transfer_to, + self.system_state_observer + .state + .borrow() + .reference_gas_price, + ); + self.create_failing_transaction(tx) + } + + fn get_failure_type(&self) -> Option { + Some(self.failure_type) + } +} + +impl fmt::Display for ExpectedFailurePayload { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ExpectedFailurePayload({:?})", self.failure_type) + } +} + +#[derive(Debug)] +pub struct ExpectedFailureWorkloadBuilder { + expected_failure_cfg: ExpectedFailurePayloadCfg, + num_transfer_accounts: u64, + num_payloads: u64, +} + +impl ExpectedFailureWorkloadBuilder { + pub fn from( + workload_weight: f32, + target_qps: u64, + num_workers: u64, + in_flight_ratio: u64, + num_transfer_accounts: u64, + expected_failure_cfg: ExpectedFailurePayloadCfg, + duration: Interval, + group: u32, + ) -> Option { + let target_qps = (workload_weight * target_qps as f32) as u64; + let num_workers = (workload_weight * num_workers as f32).ceil() as u64; + let max_ops = target_qps * in_flight_ratio; + if max_ops == 0 || num_workers == 0 { + None + } else { + let workload_params = WorkloadParams { + target_qps, + num_workers, + max_ops, + duration, + group, + }; + let workload_builder = Box::>::from(Box::new( + ExpectedFailureWorkloadBuilder { + expected_failure_cfg, + num_payloads: max_ops, + num_transfer_accounts, + }, + )); + let builder_info = WorkloadBuilderInfo { + workload_params, + workload_builder, + }; + Some(builder_info) + } + } +} + +#[async_trait] +impl WorkloadBuilder for ExpectedFailureWorkloadBuilder { + async 
fn generate_coin_config_for_init(&self) -> Vec { + vec![] + } + async fn generate_coin_config_for_payloads(&self) -> Vec { + let mut address_map = HashMap::new(); + // Have to include not just the coins that are going to be created and sent + // but the coin being used as gas as well. + let amount = MAX_GAS_FOR_TESTING + + ESTIMATED_COMPUTATION_COST + + STORAGE_COST_PER_COIN * (self.num_transfer_accounts + 1); + // gas for payloads + let mut payload_configs = vec![]; + for _i in 0..self.num_transfer_accounts { + let (address, keypair) = get_key_pair(); + let cloned_keypair: Arc = Arc::new(keypair); + address_map.insert(address, cloned_keypair.clone()); + for _j in 0..self.num_payloads { + payload_configs.push(GasCoinConfig { + amount, + address, + keypair: cloned_keypair.clone(), + }); + } + } + + let owner = *address_map.keys().choose(&mut rand::thread_rng()).unwrap(); + + // transfer tokens + let mut gas_configs = vec![]; + for _i in 0..self.num_payloads { + let (address, keypair) = (owner, address_map.get(&owner).unwrap().clone()); + gas_configs.push(GasCoinConfig { + amount, + address, + keypair: keypair.clone(), + }); + } + + gas_configs.extend(payload_configs); + gas_configs + } + async fn build( + &self, + _init_gas: Vec, + payload_gas: Vec, + ) -> Box> { + debug!( + "Using `{:?}` expected failure workloads", + self.expected_failure_cfg.failure_type, + ); + + Box::>::from(Box::new(ExpectedFailureWorkload { + num_tokens: self.num_payloads, + payload_gas, + expected_failure_cfg: self.expected_failure_cfg, + })) + } +} + +#[derive(Debug)] +pub struct ExpectedFailureWorkload { + num_tokens: u64, + payload_gas: Vec, + expected_failure_cfg: ExpectedFailurePayloadCfg, +} + +#[async_trait] +impl Workload for ExpectedFailureWorkload { + async fn init( + &mut self, + _proxy: Arc, + _system_state_observer: Arc, + ) { + } + + async fn make_test_payloads( + &self, + _proxy: Arc, + system_state_observer: Arc, + ) -> Vec> { + let (transfer_tokens, payload_gas) = 
self.payload_gas.split_at(self.num_tokens as usize); + let mut gas_by_address: HashMap> = HashMap::new(); + for gas in payload_gas.iter() { + gas_by_address + .entry(gas.1) + .or_insert_with(|| Vec::with_capacity(1)) + .push(gas.clone()); + } + + let addresses: Vec = gas_by_address.keys().cloned().collect(); + let mut transfer_gas: Vec> = vec![]; + for i in 0..self.num_tokens { + let mut account_transfer_gas = vec![]; + for address in addresses.iter() { + account_transfer_gas.push(gas_by_address[address][i as usize].clone()); + } + transfer_gas.push(account_transfer_gas); + } + let refs: Vec<(Vec, Gas)> = transfer_gas + .into_iter() + .zip(transfer_tokens.iter()) + .map(|(g, t)| (g, t.clone())) + .collect(); + refs.iter() + .map(|(g, t)| { + let from = t.1; + let to = g.iter().find(|x| x.1 != from).unwrap().1; + Box::new(ExpectedFailurePayload { + failure_type: self.expected_failure_cfg.failure_type, + transfer_object: t.0, + transfer_from: from, + transfer_to: to, + gas: g.to_vec(), + system_state_observer: system_state_observer.clone(), + }) + }) + .map(|b| Box::::from(b)) + .collect() + } +} diff --git a/crates/sui-benchmark/src/workloads/mod.rs b/crates/sui-benchmark/src/workloads/mod.rs index 5dd11418f1a5f..368f2c96fe0c4 100644 --- a/crates/sui-benchmark/src/workloads/mod.rs +++ b/crates/sui-benchmark/src/workloads/mod.rs @@ -4,6 +4,7 @@ pub mod adversarial; pub mod batch_payment; pub mod delegation; +pub mod expected_failure; pub mod payload; pub mod randomness; pub mod shared_counter; diff --git a/crates/sui-benchmark/src/workloads/payload.rs b/crates/sui-benchmark/src/workloads/payload.rs index 567e7ad897e95..0e3d61e8ef2ea 100644 --- a/crates/sui-benchmark/src/workloads/payload.rs +++ b/crates/sui-benchmark/src/workloads/payload.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::ExecutionEffects; +use crate::{workloads::ExpectedFailureType, ExecutionEffects}; use std::fmt::Display; use sui_types::transaction::Transaction; @@ -12,4 +12,7 @@ use sui_types::transaction::Transaction; pub trait Payload: Send + Sync + std::fmt::Debug + Display { fn make_new_payload(&mut self, effects: &ExecutionEffects); fn make_transaction(&mut self) -> Transaction; + fn get_failure_type(&self) -> Option { + None // Default implementation returns None + } } diff --git a/crates/sui-benchmark/src/workloads/randomness.rs b/crates/sui-benchmark/src/workloads/randomness.rs index 6729f49f13f43..6a812b7b4d636 100644 --- a/crates/sui-benchmark/src/workloads/randomness.rs +++ b/crates/sui-benchmark/src/workloads/randomness.rs @@ -6,7 +6,7 @@ use crate::system_state_observer::SystemStateObserver; use crate::util::publish_basics_package; use crate::workloads::payload::Payload; use crate::workloads::workload::{ - Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + ExpectedFailureType, Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, }; use crate::workloads::GasCoinConfig; use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; @@ -58,6 +58,9 @@ impl Payload for RandomnessTestPayload { .call_emit_random(self.package_id, self.randomness_initial_shared_version) .build_and_sign(self.gas.2.as_ref()) } + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/shared_counter.rs b/crates/sui-benchmark/src/workloads/shared_counter.rs index d4c6036414282..5356d53b7184d 100644 --- a/crates/sui-benchmark/src/workloads/shared_counter.rs +++ b/crates/sui-benchmark/src/workloads/shared_counter.rs @@ -6,8 +6,8 @@ use crate::system_state_observer::SystemStateObserver; use crate::util::publish_basics_package; use crate::workloads::payload::Payload; use crate::workloads::workload::{ - Workload, WorkloadBuilder, 
ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, - STORAGE_COST_PER_COUNTER, + ExpectedFailureType, Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, + MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COUNTER, }; use crate::workloads::GasCoinConfig; use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; @@ -72,6 +72,9 @@ impl Payload for SharedCounterTestPayload { ) .build_and_sign(self.gas.2.as_ref()) } + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/shared_object_deletion.rs b/crates/sui-benchmark/src/workloads/shared_object_deletion.rs index 34250f5f82d23..552cd86c85fa8 100644 --- a/crates/sui-benchmark/src/workloads/shared_object_deletion.rs +++ b/crates/sui-benchmark/src/workloads/shared_object_deletion.rs @@ -6,8 +6,8 @@ use crate::system_state_observer::SystemStateObserver; use crate::util::publish_basics_package; use crate::workloads::payload::Payload; use crate::workloads::workload::{ - Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, - STORAGE_COST_PER_COUNTER, + ExpectedFailureType, Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, + MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COUNTER, }; use crate::workloads::GasCoinConfig; use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; @@ -118,6 +118,9 @@ impl Payload for SharedCounterDeletionTestPayload { } .build_and_sign(self.gas.2.as_ref()) } + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/transfer_object.rs b/crates/sui-benchmark/src/workloads/transfer_object.rs index 3bea7713ca98e..1835a5d0f17af 100644 --- a/crates/sui-benchmark/src/workloads/transfer_object.rs +++ b/crates/sui-benchmark/src/workloads/transfer_object.rs @@ -13,7 +13,8 @@ use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; use crate::workloads::workload::WorkloadBuilder; use crate::workloads::workload::{ - Workload, 
ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COIN, + ExpectedFailureType, Workload, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COIN, }; use crate::workloads::{Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams}; use crate::{ExecutionEffects, ValidatorProxy}; @@ -80,6 +81,9 @@ impl Payload for TransferObjectTestPayload { .reference_gas_price, ) } + fn get_failure_type(&self) -> Option { + None + } } impl std::fmt::Display for TransferObjectTestPayload { diff --git a/crates/sui-benchmark/src/workloads/workload.rs b/crates/sui-benchmark/src/workloads/workload.rs index f6175834ea05a..d4c4cb507c270 100644 --- a/crates/sui-benchmark/src/workloads/workload.rs +++ b/crates/sui-benchmark/src/workloads/workload.rs @@ -5,8 +5,14 @@ use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; use crate::workloads::{Gas, GasCoinConfig}; use crate::ValidatorProxy; +use anyhow::anyhow; use async_trait::async_trait; +use rand::distributions::{Distribution, Standard}; +use rand::Rng; +use std::str::FromStr; use std::sync::Arc; +use strum::{EnumCount, IntoEnumIterator}; +use strum_macros::{EnumCount as EnumCountMacro, EnumIter}; use sui_types::gas_coin::MIST_PER_SUI; // This is the maximum gas we will transfer from primary coin into any gas coin @@ -23,6 +29,57 @@ pub const STORAGE_COST_PER_COUNTER: u64 = 341 * 76 * 100; /// Used to estimate the budget required for each transaction. pub const ESTIMATED_COMPUTATION_COST: u64 = 1_000_000; +#[derive(Debug, EnumCountMacro, EnumIter, Clone, Copy)] +pub enum ExpectedFailureType { + Random = 0, + InvalidSignature, + // TODO: Add other failure types +} + +impl TryFrom for ExpectedFailureType { + type Error = anyhow::Error; + + fn try_from(value: u32) -> Result { + match value { + 0 => Ok(rand::random()), + _ => ExpectedFailureType::iter() + .nth(value as usize) + .ok_or_else(|| { + anyhow!( + "Invalid failure type specifier. 
Valid options are {} to {}", + 0, + ExpectedFailureType::COUNT + ) + }), + } + } +} + +impl FromStr for ExpectedFailureType { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let v = u32::from_str(s).map(ExpectedFailureType::try_from); + + if let Ok(Ok(q)) = v { + return Ok(q); + } + + Err(anyhow!( + "Invalid input string. Valid values are 0 to {}", + ExpectedFailureType::COUNT + )) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExpectedFailureType { + // Exclude the "Random" variant + let n = rng.gen_range(1..ExpectedFailureType::COUNT); + ExpectedFailureType::iter().nth(n).unwrap() + } +} + #[async_trait] pub trait WorkloadBuilder: Send + Sync + std::fmt::Debug { async fn generate_coin_config_for_init(&self) -> Vec; diff --git a/crates/sui-benchmark/src/workloads/workload_configuration.rs b/crates/sui-benchmark/src/workloads/workload_configuration.rs index aae4924eaf469..9cbd18800ead3 100644 --- a/crates/sui-benchmark/src/workloads/workload_configuration.rs +++ b/crates/sui-benchmark/src/workloads/workload_configuration.rs @@ -9,7 +9,7 @@ use crate::workloads::batch_payment::BatchPaymentWorkloadBuilder; use crate::workloads::delegation::DelegationWorkloadBuilder; use crate::workloads::shared_counter::SharedCounterWorkloadBuilder; use crate::workloads::transfer_object::TransferObjectWorkloadBuilder; -use crate::workloads::{GroupID, WorkloadBuilderInfo, WorkloadInfo}; +use crate::workloads::{ExpectedFailureType, GroupID, WorkloadBuilderInfo, WorkloadInfo}; use anyhow::Result; use std::collections::BTreeMap; use std::str::FromStr; @@ -17,9 +17,36 @@ use std::sync::Arc; use tracing::info; use super::adversarial::{AdversarialPayloadCfg, AdversarialWorkloadBuilder}; +use super::expected_failure::{ExpectedFailurePayloadCfg, ExpectedFailureWorkloadBuilder}; use super::randomness::RandomnessWorkloadBuilder; use super::shared_object_deletion::SharedCounterDeletionWorkloadBuilder; +pub struct WorkloadWeights { + pub 
shared_counter: u32, + pub transfer_object: u32, + pub delegation: u32, + pub batch_payment: u32, + pub shared_deletion: u32, + pub adversarial: u32, + pub expected_failure: u32, + pub randomness: u32, +} + +pub struct WorkloadConfig { + pub group: u32, + pub num_workers: u64, + pub num_transfer_accounts: u64, + pub weights: WorkloadWeights, + pub adversarial_cfg: AdversarialPayloadCfg, + pub expected_failure_cfg: ExpectedFailurePayloadCfg, + pub batch_payment_size: u32, + pub shared_counter_hotness_factor: u32, + pub num_shared_counters: Option, + pub shared_counter_max_tip: u64, + pub target_qps: u64, + pub in_flight_ratio: u64, + pub duration: Interval, +} pub struct WorkloadConfiguration; impl WorkloadConfiguration { @@ -40,12 +67,14 @@ impl WorkloadConfiguration { delegation, batch_payment, adversarial, + expected_failure, randomness, shared_counter_hotness_factor, num_shared_counters, shared_counter_max_tip, batch_payment_size, adversarial_cfg, + expected_failure_type, target_qps, num_workers, in_flight_ratio, @@ -60,28 +89,36 @@ impl WorkloadConfiguration { // benchmark group will run in the same time for the same duration. 
for workload_group in 0..num_of_benchmark_groups { let i = workload_group as usize; - let builders = Self::create_workload_builders( - workload_group, - num_workers[i], - opts.num_transfer_accounts, - shared_counter[i], - transfer_object[i], - delegation[i], - batch_payment[i], - shared_deletion[i], - adversarial[i], - AdversarialPayloadCfg::from_str(&adversarial_cfg[i]).unwrap(), - randomness[i], - batch_payment_size[i], - shared_counter_hotness_factor[i], - num_shared_counters.as_ref().map(|n| n[i]), - shared_counter_max_tip[i], - target_qps[i], - in_flight_ratio[i], - duration[i], - system_state_observer.clone(), - ) - .await; + let config = WorkloadConfig { + group: workload_group, + num_workers: num_workers[i], + num_transfer_accounts: opts.num_transfer_accounts, + weights: WorkloadWeights { + shared_counter: shared_counter[i], + transfer_object: transfer_object[i], + delegation: delegation[i], + batch_payment: batch_payment[i], + shared_deletion: shared_deletion[i], + adversarial: adversarial[i], + expected_failure: expected_failure[i], + randomness: randomness[i], + }, + adversarial_cfg: AdversarialPayloadCfg::from_str(&adversarial_cfg[i]) + .unwrap(), + expected_failure_cfg: ExpectedFailurePayloadCfg { + failure_type: ExpectedFailureType::try_from(expected_failure_type[i]) + .unwrap(), + }, + batch_payment_size: batch_payment_size[i], + shared_counter_hotness_factor: shared_counter_hotness_factor[i], + num_shared_counters: num_shared_counters.as_ref().map(|n| n[i]), + shared_counter_max_tip: shared_counter_max_tip[i], + target_qps: target_qps[i], + in_flight_ratio: in_flight_ratio[i], + duration: duration[i], + }; + let builders = + Self::create_workload_builders(config, system_state_observer.clone()).await; workload_builders.extend(builders); } @@ -139,37 +176,35 @@ impl WorkloadConfiguration { } pub async fn create_workload_builders( - workload_group: u32, - num_workers: u64, - num_transfer_accounts: u64, - shared_counter_weight: u32, - 
transfer_object_weight: u32, - delegation_weight: u32, - batch_payment_weight: u32, - shared_deletion_weight: u32, - adversarial_weight: u32, - adversarial_cfg: AdversarialPayloadCfg, - randomness_weight: u32, - batch_payment_size: u32, - shared_counter_hotness_factor: u32, - num_shared_counters: Option, - shared_counter_max_tip: u64, - target_qps: u64, - in_flight_ratio: u64, - duration: Interval, + WorkloadConfig { + group, + num_workers, + num_transfer_accounts, + weights, + adversarial_cfg, + expected_failure_cfg, + batch_payment_size, + shared_counter_hotness_factor, + num_shared_counters, + shared_counter_max_tip, + target_qps, + in_flight_ratio, + duration, + }: WorkloadConfig, system_state_observer: Arc, ) -> Vec> { - let total_weight = shared_counter_weight - + shared_deletion_weight - + transfer_object_weight - + delegation_weight - + batch_payment_weight - + adversarial_weight - + randomness_weight; + let total_weight = weights.shared_counter + + weights.shared_deletion + + weights.transfer_object + + weights.delegation + + weights.batch_payment + + weights.adversarial + + weights.randomness + + weights.expected_failure; let reference_gas_price = system_state_observer.state.borrow().reference_gas_price; let mut workload_builders = vec![]; let shared_workload = SharedCounterWorkloadBuilder::from( - shared_counter_weight as f32 / total_weight as f32, + weights.shared_counter as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, @@ -178,11 +213,11 @@ impl WorkloadConfiguration { shared_counter_max_tip, reference_gas_price, duration, - workload_group, + group, ); workload_builders.push(shared_workload); let shared_deletion_workload = SharedCounterDeletionWorkloadBuilder::from( - shared_deletion_weight as f32 / total_weight as f32, + weights.shared_deletion as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, @@ -190,58 +225,69 @@ impl WorkloadConfiguration { shared_counter_max_tip, reference_gas_price, duration, - 
workload_group, + group, ); workload_builders.push(shared_deletion_workload); let transfer_workload = TransferObjectWorkloadBuilder::from( - transfer_object_weight as f32 / total_weight as f32, + weights.transfer_object as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, num_transfer_accounts, duration, - workload_group, + group, ); workload_builders.push(transfer_workload); let delegation_workload = DelegationWorkloadBuilder::from( - delegation_weight as f32 / total_weight as f32, + weights.delegation as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, duration, - workload_group, + group, ); workload_builders.push(delegation_workload); let batch_payment_workload = BatchPaymentWorkloadBuilder::from( - batch_payment_weight as f32 / total_weight as f32, + weights.batch_payment as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, batch_payment_size, duration, - workload_group, + group, ); workload_builders.push(batch_payment_workload); let adversarial_workload = AdversarialWorkloadBuilder::from( - adversarial_weight as f32 / total_weight as f32, + weights.adversarial as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, adversarial_cfg, duration, - workload_group, + group, ); workload_builders.push(adversarial_workload); let randomness_workload = RandomnessWorkloadBuilder::from( - randomness_weight as f32 / total_weight as f32, + weights.randomness as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, reference_gas_price, duration, - workload_group, + group, ); workload_builders.push(randomness_workload); + let expected_failure_workload = ExpectedFailureWorkloadBuilder::from( + weights.expected_failure as f32 / total_weight as f32, + target_qps, + num_workers, + in_flight_ratio, + num_transfer_accounts, + expected_failure_cfg, + duration, + group, + ); + workload_builders.push(expected_failure_workload); workload_builders } diff --git a/crates/sui-benchmark/tests/simtest.rs 
b/crates/sui-benchmark/tests/simtest.rs index fcb22e160de14..3c0e9cacccc4b 100644 --- a/crates/sui-benchmark/tests/simtest.rs +++ b/crates/sui-benchmark/tests/simtest.rs @@ -5,6 +5,7 @@ mod test { use rand::{distributions::uniform::SampleRange, thread_rng, Rng}; use std::collections::HashSet; + use std::num::NonZeroUsize; use std::path::PathBuf; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -13,7 +14,11 @@ mod test { use sui_benchmark::bank::BenchmarkBank; use sui_benchmark::system_state_observer::SystemStateObserver; use sui_benchmark::workloads::adversarial::AdversarialPayloadCfg; - use sui_benchmark::workloads::workload_configuration::WorkloadConfiguration; + use sui_benchmark::workloads::expected_failure::ExpectedFailurePayloadCfg; + use sui_benchmark::workloads::workload::ExpectedFailureType; + use sui_benchmark::workloads::workload_configuration::{ + WorkloadConfig, WorkloadConfiguration, WorkloadWeights, + }; use sui_benchmark::{ drivers::{bench_driver::BenchDriver, driver::Driver, Interval}, util::get_ed25519_keypair_from_keystore, @@ -35,11 +40,13 @@ mod test { use sui_simulator::{configs::*, SimConfig}; use sui_storage::blob::Blob; use sui_surfer::surf_strategy::SurfStrategy; + use sui_swarm_config::network_config_builder::ConfigBuilder; use sui_types::base_types::{ConciseableName, ObjectID, SequenceNumber}; use sui_types::digests::TransactionDigest; use sui_types::full_checkpoint_content::CheckpointData; use sui_types::messages_checkpoint::VerifiedCheckpoint; use sui_types::supported_protocol_versions::SupportedProtocolVersions; + use sui_types::traffic_control::{FreqThresholdConfig, PolicyConfig, PolicyType}; use sui_types::transaction::{ DEFAULT_VALIDATOR_GAS_PRICE, TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE, }; @@ -409,6 +416,7 @@ mod test { } }); register_fail_point_async("consensus-delay", || delay_failpoint(10..20, 0.001)); + register_fail_point_async("write_object_entry", || delay_failpoint(10..20, 0.001)); 
register_fail_point_async("writeback-cache-commit", || delay_failpoint(10..20, 0.001)); @@ -462,16 +470,17 @@ mod test { let txn_count_limit; // When using transaction count as congestion control mode, the limit of transactions per object per commit. let max_deferral_rounds; let cap_factor_denominator; + let absolute_cap_factor; + let allow_overage_factor; + let separate_randomness_budget; { let mut rng = thread_rng(); mode = if rng.gen_bool(0.33) { PerObjectCongestionControlMode::TotalGasBudget + } else if rng.gen_bool(0.5) { + PerObjectCongestionControlMode::TotalTxCount } else { - if rng.gen_bool(0.5) { - PerObjectCongestionControlMode::TotalTxCount - } else { - PerObjectCongestionControlMode::TotalGasBudgetWithCap - } + PerObjectCongestionControlMode::TotalGasBudgetWithCap }; checkpoint_budget_factor = rng.gen_range(1..20); txn_count_limit = rng.gen_range(1..=10); @@ -480,26 +489,34 @@ mod test { } else { rng.gen_range(1000..10000) // Large deferral round (testing liveness) }; - + allow_overage_factor = if rng.gen_bool(0.5) { + 0 + } else { + rng.gen_range(1..100) + }; cap_factor_denominator = rng.gen_range(1..100); + absolute_cap_factor = rng.gen_range(2..50); + separate_randomness_budget = rng.gen_bool(0.5); } info!( "test_simulated_load_shared_object_congestion_control setup. 
- mode: {:?}, checkpoint_budget_factor: {:?}, - max_deferral_rounds: {:?}, - txn_count_limit: {:?}", - mode, checkpoint_budget_factor, max_deferral_rounds, txn_count_limit + mode: {mode:?}, checkpoint_budget_factor: {checkpoint_budget_factor:?}, + max_deferral_rounds: {max_deferral_rounds:?}, + txn_count_limit: {txn_count_limit:?}, allow_overage_factor: {allow_overage_factor:?}, + cap_factor_denominator: {cap_factor_denominator:?}, + absolute_cap_factor: {absolute_cap_factor:?}, + separate_randomness_budget: {separate_randomness_budget:?}", ); let _guard = ProtocolConfig::apply_overrides_for_testing(move |_, mut config| { + let total_gas_limit = checkpoint_budget_factor + * DEFAULT_VALIDATOR_GAS_PRICE + * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE; config.set_per_object_congestion_control_mode_for_testing(mode); match mode { PerObjectCongestionControlMode::None => panic!("Congestion control mode cannot be None in test_simulated_load_shared_object_congestion_control"), PerObjectCongestionControlMode::TotalGasBudget => { - let total_gas_limit = checkpoint_budget_factor - * DEFAULT_VALIDATOR_GAS_PRICE - * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE; config.set_max_accumulated_txn_cost_per_object_in_narwhal_commit_for_testing(total_gas_limit); config.set_max_accumulated_txn_cost_per_object_in_mysticeti_commit_for_testing(total_gas_limit); }, @@ -512,15 +529,25 @@ mod test { ); }, PerObjectCongestionControlMode::TotalGasBudgetWithCap => { - let total_gas_limit = checkpoint_budget_factor - * DEFAULT_VALIDATOR_GAS_PRICE - * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE; config.set_max_accumulated_txn_cost_per_object_in_narwhal_commit_for_testing(total_gas_limit); config.set_max_accumulated_txn_cost_per_object_in_mysticeti_commit_for_testing(total_gas_limit); config.set_gas_budget_based_txn_cost_cap_factor_for_testing(total_gas_limit/cap_factor_denominator); + config.set_gas_budget_based_txn_cost_absolute_cap_commit_count_for_testing(absolute_cap_factor); }, 
} config.set_max_deferral_rounds_for_congestion_control_for_testing(max_deferral_rounds); + config.set_max_txn_cost_overage_per_object_in_commit_for_testing( + allow_overage_factor * total_gas_limit, + ); + if separate_randomness_budget { + config + .set_max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit_for_testing( + std::cmp::max( + 1, + config.max_accumulated_txn_cost_per_object_in_mysticeti_commit() / 10, + ), + ); + } config }); @@ -541,7 +568,64 @@ mod test { info!("Simulated load config: {:?}", simulated_load_config); } - test_simulated_load_with_test_config(test_cluster, 50, simulated_load_config).await; + test_simulated_load_with_test_config(test_cluster, 50, simulated_load_config, None, None) + .await; + } + + // Tests cluster defense against failing transaction floods via traffic control + #[sim_test(config = "test_config()")] + async fn test_simulated_load_expected_failure_traffic_control() { + // TODO: can we get away with significantly increasing this? + let target_qps = get_var("SIM_STRESS_TEST_QPS", 10); + let num_workers = get_var("SIM_STRESS_TEST_WORKERS", 10); + + let expected_tps = target_qps * num_workers; + let error_policy_type = PolicyType::FreqThreshold(FreqThresholdConfig { + client_threshold: expected_tps / 2, + window_size_secs: 5, + update_interval_secs: 1, + ..Default::default() + }); + info!( + "test_simulated_load_expected_failure_traffic_control setup. 
+ Policy type: {:?}", + error_policy_type + ); + + let policy_config = PolicyConfig { + connection_blocklist_ttl_sec: 1, + error_policy_type, + dry_run: false, + ..Default::default() + }; + let network_config = ConfigBuilder::new_with_temp_dir() + .committee_size(NonZeroUsize::new(4).unwrap()) + .with_policy_config(Some(policy_config)) + .with_epoch_duration(5000) + .build(); + let test_cluster = Arc::new( + TestClusterBuilder::new() + .set_network_config(network_config) + .build() + .await, + ); + + let mut simulated_load_config = SimulatedLoadConfig::default(); + { + simulated_load_config.expected_failure_weight = 20; + simulated_load_config.expected_failure_config.failure_type = + ExpectedFailureType::try_from(0).unwrap(); + info!("Simulated load config: {:?}", simulated_load_config); + } + + test_simulated_load_with_test_config( + test_cluster, + 50, + simulated_load_config, + Some(target_qps), + Some(num_workers), + ) + .await; } // Tests cluster liveness when DKG has failed. @@ -853,6 +937,8 @@ mod test { num_shared_counters: Option, use_shared_counter_max_tip: bool, shared_counter_max_tip: u64, + expected_failure_weight: u32, + expected_failure_config: ExpectedFailurePayloadCfg, } impl Default for SimulatedLoadConfig { @@ -869,6 +955,10 @@ mod test { num_shared_counters: Some(1), use_shared_counter_max_tip: false, shared_counter_max_tip: 0, + expected_failure_weight: 0, + expected_failure_config: ExpectedFailurePayloadCfg { + failure_type: ExpectedFailureType::try_from(0).unwrap(), + }, } } } @@ -878,6 +968,8 @@ mod test { test_cluster, test_duration_secs, SimulatedLoadConfig::default(), + None, + None, ) .await; } @@ -886,6 +978,8 @@ mod test { test_cluster: Arc, test_duration_secs: u64, config: SimulatedLoadConfig, + target_qps: Option, + num_workers: Option, ) { let sender = test_cluster.get_address_0(); let keystore_path = test_cluster.swarm.dir().join(SUI_KEYSTORE_FILENAME); @@ -916,17 +1010,10 @@ mod test { // The default test parameters are somewhat 
conservative in order to keep the running time // of the test reasonable in CI. - let target_qps = get_var("SIM_STRESS_TEST_QPS", 10); - let num_workers = get_var("SIM_STRESS_TEST_WORKERS", 10); + let target_qps = target_qps.unwrap_or(get_var("SIM_STRESS_TEST_QPS", 10)); + let num_workers = num_workers.unwrap_or(get_var("SIM_STRESS_TEST_WORKERS", 10)); let in_flight_ratio = get_var("SIM_STRESS_TEST_IFR", 2); let batch_payment_size = get_var("SIM_BATCH_PAYMENT_SIZE", 15); - let shared_counter_weight = config.shared_counter_weight; - let transfer_object_weight = config.transfer_object_weight; - let num_transfer_accounts = config.num_transfer_accounts; - let delegation_weight = config.delegation_weight; - let batch_payment_weight = config.batch_payment_weight; - let shared_object_deletion_weight = config.shared_deletion_weight; - let randomness_weight = config.randomness_weight; // Run random payloads at 100% load let adversarial_cfg = AdversarialPayloadCfg::from_str("0-1.0").unwrap(); @@ -937,8 +1024,6 @@ mod test { // tests run for ever let adversarial_weight = 0; - let shared_counter_hotness_factor = config.shared_counter_hotness_factor; - let num_shared_counters = config.num_shared_counters; let shared_counter_max_tip = if config.use_shared_counter_max_tip { config.shared_counter_max_tip } else { @@ -946,25 +1031,35 @@ mod test { }; let gas_request_chunk_size = 100; - let workloads_builders = WorkloadConfiguration::create_workload_builders( - 0, + let weights = WorkloadWeights { + shared_counter: config.shared_counter_weight, + transfer_object: config.transfer_object_weight, + delegation: config.delegation_weight, + batch_payment: config.batch_payment_weight, + shared_deletion: config.shared_deletion_weight, + randomness: config.randomness_weight, + adversarial: adversarial_weight, + expected_failure: config.expected_failure_weight, + }; + + let workload_config = WorkloadConfig { + group: 0, num_workers, - num_transfer_accounts, - shared_counter_weight, - 
transfer_object_weight, - delegation_weight, - batch_payment_weight, - shared_object_deletion_weight, - adversarial_weight, + num_transfer_accounts: config.num_transfer_accounts, + weights, adversarial_cfg, - randomness_weight, + expected_failure_cfg: config.expected_failure_config, batch_payment_size, - shared_counter_hotness_factor, - num_shared_counters, + shared_counter_hotness_factor: config.shared_counter_hotness_factor, + num_shared_counters: config.num_shared_counters, shared_counter_max_tip, target_qps, in_flight_ratio, duration, + }; + + let workloads_builders = WorkloadConfiguration::create_workload_builders( + workload_config, system_state_observer.clone(), ) .await; diff --git a/crates/sui-bridge-cli/src/lib.rs b/crates/sui-bridge-cli/src/lib.rs index a607e0acacf03..dc40a7f2f38d1 100644 --- a/crates/sui-bridge-cli/src/lib.rs +++ b/crates/sui-bridge-cli/src/lib.rs @@ -541,6 +541,8 @@ pub enum BridgeClientCommands { ClaimOnEth { #[clap(long)] seq_num: u64, + #[clap(long, default_value_t = true, action = clap::ArgAction::Set)] + dry_run: bool, }, } @@ -576,8 +578,8 @@ impl BridgeClientCommands { ); Ok(()) } - BridgeClientCommands::ClaimOnEth { seq_num } => { - claim_on_eth(seq_num, config, sui_bridge_client) + BridgeClientCommands::ClaimOnEth { seq_num, dry_run } => { + claim_on_eth(seq_num, config, sui_bridge_client, dry_run) .await .map_err(|e| anyhow!("{:?}", e)) } @@ -681,6 +683,7 @@ async fn claim_on_eth( seq_num: u64, config: &LoadedBridgeCliConfig, sui_bridge_client: SuiBridgeClient, + dry_run: bool, ) -> BridgeResult<()> { let sui_chain_id = sui_bridge_client.get_bridge_summary().await?.chain_id; let parsed_message = sui_bridge_client @@ -710,8 +713,20 @@ async fn claim_on_eth( ); let message = eth_sui_bridge::Message::from(parsed_message); let tx = eth_sui_bridge.transfer_bridged_tokens_with_signatures(signatures, message); - let _eth_claim_tx_receipt = tx.send().await.unwrap().await.unwrap().unwrap(); - info!("Sui to Eth bridge transfer 
claimed"); + if dry_run { + let tx = tx.tx; + let resp = config.eth_signer.estimate_gas(&tx, None).await; + println!( + "Sui to Eth bridge transfer claim dry run result: {:?}", + resp + ); + } else { + let eth_claim_tx_receipt = tx.send().await.unwrap().await.unwrap().unwrap(); + println!( + "Sui to Eth bridge transfer claimed: {:?}", + eth_claim_tx_receipt + ); + } Ok(()) } diff --git a/crates/sui-bridge-cli/src/main.rs b/crates/sui-bridge-cli/src/main.rs index 25532f6bc5335..3ee9c4acf5d76 100644 --- a/crates/sui-bridge-cli/src/main.rs +++ b/crates/sui-bridge-cli/src/main.rs @@ -7,6 +7,7 @@ use ethers::types::Address as EthAddress; use fastcrypto::encoding::{Encoding, Hex}; use shared_crypto::intent::Intent; use shared_crypto::intent::IntentMessage; +use std::collections::BTreeMap; use std::collections::HashMap; use std::str::from_utf8; use std::str::FromStr; @@ -83,7 +84,7 @@ async fn main() -> anyhow::Result<()> { let config = LoadedBridgeCliConfig::load(config).await?; let metrics = Arc::new(BridgeMetrics::new_for_testing()); let sui_bridge_client = - SuiClient::::new(&config.sui_rpc_url, metrics).await?; + SuiClient::::new(&config.sui_rpc_url, metrics.clone()).await?; let (sui_key, sui_address, gas_object_ref) = config .get_sui_account_info() @@ -99,7 +100,11 @@ async fn main() -> anyhow::Result<()> { .await .expect("Failed to get bridge committee"), ); - let agg = BridgeAuthorityAggregator::new(bridge_committee); + let agg = BridgeAuthorityAggregator::new( + bridge_committee, + metrics, + Arc::new(BTreeMap::new()), + ); // Handle Sui Side if chain_id.is_sui_chain() { diff --git a/crates/sui-bridge-indexer/Cargo.toml b/crates/sui-bridge-indexer/Cargo.toml index 4e41c2d6e0100..75602df4ae818 100644 --- a/crates/sui-bridge-indexer/Cargo.toml +++ b/crates/sui-bridge-indexer/Cargo.toml @@ -36,13 +36,16 @@ backoff.workspace = true sui-config.workspace = true tempfile.workspace = true sui-indexer-builder.workspace = true -sui-bridge-watchdog.workspace = true 
[dev-dependencies] sui-types = { workspace = true, features = ["test-utils"] } sui-test-transaction-builder.workspace = true test-cluster.workspace = true hex-literal = "0.3.4" +sui-indexer.workspace = true +diesel_migrations = "2.2.0" +sui-indexer-builder = { workspace = true, features = ["test-utils"] } +sui-bridge = { workspace = true, features = ["test-utils"] } [[bin]] name = "bridge-indexer" diff --git a/crates/sui-bridge-indexer/README.md b/crates/sui-bridge-indexer/README.md new file mode 100644 index 0000000000000..c6ce103c0d73e --- /dev/null +++ b/crates/sui-bridge-indexer/README.md @@ -0,0 +1,41 @@ +## Overview + +Sui Bridge Indexer is a binary that scans Sui Bridge transactions on Sui and Ethereum networks, and indexes the processed data for further use. + +## Get Binary + +```bash +cargo build --bin bridge-indexer --release +``` + +The pre-built Docker image for Bridge Indexer can be found in `mysten/sui-tools:{SHA}` + +## Run Binary + +``` +bridge-indexer --config-path config.yaml +``` + + +## Config + +```yaml +--- +remote_store_url: https://checkpoints.mainnet.sui.io +eth_rpc_url: {eth rpc url} +sui_rpc_url: {sui rpc url} + +concurrency: 500 +checkpoints_path: {path-for-checkpoints} + +eth_sui_bridge_contract_address: 0xda3bD1fE1973470312db04551B65f401Bc8a92fD # <-- mainnet, 0xAE68F87938439afEEDd6552B0E83D2CbC2473623 for testnet +metric_port: {port to export metrics} + +sui_bridge_genesis_checkpoint: 55455583 # <-- mainnet, 43917829 for testnet +# genesis block number for eth +eth_bridge_genesis_block: 20811249 # <-- mainnet, 5997013 for testnet + +eth_ws_url: {eth websocket url} + +``` + diff --git a/crates/sui-bridge-indexer/src/config.rs b/crates/sui-bridge-indexer/src/config.rs index 58b742642b9a8..6686da9fb2b74 100644 --- a/crates/sui-bridge-indexer/src/config.rs +++ b/crates/sui-bridge-indexer/src/config.rs @@ -25,9 +25,6 @@ pub struct IndexerConfig { pub eth_sui_bridge_contract_address: String, pub metric_port: u16, - - /// A temporary flag 
to disable the eth indexer to test mainnet before eth contracts are deployed. - pub disable_eth: Option, } impl sui_config::Config for IndexerConfig {} diff --git a/crates/sui-bridge-indexer/src/lib.rs b/crates/sui-bridge-indexer/src/lib.rs index b95802502eb93..8e8a906fbcb53 100644 --- a/crates/sui-bridge-indexer/src/lib.rs +++ b/crates/sui-bridge-indexer/src/lib.rs @@ -1,14 +1,35 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::fmt::{Display, Formatter}; -use strum_macros::Display; - -use sui_types::base_types::{SuiAddress, TransactionDigest}; - +use crate::config::IndexerConfig; +use crate::eth_bridge_indexer::{ + EthDataMapper, EthFinalizedSyncDatasource, EthSubscriptionDatasource, +}; +use crate::metrics::BridgeIndexerMetrics; use crate::models::GovernanceAction as DBGovernanceAction; use crate::models::TokenTransferData as DBTokenTransferData; use crate::models::{SuiErrorTransactions, TokenTransfer as DBTokenTransfer}; +use crate::postgres_manager::PgPool; +use crate::storage::PgBridgePersistent; +use crate::sui_bridge_indexer::SuiBridgeDataMapper; +use crate::sui_datasource::SuiCheckpointDatasource; +use ethers::providers::{Http, Provider}; +use ethers::types::Address as EthAddress; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; +use std::sync::Arc; +use strum_macros::Display; +use sui_bridge::eth_client::EthClient; +use sui_bridge::metered_eth_provider::MeteredEthHttpProvier; +use sui_bridge::metrics::BridgeMetrics; +use sui_bridge::utils::get_eth_contract_addresses; +use sui_data_ingestion_core::DataIngestionMetrics; +use sui_indexer_builder::indexer_builder::{BackfillStrategy, Datasource, Indexer, IndexerBuilder}; +use sui_indexer_builder::progress::{ + OutOfOrderSaveAfterDurationPolicy, ProgressSavingPolicy, SaveAfterDurationPolicy, +}; +use sui_sdk::SuiClientBuilder; +use sui_types::base_types::{SuiAddress, TransactionDigest}; pub mod config; pub mod metrics; @@ -179,3 +200,146 @@ impl Display for 
BridgeDataSource { write!(f, "{str}") } } + +pub async fn create_sui_indexer( + pool: PgPool, + metrics: BridgeIndexerMetrics, + ingestion_metrics: DataIngestionMetrics, + config: &IndexerConfig, +) -> anyhow::Result< + Indexer, + anyhow::Error, +> { + let datastore_with_out_of_order_source = PgBridgePersistent::new( + pool, + ProgressSavingPolicy::OutOfOrderSaveAfterDuration(OutOfOrderSaveAfterDurationPolicy::new( + tokio::time::Duration::from_secs(30), + )), + ); + + let sui_client = Arc::new( + SuiClientBuilder::default() + .build(config.sui_rpc_url.clone()) + .await?, + ); + + let sui_checkpoint_datasource = SuiCheckpointDatasource::new( + config.remote_store_url.clone(), + sui_client, + config.concurrency as usize, + config + .checkpoints_path + .clone() + .map(|p| p.into()) + .unwrap_or(tempfile::tempdir()?.into_path()), + config.sui_bridge_genesis_checkpoint, + ingestion_metrics, + metrics.clone(), + ); + + Ok(IndexerBuilder::new( + "SuiBridgeIndexer", + sui_checkpoint_datasource, + SuiBridgeDataMapper { metrics }, + datastore_with_out_of_order_source, + ) + .build()) +} + +pub async fn create_eth_sync_indexer( + pool: PgPool, + metrics: BridgeIndexerMetrics, + bridge_metrics: Arc, + config: &IndexerConfig, + eth_client: Arc>, +) -> Result, anyhow::Error> { + let bridge_addresses = get_eth_bridge_contract_addresses(config).await?; + // Start the eth sync data source + let eth_sync_datasource = EthFinalizedSyncDatasource::new( + bridge_addresses, + eth_client.clone(), + config.eth_rpc_url.clone(), + metrics.clone(), + bridge_metrics.clone(), + config.eth_bridge_genesis_block, + ) + .await?; + Ok(create_eth_indexer_builder( + pool, + metrics, + eth_sync_datasource, + "EthBridgeFinalizedSyncIndexer", + ) + .await? 
+ .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 1000 }) + .build()) +} + +pub async fn create_eth_subscription_indexer( + pool: PgPool, + metrics: BridgeIndexerMetrics, + config: &IndexerConfig, + eth_client: Arc>, +) -> Result, anyhow::Error> { + // Start the eth subscription indexer + let bridge_addresses = get_eth_bridge_contract_addresses(config).await?; + // Start the eth subscription indexer + let eth_subscription_datasource = EthSubscriptionDatasource::new( + bridge_addresses.clone(), + eth_client.clone(), + config.eth_ws_url.clone(), + metrics.clone(), + config.eth_bridge_genesis_block, + ) + .await?; + + Ok(create_eth_indexer_builder( + pool, + metrics, + eth_subscription_datasource, + "EthBridgeSubscriptionIndexer", + ) + .await? + .with_backfill_strategy(BackfillStrategy::Disabled) + .build()) +} + +async fn create_eth_indexer_builder>( + pool: PgPool, + metrics: BridgeIndexerMetrics, + datasource: D, + indexer_name: &str, +) -> Result, anyhow::Error> { + let datastore = PgBridgePersistent::new( + pool, + ProgressSavingPolicy::SaveAfterDuration(SaveAfterDurationPolicy::new( + tokio::time::Duration::from_secs(30), + )), + ); + + // Start the eth subscription indexer + Ok(IndexerBuilder::new( + indexer_name, + datasource, + EthDataMapper { metrics }, + datastore.clone(), + )) +} + +async fn get_eth_bridge_contract_addresses( + config: &IndexerConfig, +) -> Result, anyhow::Error> { + let bridge_address = EthAddress::from_str(&config.eth_sui_bridge_contract_address)?; + let provider = Arc::new( + Provider::::try_from(&config.eth_rpc_url)? 
+ .interval(std::time::Duration::from_millis(2000)), + ); + let bridge_addresses = get_eth_contract_addresses(bridge_address, &provider).await?; + Ok(vec![ + bridge_address, + bridge_addresses.0, + bridge_addresses.1, + bridge_addresses.2, + bridge_addresses.3, + ]) +} diff --git a/crates/sui-bridge-indexer/src/main.rs b/crates/sui-bridge-indexer/src/main.rs index 8ba1d128ce1d4..a42fc23b04ab7 100644 --- a/crates/sui-bridge-indexer/src/main.rs +++ b/crates/sui-bridge-indexer/src/main.rs @@ -3,7 +3,6 @@ use anyhow::Result; use clap::*; -use ethers::providers::{Http, Provider}; use ethers::types::Address as EthAddress; use prometheus::Registry; use std::collections::HashSet; @@ -15,10 +14,9 @@ use std::str::FromStr; use std::sync::Arc; use sui_bridge::eth_client::EthClient; use sui_bridge::metered_eth_provider::{new_metered_eth_provider, MeteredEthHttpProvier}; +use sui_bridge::sui_bridge_watchdog::Observable; use sui_bridge::sui_client::SuiBridgeClient; use sui_bridge::utils::get_eth_contract_addresses; -use sui_bridge_indexer::eth_bridge_indexer::EthFinalizedSyncDatasource; -use sui_bridge_indexer::eth_bridge_indexer::EthSubscriptionDatasource; use sui_config::Config; use tokio::task::JoinHandle; use tracing::info; @@ -28,24 +26,19 @@ use mysten_metrics::spawn_logged_monitored_task; use mysten_metrics::start_prometheus_server; use sui_bridge::metrics::BridgeMetrics; +use sui_bridge::sui_bridge_watchdog::{ + eth_bridge_status::EthBridgeStatus, eth_vault_balance::EthVaultBalance, + metrics::WatchdogMetrics, sui_bridge_status::SuiBridgeStatus, BridgeWatchDog, +}; use sui_bridge_indexer::config::IndexerConfig; -use sui_bridge_indexer::eth_bridge_indexer::EthDataMapper; use sui_bridge_indexer::metrics::BridgeIndexerMetrics; use sui_bridge_indexer::postgres_manager::{get_connection_pool, read_sui_progress_store}; -use sui_bridge_indexer::storage::PgBridgePersistent; -use sui_bridge_indexer::sui_bridge_indexer::SuiBridgeDataMapper; -use 
sui_bridge_indexer::sui_datasource::SuiCheckpointDatasource; use sui_bridge_indexer::sui_transaction_handler::handle_sui_transactions_loop; use sui_bridge_indexer::sui_transaction_queries::start_sui_tx_polling_task; -use sui_bridge_watchdog::{ - eth_bridge_status::EthBridgeStatus, eth_vault_balance::EthVaultBalance, - metrics::WatchdogMetrics, sui_bridge_status::SuiBridgeStatus, BridgeWatchDog, +use sui_bridge_indexer::{ + create_eth_subscription_indexer, create_eth_sync_indexer, create_sui_indexer, }; use sui_data_ingestion_core::DataIngestionMetrics; -use sui_indexer_builder::indexer_builder::{BackfillStrategy, IndexerBuilder}; -use sui_indexer_builder::progress::{ - OutOfOrderSaveAfterDurationPolicy, ProgressSavingPolicy, SaveAfterDurationPolicy, -}; use sui_sdk::SuiClientBuilder; #[derive(Parser, Clone, Debug)] @@ -86,18 +79,7 @@ async fn main() -> Result<()> { let bridge_metrics = Arc::new(BridgeMetrics::new(®istry)); let db_url = config.db_url.clone(); - let datastore = PgBridgePersistent::new( - get_connection_pool(db_url.clone()).await, - ProgressSavingPolicy::SaveAfterDuration(SaveAfterDurationPolicy::new( - tokio::time::Duration::from_secs(30), - )), - ); - let datastore_with_out_of_order_source = PgBridgePersistent::new( - get_connection_pool(db_url.clone()).await, - ProgressSavingPolicy::OutOfOrderSaveAfterDuration(OutOfOrderSaveAfterDurationPolicy::new( - tokio::time::Duration::from_secs(30), - )), - ); + let pool = get_connection_pool(db_url.clone()).await; let eth_client: Arc> = Arc::new( EthClient::::new( @@ -109,98 +91,30 @@ async fn main() -> Result<()> { ); let eth_bridge_proxy_address = EthAddress::from_str(&config.eth_sui_bridge_contract_address)?; let mut tasks = vec![]; - if Some(true) == config.disable_eth { - info!("Eth indexer is disabled"); - } else { - // Start the eth subscription indexer - let bridge_address = EthAddress::from_str(&config.eth_sui_bridge_contract_address)?; - let provider = Arc::new( - 
Provider::::try_from(&config.eth_rpc_url)? - .interval(std::time::Duration::from_millis(2000)), - ); - let bridge_addresses = get_eth_contract_addresses(bridge_address, &provider).await?; - let bridge_addresses: Vec = vec![ - bridge_address, - bridge_addresses.0, - bridge_addresses.1, - bridge_addresses.2, - bridge_addresses.3, - ]; - - // Start the eth subscription indexer - let eth_subscription_datasource = EthSubscriptionDatasource::new( - bridge_addresses.clone(), - eth_client.clone(), - config.eth_ws_url.clone(), - indexer_meterics.clone(), - config.eth_bridge_genesis_block, - ) - .await?; - let eth_subscription_indexer = IndexerBuilder::new( - "EthBridgeSubscriptionIndexer", - eth_subscription_datasource, - EthDataMapper { - metrics: indexer_meterics.clone(), - }, - datastore.clone(), - ) - .with_backfill_strategy(BackfillStrategy::Disabled) - .build(); - tasks.push(spawn_logged_monitored_task!( - eth_subscription_indexer.start() - )); - - // Start the eth sync data source - let eth_sync_datasource = EthFinalizedSyncDatasource::new( - bridge_addresses.clone(), - eth_client.clone(), - config.eth_rpc_url.clone(), - indexer_meterics.clone(), - bridge_metrics.clone(), - config.eth_bridge_genesis_block, - ) - .await?; - - let eth_sync_indexer = IndexerBuilder::new( - "EthBridgeFinalizedSyncIndexer", - eth_sync_datasource, - EthDataMapper { - metrics: indexer_meterics.clone(), - }, - datastore, - ) - .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 1000 }) - .build(); - tasks.push(spawn_logged_monitored_task!(eth_sync_indexer.start())); - } + // Start the eth subscription indexer + let eth_subscription_indexer = create_eth_subscription_indexer( + pool.clone(), + indexer_meterics.clone(), + &config, + eth_client.clone(), + ) + .await?; + tasks.push(spawn_logged_monitored_task!( + eth_subscription_indexer.start() + )); - let sui_client = Arc::new( - SuiClientBuilder::default() - .build(config.sui_rpc_url.clone()) - .await?, - ); - let 
sui_checkpoint_datasource = SuiCheckpointDatasource::new( - config.remote_store_url.clone(), - sui_client, - config.concurrency as usize, - config - .checkpoints_path - .clone() - .map(|p| p.into()) - .unwrap_or(tempfile::tempdir()?.into_path()), - config.sui_bridge_genesis_checkpoint, - ingestion_metrics.clone(), + // Start the eth sync data source + let eth_sync_indexer = create_eth_sync_indexer( + pool.clone(), indexer_meterics.clone(), - ); - let indexer = IndexerBuilder::new( - "SuiBridgeIndexer", - sui_checkpoint_datasource, - SuiBridgeDataMapper { - metrics: indexer_meterics.clone(), - }, - datastore_with_out_of_order_source, + bridge_metrics.clone(), + &config, + eth_client, ) - .build(); + .await?; + tasks.push(spawn_logged_monitored_task!(eth_sync_indexer.start())); + + let indexer = create_sui_indexer(pool, indexer_meterics, ingestion_metrics, &config).await?; tasks.push(spawn_logged_monitored_task!(indexer.start())); let sui_bridge_client = @@ -247,14 +161,12 @@ async fn start_watchdog( let sui_bridge_status = SuiBridgeStatus::new(sui_client, watchdog_metrics.sui_bridge_paused.clone()); - - BridgeWatchDog::new(vec![ - Arc::new(eth_vault_balance), - Arc::new(eth_bridge_status), - Arc::new(sui_bridge_status), - ]) - .run() - .await; + let observables: Vec> = vec![ + Box::new(eth_vault_balance), + Box::new(eth_bridge_status), + Box::new(sui_bridge_status), + ]; + BridgeWatchDog::new(observables).run().await; Ok(()) } diff --git a/crates/sui-bridge-indexer/src/storage.rs b/crates/sui-bridge-indexer/src/storage.rs index 5955ca8e1c37c..a279c6aa7410e 100644 --- a/crates/sui-bridge-indexer/src/storage.rs +++ b/crates/sui-bridge-indexer/src/storage.rs @@ -202,7 +202,7 @@ impl IndexerProgressStore for PgBridgePersistent { let cp: Vec = // TODO: using like could be error prone, change the progress store schema to stare the task name properly. 
QueryDsl::filter( - QueryDsl::filter(dsl::progress_store, columns::task_name.like(format!("{prefix} - %"))), + QueryDsl::filter(dsl::progress_store, columns::task_name.like(format!("{prefix} - %"))), columns::checkpoint.lt(columns::target_checkpoint)) .order_by(columns::target_checkpoint.desc()) .load(&mut conn) @@ -219,7 +219,7 @@ impl IndexerProgressStore for PgBridgePersistent { let cp: Option = // TODO: using like could be error prone, change the progress store schema to stare the task name properly. QueryDsl::filter(QueryDsl::filter(dsl::progress_store - .select(columns::target_checkpoint), columns::task_name.like(format!("{prefix} - %"))), + .select(columns::target_checkpoint), columns::task_name.like(format!("{prefix} - %"))), columns::target_checkpoint.ne(i64::MAX)) .order_by(columns::target_checkpoint.desc()) .first::(&mut conn) diff --git a/crates/sui-bridge-indexer/tests/indexer_tests.rs b/crates/sui-bridge-indexer/tests/indexer_tests.rs new file mode 100644 index 0000000000000..34591bca4fa75 --- /dev/null +++ b/crates/sui-bridge-indexer/tests/indexer_tests.rs @@ -0,0 +1,174 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use diesel::associations::HasTable; +use diesel::QueryDsl; +use diesel_async::RunQueryDsl; +use diesel_migrations::{embed_migrations, EmbeddedMigrations}; +use prometheus::Registry; +use std::time::Duration; +use sui_bridge::e2e_tests::test_utils::{ + initiate_bridge_eth_to_sui, BridgeTestCluster, BridgeTestClusterBuilder, +}; +use sui_bridge_indexer::config::IndexerConfig; +use sui_bridge_indexer::metrics::BridgeIndexerMetrics; +use sui_bridge_indexer::models::{GovernanceAction, TokenTransfer}; +use sui_bridge_indexer::postgres_manager::get_connection_pool; +use sui_bridge_indexer::storage::PgBridgePersistent; +use sui_bridge_indexer::{create_sui_indexer, schema}; +use sui_data_ingestion_core::DataIngestionMetrics; +use sui_indexer::database::Connection; +use sui_indexer::tempdb::TempDb; +use sui_indexer_builder::indexer_builder::IndexerProgressStore; + +const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/migrations"); + +#[tokio::test] +async fn test_indexing_transfer() { + let metrics = BridgeIndexerMetrics::new_for_testing(); + let registry = Registry::new(); + let ingestion_metrics = DataIngestionMetrics::new(®istry); + + let (config, cluster, _db) = setup_bridge_env(false).await; + + let pool = get_connection_pool(config.db_url.clone()).await; + let indexer = create_sui_indexer(pool.clone(), metrics.clone(), ingestion_metrics, &config) + .await + .unwrap(); + let storage = indexer.test_only_storage().clone(); + let indexer_name = indexer.test_only_name(); + let indexer_handle = tokio::spawn(indexer.start()); + + // wait until backfill finish + wait_for_back_fill_to_finish(&storage, &indexer_name) + .await + .unwrap(); + + let data: Vec = schema::token_transfer::dsl::token_transfer::table() + .load(&mut pool.get().await.unwrap()) + .await + .unwrap(); + + // token transfer data should be empty + assert!(data.is_empty()); + + use schema::governance_actions::columns; + let data = 
schema::governance_actions::dsl::governance_actions::table() + .select(( + columns::nonce, + columns::data_source, + columns::txn_digest, + columns::sender_address, + columns::timestamp_ms, + columns::action, + columns::data, + )) + .load::(&mut pool.get().await.unwrap()) + .await + .unwrap(); + + // 8 governance actions in total, token registration and approval events for ETH USDC, USDT and BTC. + assert_eq!(8, data.len()); + + // transfer eth to sui + initiate_bridge_eth_to_sui(&cluster, 1000, 0).await.unwrap(); + + let current_block_height = cluster + .sui_client() + .read_api() + .get_latest_checkpoint_sequence_number() + .await + .unwrap(); + wait_for_block(&storage, &indexer_name, current_block_height) + .await + .unwrap(); + + let data = schema::token_transfer::dsl::token_transfer::table() + .load::(&mut pool.get().await.unwrap()) + .await + .unwrap() + .iter() + .map(|t| (t.chain_id, t.nonce, t.status.clone())) + .collect::>(); + + assert_eq!(2, data.len()); + assert_eq!( + vec![ + (12, 0, "Approved".to_string()), + (12, 0, "Claimed".to_string()) + ], + data + ); + + indexer_handle.abort() +} + +async fn wait_for_block( + storage: &PgBridgePersistent, + task: &str, + block: u64, +) -> Result<(), anyhow::Error> { + while storage + .get_ongoing_tasks(task) + .await? + .live_task() + .map(|t| t.start_checkpoint) + .unwrap_or_default() + < block + { + tokio::time::sleep(Duration::from_millis(100)).await; + } + Ok(()) +} + +async fn wait_for_back_fill_to_finish( + storage: &PgBridgePersistent, + task: &str, +) -> Result<(), anyhow::Error> { + // wait until tasks are set up + while storage.get_ongoing_tasks(task).await?.live_task().is_none() { + tokio::time::sleep(Duration::from_millis(100)).await; + } + // wait until all backfill tasks have completed + while !storage + .get_ongoing_tasks(task) + .await? 
+ .backfill_tasks_ordered_desc() + .is_empty() + { + tokio::time::sleep(Duration::from_millis(1000)).await; + } + Ok(()) +} + +async fn setup_bridge_env(with_eth_env: bool) -> (IndexerConfig, BridgeTestCluster, TempDb) { + let bridge_test_cluster = BridgeTestClusterBuilder::new() + .with_eth_env(with_eth_env) + .with_bridge_cluster(true) + .with_num_validators(3) + .build() + .await; + + let db = TempDb::new().unwrap(); + + // Run database migration + let conn = Connection::dedicated(db.database().url()).await.unwrap(); + conn.run_pending_migrations(MIGRATIONS).await.unwrap(); + + let config = IndexerConfig { + remote_store_url: format!("{}/rest", bridge_test_cluster.sui_rpc_url()), + checkpoints_path: None, + sui_rpc_url: bridge_test_cluster.sui_rpc_url(), + eth_rpc_url: bridge_test_cluster.eth_rpc_url(), + // TODO: add WS support + eth_ws_url: "".to_string(), + db_url: db.database().url().to_string(), + concurrency: 10, + sui_bridge_genesis_checkpoint: 0, + eth_bridge_genesis_block: 0, + eth_sui_bridge_contract_address: bridge_test_cluster.sui_bridge_address(), + metric_port: 9001, + }; + + (config, bridge_test_cluster, db) +} diff --git a/crates/sui-bridge-watchdog/Cargo.toml b/crates/sui-bridge-watchdog/Cargo.toml deleted file mode 100644 index b6148e6bd6222..0000000000000 --- a/crates/sui-bridge-watchdog/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "sui-bridge-watchdog" -version = "0.1.0" -authors = ["Mysten Labs "] -license = "Apache-2.0" -publish = false -edition = "2021" - -[dependencies] -sui-bridge.workspace = true -mysten-metrics.workspace = true -prometheus.workspace = true -anyhow.workspace = true -futures.workspace = true -async-trait.workspace = true -ethers = { version = "2.0" } -tracing.workspace = true -tokio = { workspace = true, features = ["full"] } diff --git a/crates/sui-bridge-watchdog/eth_bridge_status.rs b/crates/sui-bridge-watchdog/eth_bridge_status.rs new file mode 100644 index 0000000000000..cdd795f2f71f9 --- /dev/null 
+++ b/crates/sui-bridge-watchdog/eth_bridge_status.rs @@ -0,0 +1,58 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The EthBridgeStatus observable monitors whether the Eth Bridge is paused. + +use crate::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::Address as EthAddress; +use prometheus::IntGauge; +use std::sync::Arc; +use sui_bridge::abi::EthSuiBridge; +use sui_bridge::metered_eth_provider::MeteredEthHttpProvier; +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct EthBridgeStatus { + bridge_contract: EthSuiBridge>, + metric: IntGauge, +} + +impl EthBridgeStatus { + pub fn new( + provider: Arc>, + bridge_address: EthAddress, + metric: IntGauge, + ) -> Self { + let bridge_contract = EthSuiBridge::new(bridge_address, provider.clone()); + Self { + bridge_contract, + metric, + } + } +} + +#[async_trait] +impl Observable for EthBridgeStatus { + fn name(&self) -> &str { + "EthBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.bridge_contract.paused().call().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Eth Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting eth bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge-watchdog/eth_vault_balance.rs b/crates/sui-bridge-watchdog/eth_vault_balance.rs new file mode 100644 index 0000000000000..dfc359e0cb393 --- /dev/null +++ b/crates/sui-bridge-watchdog/eth_vault_balance.rs @@ -0,0 +1,75 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::{Address as EthAddress, U256}; +use prometheus::IntGauge; +use std::sync::Arc; +use sui_bridge::abi::EthERC20; +use sui_bridge::metered_eth_provider::MeteredEthHttpProvier; +use tokio::time::Duration; +use tracing::{error, info}; + +const TEN_ZEROS: u64 = 10_u64.pow(10); + +pub struct EthVaultBalance { + coin_contract: EthERC20>, + vault_address: EthAddress, + ten_zeros: U256, + metric: IntGauge, +} + +impl EthVaultBalance { + pub fn new( + provider: Arc>, + vault_address: EthAddress, + coin_address: EthAddress, // for now this only support one coin which is WETH + metric: IntGauge, + ) -> Self { + let ten_zeros = U256::from(TEN_ZEROS); + let coin_contract = EthERC20::new(coin_address, provider); + Self { + coin_contract, + vault_address, + ten_zeros, + metric, + } + } +} + +#[async_trait] +impl Observable for EthVaultBalance { + fn name(&self) -> &str { + "EthVaultBalance" + } + + async fn observe_and_report(&self) { + match self + .coin_contract + .balance_of(self.vault_address) + .call() + .await + { + Ok(balance) => { + // Why downcasting is safe: + // 1. On Ethereum we only take the first 8 decimals into account, + // meaning the trailing 10 digits can be ignored + // 2. i64::MAX is 9_223_372_036_854_775_807, with 8 decimal places is + // 92_233_720_368. We likely won't see any balance higher than this + // in the next 12 months. 
+ let balance = (balance / self.ten_zeros).as_u64() as i64; + self.metric.set(balance); + info!("Eth Vault Balance: {:?}", balance); + } + Err(e) => { + error!("Error getting balance from vault: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge-watchdog/lib.rs b/crates/sui-bridge-watchdog/lib.rs new file mode 100644 index 0000000000000..b78e436fd696a --- /dev/null +++ b/crates/sui-bridge-watchdog/lib.rs @@ -0,0 +1,62 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The BridgeWatchDog module is responsible for monitoring the health +//! of the bridge by periodically running various observables and +//! reporting the results. + +use anyhow::Result; +use async_trait::async_trait; +use mysten_metrics::spawn_logged_monitored_task; +use std::sync::Arc; +use tokio::time::Duration; +use tokio::time::MissedTickBehavior; +use tracing::{error_span, info, Instrument}; + +pub mod eth_bridge_status; +pub mod eth_vault_balance; +pub mod metrics; +pub mod sui_bridge_status; + +pub struct BridgeWatchDog { + observables: Vec>, +} + +impl BridgeWatchDog { + pub fn new(observables: Vec>) -> Self { + Self { observables } + } + + pub async fn run(self) { + let mut handles = vec![]; + for observable in self.observables.into_iter() { + let handle = spawn_logged_monitored_task!(Self::run_observable(observable)); + handles.push(handle); + } + // Return when any task returns an error or all tasks exit. 
+ futures::future::try_join_all(handles).await.unwrap(); + unreachable!("watch dog tasks should not exit"); + } + + async fn run_observable(observable: Arc) -> Result<()> { + let mut interval = tokio::time::interval(observable.interval()); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + let name = observable.name(); + let span = error_span!("observable", name); + loop { + info!("Running observable {}", name); + observable + .observe_and_report() + .instrument(span.clone()) + .await; + interval.tick().await; + } + } +} + +#[async_trait] +pub trait Observable { + fn name(&self) -> &str; + async fn observe_and_report(&self); + fn interval(&self) -> Duration; +} diff --git a/crates/sui-bridge-watchdog/metrics.rs b/crates/sui-bridge-watchdog/metrics.rs new file mode 100644 index 0000000000000..c33d2e4876e3b --- /dev/null +++ b/crates/sui-bridge-watchdog/metrics.rs @@ -0,0 +1,41 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; + +#[derive(Clone, Debug)] +pub struct WatchdogMetrics { + pub eth_vault_balance: IntGauge, + pub eth_bridge_paused: IntGauge, + pub sui_bridge_paused: IntGauge, +} + +impl WatchdogMetrics { + pub fn new(registry: &Registry) -> Self { + Self { + eth_vault_balance: register_int_gauge_with_registry!( + "bridge_eth_vault_balance", + "Current balance of eth vault", + registry, + ) + .unwrap(), + eth_bridge_paused: register_int_gauge_with_registry!( + "bridge_eth_bridge_paused", + "Whether the eth bridge is paused", + registry, + ) + .unwrap(), + sui_bridge_paused: register_int_gauge_with_registry!( + "bridge_sui_bridge_paused", + "Whether the sui bridge is paused", + registry, + ) + .unwrap(), + } + } + + pub fn new_for_testing() -> Self { + let registry = Registry::new(); + Self::new(®istry) + } +} diff --git a/crates/sui-bridge-watchdog/sui_bridge_status.rs b/crates/sui-bridge-watchdog/sui_bridge_status.rs new file mode 100644 
index 0000000000000..09e5b5adf9cb3 --- /dev/null +++ b/crates/sui-bridge-watchdog/sui_bridge_status.rs @@ -0,0 +1,48 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The SuiBridgeStatus observable monitors whether the Sui Bridge is paused. + +use crate::Observable; +use async_trait::async_trait; +use prometheus::IntGauge; +use std::sync::Arc; +use sui_bridge::sui_client::SuiBridgeClient; + +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct SuiBridgeStatus { + sui_client: Arc, + metric: IntGauge, +} + +impl SuiBridgeStatus { + pub fn new(sui_client: Arc, metric: IntGauge) -> Self { + Self { sui_client, metric } + } +} + +#[async_trait] +impl Observable for SuiBridgeStatus { + fn name(&self) -> &str { + "SuiBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.sui_client.is_bridge_paused().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Sui Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting sui bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(2) + } +} diff --git a/crates/sui-bridge/Cargo.toml b/crates/sui-bridge/Cargo.toml index 700d4979f9b6f..8ed6d95cdeb64 100644 --- a/crates/sui-bridge/Cargo.toml +++ b/crates/sui-bridge/Cargo.toml @@ -48,12 +48,15 @@ mysten-common.workspace = true enum_dispatch.workspace = true sui-json-rpc-api.workspace = true sui-test-transaction-builder.workspace = true +hex-literal = { version = "0.3.4", optional = true } +test-cluster = { workspace = true, optional = true } [dev-dependencies] sui-types = { workspace = true, features = ["test-utils"] } sui-json-rpc-types = { workspace = true, features = ["test-utils"] } sui-config.workspace = true sui-test-transaction-builder.workspace = true -test-cluster.workspace = true -hex-literal = "0.3.4" maplit = "1.0.2" + +[features] +test-utils = ["hex-literal", "test-cluster"] diff --git 
a/crates/sui-bridge/src/action_executor.rs b/crates/sui-bridge/src/action_executor.rs index e8d54728e4011..8e0fabafb8191 100644 --- a/crates/sui-bridge/src/action_executor.rs +++ b/crates/sui-bridge/src/action_executor.rs @@ -1513,9 +1513,9 @@ mod tests { let committee = BridgeCommittee::new(authorities).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)), + ))); let metrics = Arc::new(BridgeMetrics::new(®istry)); let sui_token_type_tags = sui_client.get_token_id_map().await.unwrap(); let sui_token_type_tags = Arc::new(ArcSwap::new(Arc::new(sui_token_type_tags))); diff --git a/crates/sui-bridge/src/client/bridge_authority_aggregator.rs b/crates/sui-bridge/src/client/bridge_authority_aggregator.rs index 1de1c41635a7e..c3229fc5e4dcc 100644 --- a/crates/sui-bridge/src/client/bridge_authority_aggregator.rs +++ b/crates/sui-bridge/src/client/bridge_authority_aggregator.rs @@ -7,6 +7,7 @@ use crate::client::bridge_client::BridgeClient; use crate::crypto::BridgeAuthorityPublicKeyBytes; use crate::crypto::BridgeAuthoritySignInfo; use crate::error::{BridgeError, BridgeResult}; +use crate::metrics::BridgeMetrics; use crate::types::BridgeCommitteeValiditySignInfo; use crate::types::{ BridgeAction, BridgeCommittee, CertifiedBridgeAction, VerifiedCertifiedBridgeAction, @@ -24,16 +25,23 @@ use sui_types::committee::StakeUnit; use sui_types::committee::TOTAL_VOTING_POWER; use tracing::{error, info, warn}; -const TOTAL_TIMEOUT_MS: u64 = 5000; -const PREFETCH_TIMEOUT_MS: u64 = 1500; +const TOTAL_TIMEOUT_MS: u64 = 5_000; +const PREFETCH_TIMEOUT_MS: u64 = 1_500; +const RETRY_INTERVAL_MS: u64 = 500; pub struct BridgeAuthorityAggregator { pub committee: Arc, pub clients: Arc>>, + pub metrics: Arc, + pub committee_keys_to_names: Arc>, } impl BridgeAuthorityAggregator { - pub fn new(committee: Arc) -> Self { + pub fn 
new( + committee: Arc, + metrics: Arc, + committee_keys_to_names: Arc>, + ) -> Self { let clients: BTreeMap> = committee .members() .iter() @@ -62,14 +70,30 @@ impl BridgeAuthorityAggregator { Self { committee, clients: Arc::new(clients), + metrics, + committee_keys_to_names, } } + #[cfg(test)] + pub fn new_for_testing(committee: Arc) -> Self { + Self::new( + committee, + Arc::new(BridgeMetrics::new_for_testing()), + Arc::new(BTreeMap::new()), + ) + } + pub async fn request_committee_signatures( &self, action: BridgeAction, ) -> BridgeResult { - let state = GetSigsState::new(action.approval_threshold(), self.committee.clone()); + let state = GetSigsState::new( + action.approval_threshold(), + self.committee.clone(), + self.metrics.clone(), + self.committee_keys_to_names.clone(), + ); request_sign_bridge_action_into_certification( action, self.committee.clone(), @@ -88,16 +112,25 @@ struct GetSigsState { sigs: BTreeMap, validity_threshold: StakeUnit, committee: Arc, + metrics: Arc, + committee_keys_to_names: Arc>, } impl GetSigsState { - fn new(validity_threshold: StakeUnit, committee: Arc) -> Self { + fn new( + validity_threshold: StakeUnit, + committee: Arc, + metrics: Arc, + committee_keys_to_names: Arc>, + ) -> Self { Self { committee, total_bad_stake: 0, total_ok_stake: 0, sigs: BTreeMap::new(), validity_threshold, + metrics, + committee_keys_to_names, } } @@ -119,7 +152,7 @@ impl GetSigsState { match self.sigs.entry(name.clone()) { Entry::Vacant(e) => { e.insert(signed_action.auth_sig().clone()); - self.total_ok_stake += stake; + self.add_ok_stake(stake, &name); } Entry::Occupied(_e) => { return Err(BridgeError::AuthoritySignatureDuplication(format!( @@ -156,7 +189,23 @@ impl GetSigsState { } } - fn add_bad_stake(&mut self, bad_stake: StakeUnit) { + fn add_ok_stake(&mut self, ok_stake: StakeUnit, name: &BridgeAuthorityPublicKeyBytes) { + if let Some(host_name) = self.committee_keys_to_names.get(name) { + self.metrics + .auth_agg_ok_responses + 
.with_label_values(&[host_name]) + .inc(); + } + self.total_ok_stake += ok_stake; + } + + fn add_bad_stake(&mut self, bad_stake: StakeUnit, name: &BridgeAuthorityPublicKeyBytes) { + if let Some(host_name) = self.committee_keys_to_names.get(name) { + self.metrics + .auth_agg_bad_responses + .with_label_values(&[host_name]) + .inc(); + } self.total_bad_stake += bad_stake; } @@ -201,8 +250,29 @@ async fn request_sign_bridge_action_into_certification( clients, preference, state, - |_name, client| { - Box::pin(async move { client.request_sign_bridge_action(action.clone()).await }) + |name, client| { + Box::pin(async move { + let start = std::time::Instant::now(); + let timeout = Duration::from_millis(TOTAL_TIMEOUT_MS); + let retry_interval = Duration::from_millis(RETRY_INTERVAL_MS); + while start.elapsed() < timeout { + match client.request_sign_bridge_action(action.clone()).await { + Ok(result) => { + return Ok(result); + } + // retryable errors + Err(BridgeError::TxNotFinalized) => { + warn!("Bridge authority {} observing transaction not yet finalized, retrying in {:?}", name.concise(), retry_interval); + tokio::time::sleep(retry_interval).await; + } + // non-retryable errors + Err(e) => { + return Err(e); + } + } + } + Err(BridgeError::TransientProviderError(format!("Bridge authority {} did not observe finalized transaction after {:?}", name.concise(), timeout))) + }) }, |mut state, name, stake, result| { Box::pin(async move { @@ -223,7 +293,7 @@ async fn request_sign_bridge_action_into_certification( name.concise(), e ); - state.add_bad_stake(stake); + state.add_bad_stake(stake, &name); } } } @@ -233,7 +303,7 @@ async fn request_sign_bridge_action_into_certification( name.concise(), e ); - state.add_bad_stake(stake); + state.add_bad_stake(stake, &name); } }; @@ -245,7 +315,7 @@ async fn request_sign_bridge_action_into_certification( } }) }, - Duration::from_secs(TOTAL_TIMEOUT_MS), + Duration::from_millis(TOTAL_TIMEOUT_MS), ) .await .map_err(|state| { @@ -296,7 
+366,7 @@ mod tests { } let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); assert_eq!( agg.clients.keys().cloned().collect::>(), BTreeSet::from_iter(vec![ @@ -310,7 +380,7 @@ mod tests { // authority 2 is blocklisted authorities[2].is_blocklisted = true; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); assert_eq!( agg.clients.keys().cloned().collect::>(), BTreeSet::from_iter(vec![ @@ -323,7 +393,7 @@ mod tests { // authority 3 has bad url authorities[3].base_url = "".into(); let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); assert_eq!( agg.clients.keys().cloned().collect::>(), BTreeSet::from_iter(vec![ @@ -351,7 +421,7 @@ mod tests { let committee = BridgeCommittee::new(authorities).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; @@ -468,7 +538,7 @@ mod tests { let authorities_clone = authorities.clone(); let committee = Arc::new(BridgeCommittee::new(authorities_clone).unwrap()); - let agg = BridgeAuthorityAggregator::new(committee.clone()); + let agg = BridgeAuthorityAggregator::new_for_testing(committee.clone()); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; @@ -542,7 +612,13 @@ mod tests { // we should receive all signatures in time, but only aggregate 2 authorities // to achieve quorum - let state = GetSigsState::new(action.approval_threshold(), committee.clone()); + let metrics = 
Arc::new(BridgeMetrics::new_for_testing()); + let state = GetSigsState::new( + action.approval_threshold(), + committee.clone(), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let resp = request_sign_bridge_action_into_certification( action.clone(), agg.committee.clone(), @@ -559,7 +635,12 @@ mod tests { // we should receive all but the highest stake signatures in time, but still be able to // achieve quorum with 3 sigs - let state = GetSigsState::new(action.approval_threshold(), committee.clone()); + let state = GetSigsState::new( + action.approval_threshold(), + committee.clone(), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let resp = request_sign_bridge_action_into_certification( action.clone(), agg.committee.clone(), @@ -576,7 +657,12 @@ mod tests { assert!(!sig_keys.contains(&authorities[8].pubkey_bytes())); // we should have fallen back to arrival order given that we timeout before we reach quorum - let state = GetSigsState::new(action.approval_threshold(), committee.clone()); + let state = GetSigsState::new( + action.approval_threshold(), + committee.clone(), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let start = std::time::Instant::now(); let resp = request_sign_bridge_action_into_certification( action.clone(), @@ -625,7 +711,7 @@ mod tests { let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; @@ -721,40 +807,52 @@ mod tests { let committee = BridgeCommittee::new(authorities.clone()).unwrap(); let threshold = VALIDITY_THRESHOLD; - let mut state = GetSigsState::new(threshold, Arc::new(committee)); + let metrics = Arc::new(BridgeMetrics::new_for_testing()); + let mut state = GetSigsState::new( + threshold, + Arc::new(committee), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); 
assert!(!state.is_too_many_error()); - + let dummy = authorities[0].pubkey_bytes(); // bad stake: 2500 - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(!state.is_too_many_error()); // bad stake ; 5000 - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(!state.is_too_many_error()); // bad stake : 6666 - state.add_bad_stake(1666); + state.add_bad_stake(1666, &dummy); assert!(!state.is_too_many_error()); // bad stake : 6667 - too many errors - state.add_bad_stake(1); + state.add_bad_stake(1, &dummy); assert!(state.is_too_many_error()); // Authority 0 is blocklisted, we lose 2500 stake authorities[0].is_blocklisted = true; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); let threshold = VALIDITY_THRESHOLD; - let mut state = GetSigsState::new(threshold, Arc::new(committee)); + let metrics = Arc::new(BridgeMetrics::new_for_testing()); + let mut state = GetSigsState::new( + threshold, + Arc::new(committee), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); assert!(!state.is_too_many_error()); // bad stake: 2500 + 2500 - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(!state.is_too_many_error()); // bad stake: 5000 + 2500 - too many errors - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(state.is_too_many_error()); // Below we test `handle_verified_signed_action` @@ -764,7 +862,12 @@ mod tests { authorities[3].is_blocklisted = true; // blocklist authority 3 let committee = BridgeCommittee::new(authorities.clone()).unwrap(); let threshold = VALIDITY_THRESHOLD; - let mut state = GetSigsState::new(threshold, Arc::new(committee.clone())); + let mut state = GetSigsState::new( + threshold, + Arc::new(committee.clone()), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; diff --git a/crates/sui-bridge/src/client/bridge_client.rs b/crates/sui-bridge/src/client/bridge_client.rs index 
83c11e73ba995..09ceac2d6d133 100644 --- a/crates/sui-bridge/src/client/bridge_client.rs +++ b/crates/sui-bridge/src/client/bridge_client.rs @@ -207,11 +207,16 @@ impl BridgeClient { .await?; if !resp.status().is_success() { let error_status = format!("{:?}", resp.error_for_status_ref()); - return Err(BridgeError::RestAPIError(format!( - "request_sign_bridge_action failed with status {:?}: {:?}", - error_status, - resp.text().await? - ))); + let resp_text = resp.text().await?; + return match resp_text { + text if text.contains(&format!("{:?}", BridgeError::TxNotFinalized)) => { + Err(BridgeError::TxNotFinalized) + } + _ => Err(BridgeError::RestAPIError(format!( + "request_sign_bridge_action failed with status {:?}: {:?}", + error_status, resp_text + ))), + }; } let signed_bridge_action = resp.json().await?; verify_signed_bridge_action( diff --git a/crates/sui-bridge/src/config.rs b/crates/sui-bridge/src/config.rs index e59576417caac..12464b171c621 100644 --- a/crates/sui-bridge/src/config.rs +++ b/crates/sui-bridge/src/config.rs @@ -17,6 +17,7 @@ use ethers::types::Address as EthAddress; use futures::{future, StreamExt}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; +use std::collections::BTreeMap; use std::collections::HashSet; use std::path::PathBuf; use std::str::FromStr; @@ -119,6 +120,9 @@ pub struct BridgeNodeConfig { pub metrics_key_pair: NetworkKeyPair, #[serde(skip_serializing_if = "Option::is_none")] pub metrics: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub watchdog_config: Option, } pub fn default_ed25519_key_pair() -> NetworkKeyPair { @@ -133,6 +137,13 @@ pub struct MetricsConfig { pub push_url: String, } +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct WatchdogConfig { + /// Total supplies to watch on Sui. 
Mapping from coin name to coin type tag + pub total_supplies: BTreeMap, +} + impl Config for BridgeNodeConfig {} impl BridgeNodeConfig { @@ -197,6 +208,7 @@ impl BridgeNodeConfig { let bridge_server_config = BridgeServerConfig { key: bridge_authority_key, metrics_port: self.metrics_port, + eth_bridge_proxy_address: eth_contracts[0], // the first contract is bridge proxy server_listen_port: self.server_listen_port, sui_client: sui_client.clone(), eth_client: eth_client.clone(), @@ -385,6 +397,7 @@ impl BridgeNodeConfig { pub struct BridgeServerConfig { pub key: BridgeAuthorityKeyPair, pub server_listen_port: u16, + pub eth_bridge_proxy_address: EthAddress, pub metrics_port: u16, pub sui_client: Arc>, pub eth_client: Arc>, diff --git a/crates/sui-bridge/src/e2e_tests/basic.rs b/crates/sui-bridge/src/e2e_tests/basic.rs index abdde652e9c13..21f11273b4e11 100644 --- a/crates/sui-bridge/src/e2e_tests/basic.rs +++ b/crates/sui-bridge/src/e2e_tests/basic.rs @@ -1,49 +1,37 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::abi::{eth_sui_bridge, EthBridgeEvent, EthERC20, EthSuiBridge}; +use crate::abi::{eth_sui_bridge, EthSuiBridge}; use crate::client::bridge_authority_aggregator::BridgeAuthorityAggregator; use crate::crypto::BridgeAuthorityKeyPair; +use crate::e2e_tests::test_utils::TestClusterWrapperBuilder; use crate::e2e_tests::test_utils::{ - get_signatures, send_eth_tx_and_get_tx_receipt, BridgeTestClusterBuilder, + get_signatures, initiate_bridge_erc20_to_sui, initiate_bridge_eth_to_sui, + initiate_bridge_sui_to_eth, send_eth_tx_and_get_tx_receipt, BridgeTestClusterBuilder, }; -use crate::e2e_tests::test_utils::{BridgeTestCluster, TestClusterWrapperBuilder}; use crate::eth_transaction_builder::build_eth_transaction; use crate::events::{ SuiBridgeEvent, SuiToEthTokenBridgeV1, TokenTransferApproved, TokenTransferClaimed, }; -use crate::sui_client::SuiBridgeClient; use crate::sui_transaction_builder::build_add_tokens_on_sui_transaction; -use crate::types::{AddTokensOnEvmAction, BridgeAction, BridgeActionStatus, SuiToEthBridgeAction}; +use crate::types::{AddTokensOnEvmAction, BridgeAction}; use crate::utils::publish_and_register_coins_return_add_coins_on_sui_action; -use crate::utils::EthSigner; use crate::BRIDGE_ENABLE_PROTOCOL_VERSION; -use eth_sui_bridge::EthSuiBridgeEvents; use ethers::prelude::*; use ethers::types::Address as EthAddress; -use move_core_types::ident_str; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use sui_json_rpc_api::BridgeReadApiClient; use sui_types::crypto::get_key_pair; use test_cluster::TestClusterBuilder; use std::path::Path; -use anyhow::anyhow; use std::sync::Arc; -use sui_json_rpc_types::{ - SuiExecutionStatus, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, -}; -use sui_sdk::wallet_context::WalletContext; -use sui_sdk::SuiClient; -use sui_types::base_types::{ObjectRef, SuiAddress}; +use sui_json_rpc_types::{SuiExecutionStatus, 
SuiTransactionBlockEffectsAPI}; use sui_types::bridge::{ - get_bridge, BridgeChainId, BridgeTokenMetadata, BridgeTrait, BRIDGE_MODULE_NAME, TOKEN_ID_ETH, + get_bridge, BridgeChainId, BridgeTokenMetadata, BridgeTrait, TOKEN_ID_ETH, }; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::transaction::{ObjectArg, TransactionData}; -use sui_types::{TypeTag, BRIDGE_PACKAGE_ID, SUI_BRIDGE_OBJECT_ID}; -use tap::TapFallible; +use sui_types::SUI_BRIDGE_OBJECT_ID; use tracing::info; #[tokio::test(flavor = "multi_thread", worker_threads = 8)] @@ -190,7 +178,6 @@ async fn test_add_new_coins_on_sui_and_eth() { .with_num_validators(3) .build() .await; - let bridge_arg = bridge_test_cluster.get_mut_bridge_arg().await.unwrap(); // Register tokens on Sui @@ -239,7 +226,7 @@ async fn test_add_new_coins_on_sui_and_eth() { .await .expect("Failed to get bridge committee"), ); - let agg = BridgeAuthorityAggregator::new(bridge_committee); + let agg = BridgeAuthorityAggregator::new_for_testing(bridge_committee); let certified_sui_action = agg .request_committee_signatures(sui_action) .await @@ -427,343 +414,3 @@ async fn test_bridge_api_compatibility() { .await .unwrap(); } - -pub(crate) async fn deposit_native_eth_to_sol_contract( - signer: &EthSigner, - contract_address: EthAddress, - sui_recipient_address: SuiAddress, - sui_chain_id: BridgeChainId, - amount: u64, -) -> ContractCall { - let contract = EthSuiBridge::new(contract_address, signer.clone().into()); - let sui_recipient_address = sui_recipient_address.to_vec().into(); - let amount = U256::from(amount) * U256::exp10(18); // 1 ETH - contract - .bridge_eth(sui_recipient_address, sui_chain_id as u8) - .value(amount) -} - -async fn deposit_eth_to_sui_package( - sui_client: &SuiClient, - sui_address: SuiAddress, - wallet_context: &WalletContext, - target_chain: BridgeChainId, - target_address: EthAddress, - token: ObjectRef, - bridge_object_arg: ObjectArg, - sui_token_type_tags: &HashMap, 
-) -> Result { - let mut builder = ProgrammableTransactionBuilder::new(); - let arg_target_chain = builder.pure(target_chain as u8).unwrap(); - let arg_target_address = builder.pure(target_address.as_bytes()).unwrap(); - let arg_token = builder.obj(ObjectArg::ImmOrOwnedObject(token)).unwrap(); - let arg_bridge = builder.obj(bridge_object_arg).unwrap(); - - builder.programmable_move_call( - BRIDGE_PACKAGE_ID, - BRIDGE_MODULE_NAME.to_owned(), - ident_str!("send_token").to_owned(), - vec![sui_token_type_tags.get(&TOKEN_ID_ETH).unwrap().clone()], - vec![arg_bridge, arg_target_chain, arg_target_address, arg_token], - ); - - let pt = builder.finish(); - let gas_object_ref = wallet_context - .get_one_gas_object_owned_by_address(sui_address) - .await - .unwrap() - .unwrap(); - let tx_data = TransactionData::new_programmable( - sui_address, - vec![gas_object_ref], - pt, - 500_000_000, - sui_client - .governance_api() - .get_reference_gas_price() - .await - .unwrap(), - ); - let tx = wallet_context.sign_transaction(&tx_data); - wallet_context.execute_transaction_may_fail(tx).await -} - -pub async fn initiate_bridge_erc20_to_sui( - bridge_test_cluster: &BridgeTestCluster, - amount_u64: u64, - token_address: EthAddress, - token_id: u8, - nonce: u64, -) -> Result<(), anyhow::Error> { - let (eth_signer, eth_address) = bridge_test_cluster - .get_eth_signer_and_address() - .await - .unwrap(); - - // First, mint ERC20 tokens to the signer - let contract = EthERC20::new(token_address, eth_signer.clone().into()); - let decimal = contract.decimals().await? 
as usize; - let amount = U256::from(amount_u64) * U256::exp10(decimal); - let sui_amount = amount.as_u64(); - let mint_call = contract.mint(eth_address, amount); - let mint_tx_receipt = send_eth_tx_and_get_tx_receipt(mint_call).await; - assert_eq!(mint_tx_receipt.status.unwrap().as_u64(), 1); - - // Second, set allowance - let allowance_call = contract.approve(bridge_test_cluster.contracts().sui_bridge, amount); - let allowance_tx_receipt = send_eth_tx_and_get_tx_receipt(allowance_call).await; - assert_eq!(allowance_tx_receipt.status.unwrap().as_u64(), 1); - - // Third, deposit to bridge - let sui_recipient_address = bridge_test_cluster.sui_user_address(); - let sui_chain_id = bridge_test_cluster.sui_chain_id(); - let eth_chain_id = bridge_test_cluster.eth_chain_id(); - - info!( - "Depositing ERC20 (token id:{}, token_address: {}) to Solidity contract", - token_id, token_address - ); - let contract = EthSuiBridge::new( - bridge_test_cluster.contracts().sui_bridge, - eth_signer.clone().into(), - ); - let deposit_call = contract.bridge_erc20( - token_id, - amount, - sui_recipient_address.to_vec().into(), - sui_chain_id as u8, - ); - let tx_receipt = send_eth_tx_and_get_tx_receipt(deposit_call).await; - let eth_bridge_event = tx_receipt - .logs - .iter() - .find_map(EthBridgeEvent::try_from_log) - .unwrap(); - let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( - eth_bridge_event, - )) = eth_bridge_event - else { - unreachable!(); - }; - // assert eth log matches - assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); - assert_eq!(eth_bridge_event.nonce, nonce); - assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); - assert_eq!(eth_bridge_event.token_id, token_id); - assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); - assert_eq!(eth_bridge_event.sender_address, eth_address); - assert_eq!( - eth_bridge_event.recipient_address, - sui_recipient_address.to_vec() - ); - info!( - "Deposited ERC20 
(token id:{}, token_address: {}) to Solidity contract", - token_id, token_address - ); - - wait_for_transfer_action_status( - bridge_test_cluster.bridge_client(), - eth_chain_id, - nonce, - BridgeActionStatus::Claimed, - ) - .await - .tap_ok(|_| { - info!( - nonce, - token_id, amount_u64, "Eth to Sui bridge transfer claimed" - ); - }) -} - -pub async fn initiate_bridge_eth_to_sui( - bridge_test_cluster: &BridgeTestCluster, - amount: u64, - nonce: u64, -) -> Result<(), anyhow::Error> { - info!("Depositing native Ether to Solidity contract, nonce: {nonce}, amount: {amount}"); - let (eth_signer, eth_address) = bridge_test_cluster - .get_eth_signer_and_address() - .await - .unwrap(); - - let sui_address = bridge_test_cluster.sui_user_address(); - let sui_chain_id = bridge_test_cluster.sui_chain_id(); - let eth_chain_id = bridge_test_cluster.eth_chain_id(); - let token_id = TOKEN_ID_ETH; - - let sui_amount = (U256::from(amount) * U256::exp10(8)).as_u64(); // DP for Ether on Sui - - let eth_tx = deposit_native_eth_to_sol_contract( - ð_signer, - bridge_test_cluster.contracts().sui_bridge, - sui_address, - sui_chain_id, - amount, - ) - .await; - let tx_receipt = send_eth_tx_and_get_tx_receipt(eth_tx).await; - let eth_bridge_event = tx_receipt - .logs - .iter() - .find_map(EthBridgeEvent::try_from_log) - .unwrap(); - let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( - eth_bridge_event, - )) = eth_bridge_event - else { - unreachable!(); - }; - // assert eth log matches - assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); - assert_eq!(eth_bridge_event.nonce, nonce); - assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); - assert_eq!(eth_bridge_event.token_id, token_id); - assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); - assert_eq!(eth_bridge_event.sender_address, eth_address); - assert_eq!(eth_bridge_event.recipient_address, sui_address.to_vec()); - info!( - "Deposited Eth to Solidity contract, 
block: {:?}", - tx_receipt.block_number - ); - - wait_for_transfer_action_status( - bridge_test_cluster.bridge_client(), - eth_chain_id, - nonce, - BridgeActionStatus::Claimed, - ) - .await - .tap_ok(|_| { - info!("Eth to Sui bridge transfer claimed"); - }) -} - -pub async fn initiate_bridge_sui_to_eth( - bridge_test_cluster: &BridgeTestCluster, - eth_address: EthAddress, - token: ObjectRef, - nonce: u64, - sui_amount: u64, -) -> Result { - let bridge_object_arg = bridge_test_cluster - .bridge_client() - .get_mutable_bridge_object_arg_must_succeed() - .await; - let sui_client = bridge_test_cluster.sui_client(); - let token_types = bridge_test_cluster - .bridge_client() - .get_token_id_map() - .await - .unwrap(); - let sui_address = bridge_test_cluster.sui_user_address(); - - let resp = match deposit_eth_to_sui_package( - sui_client, - sui_address, - bridge_test_cluster.wallet(), - bridge_test_cluster.eth_chain_id(), - eth_address, - token, - bridge_object_arg, - &token_types, - ) - .await - { - Ok(resp) => { - if !resp.status_ok().unwrap() { - return Err(anyhow!("Sui TX error")); - } else { - resp - } - } - Err(e) => return Err(e), - }; - - let sui_events = resp.events.unwrap().data; - let bridge_event = sui_events - .iter() - .filter_map(|e| { - let sui_bridge_event = SuiBridgeEvent::try_from_sui_event(e).unwrap()?; - sui_bridge_event.try_into_bridge_action(e.id.tx_digest, e.id.event_seq as u16) - }) - .find_map(|e| { - if let BridgeAction::SuiToEthBridgeAction(a) = e { - Some(a) - } else { - None - } - }) - .unwrap(); - info!("Deposited Eth to move package"); - assert_eq!(bridge_event.sui_bridge_event.nonce, nonce); - assert_eq!( - bridge_event.sui_bridge_event.sui_chain_id, - bridge_test_cluster.sui_chain_id() - ); - assert_eq!( - bridge_event.sui_bridge_event.eth_chain_id, - bridge_test_cluster.eth_chain_id() - ); - assert_eq!(bridge_event.sui_bridge_event.sui_address, sui_address); - assert_eq!(bridge_event.sui_bridge_event.eth_address, eth_address); - 
assert_eq!(bridge_event.sui_bridge_event.token_id, TOKEN_ID_ETH); - assert_eq!( - bridge_event.sui_bridge_event.amount_sui_adjusted, - sui_amount - ); - - // Wait for the bridge action to be approved - wait_for_transfer_action_status( - bridge_test_cluster.bridge_client(), - bridge_test_cluster.sui_chain_id(), - nonce, - BridgeActionStatus::Approved, - ) - .await - .unwrap(); - info!("Sui to Eth bridge transfer approved."); - - Ok(bridge_event) -} - -async fn wait_for_transfer_action_status( - sui_bridge_client: &SuiBridgeClient, - chain_id: BridgeChainId, - nonce: u64, - status: BridgeActionStatus, -) -> Result<(), anyhow::Error> { - // Wait for the bridge action to be approved - let now = std::time::Instant::now(); - info!( - "Waiting for onchain status {:?}. chain: {:?}, nonce: {nonce}", - status, chain_id as u8 - ); - loop { - let timer = std::time::Instant::now(); - let res = sui_bridge_client - .get_token_transfer_action_onchain_status_until_success(chain_id as u8, nonce) - .await; - info!( - "get_token_transfer_action_onchain_status_until_success took {:?}, status: {:?}", - timer.elapsed(), - res - ); - - if res == status { - info!( - "detected on chain status {:?}. chain: {:?}, nonce: {nonce}", - status, chain_id as u8 - ); - return Ok(()); - } - if now.elapsed().as_secs() > 60 { - return Err(anyhow!( - "Timeout waiting for token transfer action to be {:?}. chain_id: {chain_id:?}, nonce: {nonce}. 
Time elapsed: {:?}", - status, - now.elapsed(), - )); - } - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } -} diff --git a/crates/sui-bridge/src/e2e_tests/complex.rs b/crates/sui-bridge/src/e2e_tests/complex.rs index d822074146ae6..a52c3649d2f1c 100644 --- a/crates/sui-bridge/src/e2e_tests/complex.rs +++ b/crates/sui-bridge/src/e2e_tests/complex.rs @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::client::bridge_authority_aggregator::BridgeAuthorityAggregator; -use crate::e2e_tests::basic::initiate_bridge_eth_to_sui; -use crate::e2e_tests::basic::initiate_bridge_sui_to_eth; -use crate::e2e_tests::test_utils::BridgeTestClusterBuilder; + +use crate::e2e_tests::test_utils::{ + initiate_bridge_eth_to_sui, initiate_bridge_sui_to_eth, BridgeTestClusterBuilder, +}; use crate::sui_transaction_builder::build_sui_transaction; use crate::types::{BridgeAction, EmergencyAction}; use crate::types::{BridgeActionStatus, EmergencyActionType}; @@ -70,7 +71,7 @@ async fn test_sui_bridge_paused() { // get pause bridge signatures from committee let bridge_committee = Arc::new(bridge_client.get_bridge_committee().await.unwrap()); - let agg = BridgeAuthorityAggregator::new(bridge_committee); + let agg = BridgeAuthorityAggregator::new_for_testing(bridge_committee); let certified_action = agg .request_committee_signatures(pause_action) .await diff --git a/crates/sui-bridge/src/e2e_tests/mod.rs b/crates/sui-bridge/src/e2e_tests/mod.rs index 26ee8f143271a..9b88cebf9d5d3 100644 --- a/crates/sui-bridge/src/e2e_tests/mod.rs +++ b/crates/sui-bridge/src/e2e_tests/mod.rs @@ -1,6 +1,8 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +#[cfg(test)] mod basic; +#[cfg(test)] mod complex; pub mod test_utils; diff --git a/crates/sui-bridge/src/e2e_tests/test_utils.rs b/crates/sui-bridge/src/e2e_tests/test_utils.rs index 187f28d5eb9b0..6a95435f26011 100644 --- a/crates/sui-bridge/src/e2e_tests/test_utils.rs +++ b/crates/sui-bridge/src/e2e_tests/test_utils.rs @@ -1,8 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::abi::EthBridgeCommittee; use crate::abi::EthBridgeConfig; +use crate::abi::{EthBridgeCommittee, EthBridgeEvent, EthERC20, EthSuiBridge, EthSuiBridgeEvents}; use crate::config::default_ed25519_key_pair; use crate::crypto::BridgeAuthorityKeyPair; use crate::crypto::BridgeAuthorityPublicKeyBytes; @@ -12,10 +12,10 @@ use crate::metrics::BridgeMetrics; use crate::server::BridgeNodePublicMetadata; use crate::sui_transaction_builder::build_add_tokens_on_sui_transaction; use crate::sui_transaction_builder::build_committee_register_transaction; -use crate::types::BridgeAction; use crate::types::BridgeCommitteeValiditySignInfo; use crate::types::CertifiedBridgeAction; use crate::types::VerifiedCertifiedBridgeAction; +use crate::types::{BridgeAction, BridgeActionStatus, SuiToEthBridgeAction}; use crate::utils::get_eth_signer_client; use crate::utils::publish_and_register_coins_return_add_coins_on_sui_action; use crate::utils::wait_for_server_to_be_up; @@ -23,13 +23,13 @@ use crate::utils::EthSigner; use ethers::types::Address as EthAddress; use futures::future::join_all; use futures::Future; -use move_core_types::language_storage::StructTag; +use move_core_types::language_storage::{StructTag, TypeTag}; use prometheus::Registry; use rand::rngs::SmallRng; use rand::{Rng, SeedableRng}; use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; use std::collections::HashSet; +use std::collections::{BTreeMap, HashMap}; use std::fs::File; use std::fs::{self, DirBuilder}; use std::io::{Read, Write}; @@ -48,12 
+48,12 @@ use sui_json_rpc_types::SuiTransactionBlockResponseQuery; use sui_json_rpc_types::TransactionFilter; use sui_sdk::wallet_context::WalletContext; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::ObjectID; -use sui_types::bridge::get_bridge; +use sui_types::base_types::{ObjectID, ObjectRef}; use sui_types::bridge::get_bridge_obj_initial_shared_version; use sui_types::bridge::BridgeChainId; use sui_types::bridge::BridgeSummary; use sui_types::bridge::BridgeTrait; +use sui_types::bridge::{get_bridge, BRIDGE_MODULE_NAME}; use sui_types::bridge::{TOKEN_ID_BTC, TOKEN_ID_ETH, TOKEN_ID_USDC, TOKEN_ID_USDT}; use sui_types::committee::TOTAL_VOTING_POWER; use sui_types::crypto::get_key_pair; @@ -61,7 +61,7 @@ use sui_types::crypto::ToFromBytes; use sui_types::digests::TransactionDigest; use sui_types::object::Object; use sui_types::transaction::{ObjectArg, Transaction, TransactionData}; -use sui_types::SUI_BRIDGE_OBJECT_ID; +use sui_types::{BRIDGE_PACKAGE_ID, SUI_BRIDGE_OBJECT_ID}; use tokio::join; use tokio::task::JoinHandle; use tokio::time::Instant; @@ -73,13 +73,17 @@ use crate::config::{BridgeNodeConfig, EthConfig, SuiConfig}; use crate::node::run_bridge_node; use crate::sui_client::SuiBridgeClient; use crate::BRIDGE_ENABLE_PROTOCOL_VERSION; +use anyhow::anyhow; use ethers::prelude::*; +use move_core_types::ident_str; use std::process::Child; use sui_config::local_ip_utils::get_available_port; use sui_sdk::SuiClient; use sui_types::base_types::SuiAddress; use sui_types::crypto::EncodeDecodeBase64; use sui_types::crypto::KeypairTraits; +use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; +use tap::TapFallible; use tempfile::tempdir; use test_cluster::TestCluster; use test_cluster::TestClusterBuilder; @@ -294,7 +298,7 @@ impl BridgeTestCluster { self.eth_chain_id } - pub(crate) fn eth_env(&self) -> &EthBridgeEnvironment { + pub fn eth_env(&self) -> &EthBridgeEnvironment { &self.eth_environment } @@ 
-716,7 +720,7 @@ impl EthBridgeEnvironment { self.contracts.as_ref().unwrap() } - pub(crate) fn get_bridge_config( + pub fn get_bridge_config( &self, ) -> EthBridgeConfig> { let provider = Arc::new( @@ -727,7 +731,7 @@ impl EthBridgeEnvironment { EthBridgeConfig::new(self.contracts().bridge_config, provider.clone()) } - pub(crate) async fn get_supported_token(&self, token_id: u8) -> (EthAddress, u8, u64) { + pub async fn get_supported_token(&self, token_id: u8) -> (EthAddress, u8, u64) { let config = self.get_bridge_config(); let token_address = config.token_address_of(token_id).call().await.unwrap(); let token_sui_decimal = config.token_sui_decimal_of(token_id).call().await.unwrap(); @@ -804,6 +808,7 @@ pub(crate) async fn start_bridge_cluster( }, metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory handles.push( @@ -819,7 +824,7 @@ pub(crate) async fn start_bridge_cluster( handles } -pub(crate) async fn get_signatures( +pub async fn get_signatures( sui_bridge_client: &SuiBridgeClient, nonce: u64, sui_chain_id: u8, @@ -1210,3 +1215,343 @@ async fn trigger_reconfiguration_if_not_yet_and_assert_bridge_committee_initiali bridge.committee().members.contents.len() ); } + +pub async fn initiate_bridge_eth_to_sui( + bridge_test_cluster: &BridgeTestCluster, + amount: u64, + nonce: u64, +) -> Result<(), anyhow::Error> { + info!("Depositing native Ether to Solidity contract, nonce: {nonce}, amount: {amount}"); + let (eth_signer, eth_address) = bridge_test_cluster + .get_eth_signer_and_address() + .await + .unwrap(); + + let sui_address = bridge_test_cluster.sui_user_address(); + let sui_chain_id = bridge_test_cluster.sui_chain_id(); + let eth_chain_id = bridge_test_cluster.eth_chain_id(); + let token_id = TOKEN_ID_ETH; + + let sui_amount = (U256::from(amount) * U256::exp10(8)).as_u64(); // DP for Ether on Sui + + let eth_tx = deposit_native_eth_to_sol_contract( + ð_signer, + 
bridge_test_cluster.contracts().sui_bridge, + sui_address, + sui_chain_id, + amount, + ) + .await; + let tx_receipt = send_eth_tx_and_get_tx_receipt(eth_tx).await; + let eth_bridge_event = tx_receipt + .logs + .iter() + .find_map(EthBridgeEvent::try_from_log) + .unwrap(); + let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( + eth_bridge_event, + )) = eth_bridge_event + else { + unreachable!(); + }; + // assert eth log matches + assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); + assert_eq!(eth_bridge_event.nonce, nonce); + assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); + assert_eq!(eth_bridge_event.token_id, token_id); + assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); + assert_eq!(eth_bridge_event.sender_address, eth_address); + assert_eq!(eth_bridge_event.recipient_address, sui_address.to_vec()); + info!( + "Deposited Eth to Solidity contract, block: {:?}", + tx_receipt.block_number + ); + + wait_for_transfer_action_status( + bridge_test_cluster.bridge_client(), + eth_chain_id, + nonce, + BridgeActionStatus::Claimed, + ) + .await + .tap_ok(|_| { + info!("Eth to Sui bridge transfer claimed"); + }) +} + +pub async fn initiate_bridge_sui_to_eth( + bridge_test_cluster: &BridgeTestCluster, + eth_address: EthAddress, + token: ObjectRef, + nonce: u64, + sui_amount: u64, +) -> Result { + let bridge_object_arg = bridge_test_cluster + .bridge_client() + .get_mutable_bridge_object_arg_must_succeed() + .await; + let sui_client = bridge_test_cluster.sui_client(); + let token_types = bridge_test_cluster + .bridge_client() + .get_token_id_map() + .await + .unwrap(); + let sui_address = bridge_test_cluster.sui_user_address(); + + let resp = match deposit_eth_to_sui_package( + sui_client, + sui_address, + bridge_test_cluster.wallet(), + bridge_test_cluster.eth_chain_id(), + eth_address, + token, + bridge_object_arg, + &token_types, + ) + .await + { + Ok(resp) => { + if !resp.status_ok().unwrap() { 
+ return Err(anyhow!("Sui TX error")); + } else { + resp + } + } + Err(e) => return Err(e), + }; + + let sui_events = resp.events.unwrap().data; + let bridge_event = sui_events + .iter() + .filter_map(|e| { + let sui_bridge_event = SuiBridgeEvent::try_from_sui_event(e).unwrap()?; + sui_bridge_event.try_into_bridge_action(e.id.tx_digest, e.id.event_seq as u16) + }) + .find_map(|e| { + if let BridgeAction::SuiToEthBridgeAction(a) = e { + Some(a) + } else { + None + } + }) + .unwrap(); + info!("Deposited Eth to move package"); + assert_eq!(bridge_event.sui_bridge_event.nonce, nonce); + assert_eq!( + bridge_event.sui_bridge_event.sui_chain_id, + bridge_test_cluster.sui_chain_id() + ); + assert_eq!( + bridge_event.sui_bridge_event.eth_chain_id, + bridge_test_cluster.eth_chain_id() + ); + assert_eq!(bridge_event.sui_bridge_event.sui_address, sui_address); + assert_eq!(bridge_event.sui_bridge_event.eth_address, eth_address); + assert_eq!(bridge_event.sui_bridge_event.token_id, TOKEN_ID_ETH); + assert_eq!( + bridge_event.sui_bridge_event.amount_sui_adjusted, + sui_amount + ); + + // Wait for the bridge action to be approved + wait_for_transfer_action_status( + bridge_test_cluster.bridge_client(), + bridge_test_cluster.sui_chain_id(), + nonce, + BridgeActionStatus::Approved, + ) + .await + .unwrap(); + info!("Sui to Eth bridge transfer approved."); + + Ok(bridge_event) +} + +async fn wait_for_transfer_action_status( + sui_bridge_client: &SuiBridgeClient, + chain_id: BridgeChainId, + nonce: u64, + status: BridgeActionStatus, +) -> Result<(), anyhow::Error> { + // Wait for the bridge action to be approved + let now = std::time::Instant::now(); + info!( + "Waiting for onchain status {:?}. 
chain: {:?}, nonce: {nonce}", + status, chain_id as u8 + ); + loop { + let timer = std::time::Instant::now(); + let res = sui_bridge_client + .get_token_transfer_action_onchain_status_until_success(chain_id as u8, nonce) + .await; + info!( + "get_token_transfer_action_onchain_status_until_success took {:?}, status: {:?}", + timer.elapsed(), + res + ); + + if res == status { + info!( + "detected on chain status {:?}. chain: {:?}, nonce: {nonce}", + status, chain_id as u8 + ); + return Ok(()); + } + if now.elapsed().as_secs() > 60 { + return Err(anyhow!( + "Timeout waiting for token transfer action to be {:?}. chain_id: {chain_id:?}, nonce: {nonce}. Time elapsed: {:?}", + status, + now.elapsed(), + )); + } + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } +} + +async fn deposit_eth_to_sui_package( + sui_client: &SuiClient, + sui_address: SuiAddress, + wallet_context: &WalletContext, + target_chain: BridgeChainId, + target_address: EthAddress, + token: ObjectRef, + bridge_object_arg: ObjectArg, + sui_token_type_tags: &HashMap, +) -> Result { + let mut builder = ProgrammableTransactionBuilder::new(); + let arg_target_chain = builder.pure(target_chain as u8).unwrap(); + let arg_target_address = builder.pure(target_address.as_bytes()).unwrap(); + let arg_token = builder.obj(ObjectArg::ImmOrOwnedObject(token)).unwrap(); + let arg_bridge = builder.obj(bridge_object_arg).unwrap(); + + builder.programmable_move_call( + BRIDGE_PACKAGE_ID, + BRIDGE_MODULE_NAME.to_owned(), + ident_str!("send_token").to_owned(), + vec![sui_token_type_tags.get(&TOKEN_ID_ETH).unwrap().clone()], + vec![arg_bridge, arg_target_chain, arg_target_address, arg_token], + ); + + let pt = builder.finish(); + let gas_object_ref = wallet_context + .get_one_gas_object_owned_by_address(sui_address) + .await + .unwrap() + .unwrap(); + let tx_data = TransactionData::new_programmable( + sui_address, + vec![gas_object_ref], + pt, + 500_000_000, + sui_client + .governance_api() + 
.get_reference_gas_price() + .await + .unwrap(), + ); + let tx = wallet_context.sign_transaction(&tx_data); + wallet_context.execute_transaction_may_fail(tx).await +} + +pub async fn initiate_bridge_erc20_to_sui( + bridge_test_cluster: &BridgeTestCluster, + amount_u64: u64, + token_address: EthAddress, + token_id: u8, + nonce: u64, +) -> Result<(), anyhow::Error> { + let (eth_signer, eth_address) = bridge_test_cluster + .get_eth_signer_and_address() + .await + .unwrap(); + + // First, mint ERC20 tokens to the signer + let contract = EthERC20::new(token_address, eth_signer.clone().into()); + let decimal = contract.decimals().await? as usize; + let amount = U256::from(amount_u64) * U256::exp10(decimal); + let sui_amount = amount.as_u64(); + let mint_call = contract.mint(eth_address, amount); + let mint_tx_receipt = send_eth_tx_and_get_tx_receipt(mint_call).await; + assert_eq!(mint_tx_receipt.status.unwrap().as_u64(), 1); + + // Second, set allowance + let allowance_call = contract.approve(bridge_test_cluster.contracts().sui_bridge, amount); + let allowance_tx_receipt = send_eth_tx_and_get_tx_receipt(allowance_call).await; + assert_eq!(allowance_tx_receipt.status.unwrap().as_u64(), 1); + + // Third, deposit to bridge + let sui_recipient_address = bridge_test_cluster.sui_user_address(); + let sui_chain_id = bridge_test_cluster.sui_chain_id(); + let eth_chain_id = bridge_test_cluster.eth_chain_id(); + + info!( + "Depositing ERC20 (token id:{}, token_address: {}) to Solidity contract", + token_id, token_address + ); + let contract = EthSuiBridge::new( + bridge_test_cluster.contracts().sui_bridge, + eth_signer.clone().into(), + ); + let deposit_call = contract.bridge_erc20( + token_id, + amount, + sui_recipient_address.to_vec().into(), + sui_chain_id as u8, + ); + let tx_receipt = send_eth_tx_and_get_tx_receipt(deposit_call).await; + let eth_bridge_event = tx_receipt + .logs + .iter() + .find_map(EthBridgeEvent::try_from_log) + .unwrap(); + let 
EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( + eth_bridge_event, + )) = eth_bridge_event + else { + unreachable!(); + }; + // assert eth log matches + assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); + assert_eq!(eth_bridge_event.nonce, nonce); + assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); + assert_eq!(eth_bridge_event.token_id, token_id); + assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); + assert_eq!(eth_bridge_event.sender_address, eth_address); + assert_eq!( + eth_bridge_event.recipient_address, + sui_recipient_address.to_vec() + ); + info!( + "Deposited ERC20 (token id:{}, token_address: {}) to Solidity contract", + token_id, token_address + ); + + wait_for_transfer_action_status( + bridge_test_cluster.bridge_client(), + eth_chain_id, + nonce, + BridgeActionStatus::Claimed, + ) + .await + .tap_ok(|_| { + info!( + nonce, + token_id, amount_u64, "Eth to Sui bridge transfer claimed" + ); + }) +} + +pub(crate) async fn deposit_native_eth_to_sol_contract( + signer: &EthSigner, + contract_address: EthAddress, + sui_recipient_address: SuiAddress, + sui_chain_id: BridgeChainId, + amount: u64, +) -> ContractCall { + let contract = EthSuiBridge::new(contract_address, signer.clone().into()); + let sui_recipient_address = sui_recipient_address.to_vec().into(); + let amount = U256::from(amount) * U256::exp10(18); // 1 ETH + contract + .bridge_eth(sui_recipient_address, sui_chain_id as u8) + .value(amount) +} diff --git a/crates/sui-bridge/src/eth_client.rs b/crates/sui-bridge/src/eth_client.rs index 30f8e6d92e536..215ea5428b01b 100644 --- a/crates/sui-bridge/src/eth_client.rs +++ b/crates/sui-bridge/src/eth_client.rs @@ -36,6 +36,10 @@ impl EthClient { self_.describe().await?; Ok(self_) } + + pub fn provider(&self) -> Arc> { + Arc::new(self.provider.clone()) + } } #[cfg(test)] diff --git a/crates/sui-bridge/src/lib.rs b/crates/sui-bridge/src/lib.rs index 0a138372c50fe..80a582e994c69 
100644 --- a/crates/sui-bridge/src/lib.rs +++ b/crates/sui-bridge/src/lib.rs @@ -19,24 +19,25 @@ pub mod node; pub mod orchestrator; pub mod server; pub mod storage; +pub mod sui_bridge_watchdog; pub mod sui_client; pub mod sui_syncer; pub mod sui_transaction_builder; pub mod types; pub mod utils; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub(crate) mod eth_mock_provider; #[cfg(test)] pub(crate) mod sui_mock_client; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub mod test_utils; pub const BRIDGE_ENABLE_PROTOCOL_VERSION: u64 = 45; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub mod e2e_tests; #[macro_export] diff --git a/crates/sui-bridge/src/metrics.rs b/crates/sui-bridge/src/metrics.rs index 6d1fdda6e2a7c..c147787dc11c6 100644 --- a/crates/sui-bridge/src/metrics.rs +++ b/crates/sui-bridge/src/metrics.rs @@ -114,6 +114,9 @@ pub struct BridgeMetrics { pub(crate) sui_rpc_errors: IntCounterVec, pub(crate) observed_governance_actions: IntCounterVec, pub(crate) current_bridge_voting_rights: IntGaugeVec, + + pub(crate) auth_agg_ok_responses: IntCounterVec, + pub(crate) auth_agg_bad_responses: IntCounterVec, } impl BridgeMetrics { @@ -325,6 +328,20 @@ impl BridgeMetrics { registry ) .unwrap(), + auth_agg_ok_responses: register_int_counter_vec_with_registry!( + "bridge_auth_agg_ok_responses", + "Total number of ok respones from auth agg", + &["authority"], + registry, + ) + .unwrap(), + auth_agg_bad_responses: register_int_counter_vec_with_registry!( + "bridge_auth_agg_bad_responses", + "Total number of bad respones from auth agg", + &["authority"], + registry, + ) + .unwrap(), } } diff --git a/crates/sui-bridge/src/monitor.rs b/crates/sui-bridge/src/monitor.rs index af169737f81d5..af837c394acff 100644 --- a/crates/sui-bridge/src/monitor.rs +++ b/crates/sui-bridge/src/monitor.rs @@ -162,9 +162,12 @@ where Duration::from_secs(10), ) .await; - bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new(Arc::new( - new_committee, - 
)))); + let committee_names = bridge_auth_agg.load().committee_keys_to_names.clone(); + bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new( + Arc::new(new_committee), + bridge_metrics.clone(), + committee_names, + ))); info!("Committee updated with CommitteeMemberUrlUpdateEvent"); } @@ -180,9 +183,12 @@ where Duration::from_secs(10), ) .await; - bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new(Arc::new( - new_committee, - )))); + let committee_names = bridge_auth_agg.load().committee_keys_to_names.clone(); + bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new( + Arc::new(new_committee), + bridge_metrics.clone(), + committee_names, + ))); info!("Committee updated with BlocklistValidatorEvent"); } @@ -926,9 +932,9 @@ mod tests { bridge_metrics, ) = setup(); let old_committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(old_committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(old_committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let _handle = tokio::task::spawn( BridgeMonitor::new( @@ -985,9 +991,9 @@ mod tests { bridge_metrics, ) = setup(); let old_committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(old_committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(old_committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let _handle = tokio::task::spawn( BridgeMonitor::new( @@ -1045,9 +1051,9 @@ mod tests { frozen: !*bridge_pause_tx.borrow(), // toggle the bridge pause status }; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - 
Arc::new(committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let _handle = tokio::task::spawn( BridgeMonitor::new( @@ -1095,9 +1101,9 @@ mod tests { notional_value: 100000000, }; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let sui_token_type_tags_clone = sui_token_type_tags.clone(); let _handle = tokio::task::spawn( diff --git a/crates/sui-bridge/src/node.rs b/crates/sui-bridge/src/node.rs index 97b0b2caf22e9..671f2b358f3c0 100644 --- a/crates/sui-bridge/src/node.rs +++ b/crates/sui-bridge/src/node.rs @@ -1,8 +1,20 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use crate::config::WatchdogConfig; +use crate::crypto::BridgeAuthorityPublicKeyBytes; +use crate::metered_eth_provider::MeteredEthHttpProvier; +use crate::sui_bridge_watchdog::eth_bridge_status::EthBridgeStatus; +use crate::sui_bridge_watchdog::eth_vault_balance::EthVaultBalance; +use crate::sui_bridge_watchdog::metrics::WatchdogMetrics; +use crate::sui_bridge_watchdog::sui_bridge_status::SuiBridgeStatus; +use crate::sui_bridge_watchdog::total_supplies::TotalSupplies; +use crate::sui_bridge_watchdog::{BridgeWatchDog, Observable}; +use crate::sui_client::SuiBridgeClient; use crate::types::BridgeCommittee; -use crate::utils::get_committee_voting_power_by_name; +use crate::utils::{ + get_committee_voting_power_by_name, get_eth_contract_addresses, get_validator_names_by_pub_keys, +}; use crate::{ action_executor::BridgeActionExecutor, client::bridge_authority_aggregator::BridgeAuthorityAggregator, @@ -17,8 +29,10 @@ use crate::{ sui_syncer::SuiSyncer, }; use arc_swap::ArcSwap; +use ethers::providers::Provider; use ethers::types::Address as EthAddress; use mysten_metrics::spawn_logged_monitored_task; +use std::collections::BTreeMap; use std::{ collections::HashMap, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -43,6 +57,7 @@ pub async fn run_bridge_node( ) -> anyhow::Result> { init_all_struct_tags(); let metrics = Arc::new(BridgeMetrics::new(&prometheus_registry)); + let watchdog_config = config.watchdog_config.clone(); let (server_config, client_config) = config.validate(metrics.clone()).await?; let sui_chain_identifier = server_config .sui_client @@ -71,12 +86,19 @@ pub async fn run_bridge_node( .await .expect("Failed to get committee"), ); - // Start Client - let _handles = if let Some(client_config) = client_config { - start_client_components(client_config, committee.clone(), metrics.clone()).await - } else { - Ok(vec![]) - }?; + let mut handles = vec![]; + + // Start watchdog + let eth_provider = 
server_config.eth_client.provider(); + let eth_bridge_proxy_address = server_config.eth_bridge_proxy_address; + let sui_client = server_config.sui_client.clone(); + handles.push(spawn_logged_monitored_task!(start_watchdog( + watchdog_config, + &prometheus_registry, + eth_provider, + eth_bridge_proxy_address, + sui_client + ))); // Update voting right metrics // Before reconfiguration happens we only set it once when the node starts @@ -86,7 +108,22 @@ pub async fn run_bridge_node( .governance_api() .get_latest_sui_system_state() .await?; - let committee_name_mapping = get_committee_voting_power_by_name(&committee, sui_system).await; + + // Start Client + if let Some(client_config) = client_config { + let committee_keys_to_names = + Arc::new(get_validator_names_by_pub_keys(&committee, &sui_system).await); + let client_components = start_client_components( + client_config, + committee.clone(), + committee_keys_to_names, + metrics.clone(), + ) + .await?; + handles.extend(client_components); + } + + let committee_name_mapping = get_committee_voting_power_by_name(&committee, &sui_system).await; for (name, voting_power) in committee_name_mapping.into_iter() { metrics .current_bridge_voting_rights @@ -113,10 +150,61 @@ pub async fn run_bridge_node( )) } +async fn start_watchdog( + watchdog_config: Option, + registry: &prometheus::Registry, + eth_provider: Arc>, + eth_bridge_proxy_address: EthAddress, + sui_client: Arc, +) { + let watchdog_metrics = WatchdogMetrics::new(registry); + let (_committee_address, _limiter_address, vault_address, _config_address, weth_address) = + get_eth_contract_addresses(eth_bridge_proxy_address, ð_provider) + .await + .unwrap_or_else(|e| panic!("get_eth_contract_addresses should not fail: {}", e)); + + let eth_vault_balance = EthVaultBalance::new( + eth_provider.clone(), + vault_address, + weth_address, + watchdog_metrics.eth_vault_balance.clone(), + ); + + let eth_bridge_status = EthBridgeStatus::new( + eth_provider, + 
eth_bridge_proxy_address, + watchdog_metrics.eth_bridge_paused.clone(), + ); + + let sui_bridge_status = SuiBridgeStatus::new( + sui_client.clone(), + watchdog_metrics.sui_bridge_paused.clone(), + ); + + let mut observables: Vec> = vec![ + Box::new(eth_vault_balance), + Box::new(eth_bridge_status), + Box::new(sui_bridge_status), + ]; + if let Some(watchdog_config) = watchdog_config { + if !watchdog_config.total_supplies.is_empty() { + let total_supplies = TotalSupplies::new( + Arc::new(sui_client.sui_client().clone()), + watchdog_config.total_supplies, + watchdog_metrics.total_supplies.clone(), + ); + observables.push(Box::new(total_supplies)); + } + } + + BridgeWatchDog::new(observables).run().await +} + // TODO: is there a way to clean up the overrides after it's stored in DB? async fn start_client_components( client_config: BridgeClientConfig, committee: Arc, + committee_keys_to_names: Arc>, metrics: Arc, ) -> anyhow::Result>> { let store: std::sync::Arc = @@ -154,6 +242,8 @@ async fn start_client_components( let bridge_auth_agg = Arc::new(ArcSwap::from(Arc::new(BridgeAuthorityAggregator::new( committee, + metrics.clone(), + committee_keys_to_names, )))); // TODO: should we use one query instead of two? 
let sui_token_type_tags = sui_client.get_token_id_map().await.unwrap(); @@ -488,6 +578,7 @@ mod tests { db_path: None, metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory let _handle = run_bridge_node( @@ -554,6 +645,7 @@ mod tests { db_path: Some(db_path), metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory let _handle = run_bridge_node( @@ -631,6 +723,7 @@ mod tests { db_path: Some(db_path), metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory let _handle = run_bridge_node( diff --git a/crates/sui-bridge/src/server/mod.rs b/crates/sui-bridge/src/server/mod.rs index 7986f3483692b..8b68513e732be 100644 --- a/crates/sui-bridge/src/server/mod.rs +++ b/crates/sui-bridge/src/server/mod.rs @@ -33,7 +33,7 @@ use tracing::{info, instrument}; pub mod governance_verifier; pub mod handler; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub(crate) mod mock_handler; pub const APPLICATION_JSON: &str = "application/json"; diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/eth_bridge_status.rs b/crates/sui-bridge/src/sui_bridge_watchdog/eth_bridge_status.rs new file mode 100644 index 0000000000000..2df78d137b62c --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/eth_bridge_status.rs @@ -0,0 +1,58 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The EthBridgeStatus observable monitors whether the Eth Bridge is paused. 
+ +use crate::abi::EthSuiBridge; +use crate::metered_eth_provider::MeteredEthHttpProvier; +use crate::sui_bridge_watchdog::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::Address as EthAddress; +use prometheus::IntGauge; +use std::sync::Arc; +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct EthBridgeStatus { + bridge_contract: EthSuiBridge>, + metric: IntGauge, +} + +impl EthBridgeStatus { + pub fn new( + provider: Arc>, + bridge_address: EthAddress, + metric: IntGauge, + ) -> Self { + let bridge_contract = EthSuiBridge::new(bridge_address, provider.clone()); + Self { + bridge_contract, + metric, + } + } +} + +#[async_trait] +impl Observable for EthBridgeStatus { + fn name(&self) -> &str { + "EthBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.bridge_contract.paused().call().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Eth Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting eth bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/eth_vault_balance.rs b/crates/sui-bridge/src/sui_bridge_watchdog/eth_vault_balance.rs new file mode 100644 index 0000000000000..b43b7538067d4 --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/eth_vault_balance.rs @@ -0,0 +1,75 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::abi::EthERC20; +use crate::metered_eth_provider::MeteredEthHttpProvier; +use crate::sui_bridge_watchdog::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::{Address as EthAddress, U256}; +use prometheus::IntGauge; +use std::sync::Arc; +use tokio::time::Duration; +use tracing::{error, info}; + +const TEN_ZEROS: u64 = 10_u64.pow(10); + +pub struct EthVaultBalance { + coin_contract: EthERC20>, + vault_address: EthAddress, + ten_zeros: U256, + metric: IntGauge, +} + +impl EthVaultBalance { + pub fn new( + provider: Arc>, + vault_address: EthAddress, + coin_address: EthAddress, // for now this only support one coin which is WETH + metric: IntGauge, + ) -> Self { + let ten_zeros = U256::from(TEN_ZEROS); + let coin_contract = EthERC20::new(coin_address, provider); + Self { + coin_contract, + vault_address, + ten_zeros, + metric, + } + } +} + +#[async_trait] +impl Observable for EthVaultBalance { + fn name(&self) -> &str { + "EthVaultBalance" + } + + async fn observe_and_report(&self) { + match self + .coin_contract + .balance_of(self.vault_address) + .call() + .await + { + Ok(balance) => { + // Why downcasting is safe: + // 1. On Ethereum we only take the first 8 decimals into account, + // meaning the trailing 10 digits can be ignored + // 2. i64::MAX is 9_223_372_036_854_775_807, with 8 decimal places is + // 92_233_720_368. We likely won't see any balance higher than this + // in the next 12 months. 
+ let balance = (balance / self.ten_zeros).as_u64() as i64; + self.metric.set(balance); + info!("Eth Vault Balance: {:?}", balance); + } + Err(e) => { + error!("Error getting balance from vault: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/metrics.rs b/crates/sui-bridge/src/sui_bridge_watchdog/metrics.rs new file mode 100644 index 0000000000000..8fea209d7f43f --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/metrics.rs @@ -0,0 +1,52 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use prometheus::{ + register_int_gauge_vec_with_registry, register_int_gauge_with_registry, IntGauge, IntGaugeVec, + Registry, +}; + +#[derive(Clone, Debug)] +pub struct WatchdogMetrics { + pub eth_vault_balance: IntGauge, + pub total_supplies: IntGaugeVec, + pub eth_bridge_paused: IntGauge, + pub sui_bridge_paused: IntGauge, +} + +impl WatchdogMetrics { + pub fn new(registry: &Registry) -> Self { + Self { + eth_vault_balance: register_int_gauge_with_registry!( + "bridge_eth_vault_balance", + "Current balance of eth vault", + registry, + ) + .unwrap(), + total_supplies: register_int_gauge_vec_with_registry!( + "bridge_total_supplies", + "Current total supplies of coins on Sui based on Treasury Cap", + &["token_name"], + registry, + ) + .unwrap(), + eth_bridge_paused: register_int_gauge_with_registry!( + "bridge_eth_bridge_paused", + "Whether the eth bridge is paused", + registry, + ) + .unwrap(), + sui_bridge_paused: register_int_gauge_with_registry!( + "bridge_sui_bridge_paused", + "Whether the sui bridge is paused", + registry, + ) + .unwrap(), + } + } + + pub fn new_for_testing() -> Self { + let registry = Registry::new(); + Self::new(®istry) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/mod.rs b/crates/sui-bridge/src/sui_bridge_watchdog/mod.rs new file mode 100644 index 0000000000000..63ed7af86990e --- /dev/null +++ 
b/crates/sui-bridge/src/sui_bridge_watchdog/mod.rs @@ -0,0 +1,62 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The BridgeWatchDog module is responsible for monitoring the health +//! of the bridge by periodically running various observables and +//! reporting the results. + +use anyhow::Result; +use async_trait::async_trait; +use mysten_metrics::spawn_logged_monitored_task; +use tokio::time::Duration; +use tokio::time::MissedTickBehavior; +use tracing::{error_span, info, Instrument}; + +pub mod eth_bridge_status; +pub mod eth_vault_balance; +pub mod metrics; +pub mod sui_bridge_status; +pub mod total_supplies; + +pub struct BridgeWatchDog { + observables: Vec>, +} + +impl BridgeWatchDog { + pub fn new(observables: Vec>) -> Self { + Self { observables } + } + + pub async fn run(self) { + let mut handles = vec![]; + for observable in self.observables.into_iter() { + let handle = spawn_logged_monitored_task!(Self::run_observable(observable)); + handles.push(handle); + } + // Return when any task returns an error or all tasks exit. 
+ futures::future::try_join_all(handles).await.unwrap(); + unreachable!("watch dog tasks should not exit"); + } + + async fn run_observable(observable: Box) -> Result<()> { + let mut interval = tokio::time::interval(observable.interval()); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + let name = observable.name(); + let span = error_span!("observable", name); + loop { + info!("Running observable {}", name); + observable + .observe_and_report() + .instrument(span.clone()) + .await; + interval.tick().await; + } + } +} + +#[async_trait] +pub trait Observable { + fn name(&self) -> &str; + async fn observe_and_report(&self); + fn interval(&self) -> Duration; +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/sui_bridge_status.rs b/crates/sui-bridge/src/sui_bridge_watchdog/sui_bridge_status.rs new file mode 100644 index 0000000000000..42506286c55e8 --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/sui_bridge_status.rs @@ -0,0 +1,48 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The SuiBridgeStatus observable monitors whether the Sui Bridge is paused. 
+ +use crate::sui_bridge_watchdog::Observable; +use crate::sui_client::SuiBridgeClient; +use async_trait::async_trait; +use prometheus::IntGauge; +use std::sync::Arc; + +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct SuiBridgeStatus { + sui_client: Arc, + metric: IntGauge, +} + +impl SuiBridgeStatus { + pub fn new(sui_client: Arc, metric: IntGauge) -> Self { + Self { sui_client, metric } + } +} + +#[async_trait] +impl Observable for SuiBridgeStatus { + fn name(&self) -> &str { + "SuiBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.sui_client.is_bridge_paused().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Sui Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting sui bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/total_supplies.rs b/crates/sui-bridge/src/sui_bridge_watchdog/total_supplies.rs new file mode 100644 index 0000000000000..199074a8e1a7a --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/total_supplies.rs @@ -0,0 +1,65 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The SuiBridgeStatus observable monitors whether the Sui Bridge is paused. 
+ +use crate::sui_bridge_watchdog::Observable; +use async_trait::async_trait; +use prometheus::IntGaugeVec; +use std::{collections::BTreeMap, sync::Arc}; +use sui_sdk::SuiClient; + +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct TotalSupplies { + sui_client: Arc, + coins: BTreeMap, + metric: IntGaugeVec, +} + +impl TotalSupplies { + pub fn new( + sui_client: Arc, + coins: BTreeMap, + metric: IntGaugeVec, + ) -> Self { + Self { + sui_client, + coins, + metric, + } + } +} + +#[async_trait] +impl Observable for TotalSupplies { + fn name(&self) -> &str { + "TotalSupplies" + } + + async fn observe_and_report(&self) { + for (coin_name, coin_type) in &self.coins { + let resp = self + .sui_client + .coin_read_api() + .get_total_supply(coin_type.clone()) + .await; + match resp { + Ok(supply) => { + self.metric + .with_label_values(&[coin_name]) + .set(supply.value as i64); + info!("Total supply for {coin_type}: {}", supply.value); + } + Err(e) => { + error!("Error getting total supply for coin {coin_type}: {:?}", e); + } + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/types.rs b/crates/sui-bridge/src/types.rs index d4d69e1bf10ed..13aba4d461839 100644 --- a/crates/sui-bridge/src/types.rs +++ b/crates/sui-bridge/src/types.rs @@ -147,23 +147,6 @@ impl core::fmt::Display for BridgeCommittee { } } -impl core::fmt::Display for BridgeCommittee { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> std::fmt::Result { - for m in self.members.values() { - writeln!( - f, - "pubkey: {:?}, url: {:?}, stake: {:?}, blocklisted: {}, eth address: {:x}", - Hex::encode(m.pubkey_bytes().as_bytes()), - m.base_url, - m.voting_power, - m.is_blocklisted, - m.pubkey_bytes().to_eth_address(), - )?; - } - Ok(()) - } -} - impl CommitteeTrait for BridgeCommittee { // Note: blocklisted members are always excluded. 
fn shuffle_by_stake_with_rng( diff --git a/crates/sui-bridge/src/utils.rs b/crates/sui-bridge/src/utils.rs index 7990ec79e0cec..d6f7ca487e191 100644 --- a/crates/sui-bridge/src/utils.rs +++ b/crates/sui-bridge/src/utils.rs @@ -5,7 +5,7 @@ use crate::abi::{ EthBridgeCommittee, EthBridgeConfig, EthBridgeLimiter, EthBridgeVault, EthSuiBridge, }; use crate::config::{ - default_ed25519_key_pair, BridgeNodeConfig, EthConfig, MetricsConfig, SuiConfig, + default_ed25519_key_pair, BridgeNodeConfig, EthConfig, MetricsConfig, SuiConfig, WatchdogConfig, }; use crate::crypto::BridgeAuthorityKeyPair; use crate::crypto::BridgeAuthorityPublicKeyBytes; @@ -207,6 +207,13 @@ pub fn generate_bridge_node_config_and_write_to_file( push_interval_seconds: None, // use default value push_url: "metrics_proxy_url".to_string(), }), + watchdog_config: Some(WatchdogConfig { + total_supplies: BTreeMap::from_iter(vec![( + "eth".to_string(), + "0xd0e89b2af5e4910726fbcd8b8dd37bb79b29e5f83f7491bca830e94f7f226d29::eth::ETH" + .to_string(), + )]), + }), }; if run_client { config.sui.bridge_client_key_path = Some(PathBuf::from("/path/to/your/bridge_client_key")); @@ -389,7 +396,7 @@ pub async fn wait_for_server_to_be_up(server_url: String, timeout_sec: u64) -> a /// If a validator is not in the Sui committee, we will use its base URL as the name. pub async fn get_committee_voting_power_by_name( bridge_committee: &Arc, - system_state: SuiSystemStateSummary, + system_state: &SuiSystemStateSummary, ) -> BTreeMap { let mut sui_committee: BTreeMap<_, _> = system_state .active_validators @@ -409,3 +416,28 @@ pub async fn get_committee_voting_power_by_name( }) .collect() } + +/// Return a mappping from validator pub keys to their names. +/// If a validator is not in the Sui committee, we will use its base URL as the name. 
+pub async fn get_validator_names_by_pub_keys( + bridge_committee: &Arc, + system_state: &SuiSystemStateSummary, +) -> BTreeMap { + let mut sui_committee: BTreeMap<_, _> = system_state + .active_validators + .iter() + .map(|v| (v.sui_address, v.name.clone())) + .collect(); + bridge_committee + .members() + .iter() + .map(|(name, validator)| { + ( + name.clone(), + sui_committee + .remove(&validator.sui_address) + .unwrap_or(validator.base_url.clone()), + ) + }) + .collect() +} diff --git a/crates/sui-cluster-test/src/cluster.rs b/crates/sui-cluster-test/src/cluster.rs index decf58e81714d..166d71e0ada33 100644 --- a/crates/sui-cluster-test/src/cluster.rs +++ b/crates/sui-cluster-test/src/cluster.rs @@ -223,6 +223,7 @@ impl Cluster for LocalNewCluster { // This cluster has fullnode handle, safe to unwrap let fullnode_url = test_cluster.fullnode_handle.rpc_url.clone(); + // TODO: with TestCluster supporting indexer backed rpc as well, we can remove the indexer related logic here. let mut cancellation_tokens = vec![]; let (database, indexer_url, graphql_url) = if options.with_indexer_and_graphql { let database = TempDb::new()?; @@ -237,6 +238,8 @@ impl Cluster for LocalNewCluster { None, Some(data_ingestion_path.path().to_path_buf()), None, /* cancel */ + None, /* start_checkpoint */ + None, /* end_checkpoint */ ) .await; cancellation_tokens.push(writer_token.drop_guard()); diff --git a/crates/sui-config/src/local_ip_utils.rs b/crates/sui-config/src/local_ip_utils.rs index 5e7d1298f3629..e8fb02c7f145f 100644 --- a/crates/sui-config/src/local_ip_utils.rs +++ b/crates/sui-config/src/local_ip_utils.rs @@ -122,15 +122,18 @@ pub fn new_udp_address_for_testing(host: &str) -> Multiaddr { .unwrap() } -/// Returns a new unique TCP address (SocketAddr) for localhost, by finding a new available port on localhost. 
-pub fn new_local_tcp_socket_for_testing() -> SocketAddr { +/// Returns a new unique TCP address in String format for localhost, by finding a new available port on localhost. +pub fn new_local_tcp_socket_for_testing_string() -> String { format!( "{}:{}", localhost_for_testing(), get_available_port(&localhost_for_testing()) ) - .parse() - .unwrap() +} + +/// Returns a new unique TCP address (SocketAddr) for localhost, by finding a new available port on localhost. +pub fn new_local_tcp_socket_for_testing() -> SocketAddr { + new_local_tcp_socket_for_testing_string().parse().unwrap() } /// Returns a new unique TCP address (Multiaddr) for localhost, by finding a new available port on localhost. diff --git a/crates/sui-config/src/node.rs b/crates/sui-config/src/node.rs index 1ab9ec678d364..41d67009c45e0 100644 --- a/crates/sui-config/src/node.rs +++ b/crates/sui-config/src/node.rs @@ -683,7 +683,10 @@ pub struct AuthorityStorePruningConfig { /// enables periodic background compaction for old SST files whose last modified time is /// older than `periodic_compaction_threshold_days` days. 
/// That ensures that all sst files eventually go through the compaction process - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default = "default_periodic_compaction_threshold_days", + skip_serializing_if = "Option::is_none" + )] pub periodic_compaction_threshold_days: Option, /// number of epochs to keep the latest version of transactions and effects for #[serde(skip_serializing_if = "Option::is_none")] @@ -715,6 +718,10 @@ fn default_smoothing() -> bool { cfg!(not(test)) } +fn default_periodic_compaction_threshold_days() -> Option { + Some(1) +} + impl Default for AuthorityStorePruningConfig { fn default() -> Self { Self { diff --git a/crates/sui-core/Cargo.toml b/crates/sui-core/Cargo.toml index eac6a7277c61e..a5ee4ea284796 100644 --- a/crates/sui-core/Cargo.toml +++ b/crates/sui-core/Cargo.toml @@ -89,6 +89,7 @@ sui-protocol-config.workspace = true sui-transaction-checks.workspace = true sui-simulator.workspace = true sui-storage.workspace = true +sui-tls.workspace = true sui-types.workspace = true zeroize.workspace = true nonempty.workspace = true diff --git a/crates/sui-core/src/authority.rs b/crates/sui-core/src/authority.rs index c3268d9247faf..5c033919eb909 100644 --- a/crates/sui-core/src/authority.rs +++ b/crates/sui-core/src/authority.rs @@ -11,6 +11,7 @@ use crate::verify_indexes::verify_indexes; use anyhow::anyhow; use arc_swap::{ArcSwap, Guard}; use async_trait::async_trait; +use authority_per_epoch_store::CertLockGuard; use chrono::prelude::*; use fastcrypto::encoding::Base58; use fastcrypto::encoding::Encoding; @@ -54,11 +55,12 @@ use sui_types::layout_resolver::LayoutResolver; use sui_types::messages_consensus::{AuthorityCapabilitiesV1, AuthorityCapabilitiesV2}; use sui_types::object::bounded_visitor::BoundedVisitor; use sui_types::transaction_executor::SimulateTransactionResult; + use tap::{TapFallible, TapOptional}; use tokio::sync::mpsc::unbounded_channel; use tokio::sync::{mpsc, oneshot, RwLock}; use tokio::task::JoinHandle; 
-use tracing::{debug, error, info, instrument, warn, Instrument}; +use tracing::{debug, error, info, instrument, warn}; use self::authority_store::ExecutionLockWriteGuard; use self::authority_store_pruner::AuthorityStorePruningMetrics; @@ -67,8 +69,10 @@ use mysten_metrics::{monitored_scope, spawn_monitored_task}; use mamoru_sui_sniffer::SuiSniffer; use move_core_types::trace::CallTrace; + use crate::jsonrpc_index::IndexStore; use crate::jsonrpc_index::{CoinInfo, ObjectIndexChanges}; +use mysten_common::debug_fatal; use once_cell::sync::OnceCell; use shared_crypto::intent::{AppId, Intent, IntentMessage, IntentScope, IntentVersion}; use sui_archival::reader::ArchiveReaderBalancer; @@ -92,7 +96,7 @@ use sui_types::digests::TransactionEventsDigest; use sui_types::dynamic_field::{DynamicFieldInfo, DynamicFieldName}; use sui_types::effects::{ InputSharedObject, SignedTransactionEffects, TransactionEffects, TransactionEffectsAPI, - TransactionEvents, VerifiedCertifiedTransactionEffects, VerifiedSignedTransactionEffects, + TransactionEvents, VerifiedSignedTransactionEffects, }; use sui_types::error::{ExecutionError, UserInputError}; use sui_types::event::{Event, EventID}; @@ -126,7 +130,6 @@ use sui_types::{ committee::Committee, crypto::AuthoritySignature, error::{SuiError, SuiResult}, - fp_ensure, object::{Object, ObjectRead}, transaction::*, SUI_SYSTEM_ADDRESS, @@ -240,7 +243,6 @@ pub struct AuthorityMetrics { execute_certificate_latency_shared_object: Histogram, await_transaction_latency: Histogram, - execute_certificate_with_effects_latency: Histogram, internal_execution_latency: Histogram, execution_load_input_objects_latency: Histogram, prepare_certificate_latency: Histogram, @@ -304,8 +306,6 @@ pub struct AuthorityMetrics { /// bytecode verifier metrics for tracking timeouts pub bytecode_verifier_metrics: Arc, - pub authenticator_state_update_failed: IntCounter, - /// Count of zklogin signatures pub zklogin_sig_count: IntCounter, /// Count of multisig signatures 
@@ -460,13 +460,6 @@ impl AuthorityMetrics { registry, ) .unwrap(), - execute_certificate_with_effects_latency: register_histogram_with_registry!( - "authority_state_execute_certificate_with_effects_latency", - "Latency of executing certificates with effects, including waiting for inputs", - LATENCY_SEC_BUCKETS.to_vec(), - registry, - ) - .unwrap(), internal_execution_latency: register_histogram_with_registry!( "authority_state_internal_execution_latency", "Latency of actual certificate executions", @@ -737,12 +730,6 @@ impl AuthorityMetrics { ).unwrap(), limits_metrics: Arc::new(LimitsMetrics::new(registry)), bytecode_verifier_metrics: Arc::new(BytecodeVerifierMetrics::new(registry)), - authenticator_state_update_failed: register_int_counter_with_registry!( - "authenticator_state_update_failed", - "Number of failed authenticator state updates", - registry, - ) - .unwrap(), zklogin_sig_count: register_int_counter_with_registry!( "zklogin_sig_count", "Count of zkLogin signatures", @@ -1122,81 +1109,6 @@ impl AuthorityState { .inc(); } - /// Executes a transaction that's known to have correct effects. - /// For such transaction, we don't have to wait for consensus to set shared object - /// locks because we already know the shared object versions based on the effects. - /// This function can be called by a fullnode only. - // TODO: This function is no longer needed. Remove it and cleanup all the - // related functions. - #[instrument(level = "trace", skip_all)] - pub async fn fullnode_execute_certificate_with_effects( - &self, - transaction: &VerifiedExecutableTransaction, - // NOTE: the caller of this must promise to wait until it - // knows for sure this tx is finalized, namely, it has seen a - // CertifiedTransactionEffects or at least f+1 identifical effects - // digests matching this TransactionEffectsEnvelope, before calling - // this function, in order to prevent a byzantine validator from - // giving us incorrect effects. 
- effects: &VerifiedCertifiedTransactionEffects, - epoch_store: &Arc, - ) -> SuiResult { - assert!(self.is_fullnode(epoch_store)); - // NOTE: the fullnode can change epoch during local execution. It should not cause - // data inconsistency, but can be problematic for certain tests. - // The check below mitigates the issue, but it is not a fundamental solution to - // avoid race between local execution and reconfiguration. - if self.epoch_store.load().epoch() != epoch_store.epoch() { - return Err(SuiError::EpochEnded(epoch_store.epoch())); - } - let _metrics_guard = self - .metrics - .execute_certificate_with_effects_latency - .start_timer(); - let digest = *transaction.digest(); - debug!("execute_certificate_with_effects"); - fp_ensure!( - *effects.data().transaction_digest() == digest, - SuiError::ErrorWhileProcessingCertificate { - err: "effects/tx digest mismatch".to_string() - } - ); - - if transaction.contains_shared_object() { - epoch_store - .acquire_shared_locks_from_effects( - transaction, - effects.data(), - self.get_object_cache_reader().as_ref(), - ) - .await?; - } - - let expected_effects_digest = effects.digest(); - - self.transaction_manager - .enqueue(vec![transaction.clone()], epoch_store); - - let observed_effects = self - .get_transaction_cache_reader() - .notify_read_executed_effects(&[digest]) - .instrument(tracing::debug_span!( - "notify_read_effects_in_execute_certificate_with_effects" - )) - .await? - .pop() - .expect("notify_read_effects should return exactly 1 element"); - - let observed_effects_digest = observed_effects.digest(); - if &observed_effects_digest != expected_effects_digest { - panic!( - "Locally executed effects do not match canonical effects! 
expected_effects_digest={:?} observed_effects_digest={:?} expected_effects={:?} observed_effects={:?} input_objects={:?}", - expected_effects_digest, observed_effects_digest, effects.data(), observed_effects, transaction.data().transaction_data().input_objects() - ); - } - Ok(()) - } - /// Executes a certificate for its effects. #[instrument(level = "trace", skip_all)] pub async fn execute_certificate( @@ -1225,7 +1137,13 @@ impl AuthorityState { self.enqueue_certificates_for_execution(vec![certificate.clone()], epoch_store); } - self.notify_read_effects(*certificate.digest()).await + // tx could be reverted when epoch ends, so we must be careful not to return a result + // here after the epoch ends. + epoch_store + .within_alive_epoch(self.notify_read_effects(*certificate.digest())) + .await + .map_err(|_| SuiError::EpochEnded(epoch_store.epoch())) + .and_then(|r| r) } /// Awaits the effects of executing a user transaction. @@ -1234,12 +1152,20 @@ impl AuthorityState { pub async fn await_transaction_effects( &self, digest: TransactionDigest, + epoch_store: &Arc, ) -> SuiResult { let _metrics_guard = self.metrics.await_transaction_latency.start_timer(); debug!("await_transaction"); // TODO(fastpath): Add handling for transactions rejected by Mysticeti fast path. - self.notify_read_effects(digest).await + // TODO(fastpath): Can an MFP transaction be reverted after epoch ends? If so, + // same warning as above applies: We must be careful not to return a result + // here after the epoch ends. + epoch_store + .within_alive_epoch(self.notify_read_effects(digest)) + .await + .map_err(|_| SuiError::EpochEnded(epoch_store.epoch())) + .and_then(|r| r) } /// Internal logic to execute a certificate. @@ -1265,7 +1191,22 @@ impl AuthorityState { debug!("execute_certificate_internal"); let tx_digest = certificate.digest(); - let input_objects = self.read_objects_for_execution(certificate, epoch_store)?; + + // prevent concurrent executions of the same tx. 
+ let tx_guard = epoch_store.acquire_tx_guard(certificate).await?; + + // The cert could have been processed by a concurrent attempt of the same cert, so check if + // the effects have already been written. + if let Some(effects) = self + .get_transaction_cache_reader() + .get_executed_effects(tx_digest)? + { + tx_guard.release(); + return Ok((effects, None)); + } + + let input_objects = + self.read_objects_for_execution(tx_guard.as_lock_guard(), certificate, epoch_store)?; if expected_effects_digest.is_none() { // We could be re-executing a previously executed but uncommitted transaction, perhaps after @@ -1275,12 +1216,6 @@ impl AuthorityState { expected_effects_digest = epoch_store.get_signed_effects_digest(tx_digest)?; } - // This acquires a lock on the tx digest to prevent multiple concurrent executions of the - // same tx. While we don't need this for safety (tx sequencing is ultimately atomic), it is - // very common to receive the same tx multiple times simultaneously due to gossip, so we - // may as well hold the lock and save the cpu time for other requests. - let tx_guard = epoch_store.acquire_tx_guard(certificate).await?; - self.process_certificate( tx_guard, certificate, @@ -1294,6 +1229,7 @@ impl AuthorityState { pub fn read_objects_for_execution( &self, + tx_lock: &CertLockGuard, certificate: &VerifiedExecutableTransaction, epoch_store: &Arc, ) -> SuiResult { @@ -1306,6 +1242,7 @@ impl AuthorityState { self.input_loader.read_objects_for_execution( epoch_store.as_ref(), &certificate.key(), + tx_lock, input_objects, epoch_store.epoch(), ) @@ -1395,15 +1332,6 @@ impl AuthorityState { } }); - // The cert could have been processed by a concurrent attempt of the same cert, so check if - // the effects have already been written. - if let Some(effects) = self - .get_transaction_cache_reader() - .get_executed_effects(&digest)? 
- { - tx_guard.release(); - return Ok((effects, None)); - } let execution_guard = self .execution_lock_for_executable_transaction(certificate) .await; @@ -1503,10 +1431,8 @@ impl AuthorityState { certificate.data().transaction_data().kind() { if let Some(err) = &execution_error_opt { - error!("Authenticator state update failed: {err}"); - self.metrics.authenticator_state_update_failed.inc(); + debug_fatal!("Authenticator state update failed: {:?}", err); } - debug_assert!(execution_error_opt.is_none()); epoch_store.update_authenticator_state(auth_state); // double check that the signature verifier always matches the authenticator state @@ -3224,7 +3150,16 @@ impl AuthorityState { ); self.committee_store.insert_new_committee(&new_committee)?; + + // Wait until no transactions are being executed. let mut execution_lock = self.execution_lock_for_reconfiguration().await; + + // Terminate all epoch-specific tasks (those started with within_alive_epoch). + cur_epoch_store.epoch_terminated().await; + + // Safe to being reconfiguration now. No transactions are being executed, + // and no epoch-specific tasks are running. + // TODO: revert_uncommitted_epoch_transactions will soon be unnecessary - // clear_state_end_of_epoch() can simply drop all uncommitted transactions self.revert_uncommitted_epoch_transactions(cur_epoch_store) @@ -5119,7 +5054,7 @@ impl AuthorityState { ); fail_point_async!("change_epoch_tx_delay"); - let _tx_lock = epoch_store.acquire_tx_lock(tx_digest).await; + let tx_lock = epoch_store.acquire_tx_lock(tx_digest).await; // The tx could have been executed by state sync already - if so simply return an error. // The checkpoint builder will shortly be terminated by reconfiguration anyway. 
@@ -5147,7 +5082,8 @@ impl AuthorityState { ) .await?; - let input_objects = self.read_objects_for_execution(&executable_tx, epoch_store)?; + let input_objects = + self.read_objects_for_execution(&tx_lock, &executable_tx, epoch_store)?; let (temporary_store, effects, _execution_error_opt) = self.prepare_certificate(&execution_guard, &executable_tx, input_objects, epoch_store)?; @@ -5238,7 +5174,6 @@ impl AuthorityState { cur_epoch_store.get_chain_identifier(), ); self.epoch_store.store(new_epoch_store.clone()); - cur_epoch_store.epoch_terminated().await; Ok(new_epoch_store) } diff --git a/crates/sui-core/src/authority/authority_per_epoch_store.rs b/crates/sui-core/src/authority/authority_per_epoch_store.rs index 70c1e98bbb2e1..b49ae87c33881 100644 --- a/crates/sui-core/src/authority/authority_per_epoch_store.rs +++ b/crates/sui-core/src/authority/authority_per_epoch_store.rs @@ -26,7 +26,9 @@ use sui_types::base_types::{AuthorityName, EpochId, ObjectID, SequenceNumber, Tr use sui_types::base_types::{ConciseableName, ObjectRef}; use sui_types::committee::Committee; use sui_types::committee::CommitteeTrait; -use sui_types::crypto::{AuthoritySignInfo, AuthorityStrongQuorumSignInfo, RandomnessRound}; +use sui_types::crypto::{ + AuthorityPublicKeyBytes, AuthoritySignInfo, AuthorityStrongQuorumSignInfo, RandomnessRound, +}; use sui_types::digests::{ChainIdentifier, TransactionEffectsDigest}; use sui_types::error::{SuiError, SuiResult}; use sui_types::signature::GenericSignature; @@ -48,7 +50,9 @@ use typed_store::{ use super::authority_store_tables::ENV_VAR_LOCKS_BLOCK_CACHE_SIZE; use super::epoch_start_configuration::EpochStartConfigTrait; -use super::shared_object_congestion_tracker::SharedObjectCongestionTracker; +use super::shared_object_congestion_tracker::{ + CongestionPerObjectDebt, SharedObjectCongestionTracker, +}; use super::transaction_deferral::{transaction_deferral_within_limit, DeferralKey, DeferralReason}; use 
crate::authority::epoch_start_configuration::{EpochFlag, EpochStartConfiguration}; use crate::authority::AuthorityMetrics; @@ -129,6 +133,16 @@ pub struct CertTxGuard(#[allow(unused)] CertLockGuard); impl CertTxGuard { pub fn release(self) {} pub fn commit_tx(self) {} + pub fn as_lock_guard(&self) -> &CertLockGuard { + &self.0 + } +} + +impl CertLockGuard { + pub fn dummy_for_tests() -> Self { + let lock = Arc::new(tokio::sync::Mutex::new(())); + Self(lock.try_lock_owned().unwrap()) + } } type JwkAggregator = GenericMultiStakeAggregator<(JwkId, JWK), true>; @@ -596,6 +610,10 @@ pub struct AuthorityEpochTables { pub(crate) randomness_highest_completed_round: DBMap, /// Holds the timestamp of the most recently generated round of randomness. pub(crate) randomness_last_round_timestamp: DBMap, + + /// Accumulated per-object debts for congestion control. + pub(crate) congestion_control_object_debts: DBMap, + pub(crate) congestion_control_randomness_object_debts: DBMap, } fn signed_transactions_table_default_config() -> DBOptions { @@ -746,6 +764,62 @@ impl AuthorityEpochTables { batch.write()?; Ok(()) } + + pub fn load_initial_object_debts( + &self, + current_round: Round, + for_randomness: bool, + protocol_config: &ProtocolConfig, + transactions: &[VerifiedSequencedConsensusTransaction], + ) -> SuiResult> { + let default_per_commit_budget = protocol_config + .max_accumulated_txn_cost_per_object_in_mysticeti_commit_as_option() + .unwrap_or(0); + let (table, per_commit_budget) = if for_randomness { + ( + &self.congestion_control_randomness_object_debts, + protocol_config + .max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit_as_option() + .unwrap_or(default_per_commit_budget), + ) + } else { + ( + &self.congestion_control_object_debts, + default_per_commit_budget, + ) + }; + + let shared_input_object_ids: BTreeSet<_> = transactions + .iter() + .filter_map(|tx| { + if let SequencedConsensusTransactionKind::External(ConsensusTransaction { + kind: 
ConsensusTransactionKind::CertifiedTransaction(tx), + .. + }) = &tx.0.transaction + { + Some(tx.shared_input_objects().map(|obj| obj.id)) + } else { + None + } + }) + .flatten() + .collect(); + Ok(table + .multi_get(shared_input_object_ids.iter())? + .into_iter() + .zip(shared_input_object_ids) + .filter_map(|(debt, object_id)| debt.map(|debt| (debt, object_id))) + .map(move |(debt, object_id)| { + let (round, debt) = debt.into_v1(); + ( + object_id, + // Stored debts already account for the budget of the round in which + // they were accumulated. Application of budget from future rounds to + // the debt is handled here. + debt.saturating_sub(per_commit_budget * (current_round - round - 1)), + ) + })) + } } pub(crate) const MUTEX_TABLE_SIZE: usize = 1024; @@ -898,18 +972,6 @@ impl AuthorityPerEpochStore { randomness_reporter: OnceCell::new(), }); - if matches!(chain_identifier.chain(), Chain::Mainnet | Chain::Testnet) { - // If we disable randomness, and if the release in which it was disabled did not have - // the commit that added this comment, we will need to revert this commit. This is - // because the previous release will have been writing to the deprecated - // assigned_shared_object_versions table. - // - // If we disable randomness *after* this commit has been shipped to all networks, then - // we can simply remove this assert, as we will no longer switch back and forth between - // the two tables. - assert!(s.randomness_state_enabled()); - } - s.update_buffer_stake_metric(); s } @@ -1333,23 +1395,29 @@ impl AuthorityPerEpochStore { &self, key: &TransactionKey, objects: &[InputObjectKind], - ) -> BTreeSet { - let mut shared_locks = HashMap::::new(); + ) -> SuiResult> { + let shared_locks = + once_cell::unsync::OnceCell::>>::new(); objects .iter() .map(|kind| { - match kind { + Ok(match kind { InputObjectKind::SharedMoveObject { id, .. 
} => { - if shared_locks.is_empty() { - shared_locks = self - .get_shared_locks(key) - .expect("Read from storage should not fail!") - .into_iter() - .collect(); - } - // If we can't find the locked version, it means - // 1. either we have a bug that skips shared object version assignment - // 2. or we have some DB corruption + let shared_locks = shared_locks + .get_or_init(|| { + self.get_shared_locks(key) + .expect("reading shared locks should not fail") + .map(|locks| locks.into_iter().collect()) + }) + .as_ref() + // Shared version assignments could have been deleted if the tx just + // finished executing concurrently. + .ok_or(SuiError::GenericAuthorityError { + error: "no shared locks".to_string(), + })?; + + // If we found locks, but they are missing the assignment for this object, + // it indicates a serious inconsistency! let Some(version) = shared_locks.get(id) else { panic!( "Shared object locks should have been set. key: {key:?}, obj \ @@ -1366,7 +1434,7 @@ impl AuthorityPerEpochStore { id: objref.0, version: objref.1, }, - } + }) }) .collect() } @@ -1794,11 +1862,6 @@ impl AuthorityPerEpochStore { .collect::, _>>()?) } - fn get_max_accumulated_txn_cost_per_object_in_commit(&self) -> Option { - self.protocol_config() - .max_accumulated_txn_cost_per_object_in_mysticeti_commit_as_option() - } - fn should_defer( &self, cert: &VerifiedExecutableTransaction, @@ -1825,25 +1888,18 @@ impl AuthorityPerEpochStore { )); } - if let Some(max_accumulated_txn_cost_per_object_in_commit) = - self.get_max_accumulated_txn_cost_per_object_in_commit() + // Defer transaction if it uses shared objects that are congested. + if let Some((deferral_key, congested_objects)) = shared_object_congestion_tracker + .should_defer_due_to_object_congestion( + cert, + previously_deferred_tx_digests, + commit_round, + ) { - // Defer transaction if it uses shared objects that are congested. 
- if let Some((deferral_key, congested_objects)) = shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - cert, - max_accumulated_txn_cost_per_object_in_commit, - previously_deferred_tx_digests, - commit_round, - ) - { - Some(( - deferral_key, - DeferralReason::SharedObjectCongestion(congested_objects), - )) - } else { - None - } + Some(( + deferral_key, + DeferralReason::SharedObjectCongestion(congested_objects), + )) } else { None } @@ -2665,7 +2721,7 @@ impl AuthorityPerEpochStore { } } - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(consensus_commit_info.round); // Load transactions deferred from previous commits. let deferred_txs: Vec<(DeferralKey, Vec)> = self @@ -2792,7 +2848,6 @@ impl AuthorityPerEpochStore { }) .collect(); - // We always order transactions using randomness last. PostConsensusTxReorder::reorder( &mut sequenced_transactions, self.protocol_config.consensus_transaction_ordering(), @@ -2801,6 +2856,27 @@ impl AuthorityPerEpochStore { &mut sequenced_randomness_transactions, self.protocol_config.consensus_transaction_ordering(), ); + + // We track transaction execution cost separately for regular transactions and transactions using randomness, since + // they will be in different PendingCheckpoints. + let tables = self.tables()?; + let shared_object_congestion_tracker = SharedObjectCongestionTracker::from_protocol_config( + &tables, + self.protocol_config(), + consensus_commit_info.round, + false, + &sequenced_transactions, + )?; + let shared_object_using_randomness_congestion_tracker = + SharedObjectCongestionTracker::from_protocol_config( + &tables, + self.protocol_config(), + consensus_commit_info.round, + true, + &sequenced_randomness_transactions, + )?; + + // We always order transactions using randomness last. 
let consensus_transactions: Vec<_> = system_transactions .into_iter() .chain(sequenced_transactions) @@ -2823,6 +2899,8 @@ impl AuthorityPerEpochStore { consensus_commit_info, &mut roots, &mut randomness_roots, + shared_object_congestion_tracker, + shared_object_using_randomness_congestion_tracker, previously_deferred_tx_digests, randomness_manager.as_deref_mut(), dkg_failed, @@ -2845,7 +2923,6 @@ impl AuthorityPerEpochStore { }; let make_checkpoint = should_accept_tx || final_round; if make_checkpoint { - // Generate pending checkpoint for regular user tx. let checkpoint_height = if self.randomness_state_enabled() { consensus_commit_info.round * 2 } else { @@ -2866,29 +2943,34 @@ impl AuthorityPerEpochStore { } } checkpoint_roots.extend(roots.into_iter()); + + if let Some(randomness_round) = randomness_round { + randomness_roots.insert(TransactionKey::RandomnessRound( + self.epoch(), + randomness_round, + )); + } + + // Determine whether to write pending checkpoint for user tx with randomness. + // - If randomness is not generated for this commit, we will skip the + // checkpoint with the associated height. Therefore checkpoint heights may + // not be contiguous. + // - Exception: if DKG fails, we always need to write out a PendingCheckpoint + // for randomness tx that are canceled. + let should_write_random_checkpoint = + randomness_round.is_some() || (dkg_failed && !randomness_roots.is_empty()); + let pending_checkpoint = PendingCheckpointV2::V2(PendingCheckpointV2Contents { roots: checkpoint_roots, details: PendingCheckpointInfo { timestamp_ms: consensus_commit_info.timestamp, - last_of_epoch: final_round && randomness_round.is_none(), + last_of_epoch: final_round && !should_write_random_checkpoint, checkpoint_height, }, }); self.write_pending_checkpoint(&mut output, &pending_checkpoint)?; - // Generate pending checkpoint for user tx with randomness. 
- // - If randomness is not generated for this commit, we will skip the - // checkpoint with the associated height. Therefore checkpoint heights may - // not be contiguous. - // - Exception: if DKG fails, we always need to write out a PendingCheckpoint - // for randomness tx that are canceled. - if let Some(randomness_round) = randomness_round { - randomness_roots.insert(TransactionKey::RandomnessRound( - self.epoch(), - randomness_round, - )); - } - if randomness_round.is_some() || (dkg_failed && !randomness_roots.is_empty()) { + if should_write_random_checkpoint { let pending_checkpoint = PendingCheckpointV2::V2(PendingCheckpointV2Contents { roots: randomness_roots.into_iter().collect(), details: PendingCheckpointInfo { @@ -3083,7 +3165,7 @@ impl AuthorityPerEpochStore { cache_reader: &dyn ObjectCacheRead, transactions: &[VerifiedExecutableTransaction], ) -> SuiResult { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); self.process_consensus_transaction_shared_object_versions( cache_reader, transactions, @@ -3128,6 +3210,8 @@ impl AuthorityPerEpochStore { consensus_commit_info: &ConsensusCommitInfo, roots: &mut BTreeSet, randomness_roots: &mut BTreeSet, + mut shared_object_congestion_tracker: SharedObjectCongestionTracker, + mut shared_object_using_randomness_congestion_tracker: SharedObjectCongestionTracker, previously_deferred_tx_digests: HashMap, mut randomness_manager: Option<&mut RandomnessManager>, dkg_failed: bool, @@ -3140,6 +3224,8 @@ impl AuthorityPerEpochStore { bool, // true if final round Option, // consensus commit prologue root )> { + let _scope = monitored_scope("ConsensusCommitHandler::process_consensus_transactions"); + if randomness_round.is_some() { assert!(!dkg_failed); // invariant check } @@ -3152,20 +3238,6 @@ impl AuthorityPerEpochStore { let mut cancelled_txns: BTreeMap = BTreeMap::new(); - // We track transaction execution cost separately for regular transactions and transactions using 
randomness, since - // they will be in different checkpoints. - let mut shared_object_congestion_tracker = SharedObjectCongestionTracker::new( - self.protocol_config().per_object_congestion_control_mode(), - self.protocol_config() - .gas_budget_based_txn_cost_cap_factor_as_option(), - ); - let mut shared_object_using_randomness_congestion_tracker = - SharedObjectCongestionTracker::new( - self.protocol_config().per_object_congestion_control_mode(), - self.protocol_config() - .gas_budget_based_txn_cost_cap_factor_as_option(), - ); - fail_point_arg!( "initial_congestion_tracker", |tracker: SharedObjectCongestionTracker| { @@ -3278,6 +3350,13 @@ impl AuthorityPerEpochStore { .with_label_values(&["randomness_commit"]) .set(shared_object_using_randomness_congestion_tracker.max_cost() as i64); + output.set_congestion_control_object_debts( + shared_object_congestion_tracker.accumulated_debts(), + ); + output.set_congestion_control_randomness_object_debts( + shared_object_using_randomness_congestion_tracker.accumulated_debts(), + ); + if randomness_state_updated { if let Some(randomness_manager) = randomness_manager.as_mut() { randomness_manager @@ -3434,7 +3513,8 @@ impl AuthorityPerEpochStore { shared_object_congestion_tracker: &mut SharedObjectCongestionTracker, authority_metrics: &Arc, ) -> SuiResult { - let _scope = monitored_scope("HandleConsensusTransaction"); + let _scope = monitored_scope("ConsensusCommitHandler::process_consensus_transaction"); + let VerifiedSequencedConsensusTransaction(SequencedConsensusTransaction { certificate_author_index: _, certificate_author, @@ -3457,107 +3537,21 @@ impl AuthorityPerEpochStore { ); return Ok(ConsensusCertificateResult::Ignored); } - if self.has_sent_end_of_publish(certificate_author)? 
- && !previously_deferred_tx_digests.contains_key(certificate.digest()) - { - // This can not happen with valid authority - // With some edge cases consensus might sometimes resend previously seen certificate after EndOfPublish - // However this certificate will be filtered out before this line by `consensus_message_processed` call in `verify_consensus_transaction` - // If we see some new certificate here it means authority is byzantine and sent certificate after EndOfPublish (or we have some bug in ConsensusAdapter) - warn!("[Byzantine authority] Authority {:?} sent a new, previously unseen certificate {:?} after it sent EndOfPublish message to consensus", certificate_author.concise(), certificate.digest()); - return Ok(ConsensusCertificateResult::Ignored); - } // Safe because signatures are verified when consensus called into SuiTxValidator::validate_batch. let certificate = VerifiedCertificate::new_unchecked(*certificate.clone()); - let certificate = VerifiedExecutableTransaction::new_from_certificate(certificate); - - debug!( - ?tracking_id, - tx_digest = ?certificate.digest(), - "handle_consensus_transaction UserTransaction", - ); - - if !self - .get_reconfig_state_read_lock_guard() - .should_accept_consensus_certs() - && !previously_deferred_tx_digests.contains_key(certificate.digest()) - { - debug!("Ignoring consensus certificate for transaction {:?} because of end of epoch", - certificate.digest()); - return Ok(ConsensusCertificateResult::Ignored); - } + let transaction = VerifiedExecutableTransaction::new_from_certificate(certificate); - let deferral_info = self.should_defer( - &certificate, + self.process_consensus_user_transaction( + transaction, + certificate_author, commit_round, + tracking_id, + previously_deferred_tx_digests, dkg_failed, generating_randomness, - previously_deferred_tx_digests, shared_object_congestion_tracker, - ); - - if let Some((deferral_key, deferral_reason)) = deferral_info { - debug!( - "Deferring consensus certificate for 
transaction {:?} until {:?}", - certificate.digest(), - deferral_key - ); - - let deferral_result = match deferral_reason { - DeferralReason::RandomnessNotReady => { - // Always defer transaction due to randomness not ready. - ConsensusCertificateResult::Deferred(deferral_key) - } - DeferralReason::SharedObjectCongestion(congested_objects) => { - authority_metrics - .consensus_handler_congested_transactions - .inc(); - if transaction_deferral_within_limit( - &deferral_key, - self.protocol_config() - .max_deferral_rounds_for_congestion_control(), - ) { - ConsensusCertificateResult::Deferred(deferral_key) - } else { - // Cancel the transaction that has been deferred for too long. - debug!( - "Cancelling consensus certificate for transaction {:?} with deferral key {:?} due to congestion on objects {:?}", - certificate.digest(), - deferral_key, - congested_objects - ); - ConsensusCertificateResult::Cancelled(( - certificate, - CancelConsensusCertificateReason::CongestionOnObjects( - congested_objects, - ), - )) - } - } - }; - return Ok(deferral_result); - } - - if dkg_failed - && self.randomness_state_enabled() - && certificate.transaction_data().uses_randomness() - { - debug!( - "Canceling randomness-using certificate for transaction {:?} because DKG failed", - certificate.digest(), - ); - return Ok(ConsensusCertificateResult::Cancelled(( - certificate, - CancelConsensusCertificateReason::DkgFailed, - ))); - } - - // This certificate will be scheduled. Update object execution cost. 
- if certificate.contains_shared_object() { - shared_object_congestion_tracker.bump_object_execution_cost(&certificate); - } - - Ok(ConsensusCertificateResult::SuiTransaction(certificate)) + authority_metrics, + ) } SequencedConsensusTransactionKind::External(ConsensusTransaction { kind: ConsensusTransactionKind::CheckpointSignature(info), @@ -3720,11 +3714,30 @@ impl AuthorityPerEpochStore { } SequencedConsensusTransactionKind::External(ConsensusTransaction { - kind: ConsensusTransactionKind::UserTransaction(_tx), + kind: ConsensusTransactionKind::UserTransaction(tx), .. }) => { - // TODO(fastpath): implement handling of user transactions from consensus commits. - Ok(ConsensusCertificateResult::Ignored) + // Ignore consensus certified user transaction if Mysticeti fastpath is not enabled. + if !self.protocol_config().mysticeti_fastpath() { + return Ok(ConsensusCertificateResult::Ignored); + } + // Safe because transactions are certified by consensus. + let tx = VerifiedTransaction::new_unchecked(*tx.clone()); + // TODO(fastpath): accept position in consensus, after plumbing consensus round, authority index, and transaction index here. 
+ let transaction = + VerifiedExecutableTransaction::new_from_consensus(tx, self.epoch()); + + self.process_consensus_user_transaction( + transaction, + certificate_author, + commit_round, + tracking_id, + previously_deferred_tx_digests, + dkg_failed, + generating_randomness, + shared_object_congestion_tracker, + authority_metrics, + ) } SequencedConsensusTransactionKind::System(system_transaction) => { Ok(self.process_consensus_system_transaction(system_transaction)) @@ -3749,6 +3762,122 @@ impl AuthorityPerEpochStore { ConsensusCertificateResult::SuiTransaction(system_transaction.clone()) } + fn process_consensus_user_transaction( + &self, + transaction: VerifiedExecutableTransaction, + block_author: &AuthorityPublicKeyBytes, + commit_round: Round, + tracking_id: u64, + previously_deferred_tx_digests: &HashMap, + dkg_failed: bool, + generating_randomness: bool, + shared_object_congestion_tracker: &mut SharedObjectCongestionTracker, + authority_metrics: &Arc, + ) -> SuiResult { + let _scope = monitored_scope("ConsensusCommitHandler::process_consensus_user_transaction"); + + if self.has_sent_end_of_publish(block_author)? 
+ && !previously_deferred_tx_digests.contains_key(transaction.digest()) + { + // This can not happen with valid authority + // With some edge cases consensus might sometimes resend previously seen certificate after EndOfPublish + // However this certificate will be filtered out before this line by `consensus_message_processed` call in `verify_consensus_transaction` + // If we see some new certificate here it means authority is byzantine and sent certificate after EndOfPublish (or we have some bug in ConsensusAdapter) + warn!("[Byzantine authority] Authority {:?} sent a new, previously unseen transaction {:?} after it sent EndOfPublish message to consensus", block_author.concise(), transaction.digest()); + return Ok(ConsensusCertificateResult::Ignored); + } + + debug!( + ?tracking_id, + tx_digest = ?transaction.digest(), + "handle_consensus_transaction UserTransaction", + ); + + if !self + .get_reconfig_state_read_lock_guard() + .should_accept_consensus_certs() + && !previously_deferred_tx_digests.contains_key(transaction.digest()) + { + debug!( + "Ignoring consensus transaction {:?} because of end of epoch", + transaction.digest() + ); + return Ok(ConsensusCertificateResult::Ignored); + } + + let deferral_info = self.should_defer( + &transaction, + commit_round, + dkg_failed, + generating_randomness, + previously_deferred_tx_digests, + shared_object_congestion_tracker, + ); + + if let Some((deferral_key, deferral_reason)) = deferral_info { + debug!( + "Deferring consensus certificate for transaction {:?} until {:?}", + transaction.digest(), + deferral_key + ); + + let deferral_result = match deferral_reason { + DeferralReason::RandomnessNotReady => { + // Always defer transaction due to randomness not ready. 
+ ConsensusCertificateResult::Deferred(deferral_key) + } + DeferralReason::SharedObjectCongestion(congested_objects) => { + authority_metrics + .consensus_handler_congested_transactions + .inc(); + if transaction_deferral_within_limit( + &deferral_key, + self.protocol_config() + .max_deferral_rounds_for_congestion_control(), + ) { + ConsensusCertificateResult::Deferred(deferral_key) + } else { + // Cancel the transaction that has been deferred for too long. + debug!( + "Cancelling consensus transaction {:?} with deferral key {:?} due to congestion on objects {:?}", + transaction.digest(), + deferral_key, + congested_objects + ); + ConsensusCertificateResult::Cancelled(( + transaction, + CancelConsensusCertificateReason::CongestionOnObjects( + congested_objects, + ), + )) + } + } + }; + return Ok(deferral_result); + } + + if dkg_failed + && self.randomness_state_enabled() + && transaction.transaction_data().uses_randomness() + { + debug!( + "Canceling randomness-using transaction {:?} because DKG failed", + transaction.digest(), + ); + return Ok(ConsensusCertificateResult::Cancelled(( + transaction, + CancelConsensusCertificateReason::DkgFailed, + ))); + } + + // This certificate will be scheduled. Update object execution cost. 
+ if transaction.contains_shared_object() { + shared_object_congestion_tracker.bump_object_execution_cost(&transaction); + } + + Ok(ConsensusCertificateResult::SuiTransaction(transaction)) + } + pub(crate) fn write_pending_checkpoint( &self, output: &mut ConsensusCommitOutput, @@ -4062,6 +4191,7 @@ impl AuthorityPerEpochStore { #[derive(Default)] pub(crate) struct ConsensusCommitOutput { // Consensus and reconfig state + consensus_round: Round, consensus_messages_processed: BTreeSet, end_of_publish: BTreeSet, reconfig_state: Option, @@ -4090,11 +4220,18 @@ pub(crate) struct ConsensusCommitOutput { // jwk state pending_jwks: BTreeSet<(AuthorityName, JwkId, JWK)>, active_jwks: BTreeSet<(u64, (JwkId, JWK))>, + + // congestion control state + congestion_control_object_debts: Vec<(ObjectID, u64)>, + congestion_control_randomness_object_debts: Vec<(ObjectID, u64)>, } impl ConsensusCommitOutput { - pub fn new() -> Self { - Default::default() + pub fn new(consensus_round: Round) -> Self { + Self { + consensus_round, + ..Default::default() + } } fn insert_end_of_publish(&mut self, authority: AuthorityName) { @@ -4189,6 +4326,17 @@ impl ConsensusCommitOutput { self.active_jwks.insert((round, key)); } + fn set_congestion_control_object_debts(&mut self, object_debts: Vec<(ObjectID, u64)>) { + self.congestion_control_object_debts = object_debts; + } + + fn set_congestion_control_randomness_object_debts( + &mut self, + object_debts: Vec<(ObjectID, u64)>, + ) { + self.congestion_control_randomness_object_debts = object_debts; + } + pub fn write_to_batch( self, epoch_store: &AuthorityPerEpochStore, @@ -4285,6 +4433,29 @@ impl ConsensusCommitOutput { self.active_jwks.into_iter().map(|j| (j, ())), )?; + batch.insert_batch( + &tables.congestion_control_object_debts, + self.congestion_control_object_debts + .into_iter() + .map(|(object_id, debt)| { + ( + object_id, + CongestionPerObjectDebt::new(self.consensus_round, debt), + ) + }), + )?; + batch.insert_batch( + 
&tables.congestion_control_randomness_object_debts, + self.congestion_control_randomness_object_debts + .into_iter() + .map(|(object_id, debt)| { + ( + object_id, + CongestionPerObjectDebt::new(self.consensus_round, debt), + ) + }), + )?; + Ok(()) } } @@ -4293,12 +4464,8 @@ impl GetSharedLocks for AuthorityPerEpochStore { fn get_shared_locks( &self, key: &TransactionKey, - ) -> Result, SuiError> { - Ok(self - .tables()? - .assigned_shared_object_versions_v2 - .get(key)? - .unwrap_or_default()) + ) -> SuiResult>> { + Ok(self.tables()?.assigned_shared_object_versions_v2.get(key)?) } } diff --git a/crates/sui-core/src/authority/shared_object_congestion_tracker.rs b/crates/sui-core/src/authority/shared_object_congestion_tracker.rs index 237d400114a43..82250fe4e37e9 100644 --- a/crates/sui-core/src/authority/shared_object_congestion_tracker.rs +++ b/crates/sui-core/src/authority/shared_object_congestion_tracker.rs @@ -1,11 +1,15 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use super::authority_per_epoch_store::AuthorityEpochTables; use crate::authority::transaction_deferral::DeferralKey; +use crate::consensus_handler::VerifiedSequencedConsensusTransaction; use narwhal_types::Round; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use sui_protocol_config::PerObjectCongestionControlMode; +use sui_protocol_config::{PerObjectCongestionControlMode, ProtocolConfig}; use sui_types::base_types::{ObjectID, TransactionDigest}; +use sui_types::error::SuiResult; use sui_types::executable_transaction::VerifiedExecutableTransaction; use sui_types::transaction::{Argument, SharedInputObject, TransactionDataAPI}; @@ -25,35 +29,71 @@ use sui_types::transaction::{Argument, SharedInputObject, TransactionDataAPI}; pub struct SharedObjectCongestionTracker { object_execution_cost: HashMap, mode: PerObjectCongestionControlMode, + max_accumulated_txn_cost_per_object_in_commit: u64, gas_budget_based_txn_cost_cap_factor: Option, + 
gas_budget_based_txn_cost_absolute_cap: Option, + max_txn_cost_overage_per_object_in_commit: u64, } impl SharedObjectCongestionTracker { pub fn new( + initial_object_debts: impl IntoIterator, mode: PerObjectCongestionControlMode, + max_accumulated_txn_cost_per_object_in_commit: Option, gas_budget_based_txn_cost_cap_factor: Option, + gas_budget_based_txn_cost_absolute_cap_commit_count: Option, + max_txn_cost_overage_per_object_in_commit: u64, ) -> Self { + let max_accumulated_txn_cost_per_object_in_commit = + if mode == PerObjectCongestionControlMode::None { + 0 + } else { + max_accumulated_txn_cost_per_object_in_commit.expect( + "max_accumulated_txn_cost_per_object_in_commit must be set if mode is not None", + ) + }; Self { - object_execution_cost: HashMap::new(), + object_execution_cost: initial_object_debts.into_iter().collect(), mode, + max_accumulated_txn_cost_per_object_in_commit, gas_budget_based_txn_cost_cap_factor, + gas_budget_based_txn_cost_absolute_cap: + gas_budget_based_txn_cost_absolute_cap_commit_count + .map(|m| m * max_accumulated_txn_cost_per_object_in_commit), + max_txn_cost_overage_per_object_in_commit, } } - pub fn new_with_initial_value_for_test( - init_values: &[(ObjectID, u64)], - mode: PerObjectCongestionControlMode, - gas_budget_based_txn_cost_cap_factor: Option, - ) -> Self { - let mut object_execution_cost = HashMap::new(); - for (object_id, total_cost) in init_values { - object_execution_cost.insert(*object_id, *total_cost); - } - Self { - object_execution_cost, - mode, - gas_budget_based_txn_cost_cap_factor, - } + pub fn from_protocol_config( + tables: &AuthorityEpochTables, + protocol_config: &ProtocolConfig, + round: Round, + for_randomness: bool, + transactions: &[VerifiedSequencedConsensusTransaction], + ) -> SuiResult { + let max_accumulated_txn_cost_per_object_in_commit = + protocol_config.max_accumulated_txn_cost_per_object_in_mysticeti_commit_as_option(); + Ok(Self::new( + tables.load_initial_object_debts( + round, + 
for_randomness, + protocol_config, + transactions, + )?, + protocol_config.per_object_congestion_control_mode(), + if for_randomness { + protocol_config + .max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit_as_option() + .or(max_accumulated_txn_cost_per_object_in_commit) + } else { + max_accumulated_txn_cost_per_object_in_commit + }, + protocol_config.gas_budget_based_txn_cost_cap_factor_as_option(), + protocol_config.gas_budget_based_txn_cost_absolute_cap_commit_count_as_option(), + protocol_config + .max_txn_cost_overage_per_object_in_commit_as_option() + .unwrap_or(0), + )) } // Given a list of shared input objects, returns the starting cost of a transaction that operates on @@ -84,7 +124,6 @@ impl SharedObjectCongestionTracker { pub fn should_defer_due_to_object_congestion( &self, cert: &VerifiedExecutableTransaction, - max_accumulated_txn_cost_per_object_in_commit: u64, previously_deferred_tx_digests: &HashMap, commit_round: Round, ) -> Option<(DeferralKey, Vec)> { @@ -97,7 +136,18 @@ impl SharedObjectCongestionTracker { } let start_cost = self.compute_tx_start_at_cost(&shared_input_objects); - if start_cost + tx_cost <= max_accumulated_txn_cost_per_object_in_commit { + // Allow tx if it's within budget. + if start_cost + tx_cost <= self.max_accumulated_txn_cost_per_object_in_commit { + return None; + } + + // Allow over-budget tx if it's not above the overage limit. + if start_cost <= self.max_accumulated_txn_cost_per_object_in_commit + && start_cost + tx_cost + <= self + .max_accumulated_txn_cost_per_object_in_commit + .saturating_add(self.max_txn_cost_overage_per_object_in_commit) + { return None; } @@ -156,6 +206,28 @@ impl SharedObjectCongestionTracker { } } + // Returns accumulated debts for objects whose budgets have been exceeded over the course + // of the commit. Consumes the tracker object, since this should only be called once after + // all tx have been processed. 
+ pub fn accumulated_debts(self) -> Vec<(ObjectID, u64)> { + if self.max_txn_cost_overage_per_object_in_commit == 0 { + return vec![]; // early-exit if overage is not allowed + } + + self.object_execution_cost + .into_iter() + .filter_map(|(obj_id, cost)| { + let remaining_cost = + cost.saturating_sub(self.max_accumulated_txn_cost_per_object_in_commit); + if remaining_cost > 0 { + Some((obj_id, remaining_cost)) + } else { + None + } + }) + .collect() + } + // Returns the maximum cost of all objects. pub fn max_cost(&self) -> u64 { self.object_execution_cost @@ -178,10 +250,34 @@ impl SharedObjectCongestionTracker { } } } - (number_of_move_call + number_of_move_input) as u64 + let cap = (number_of_move_call + number_of_move_input) as u64 * self .gas_budget_based_txn_cost_cap_factor - .expect("cap factor must be set if TotalGasBudgetWithCap mode is used.") + .expect("cap factor must be set if TotalGasBudgetWithCap mode is used."); + + // Apply absolute cap if configured. + std::cmp::min( + cap, + self.gas_budget_based_txn_cost_absolute_cap + .unwrap_or(u64::MAX), + ) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CongestionPerObjectDebt { + V1(Round, u64), +} + +impl CongestionPerObjectDebt { + pub fn new(round: Round, debt: u64) -> Self { + Self::V1(round, debt) + } + + pub fn into_v1(self) -> (Round, u64) { + match self { + Self::V1(round, debt) => (round, debt), + } } } @@ -214,12 +310,14 @@ mod object_cost_tests { let object_id_1 = ObjectID::random(); let object_id_2 = ObjectID::random(); - let shared_object_congestion_tracker = - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, 5), (object_id_1, 10)], - PerObjectCongestionControlMode::TotalGasBudget, - None, - ); + let shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10)], + PerObjectCongestionControlMode::TotalGasBudget, + Some(0), // not part of this test + None, + None, + 0, + ); let 
shared_input_objects = construct_shared_input_objects(&[(object_id_0, false)]); assert_eq!( @@ -363,10 +461,13 @@ mod object_cost_tests { // 1 10 // object 0: | // object 1: | - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_obj_0, 10), (shared_obj_1, 1)], + SharedObjectCongestionTracker::new( + [(shared_obj_0, 10), (shared_obj_1, 1)], mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, None, + 0, ) } PerObjectCongestionControlMode::TotalTxCount => { @@ -374,10 +475,13 @@ mod object_cost_tests { // 1 2 // object 0: | // object 1: | - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_obj_0, 2), (shared_obj_1, 1)], + SharedObjectCongestionTracker::new( + [(shared_obj_0, 2), (shared_obj_1, 1)], mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, None, + 0, ) } PerObjectCongestionControlMode::TotalGasBudgetWithCap => { @@ -385,10 +489,13 @@ mod object_cost_tests { // 1 10 // object 0: | // object 1: | - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_obj_0, 10), (shared_obj_1, 1)], + SharedObjectCongestionTracker::new( + [(shared_obj_0, 10), (shared_obj_1, 1)], mode, + Some(max_accumulated_txn_cost_per_object_in_commit), Some(45), // Make the cap just less than the gas budget, there are 1 objects in tx. 
+ None, + 0, ) } }; @@ -397,12 +504,7 @@ mod object_cost_tests { for mutable in [true, false].iter() { let tx = build_transaction(&[(shared_obj_0, *mutable)], tx_gas_budget); if let Some((_, congested_objects)) = shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - &tx, - max_accumulated_txn_cost_per_object_in_commit, - &HashMap::new(), - 0, - ) + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) { assert_eq!(congested_objects.len(), 1); assert_eq!(congested_objects[0], shared_obj_0); @@ -417,12 +519,7 @@ mod object_cost_tests { for mutable in [true, false].iter() { let tx = build_transaction(&[(shared_obj_1, *mutable)], tx_gas_budget); assert!(shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - &tx, - max_accumulated_txn_cost_per_object_in_commit, - &HashMap::new(), - 0, - ) + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0,) .is_none()); } @@ -434,12 +531,7 @@ mod object_cost_tests { tx_gas_budget, ); if let Some((_, congested_objects)) = shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - &tx, - max_accumulated_txn_cost_per_object_in_commit, - &HashMap::new(), - 0, - ) + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) { assert_eq!(congested_objects.len(), 1); assert_eq!(congested_objects[0], shared_obj_0); @@ -461,9 +553,15 @@ mod object_cost_tests { ) { let shared_obj_0 = ObjectID::random(); let tx = build_transaction(&[(shared_obj_0, true)], 100); - // Make should_defer_due_to_object_congestion always defer transactions. - let max_accumulated_txn_cost_per_object_in_commit = 0; - let shared_object_congestion_tracker = SharedObjectCongestionTracker::new(mode, Some(2)); + + let shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [], + mode, + Some(0), // Make should_defer_due_to_object_congestion always defer transactions. + Some(2), + None, + 0, + ); // Insert a random pre-existing transaction. 
let mut previously_deferred_tx_digests = HashMap::new(); @@ -484,7 +582,6 @@ mod object_cost_tests { _, )) = shared_object_congestion_tracker.should_defer_due_to_object_congestion( &tx, - max_accumulated_txn_cost_per_object_in_commit, &previously_deferred_tx_digests, 10, ) { @@ -511,7 +608,6 @@ mod object_cost_tests { _, )) = shared_object_congestion_tracker.should_defer_due_to_object_congestion( &tx, - max_accumulated_txn_cost_per_object_in_commit, &previously_deferred_tx_digests, 10, ) { @@ -539,7 +635,6 @@ mod object_cost_tests { _, )) = shared_object_congestion_tracker.should_defer_due_to_object_congestion( &tx, - max_accumulated_txn_cost_per_object_in_commit, &previously_deferred_tx_digests, 10, ) { @@ -550,6 +645,118 @@ mod object_cost_tests { } } + #[rstest] + fn test_should_defer_allow_overage( + #[values( + PerObjectCongestionControlMode::TotalGasBudget, + PerObjectCongestionControlMode::TotalTxCount, + PerObjectCongestionControlMode::TotalGasBudgetWithCap + )] + mode: PerObjectCongestionControlMode, + ) { + telemetry_subscribers::init_for_testing(); + + // Creates two shared objects and three transactions that operate on these objects. + let shared_obj_0 = ObjectID::random(); + let shared_obj_1 = ObjectID::random(); + + let tx_gas_budget = 100; + + // Set max_accumulated_txn_cost_per_object_in_commit to only allow 1 transaction to go through + // before overage occurs. 
+ let max_accumulated_txn_cost_per_object_in_commit = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => tx_gas_budget + 1, + PerObjectCongestionControlMode::TotalTxCount => 2, + PerObjectCongestionControlMode::TotalGasBudgetWithCap => tx_gas_budget - 1, + }; + + let shared_object_congestion_tracker = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => { + // Construct object execution cost as following + // 90 102 + // object 0: | + // object 1: | + SharedObjectCongestionTracker::new( + [(shared_obj_0, 102), (shared_obj_1, 90)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalTxCount => { + // Construct object execution cost as following + // 2 3 + // object 0: | + // object 1: | + SharedObjectCongestionTracker::new( + [(shared_obj_0, 3), (shared_obj_1, 2)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalGasBudgetWithCap => { + // Construct object execution cost as following + // 90 100 + // object 0: | + // object 1: | + SharedObjectCongestionTracker::new( + [(shared_obj_0, 100), (shared_obj_1, 90)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + Some(45), // Make the cap just less than the gas budget, there are 1 objects in tx. + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + }; + + // Read/write to object 0 should be deferred. 
+ for mutable in [true, false].iter() { + let tx = build_transaction(&[(shared_obj_0, *mutable)], tx_gas_budget); + if let Some((_, congested_objects)) = shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) + { + assert_eq!(congested_objects.len(), 1); + assert_eq!(congested_objects[0], shared_obj_0); + } else { + panic!("should defer"); + } + } + + // Read/write to object 1 should go through even though the budget is exceeded. + for mutable in [true, false].iter() { + let tx = build_transaction(&[(shared_obj_1, *mutable)], tx_gas_budget); + assert!(shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0,) + .is_none()); + } + + // Transactions touching both objects should be deferred, with object 0 as the congested object. + for mutable_0 in [true, false].iter() { + for mutable_1 in [true, false].iter() { + let tx = build_transaction( + &[(shared_obj_0, *mutable_0), (shared_obj_1, *mutable_1)], + tx_gas_budget, + ); + if let Some((_, congested_objects)) = shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) + { + assert_eq!(congested_objects.len(), 1); + assert_eq!(congested_objects[0], shared_obj_0); + } else { + panic!("should defer"); + } + } + } + } + #[rstest] fn test_bump_object_execution_cost( #[values( @@ -565,12 +772,14 @@ mod object_cost_tests { let cap_factor = Some(1); - let mut shared_object_congestion_tracker = - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, 5), (object_id_1, 10)], - mode, - cap_factor, - ); + let mut shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10)], + mode, + Some(0), // not part of this test + cap_factor, + None, + 0, + ); assert_eq!(shared_object_congestion_tracker.max_cost(), 10); // Read two objects should not change the object execution cost. 
@@ -578,10 +787,13 @@ mod object_cost_tests { shared_object_congestion_tracker.bump_object_execution_cost(&cert); assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, 5), (object_id_1, 10)], + SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10)], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!(shared_object_congestion_tracker.max_cost(), 10); @@ -597,10 +809,13 @@ mod object_cost_tests { }; assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, expected_object_0_cost), (object_id_1, 10)], + SharedObjectCongestionTracker::new( + [(object_id_0, expected_object_0_cost), (object_id_1, 10)], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!( @@ -626,14 +841,17 @@ mod object_cost_tests { shared_object_congestion_tracker.bump_object_execution_cost(&cert); assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[ + SharedObjectCongestionTracker::new( + [ (object_id_0, expected_object_cost), (object_id_1, expected_object_cost), (object_id_2, expected_object_cost) ], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!( @@ -660,14 +878,17 @@ mod object_cost_tests { shared_object_congestion_tracker.bump_object_execution_cost(&cert); assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[ + SharedObjectCongestionTracker::new( + [ (object_id_0, expected_object_cost), (object_id_1, expected_object_cost), (object_id_2, expected_object_cost) ], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!( @@ -675,4 +896,152 @@ mod object_cost_tests { expected_object_cost ); } + + #[rstest] + fn test_accumulated_debts( + #[values( + 
PerObjectCongestionControlMode::TotalGasBudget, + PerObjectCongestionControlMode::TotalTxCount, + PerObjectCongestionControlMode::TotalGasBudgetWithCap + )] + mode: PerObjectCongestionControlMode, + ) { + telemetry_subscribers::init_for_testing(); + + // Creates two shared objects and three transactions that operate on these objects. + let shared_obj_0 = ObjectID::random(); + let shared_obj_1 = ObjectID::random(); + + let tx_gas_budget = 100; + + // Set max_accumulated_txn_cost_per_object_in_commit to only allow 1 transaction to go through + // before overage occurs. + let max_accumulated_txn_cost_per_object_in_commit = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget + | PerObjectCongestionControlMode::TotalGasBudgetWithCap => 90, + PerObjectCongestionControlMode::TotalTxCount => 2, + }; + + let mut shared_object_congestion_tracker = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => { + // Starting with two objects with accumulated cost 80. + SharedObjectCongestionTracker::new( + [(shared_obj_0, 80), (shared_obj_1, 80)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalGasBudgetWithCap => { + // Starting with two objects with accumulated cost 80. + SharedObjectCongestionTracker::new( + [(shared_obj_0, 80), (shared_obj_1, 80)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + Some(45), + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalTxCount => { + // Starting with two objects with accumulated tx count 2. 
+ SharedObjectCongestionTracker::new( + [(shared_obj_0, 2), (shared_obj_1, 2)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + }; + + // Simulate a tx on object 0 that exceeds the budget. + for mutable in [true, false].iter() { + let tx = build_transaction(&[(shared_obj_0, *mutable)], tx_gas_budget); + shared_object_congestion_tracker.bump_object_execution_cost(&tx); + } + + // Verify that accumulated_debts reports the debt for object 0. + let accumulated_debts = shared_object_congestion_tracker.accumulated_debts(); + assert_eq!(accumulated_debts.len(), 1); + match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => { + assert_eq!(accumulated_debts[0], (shared_obj_0, 90)); // init 80 + cost 100 - budget 90 = 90 + } + PerObjectCongestionControlMode::TotalGasBudgetWithCap => { + assert_eq!(accumulated_debts[0], (shared_obj_0, 80)); // init 80 + capped cost 90 - budget 90 = 80 + } + PerObjectCongestionControlMode::TotalTxCount => { + assert_eq!(accumulated_debts[0], (shared_obj_0, 1)); // init 2 + 1 tx - budget 2 = 1 + } + } + } + + #[test] + fn test_accumulated_debts_empty() { + let object_id_0 = ObjectID::random(); + let object_id_1 = ObjectID::random(); + let object_id_2 = ObjectID::random(); + + let shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10), (object_id_2, 100)], + PerObjectCongestionControlMode::TotalGasBudget, + Some(100), + None, + None, + 0, + ); + + let accumulated_debts = shared_object_congestion_tracker.accumulated_debts(); + assert!(accumulated_debts.is_empty()); + } + + #[test] + fn test_tx_cost_absolute_cap() { + let object_id_0 = ObjectID::random(); + let object_id_1 = ObjectID::random(); + let object_id_2 = ObjectID::random(); + + let tx_gas_budget = 2000; + + let mut shared_object_congestion_tracker = 
SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10), (object_id_2, 100)], + PerObjectCongestionControlMode::TotalGasBudgetWithCap, + Some(100), + Some(1000), + Some(2), + 1000, + ); + + // Create a transaction using all three objects + let tx = build_transaction( + &[ + (object_id_0, false), + (object_id_1, false), + (object_id_2, true), + ], + tx_gas_budget, + ); + + // Verify that the transaction is allowed to execute. + // 2000 gas budget would exceed overage limit of 1000 but is capped to 200 by the absolute cap. + assert!(shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) + .is_none()); + + // Verify max cost after bumping is limited by the absolute cap. + shared_object_congestion_tracker.bump_object_execution_cost(&tx); + assert_eq!(300, shared_object_congestion_tracker.max_cost()); + + // Verify accumulated debts still uses the per-commit budget to decrement. + let accumulated_debts = shared_object_congestion_tracker.accumulated_debts(); + assert_eq!(accumulated_debts.len(), 1); + assert_eq!(accumulated_debts[0], (object_id_2, 200)); + } } diff --git a/crates/sui-core/src/authority_client.rs b/crates/sui-core/src/authority_client.rs index 78461b0656476..9537dec300c71 100644 --- a/crates/sui-core/src/authority_client.rs +++ b/crates/sui-core/src/authority_client.rs @@ -11,6 +11,7 @@ use std::time::Duration; use sui_network::{api::ValidatorClient, tonic}; use sui_types::base_types::AuthorityName; use sui_types::committee::CommitteeWithNetworkMetadata; +use sui_types::crypto::NetworkPublicKey; use sui_types::messages_checkpoint::{ CheckpointRequest, CheckpointRequestV2, CheckpointResponse, CheckpointResponseV2, }; @@ -97,15 +98,32 @@ pub struct NetworkAuthorityClient { } impl NetworkAuthorityClient { - pub async fn connect(address: &Multiaddr) -> anyhow::Result { - let channel = mysten_network::client::connect(address) + pub async fn connect( + address: &Multiaddr, + tls_target: Option, + ) 
-> anyhow::Result { + let tls_config = tls_target.map(|tls_target| { + sui_tls::create_rustls_client_config( + tls_target, + sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + None, + ) + }); + let channel = mysten_network::client::connect(address, tls_config) .await .map_err(|err| anyhow!(err.to_string()))?; Ok(Self::new(channel)) } - pub fn connect_lazy(address: &Multiaddr) -> Self { - let client: SuiResult<_> = mysten_network::client::connect_lazy(address) + pub fn connect_lazy(address: &Multiaddr, tls_target: Option) -> Self { + let tls_config = tls_target.map(|tls_target| { + sui_tls::create_rustls_client_config( + tls_target, + sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + None, + ) + }); + let client: SuiResult<_> = mysten_network::client::connect_lazy(address, tls_config) .map(ValidatorClient::new) .map_err(|err| err.to_string().into()); Self { client } @@ -265,7 +283,16 @@ pub fn make_network_authority_clients_with_network_config( for (name, (_state, network_metadata)) in committee.validators() { let address = network_metadata.network_address.clone(); let address = address.rewrite_udp_to_tcp(); - let maybe_channel = network_config.connect_lazy(&address).map_err(|e| { + // TODO: Enable TLS on this interface with below config, once support is rolled out to validators. + // let tls_config = network_metadata.network_public_key.as_ref().map(|key| { + // sui_tls::create_rustls_client_config( + // key.clone(), + // sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + // None, + // ) + // }); + // TODO: Change below code to generate a SuiError if no valid TLS config is available. 
+ let maybe_channel = network_config.connect_lazy(&address, None).map_err(|e| { tracing::error!( address = %address, name = %name, diff --git a/crates/sui-core/src/authority_server.rs b/crates/sui-core/src/authority_server.rs index a5ece01381f9b..0e9aeb30ee1ef 100644 --- a/crates/sui-core/src/authority_server.rs +++ b/crates/sui-core/src/authority_server.rs @@ -4,7 +4,9 @@ use anyhow::Result; use async_trait::async_trait; +use fastcrypto::traits::KeyPair; use mysten_metrics::spawn_monitored_task; +use mysten_network::server::SUI_TLS_SERVER_NAME; use prometheus::{ register_histogram_with_registry, register_int_counter_vec_with_registry, register_int_counter_with_registry, Histogram, IntCounter, IntCounterVec, Registry, @@ -149,6 +151,11 @@ impl AuthorityServer { self, address: Multiaddr, ) -> Result { + let tls_config = sui_tls::create_rustls_server_config( + self.state.config.network_key_pair().copy().private(), + SUI_TLS_SERVER_NAME.to_string(), + sui_tls::AllowAll, + ); let mut server = mysten_network::config::Config::new() .server_builder() .add_service(ValidatorServer::new(ValidatorService::new_for_tests( @@ -156,7 +163,7 @@ impl AuthorityServer { self.consensus_adapter, self.metrics, ))) - .bind(&address) + .bind(&address, Some(tls_config)) .await .unwrap(); let local_addr = server.local_addr().to_owned(); @@ -828,7 +835,7 @@ impl ValidatorService { .await? } ConsensusTransactionKind::UserTransaction(tx) => { - self.state.await_transaction_effects(*tx.digest()).await? + self.state.await_transaction_effects(*tx.digest(), epoch_store).await? 
} _ => panic!("`handle_submit_to_consensus` received transaction that is not a CertifiedTransaction or UserTransaction"), }; diff --git a/crates/sui-core/src/checkpoints/mod.rs b/crates/sui-core/src/checkpoints/mod.rs index d9aed27ff48f3..77694b0cc9a6f 100644 --- a/crates/sui-core/src/checkpoints/mod.rs +++ b/crates/sui-core/src/checkpoints/mod.rs @@ -18,10 +18,8 @@ use crate::execution_cache::TransactionCacheRead; use crate::stake_aggregator::{InsertResult, MultiStakeAggregator}; use crate::state_accumulator::StateAccumulator; use diffy::create_patch; -use futures::future::{select, Either}; -use futures::FutureExt; use itertools::Itertools; -use mysten_metrics::{monitored_scope, spawn_monitored_task, MonitoredFutureExt}; +use mysten_metrics::{monitored_future, monitored_scope, MonitoredFutureExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; use sui_macros::fail_point; @@ -63,10 +61,7 @@ use sui_types::messages_consensus::ConsensusTransactionKey; use sui_types::signature::GenericSignature; use sui_types::sui_system_state::{SuiSystemState, SuiSystemStateTrait}; use sui_types::transaction::{TransactionDataAPI, TransactionKey, TransactionKind}; -use tokio::{ - sync::{watch, Notify}, - time::timeout, -}; +use tokio::{sync::Notify, task::JoinSet, time::timeout}; use tracing::{debug, error, info, instrument, warn}; use typed_store::traits::{TableSummary, TypedStoreDebug}; use typed_store::DBMapUtils; @@ -862,7 +857,6 @@ pub struct CheckpointBuilder { effects_store: Arc, accumulator: Weak, output: Box, - exit: watch::Receiver<()>, metrics: Arc, max_transactions_per_checkpoint: usize, max_checkpoint_size_bytes: usize, @@ -872,7 +866,6 @@ pub struct CheckpointAggregator { tables: Arc, epoch_store: Arc, notify: Arc, - exit: watch::Receiver<()>, current: Option, output: Box, state: Arc, @@ -900,7 +893,6 @@ impl CheckpointBuilder { effects_store: Arc, accumulator: Weak, output: Box, - exit: watch::Receiver<()>, notify_aggregator: Arc, metrics: Arc, 
max_transactions_per_checkpoint: usize, @@ -914,7 +906,6 @@ impl CheckpointBuilder { effects_store, accumulator, output, - exit, notify_aggregator, metrics, max_transactions_per_checkpoint, @@ -925,26 +916,10 @@ impl CheckpointBuilder { async fn run(mut self) { info!("Starting CheckpointBuilder"); loop { - // Check whether an exit signal has been received, if so we break the loop. - // This gives us a chance to exit, in case checkpoint making keeps failing. - match self.exit.has_changed() { - Ok(true) | Err(_) => { - break; - } - Ok(false) => (), - }; - self.maybe_build_checkpoints().await; - match select(self.exit.changed().boxed(), self.notify.notified().boxed()).await { - Either::Left(_) => { - // break loop on exit signal - break; - } - Either::Right(_) => {} - } + self.notify.notified().await; } - info!("Shutting down CheckpointBuilder"); } async fn maybe_build_checkpoints(&mut self) { @@ -1768,7 +1743,6 @@ impl CheckpointAggregator { tables: Arc, epoch_store: Arc, notify: Arc, - exit: watch::Receiver<()>, output: Box, state: Arc, metrics: Arc, @@ -1778,7 +1752,6 @@ impl CheckpointAggregator { tables, epoch_store, notify, - exit, current, output, state, @@ -1799,19 +1772,7 @@ impl CheckpointAggregator { continue; } - match select( - self.exit.changed().boxed(), - timeout(Duration::from_secs(1), self.notify.notified()).boxed(), - ) - .await - { - Either::Left(_) => { - // return on exit signal - info!("Shutting down CheckpointAggregator"); - return; - } - Either::Right(_) => {} - } + let _ = timeout(Duration::from_secs(1), self.notify.notified()).await; } } @@ -2240,14 +2201,14 @@ impl CheckpointService { metrics: Arc, max_transactions_per_checkpoint: usize, max_checkpoint_size_bytes: usize, - ) -> (Arc, watch::Sender<()> /* The exit sender */) { + ) -> (Arc, JoinSet<()> /* Handle to tasks */) { info!( "Starting checkpoint service with {max_transactions_per_checkpoint} max_transactions_per_checkpoint and {max_checkpoint_size_bytes} max_checkpoint_size_bytes" ); 
let notify_builder = Arc::new(Notify::new()); let notify_aggregator = Arc::new(Notify::new()); - let (exit_snd, exit_rcv) = watch::channel(()); + let mut tasks = JoinSet::new(); let builder = CheckpointBuilder::new( state.clone(), @@ -2257,27 +2218,22 @@ impl CheckpointService { effects_store, accumulator, checkpoint_output, - exit_rcv.clone(), notify_aggregator.clone(), metrics.clone(), max_transactions_per_checkpoint, max_checkpoint_size_bytes, ); - - let epoch_store_clone = epoch_store.clone(); - spawn_monitored_task!(epoch_store_clone.within_alive_epoch(builder.run())); + tasks.spawn(monitored_future!(builder.run())); let aggregator = CheckpointAggregator::new( checkpoint_store.clone(), epoch_store.clone(), notify_aggregator.clone(), - exit_rcv, certified_checkpoint_output, state.clone(), metrics.clone(), ); - - spawn_monitored_task!(aggregator.run()); + tasks.spawn(monitored_future!(aggregator.run())); let last_signature_index = epoch_store .get_last_checkpoint_signature_index() @@ -2291,7 +2247,8 @@ impl CheckpointService { last_signature_index, metrics, }); - (service, exit_snd) + + (service, tasks) } #[cfg(test)] @@ -2302,7 +2259,7 @@ impl CheckpointService { ) -> SuiResult { use crate::authority::authority_per_epoch_store::ConsensusCommitOutput; - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); epoch_store.write_pending_checkpoint(&mut output, &checkpoint)?; let mut batch = epoch_store.db_batch_for_test(); output.write_to_batch(epoch_store, &mut batch)?; @@ -2404,6 +2361,7 @@ mod tests { use super::*; use crate::authority::test_authority_builder::TestAuthorityBuilder; use futures::future::BoxFuture; + use futures::FutureExt as _; use shared_crypto::intent::{Intent, IntentScope}; use std::collections::{BTreeMap, HashMap}; use std::ops::Deref; @@ -2535,7 +2493,7 @@ mod tests { &epoch_store, )); - let (checkpoint_service, _exit) = CheckpointService::spawn( + let (checkpoint_service, _tasks) = 
CheckpointService::spawn( state.clone(), checkpoint_store, epoch_store.clone(), diff --git a/crates/sui-core/src/consensus_handler.rs b/crates/sui-core/src/consensus_handler.rs index d00b8273f1d02..45e1a269a7876 100644 --- a/crates/sui-core/src/consensus_handler.rs +++ b/crates/sui-core/src/consensus_handler.rs @@ -45,9 +45,7 @@ use crate::{ }, checkpoints::{CheckpointService, CheckpointServiceNotify}, consensus_throughput_calculator::ConsensusThroughputCalculator, - consensus_types::consensus_output_api::{ - parse_block_transactions, ConsensusCommitAPI, ParsedTransaction, - }, + consensus_types::consensus_output_api::{parse_block_transactions, ConsensusCommitAPI}, execution_cache::ObjectCacheRead, scoring_decision::update_low_scoring_authorities, transaction_manager::TransactionManager, @@ -192,7 +190,7 @@ impl ConsensusHandler { impl ConsensusHandler { #[instrument(level = "debug", skip_all)] async fn handle_consensus_commit(&mut self, consensus_commit: impl ConsensusCommitAPI) { - let _scope = monitored_scope("HandleConsensusOutput"); + let _scope = monitored_scope("ConsensusCommitHandler::handle_consensus_commit"); let last_committed_round = self.last_consensus_stats.index.last_committed_round; @@ -295,7 +293,7 @@ impl ConsensusHandler { .inc(); { - let span = trace_span!("process_consensus_certs"); + let span = trace_span!("ConsensusHandler::HandleCommit::process_consensus_txns"); let _guard = span.enter(); for (authority_index, parsed_transactions) in consensus_commit.transactions() { // TODO: consider only messages within 1~3 rounds of the leader? 
@@ -497,14 +495,8 @@ impl MysticetiConsensusHandler { tasks.spawn(monitored_future!(async move { while let Some(blocks_and_rejected_transactions) = transaction_receiver.recv().await { - let parsed_transactions = blocks_and_rejected_transactions - .into_iter() - .flat_map(|(block, rejected_transactions)| { - parse_block_transactions(&block, &rejected_transactions) - }) - .collect::>(); consensus_transaction_handler - .handle_consensus_transactions(parsed_transactions) + .handle_consensus_transactions(blocks_and_rejected_transactions) .await; } })); @@ -886,7 +878,18 @@ impl ConsensusTransactionHandler { self.enabled } - pub async fn handle_consensus_transactions(&self, parsed_transactions: Vec) { + pub async fn handle_consensus_transactions( + &self, + blocks_and_rejected_transactions: Vec<(VerifiedBlock, Vec)>, + ) { + let _scope = monitored_scope("ConsensusTransactionHandler::handle_consensus_transactions"); + + let parsed_transactions = blocks_and_rejected_transactions + .into_iter() + .flat_map(|(block, rejected_transactions)| { + parse_block_transactions(&block, &rejected_transactions) + }) + .collect::>(); let mut pending_consensus_transactions = vec![]; let executable_transactions: Vec<_> = parsed_transactions .into_iter() @@ -911,13 +914,10 @@ impl ConsensusTransactionHandler { return None; } pending_consensus_transactions.push(parsed.transaction.clone()); - let tx = VerifiedTransaction::new_from_verified(*tx.clone()); + let tx = VerifiedTransaction::new_unchecked(*tx.clone()); Some(VerifiedExecutableTransaction::new_from_consensus( tx, self.epoch_store.epoch(), - parsed.round, - parsed.authority, - parsed.transaction_index, )) } _ => None, @@ -984,7 +984,7 @@ mod tests { }, checkpoints::CheckpointServiceNoop, consensus_adapter::consensus_tests::{ - test_certificates, test_gas_objects, test_user_transaction, + test_certificates_with_gas_objects, test_user_transaction, }, post_consensus_tx_reorder::PostConsensusTxReorder, }; @@ -992,13 +992,27 @@ mod tests { 
#[tokio::test] pub async fn test_consensus_commit_handler() { // GIVEN - let mut objects = test_gas_objects(); - let shared_object = Object::shared_for_testing(); - objects.push(shared_object.clone()); + // 1 account keypair + let (sender, keypair) = deterministic_random_account_key(); + // 12 gas objects. + let gas_objects: Vec = (0..12) + .map(|_| Object::with_id_owner_for_testing(ObjectID::random(), sender)) + .collect(); + // 4 owned objects. + let owned_objects: Vec = (0..4) + .map(|_| Object::with_id_owner_for_testing(ObjectID::random(), sender)) + .collect(); + // 6 shared objects. + let shared_objects: Vec = (0..6) + .map(|_| Object::shared_for_testing()) + .collect::>(); + let mut all_objects = gas_objects.clone(); + all_objects.extend(owned_objects.clone()); + all_objects.extend(shared_objects.clone()); let network_config = sui_swarm_config::network_config_builder::ConfigBuilder::new_with_temp_dir() - .with_objects(objects.clone()) + .with_objects(all_objects.clone()) .build(); let state = TestAuthorityBuilder::new() @@ -1025,18 +1039,58 @@ mod tests { Arc::new(throughput_calculator), ); - // AND - // Create test transactions - let transactions = test_certificates(&state, shared_object).await; - let mut blocks = Vec::new(); + // AND create test user transactions alternating between owned and shared input. 
+ let mut user_transactions = vec![]; + for (i, gas_object) in gas_objects[0..8].iter().enumerate() { + let input_object = if i % 2 == 0 { + owned_objects.get(i / 2).unwrap().clone() + } else { + shared_objects.get(i / 2).unwrap().clone() + }; + let transaction = test_user_transaction( + &state, + sender, + &keypair, + gas_object.clone(), + vec![input_object], + ) + .await; + user_transactions.push(transaction); + } - for (i, transaction) in transactions.iter().enumerate() { - let transaction_bytes: Vec = bcs::to_bytes( - &ConsensusTransaction::new_certificate_message(&state.name, transaction.clone()), + // AND create 4 certified transactions with remaining gas objects and 2 shared objects. + // Having more txns on the same shared object may get deferred. + let certified_transactions = [ + test_certificates_with_gas_objects( + &state, + &gas_objects[8..10], + shared_objects[4].clone(), + ) + .await, + test_certificates_with_gas_objects( + &state, + &gas_objects[10..12], + shared_objects[5].clone(), ) - .unwrap(); + .await, + ] + .concat(); - // AND create block for each transaction + // AND create block for each user and certified transaction + let mut blocks = Vec::new(); + for (i, consensus_transaction) in user_transactions + .iter() + .map(|t| { + ConsensusTransaction::new_user_transaction_message(&state.name, t.inner().clone()) + }) + .chain( + certified_transactions + .iter() + .map(|t| ConsensusTransaction::new_certificate_message(&state.name, t.clone())), + ) + .enumerate() + { + let transaction_bytes = bcs::to_bytes(&consensus_transaction).unwrap(); let block = VerifiedBlock::new_for_test( TestBlock::new(100 + i as u32, (i % consensus_committee.size()) as u32) .set_transactions(vec![Transaction::new(transaction_bytes)]) @@ -1046,7 +1100,7 @@ mod tests { blocks.push(block); } - // AND create the consensus output + // AND create the consensus commit let leader_block = blocks[0].clone(); let committed_sub_dag = CommittedSubDag::new( leader_block.reference(), 
@@ -1057,14 +1111,14 @@ mod tests { vec![], ); - // AND processing the consensus output once + // AND process the consensus commit once consensus_handler .handle_consensus_commit(committed_sub_dag.clone()) .await; - // AND capturing the consensus stats + // THEN check the consensus stats let num_blocks = blocks.len(); - let num_transactions = transactions.len(); + let num_transactions = user_transactions.len() + certified_transactions.len(); let last_consensus_stats_1 = consensus_handler.last_consensus_stats.clone(); assert_eq!( last_consensus_stats_1.index.transaction_index, @@ -1082,6 +1136,39 @@ mod tests { num_transactions as u64 ); + // THEN check for execution status of user transactions. + for (i, t) in user_transactions.iter().enumerate() { + let digest = t.digest(); + if let Ok(Ok(_)) = tokio::time::timeout( + std::time::Duration::from_secs(10), + state.notify_read_effects(*digest), + ) + .await + { + // Effects exist as expected. + } else { + panic!("User transaction {} {} did not execute", i, digest); + } + } + + // THEN check for execution status of certified transactions. + for (i, t) in certified_transactions.iter().enumerate() { + let digest = t.digest(); + if let Ok(Ok(_)) = tokio::time::timeout( + std::time::Duration::from_secs(10), + state.notify_read_effects(*digest), + ) + .await + { + // Effects exist as expected. + } else { + panic!("Certified transaction {} {} did not execute", i, digest); + } + } + + // THEN check for no inflight or suspended transactions. + state.transaction_manager().check_empty_for_testing(); + // WHEN processing the same output multiple times // THEN the consensus stats do not update for _ in 0..2 { @@ -1178,10 +1265,10 @@ mod tests { // AND process the transactions from consensus output. 
transaction_handler - .handle_consensus_transactions(parse_block_transactions(&block, &rejected_transactions)) + .handle_consensus_transactions(vec![(block.clone(), rejected_transactions.clone())]) .await; - // THEN check for execution status of transactions. + // THEN check for status of transactions that should have been executed. for (i, t) in transactions.iter().enumerate() { // Do not expect shared transactions or rejected transactions to be executed. if i % 2 == 1 || rejected_transactions.contains(&(i as TransactionIndex)) { diff --git a/crates/sui-core/src/consensus_types/consensus_output_api.rs b/crates/sui-core/src/consensus_types/consensus_output_api.rs index e0c2eeebc16e4..c6d7d7c662599 100644 --- a/crates/sui-core/src/consensus_types/consensus_output_api.rs +++ b/crates/sui-core/src/consensus_types/consensus_output_api.rs @@ -6,7 +6,7 @@ use consensus_core::{BlockAPI, CommitDigest, TransactionIndex, VerifiedBlock}; use sui_protocol_config::ProtocolConfig; use sui_types::{ digests::ConsensusCommitDigest, - messages_consensus::{AuthorityIndex, ConsensusTransaction, Round}, + messages_consensus::{AuthorityIndex, ConsensusTransaction}, }; pub(crate) struct ParsedTransaction { @@ -16,12 +16,6 @@ pub(crate) struct ParsedTransaction { pub(crate) rejected: bool, // Bytes length of the serialized transaction pub(crate) serialized_len: usize, - // Consensus round of the block containing the transaction. - pub(crate) round: Round, - // Authority index of the block containing the transaction. - pub(crate) authority: AuthorityIndex, - // Transaction index in the block. 
- pub(crate) transaction_index: TransactionIndex, } pub(crate) trait ConsensusCommitAPI: Display { @@ -136,9 +130,6 @@ pub(crate) fn parse_block_transactions( transaction, rejected, serialized_len: tx.data().len(), - round, - authority, - transaction_index: index as TransactionIndex, } }) .collect() diff --git a/crates/sui-core/src/epoch/randomness.rs b/crates/sui-core/src/epoch/randomness.rs index 952ea5952d8f1..4c1ac20c73e62 100644 --- a/crates/sui-core/src/epoch/randomness.rs +++ b/crates/sui-core/src/epoch/randomness.rs @@ -896,7 +896,7 @@ mod tests { } } for i in 0..randomness_managers.len() { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); for (j, dkg_message) in dkg_messages.iter().cloned().enumerate() { randomness_managers[i] .add_message(&epoch_stores[j].name, dkg_message) @@ -926,7 +926,7 @@ mod tests { } } for i in 0..randomness_managers.len() { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); for (j, dkg_confirmation) in dkg_confirmations.iter().cloned().enumerate() { randomness_managers[i] .add_confirmation(&mut output, &epoch_stores[j].name, dkg_confirmation) @@ -1028,7 +1028,7 @@ mod tests { } } for i in 0..randomness_managers.len() { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); for (j, dkg_message) in dkg_messages.iter().cloned().enumerate() { randomness_managers[i] .add_message(&epoch_stores[j].name, dkg_message) diff --git a/crates/sui-core/src/execution_cache.rs b/crates/sui-core/src/execution_cache.rs index 83cca46df4ccb..c26c57134568c 100644 --- a/crates/sui-core/src/execution_cache.rs +++ b/crates/sui-core/src/execution_cache.rs @@ -8,6 +8,7 @@ use crate::authority::epoch_start_configuration::EpochStartConfiguration; use crate::authority::AuthorityStore; use crate::state_accumulator::AccumulatorStore; use crate::transaction_outputs::TransactionOutputs; +use mysten_common::fatal; use 
sui_types::bridge::Bridge; use futures::{future::BoxFuture, FutureExt}; @@ -587,6 +588,13 @@ pub trait TransactionCacheRead: Send + Sync { digests: &'a [TransactionDigest], ) -> BoxFuture<'a, SuiResult>>; + /// Wait until the effects of the given transactions are available and return them. + /// WARNING: If calling this on a transaction that could be reverted, you must be + /// sure that this function cannot be called during reconfiguration. The best way to + /// do this is to wrap your future in EpochStore::within_alive_epoch. Holding an + /// ExecutionLockReadGuard would also prevent reconfig from happening while waiting, + /// but this is very dangerous, as it could prevent reconfiguration from ever + /// occurring! fn notify_read_executed_effects<'a>( &'a self, digests: &'a [TransactionDigest], @@ -597,7 +605,7 @@ pub trait TransactionCacheRead: Send + Sync { self.multi_get_effects(&digests).map(|effects| { effects .into_iter() - .map(|e| e.expect("digests must exist")) + .map(|e| e.unwrap_or_else(|| fatal!("digests must exist"))) .collect() }) } diff --git a/crates/sui-core/src/execution_cache/writeback_cache.rs b/crates/sui-core/src/execution_cache/writeback_cache.rs index 8431429b683db..5ff74be1c0182 100644 --- a/crates/sui-core/src/execution_cache/writeback_cache.rs +++ b/crates/sui-core/src/execution_cache/writeback_cache.rs @@ -463,15 +463,37 @@ impl WritebackCache { trace!(?object_id, ?version, ?object, "inserting object entry"); fail_point_async!("write_object_entry"); self.metrics.record_cache_write("object"); - self.dirty - .objects - .entry(*object_id) - .or_default() - .insert(version, object.clone()); + + // We must hold the lock for the object entry while inserting to the + // object_by_id_cache. Otherwise, a surprising bug can occur: + // + // 1. A thread executing TX1 can write object (O,1) to the dirty set and then pause. + // 2. 
TX2, which reads (O,1) can begin executing, because TransactionManager immediately + // schedules transactions if their inputs are available. It does not matter that TX1 + // hasn't finished executing yet. + // 3. TX2 can write (O,2) to both the dirty set and the object_by_id_cache. + // 4. The thread executing TX1 can resume and write (O,1) to the object_by_id_cache. + // + // Now, any subsequent attempt to get the latest version of O will return (O,1) instead of + // (O,2). + // + // This seems very unlikely, but it may be possible under the following circumstances: + // - While a thread is unlikely to pause for so long, moka cache uses optimistic + // lock-free algorithms that have retry loops. Possibly, under high contention, this + // code might spin for a surprisingly long time. + // - Additionally, many concurrent re-executions of the same tx could happen due to + // the tx finalizer, plus checkpoint executor, consensus, and RPCs from fullnodes. + let mut entry = self.dirty.objects.entry(*object_id).or_default(); + self.cached.object_by_id_cache.insert( *object_id, - Arc::new(Mutex::new(LatestObjectCacheEntry::Object(version, object))), + Arc::new(Mutex::new(LatestObjectCacheEntry::Object( + version, + object.clone(), + ))), ); + + entry.insert(version, object); } async fn write_marker_value( diff --git a/crates/sui-core/src/execution_driver.rs b/crates/sui-core/src/execution_driver.rs index ca433a8a4d383..e0bfa1523b736 100644 --- a/crates/sui-core/src/execution_driver.rs +++ b/crates/sui-core/src/execution_driver.rs @@ -12,6 +12,7 @@ use rand::{ Rng, SeedableRng, }; use sui_macros::fail_point_async; +use sui_protocol_config::Chain; use tokio::{ sync::{mpsc::UnboundedReceiver, oneshot, Semaphore}, time::sleep, @@ -44,6 +45,18 @@ pub async fn execution_process( let limit = Arc::new(Semaphore::new(num_cpus::get())); let mut rng = StdRng::from_rng(&mut OsRng).unwrap(); + let is_mainnet = { + let Some(state) = authority_state.upgrade() else { + info!("Authority 
state has shutdown. Exiting ..."); + return; + }; + + state + .get_chain_identifier() + .map(|chain_id| chain_id.chain()) + == Some(Chain::Mainnet) + }; + // Loop whenever there is a signal that a new transactions is ready to process. loop { let _scope = monitored_scope("ExecutionDriver::loop"); @@ -86,6 +99,16 @@ pub async fn execution_process( let digest = *certificate.digest(); trace!(?digest, "Pending certificate execution activated."); + if epoch_store.epoch() != certificate.epoch() { + info!( + ?digest, + cur_epoch = epoch_store.epoch(), + cert_epoch = certificate.epoch(), + "Ignoring certificate from previous epoch." + ); + continue; + } + let limit = limit.clone(); // hold semaphore permit until task completes. unwrap ok because we never close // the semaphore in this context. @@ -122,7 +145,9 @@ pub async fn execution_process( .try_execute_immediately(&certificate, expected_effects_digest, &epoch_store_clone) .await; if let Err(e) = res { - if attempts == EXECUTION_MAX_ATTEMPTS { + // Tighten this check everywhere except mainnet - if we don't see an increase in + // these crashes we will remove the retries. + if !is_mainnet || attempts == EXECUTION_MAX_ATTEMPTS { panic!("Failed to execute certified transaction {digest:?} after {attempts} attempts! error={e} certificate={certificate:?}"); } // Assume only transient failure can happen. 
Permanent failure is probably diff --git a/crates/sui-core/src/generate_format.rs b/crates/sui-core/src/generate_format.rs index 21d00d1a10da3..17d90318317a9 100644 --- a/crates/sui-core/src/generate_format.rs +++ b/crates/sui-core/src/generate_format.rs @@ -131,7 +131,7 @@ fn get_registry() -> Result { let sig2: GenericSignature = Signature::new_secure(&msg, &kp2).into(); let sig3: GenericSignature = Signature::new_secure(&msg, &kp3).into(); let sig4: GenericSignature = GenericSignature::from_str("BQNNMTczMTgwODkxMjU5NTI0MjE3MzYzNDIyNjM3MTc5MzI3MTk0Mzc3MTc4NDQyODI0MTAxODc5NTc5ODQ3NTE5Mzk5NDI4OTgyNTEyNTBNMTEzNzM5NjY2NDU0NjkxMjI1ODIwNzQwODIyOTU5ODUzODgyNTg4NDA2ODE2MTgyNjg1OTM5NzY2OTczMjU4OTIyODA5MTU2ODEyMDcBMQMCTDU5Mzk4NzExNDczNDg4MzQ5OTczNjE3MjAxMjIyMzg5ODAxNzcxNTIzMDMyNzQzMTEwNDcyNDk5MDU5NDIzODQ5MTU3Njg2OTA4OTVMNDUzMzU2ODI3MTEzNDc4NTI3ODczMTIzNDU3MDM2MTQ4MjY1MTk5Njc0MDc5MTg4ODI4NTg2NDk2Njg4NDAzMjcxNzA0OTgxMTcwOAJNMTA1NjQzODcyODUwNzE1NTU0Njk3NTM5OTA2NjE0MTA4NDAxMTg2MzU5MjU0NjY1OTcwMzcwMTgwNTg3NzAwNDEzNDc1MTg0NjEzNjhNMTI1OTczMjM1NDcyNzc1NzkxNDQ2OTg0OTYzNzIyNDI2MTUzNjgwODU4MDEzMTMzNDMxNTU3MzU1MTEzMzAwMDM4ODQ3Njc5NTc4NTQCATEBMANNMTU3OTE1ODk0NzI1NTY4MjYyNjMyMzE2NDQ3Mjg4NzMzMzc2MjkwMTUyNjk5ODQ2OTk0MDQwNzM2MjM2MDMzNTI1Mzc2Nzg4MTMxNzFMNDU0Nzg2NjQ5OTI0ODg4MTQ0OTY3NjE2MTE1ODAyNDc0ODA2MDQ4NTM3MzI1MDAyOTQyMzkwNDExMzAxNzQyMjUzOTAzNzE2MjUyNwExMXdpYVhOeklqb2lhSFIwY0hNNkx5OXBaQzUwZDJsMFkyZ3VkSFl2YjJGMWRHZ3lJaXcCMmV5SmhiR2NpT2lKU1V6STFOaUlzSW5SNWNDSTZJa3BYVkNJc0ltdHBaQ0k2SWpFaWZRTTIwNzk0Nzg4NTU5NjIwNjY5NTk2MjA2NDU3MDIyOTY2MTc2OTg2Njg4NzI3ODc2MTI4MjIzNjI4MTEzOTE2MzgwOTI3NTAyNzM3OTExCgAAAAAAAABhAG6Bf8BLuaIEgvF8Lx2jVoRWKKRIlaLlEJxgvqwq5nDX+rvzJxYAUFd7KeQBd9upNx+CHpmINkfgj26jcHbbqAy5xu4WMO8+cRFEpkjbBruyKE9ydM++5T/87lA8waSSAA==").unwrap(); - let sig5: GenericSignature = 
GenericSignature::from_str("BiVJlg3liA6MaHQ0Fw9kdmBbj+SuuaKGMseZXPO6gx2XYx0AAAAAigF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCJjaGFsbGVuZ2UiOiJBQUFBdF9taklCMXZiVnBZTTZXVjZZX29peDZKOGFOXzlzYjhTS0ZidWtCZmlRdyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZX1iApjskL9Xyfopyg9Av7MSrcchSpfWqAYoJ+qfSId4gNmoQ1YNgj2alDpRIbq9kthmyGY25+k24FrW114PEoy5C+8DPRcOCTtACi3ZywtZ4UILhwV+Suh79rWtbKqDqhBQwxM=").unwrap(); + let sig5: GenericSignature = GenericSignature::from_str("BiVYDmenOnqS+thmz5m5SrZnWaKXZLVxgh+rri6LHXs25B0AAAAAnQF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCAiY2hhbGxlbmdlIjoiQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQSIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZSwgInVua25vd24iOiAidW5rbm93biJ9YgJMwqcOmZI7F/N+K5SMe4DRYCb4/cDWW68SFneSHoD2GxKKhksbpZ5rZpdrjSYABTCsFQQBpLORzTvbj4edWKd/AsEBeovrGvHR9Ku7critg6k7qvfFlPUngujXfEzXd8Eg").unwrap(); let multi_sig = MultiSig::combine( vec![sig1.clone(), sig2.clone(), sig3.clone(), sig4.clone()], diff --git a/crates/sui-core/src/post_consensus_tx_reorder.rs b/crates/sui-core/src/post_consensus_tx_reorder.rs index 9781777055b63..7f6d05fd7d61f 100644 --- a/crates/sui-core/src/post_consensus_tx_reorder.rs +++ b/crates/sui-core/src/post_consensus_tx_reorder.rs @@ -25,7 +25,7 @@ impl PostConsensusTxReorder { } fn order_by_gas_price(transactions: &mut [VerifiedSequencedConsensusTransaction]) { - let _scope = monitored_scope("HandleConsensusOutput::order_by_gas_price"); + let _scope = monitored_scope("ConsensusCommitHandler::order_by_gas_price"); transactions.sort_by_key(|txn| { // Reverse order, so that transactions with higher gas price are put to the beginning. 
std::cmp::Reverse({ diff --git a/crates/sui-core/src/safe_client.rs b/crates/sui-core/src/safe_client.rs index e1f01bfa46831..c0c7a53086858 100644 --- a/crates/sui-core/src/safe_client.rs +++ b/crates/sui-core/src/safe_client.rs @@ -68,10 +68,11 @@ impl SafeClientMetricsBase { registry, ) .unwrap(), + // Address label is removed to reduce high cardinality, can be added back if needed latency: register_histogram_vec_with_registry!( "safe_client_latency", - "RPC latency observed by safe client aggregator, group by address and method", - &["address", "method"], + "RPC latency observed by safe client aggregator, group by method", + &["method"], mysten_metrics::COARSE_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -113,16 +114,16 @@ impl SafeClientMetrics { let handle_transaction_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_transaction"]); + .with_label_values(&["handle_transaction"]); let handle_certificate_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_certificate"]); + .with_label_values(&["handle_certificate"]); let handle_obj_info_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_object_info_request"]); + .with_label_values(&["handle_object_info_request"]); let handle_tx_info_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_transaction_info_request"]); + .with_label_values(&["handle_transaction_info_request"]); Self { total_requests_handle_transaction_info_request, diff --git a/crates/sui-core/src/transaction_input_loader.rs b/crates/sui-core/src/transaction_input_loader.rs index b9f1028598c88..ee70de4829e7c 100644 --- a/crates/sui-core/src/transaction_input_loader.rs +++ b/crates/sui-core/src/transaction_input_loader.rs @@ -1,13 +1,16 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::execution_cache::ObjectCacheRead; +use crate::{ + authority::authority_per_epoch_store::CertLockGuard, execution_cache::ObjectCacheRead, +}; use itertools::izip; +use mysten_common::fatal; use once_cell::unsync::OnceCell; use std::collections::HashMap; use std::sync::Arc; use sui_types::{ - base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, TransactionDigest}, + base_types::{EpochId, ObjectRef, TransactionDigest}, error::{SuiError, SuiResult, UserInputError}, storage::{GetSharedLocks, ObjectKey}, transaction::{ @@ -126,10 +129,11 @@ impl TransactionInputLoader { &self, shared_lock_store: &impl GetSharedLocks, tx_key: &TransactionKey, + _tx_lock: &CertLockGuard, // see below for why this is needed input_object_kinds: &[InputObjectKind], epoch_id: EpochId, ) -> SuiResult { - let shared_locks_cell: OnceCell> = OnceCell::new(); + let shared_locks_cell: OnceCell>> = OnceCell::new(); let mut results = vec![None; input_object_kinds.len()]; let mut object_keys = Vec::with_capacity(input_object_kinds.len()); @@ -153,17 +157,22 @@ impl TransactionInputLoader { fetches.push((i, input)); } InputObjectKind::SharedMoveObject { id, .. } => { - let shared_locks = shared_locks_cell.get_or_try_init(|| { - Ok::, SuiError>( + let shared_locks = shared_locks_cell + .get_or_init(|| { shared_lock_store - .get_shared_locks(tx_key)? - .into_iter() - .collect(), - ) - })?; - // If we can't find the locked version, it means - // 1. either we have a bug that skips shared object version assignment - // 2. or we have some DB corruption + .get_shared_locks(tx_key) + .expect("loading shared locks should not fail") + .map(|locks| locks.into_iter().collect()) + }) + .as_ref() + .unwrap_or_else(|| { + // Important to hold the _tx_lock here - otherwise it would be possible + // for a concurrent execution of the same tx to enter this point after the + // first execution has finished and the shared locks have been deleted. 
+ fatal!("Failed to get shared locks for transaction {tx_key:?}"); + }); + + // If we find a set of locks but an object is missing, it indicates a serious inconsistency: let version = shared_locks.get(id).unwrap_or_else(|| { panic!("Shared object locks should have been set. key: {tx_key:?}, obj id: {id:?}") }); diff --git a/crates/sui-core/src/transaction_manager.rs b/crates/sui-core/src/transaction_manager.rs index 0ffd0c7a92b51..88290bb4caaaa 100644 --- a/crates/sui-core/src/transaction_manager.rs +++ b/crates/sui-core/src/transaction_manager.rs @@ -9,6 +9,7 @@ use std::{ }; use lru::LruCache; +use mysten_common::fatal; use mysten_metrics::monitored_scope; use parking_lot::RwLock; use sui_types::{ @@ -414,7 +415,7 @@ impl TransactionManager { .transaction_cache_read .is_tx_already_executed(&digest) .unwrap_or_else(|err| { - panic!("Failed to check if tx is already executed: {:?}", err) + fatal!("Failed to check if tx is already executed: {:?}", err) }) { self.metrics @@ -432,7 +433,7 @@ impl TransactionManager { let mut receiving_objects: HashSet = HashSet::new(); let certs: Vec<_> = certs .into_iter() - .map(|(cert, fx_digest)| { + .filter_map(|(cert, fx_digest)| { let input_object_kinds = cert .data() .intent_message() @@ -440,7 +441,24 @@ impl TransactionManager { .input_objects() .expect("input_objects() cannot fail"); let mut input_object_keys = - epoch_store.get_input_object_keys(&cert.key(), &input_object_kinds); + match epoch_store.get_input_object_keys(&cert.key(), &input_object_kinds) { + Ok(keys) => keys, + Err(e) => { + // Because we do not hold the transaction lock during enqueue, it is possible + // that the transaction was executed and the shared version assignments deleted + // since the earlier check. This is a rare race condition, and it is better to + // handle it ad-hoc here than to hold tx locks for every cert for the duration + // of this function in order to remove the race. 
+ if self + .transaction_cache_read + .is_tx_already_executed(cert.digest()) + .expect("is_tx_already_executed cannot fail") + { + return None; + } + fatal!("Failed to get input object keys: {:?}", e); + } + }; if input_object_kinds.len() != input_object_keys.len() { error!("Duplicated input objects: {:?}", input_object_kinds); @@ -467,7 +485,7 @@ impl TransactionManager { } } - (cert, fx_digest, input_object_keys) + Some((cert, fx_digest, input_object_keys)) }) .collect(); diff --git a/crates/sui-core/src/unit_tests/authority_tests.rs b/crates/sui-core/src/unit_tests/authority_tests.rs index 70ec3e454c5de..86651b7221293 100644 --- a/crates/sui-core/src/unit_tests/authority_tests.rs +++ b/crates/sui-core/src/unit_tests/authority_tests.rs @@ -11,7 +11,6 @@ use move_binary_format::{ }; use move_core_types::identifier::IdentStr; use move_core_types::language_storage::StructTag; -use move_core_types::parser::parse_type_tag; use move_core_types::{ account_address::AccountAddress, ident_str, identifier::Identifier, language_storage::TypeTag, }; @@ -24,6 +23,7 @@ use rand::{ use serde_json::json; use std::collections::HashSet; use std::fs; +use std::str::FromStr; use std::{convert::TryInto, env}; use sui_json_rpc_types::{ @@ -1198,9 +1198,18 @@ async fn test_handle_transfer_transaction_bad_signature() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let (_unknown_address, unknown_key): (_, AccountKeyPair) = get_key_pair(); let mut bad_signature_transfer_transaction = transfer_transaction.clone().into_inner(); @@ -3674,7 +3683,7 @@ async fn test_dynamic_field_struct_name_parsing() { assert!(matches!(fields[0].type_, DynamicFieldType::DynamicField)); assert_eq!(json!({"name_str": 
"Test Name"}), fields[0].name.value); assert_eq!( - parse_type_tag("0x0::object_basics::Name").unwrap(), + TypeTag::from_str("0x0::object_basics::Name").unwrap(), fields[0].name.type_ ) } @@ -3686,7 +3695,10 @@ async fn test_dynamic_field_bytearray_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicField)); - assert_eq!(parse_type_tag("vector").unwrap(), fields[0].name.type_); + assert_eq!( + TypeTag::from_str("vector").unwrap(), + fields[0].name.type_ + ); assert_eq!(json!("Test Name".as_bytes()), fields[0].name.value); } @@ -3697,7 +3709,7 @@ async fn test_dynamic_field_address_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicField)); - assert_eq!(parse_type_tag("address").unwrap(), fields[0].name.type_); + assert_eq!(TypeTag::from_str("address").unwrap(), fields[0].name.type_); assert_eq!(json!(sender), fields[0].name.value); } @@ -3709,7 +3721,7 @@ async fn test_dynamic_object_field_struct_name_parsing() { assert!(matches!(fields[0].type_, DynamicFieldType::DynamicObject)); assert_eq!(json!({"name_str": "Test Name"}), fields[0].name.value); assert_eq!( - parse_type_tag("0x0::object_basics::Name").unwrap(), + TypeTag::from_str("0x0::object_basics::Name").unwrap(), fields[0].name.type_ ) } @@ -3721,7 +3733,10 @@ async fn test_dynamic_object_field_bytearray_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicObject)); - assert_eq!(parse_type_tag("vector").unwrap(), fields[0].name.type_); + assert_eq!( + TypeTag::from_str("vector").unwrap(), + fields[0].name.type_ + ); assert_eq!(json!("Test Name".as_bytes()), fields[0].name.value); } @@ -3732,7 +3747,7 @@ async fn test_dynamic_object_field_address_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicObject)); - assert_eq!(parse_type_tag("address").unwrap(), fields[0].name.type_); + 
assert_eq!(TypeTag::from_str("address").unwrap(), fields[0].name.type_); assert_eq!(json!(sender), fields[0].name.value); } @@ -4742,6 +4757,7 @@ async fn test_shared_object_transaction_ok() { .epoch_store_for_testing() .get_shared_locks(&certificate.key()) .expect("Reading shared locks should not fail") + .expect("Locks should be set") .into_iter() .find_map(|(object_id, version)| { if object_id == shared_object_id { @@ -4858,6 +4874,7 @@ async fn test_consensus_commit_prologue_generation() { .epoch_store_for_testing() .get_shared_locks(txn_key) .unwrap() + .expect("locks should be set") .iter() .filter_map(|(id, seq)| { if id == &SUI_CLOCK_OBJECT_ID { @@ -5903,6 +5920,7 @@ async fn test_consensus_handler_per_object_congestion_control( } } protocol_config.set_max_deferral_rounds_for_congestion_control_for_testing(1000); // Set to a large number so that we don't hit this limit. + protocol_config.set_max_txn_cost_overage_per_object_in_commit_for_testing(0); let authority = TestAuthorityBuilder::new() .with_reference_gas_price(1000) .with_protocol_config(protocol_config) @@ -6131,6 +6149,7 @@ async fn test_consensus_handler_congestion_control_transaction_cancellation() { protocol_config .set_max_accumulated_txn_cost_per_object_in_mysticeti_commit_for_testing(100_000_000); protocol_config.set_max_deferral_rounds_for_congestion_control_for_testing(2); + protocol_config.set_max_txn_cost_overage_per_object_in_commit_for_testing(0); let authority = TestAuthorityBuilder::new() .with_reference_gas_price(1000) .with_protocol_config(protocol_config) @@ -6216,6 +6235,7 @@ async fn test_consensus_handler_congestion_control_transaction_cancellation() { .epoch_store_for_testing() .get_shared_locks(&cancelled_txn.key()) .expect("Reading shared locks should not fail") + .expect("locks should be set") .into_iter() .collect::>(); assert_eq!( @@ -6234,6 +6254,7 @@ async fn test_consensus_handler_congestion_control_transaction_cancellation() { .read_objects_for_execution( 
authority.epoch_store_for_testing().as_ref(), &cancelled_txn.key(), + &CertLockGuard::dummy_for_tests(), &cancelled_txn .data() .transaction_data() diff --git a/crates/sui-core/src/unit_tests/congestion_control_tests.rs b/crates/sui-core/src/unit_tests/congestion_control_tests.rs index 24a6defc2b407..53016c0b38ef9 100644 --- a/crates/sui-core/src/unit_tests/congestion_control_tests.rs +++ b/crates/sui-core/src/unit_tests/congestion_control_tests.rs @@ -297,13 +297,18 @@ async fn test_congestion_control_execution_cancellation() { // Initialize shared object queue so that any transaction touches shared_object_1 should result in congestion and cancellation. register_fail_point_arg("initial_congestion_tracker", move || { - Some( - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_object_1.0, 10)], - PerObjectCongestionControlMode::TotalGasBudget, - Some(1000), // Not used. + Some(SharedObjectCongestionTracker::new( + [(shared_object_1.0, 10)], + PerObjectCongestionControlMode::TotalGasBudget, + Some( + test_setup + .protocol_config + .max_accumulated_txn_cost_per_object_in_mysticeti_commit(), ), - ) + Some(1000), // Not used. + None, // Not used. + 0, // Disable overage. + )) }); // Runs a transaction that touches shared_object_1, shared_object_2 and a owned object. 
diff --git a/crates/sui-core/src/unit_tests/consensus_tests.rs b/crates/sui-core/src/unit_tests/consensus_tests.rs index 46247ae04edf1..47f4c404c85b2 100644 --- a/crates/sui-core/src/unit_tests/consensus_tests.rs +++ b/crates/sui-core/src/unit_tests/consensus_tests.rs @@ -9,18 +9,13 @@ use crate::checkpoints::CheckpointServiceNoop; use crate::consensus_handler::SequencedConsensusTransaction; use fastcrypto::traits::KeyPair; use move_core_types::{account_address::AccountAddress, ident_str}; -use narwhal_types::Transactions; -use narwhal_types::TransactionsServer; -use narwhal_types::{Empty, TransactionProto}; use rand::rngs::StdRng; use rand::SeedableRng; -use sui_network::tonic; use sui_types::crypto::{deterministic_random_account_key, AccountKeyPair}; use sui_types::gas::GasCostSummary; use sui_types::messages_checkpoint::{ CheckpointContents, CheckpointSignatureMessage, CheckpointSummary, SignedCheckpointSummary, }; -use sui_types::multiaddr::Multiaddr; use sui_types::transaction::TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS; use sui_types::utils::{make_committee_key, to_sender_signed_transaction}; use sui_types::SUI_FRAMEWORK_PACKAGE_ID; @@ -29,8 +24,6 @@ use sui_types::{ object::Object, transaction::{CallArg, CertifiedTransaction, ObjectArg, TransactionData, VerifiedTransaction}, }; -use tokio::sync::mpsc::channel; -use tokio::sync::mpsc::{Receiver, Sender}; /// Fixture: a few test gas objects. pub fn test_gas_objects() -> Vec { @@ -47,10 +40,19 @@ pub fn test_gas_objects() -> Vec { GAS_OBJECTS.with(|v| v.clone()) } -/// Fixture: a few test certificates containing a shared object. +/// Fixture: create a few test certificates containing a shared object. pub async fn test_certificates( authority: &AuthorityState, shared_object: Object, +) -> Vec { + test_certificates_with_gas_objects(authority, &test_gas_objects(), shared_object).await +} + +/// Fixture: create a few test certificates containing a shared object using specified gas objects. 
+pub async fn test_certificates_with_gas_objects( + authority: &AuthorityState, + gas_objects: &[Object], + shared_object: Object, ) -> Vec { let epoch_store = authority.load_epoch_store_one_call_per_task(); let (sender, keypair) = deterministic_random_account_key(); @@ -62,7 +64,7 @@ pub async fn test_certificates( initial_shared_version: shared_object.version(), mutable: true, }; - for gas_object in test_gas_objects() { + for gas_object in gas_objects { // Object digest may be different in genesis than originally generated. let gas_object = authority .get_object(&gas_object.id()) @@ -401,45 +403,3 @@ async fn submit_checkpoint_signature_to_consensus_adapter() { .unwrap(); waiter.await.unwrap(); } - -pub struct ConsensusMockServer { - sender: Sender, -} - -impl ConsensusMockServer { - pub fn spawn(address: Multiaddr) -> Receiver { - let (sender, receiver) = channel(1); - tokio::spawn(async move { - let config = mysten_network::config::Config::new(); - let mock = Self { sender }; - config - .server_builder() - .add_service(TransactionsServer::new(mock)) - .bind(&address) - .await - .unwrap() - .serve() - .await - }); - receiver - } -} - -#[tonic::async_trait] -impl Transactions for ConsensusMockServer { - /// Submit a Transactions - async fn submit_transaction( - &self, - request: tonic::Request, - ) -> Result, tonic::Status> { - self.sender.send(request.into_inner()).await.unwrap(); - Ok(tonic::Response::new(Empty {})) - } - /// Submit a Transactions - async fn submit_transaction_stream( - &self, - _request: tonic::Request>, - ) -> Result, tonic::Status> { - unimplemented!() - } -} diff --git a/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs b/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs index eed9dcf2af4be..4ca5c82492a5f 100644 --- a/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs +++ b/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs @@ -15,7 +15,12 @@ use sui_types::{ MOVE_STDLIB_PACKAGE_ID, 
SUI_FRAMEWORK_PACKAGE_ID, }; -use std::{collections::BTreeSet, path::PathBuf, str::FromStr, sync::Arc}; +use std::{ + collections::BTreeSet, + path::{Path, PathBuf}, + str::FromStr, + sync::Arc, +}; use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; use sui_types::error::{SuiError, UserInputError}; use sui_types::execution_config_utils::to_binary_config; @@ -48,11 +53,62 @@ macro_rules! move_call { } } +enum FileOverlay<'a> { + Remove(&'a str), + Add { + file_name: &'a str, + contents: &'a str, + }, +} + +fn build_upgrade_test_modules_with_overlay( + base_pkg: &str, + overlay: FileOverlay<'_>, +) -> (Vec, Vec>) { + // Root temp dirs under `move_upgrade` directory so that dependency paths remain correct. + let mut tmp_dir_root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + tmp_dir_root_path.extend(["src", "unit_tests", "data", "move_upgrade"]); + + let tmp_dir = tempfile::TempDir::new_in(tmp_dir_root_path).unwrap(); + let tmp_dir_path = tmp_dir.path(); + + let mut copy_options = fs_extra::dir::CopyOptions::new(); + copy_options.copy_inside = true; + copy_options.content_only = true; + let source_dir = pkg_path_of(base_pkg); + fs_extra::dir::copy(source_dir, tmp_dir_path, ©_options).unwrap(); + + match overlay { + FileOverlay::Remove(file_name) => { + let file_path = tmp_dir_path.join(format!("sources/{}", file_name)); + std::fs::remove_file(file_path).unwrap(); + } + FileOverlay::Add { + file_name, + contents, + } => { + let new_file_path = tmp_dir_path.join(format!("sources/{}", file_name)); + std::fs::write(new_file_path, contents).unwrap(); + } + } + + build_pkg_at_path(tmp_dir_path) +} + fn build_upgrade_test_modules(test_dir: &str) -> (Vec, Vec>) { + let path = pkg_path_of(test_dir); + build_pkg_at_path(&path) +} + +fn pkg_path_of(pkg_name: &str) -> PathBuf { let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - path.extend(["src", "unit_tests", "data", "move_upgrade", test_dir]); + path.extend(["src", "unit_tests", "data", 
"move_upgrade", pkg_name]); + path +} + +fn build_pkg_at_path(path: &Path) -> (Vec, Vec>) { let with_unpublished_deps = false; - let package = BuildConfig::new_for_testing().build(&path).unwrap(); + let package = BuildConfig::new_for_testing().build(path).unwrap(); ( package.get_package_digest(with_unpublished_deps).to_vec(), package.get_package_bytes(with_unpublished_deps), @@ -457,6 +513,116 @@ async fn test_upgrade_package_compatible_in_dep_only_mode() { ); } +#[tokio::test] +async fn test_upgrade_package_add_new_module_in_dep_only_mode_pre_v68() { + // Allow new modules in deps-only mode for this test. + let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| { + config.set_disallow_new_modules_in_deps_only_packages_for_testing(false); + config + }); + + let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; + let base_pkg = "dep_only_upgrade"; + assert_valid_dep_only_upgrade(&mut runner, base_pkg).await; + let (digest, modules) = build_upgrade_test_modules_with_overlay( + base_pkg, + FileOverlay::Add { + file_name: "new_module.move", + contents: "module base_addr::new_module;", + }, + ); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert!(effects.status().is_ok(), "{:#?}", effects.status()); +} + +#[tokio::test] +async fn test_upgrade_package_invalid_dep_only_upgrade_pre_v68() { + let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| { + config.set_disallow_new_modules_in_deps_only_packages_for_testing(false); + config + }); + + let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; + let base_pkg = "dep_only_upgrade"; + assert_valid_dep_only_upgrade(&mut runner, base_pkg).await; + let overlays = [ + FileOverlay::Add { + file_name: "new_friend_module.move", + contents: r#" +module base_addr::new_friend_module; +public fun friend_call(): u64 { base_addr::base::friend_fun(1) } + "#, + 
}, + FileOverlay::Remove("friend_module.move"), + ]; + for overlay in overlays { + let (digest, modules) = build_upgrade_test_modules_with_overlay(base_pkg, overlay); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert_eq!( + effects.into_status().unwrap_err().0, + ExecutionFailureStatus::PackageUpgradeError { + upgrade_error: PackageUpgradeError::IncompatibleUpgrade + }, + ); + } +} + +#[tokio::test] +async fn test_invalid_dep_only_upgrades() { + let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; + let base_pkg = "dep_only_upgrade"; + assert_valid_dep_only_upgrade(&mut runner, base_pkg).await; + let overlays = [ + FileOverlay::Add { + file_name: "new_module.move", + contents: "module base_addr::new_module;", + }, + FileOverlay::Add { + file_name: "new_friend_module.move", + contents: r#" +module base_addr::new_friend_module; +public fun friend_call(): u64 { base_addr::base::friend_fun(1) } + "#, + }, + FileOverlay::Remove("friend_module.move"), + ]; + + for overlay in overlays { + let (digest, modules) = build_upgrade_test_modules_with_overlay(base_pkg, overlay); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert_eq!( + effects.into_status().unwrap_err().0, + ExecutionFailureStatus::PackageUpgradeError { + upgrade_error: PackageUpgradeError::IncompatibleUpgrade + }, + ); + } +} + #[tokio::test] async fn test_upgrade_package_compatible_in_additive_mode() { let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; @@ -572,18 +738,7 @@ async fn test_upgrade_package_additive_dep_only_mode() { #[tokio::test] async fn test_upgrade_package_dep_only_mode() { let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; - - let (digest, modules) = build_upgrade_test_modules("dep_only_upgrade"); - let effects = 
runner - .upgrade( - UpgradePolicy::DEP_ONLY, - digest, - modules, - vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], - ) - .await; - - assert!(effects.status().is_ok(), "{:#?}", effects.status()); + assert_valid_dep_only_upgrade(&mut runner, "dep_only_upgrade").await; } #[tokio::test] @@ -1432,3 +1587,17 @@ async fn test_upgrade_more_than_max_packages_error() { } ); } + +async fn assert_valid_dep_only_upgrade(runner: &mut UpgradeStateRunner, package_name: &str) { + let (digest, modules) = build_upgrade_test_modules(package_name); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert!(effects.status().is_ok(), "{:#?}", effects.status()); +} diff --git a/crates/sui-core/src/unit_tests/server_tests.rs b/crates/sui-core/src/unit_tests/server_tests.rs index 0a2e83627b26d..fce54971bc7f3 100644 --- a/crates/sui-core/src/unit_tests/server_tests.rs +++ b/crates/sui-core/src/unit_tests/server_tests.rs @@ -19,13 +19,22 @@ async fn test_simple_request() { let authority_state = init_state_with_object_id(sender, object_id).await; // The following two fields are only needed for shared objects (not by this bench). 
- let server = AuthorityServer::new_for_test(authority_state); + let server = AuthorityServer::new_for_test(authority_state.clone()); let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let req = ObjectInfoRequest::latest_object_info_request(object_id, LayoutGenerationOption::Generate); diff --git a/crates/sui-core/src/unit_tests/transaction_tests.rs b/crates/sui-core/src/unit_tests/transaction_tests.rs index 053e494af838f..69b4eade03a6c 100644 --- a/crates/sui-core/src/unit_tests/transaction_tests.rs +++ b/crates/sui-core/src/unit_tests/transaction_tests.rs @@ -440,9 +440,18 @@ async fn do_transaction_test_impl( let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); post_sign_mutations(&mut transfer_transaction); post_sign_mutations(&mut move_call_transaction); @@ -1033,9 +1042,18 @@ async fn setup_zklogin_network( let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); ( object_ids, gas_object_ids, @@ -1326,9 +1344,18 @@ async fn execute_transaction_assert_err( let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - 
.unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let err = client .handle_transaction(txn.clone(), Some(make_socket_addr())) .await; @@ -1378,9 +1405,18 @@ async fn test_oversized_txn() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let res = client .handle_transaction(txn, Some(make_socket_addr())) @@ -1429,9 +1465,18 @@ async fn test_very_large_certificate() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let socket_addr = make_socket_addr(); let auth_sig = client @@ -1511,9 +1556,18 @@ async fn test_handle_certificate_errors() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); // Test handle certificate from the wrong epoch let epoch_store = authority_state.epoch_store_for_testing(); @@ -1681,9 +1735,12 @@ async fn test_handle_soft_bundle_certificates() { let server = AuthorityServer::new_for_test_with_consensus_adapter(authority.clone(), adapter); let _metrics = server.metrics.clone(); let server_handle = 
server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some(authority.config.network_key_pair().public().to_owned()), + ) + .await + .unwrap(); let signed_tx_into_certificate = |transaction: Transaction| async { let epoch_store = authority.load_epoch_store_one_call_per_task(); @@ -1836,9 +1893,12 @@ async fn test_handle_soft_bundle_certificates_errors() { let authority = server.state.clone(); let _metrics = server.metrics.clone(); let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some(authority.config.network_key_pair().public().to_owned()), + ) + .await + .unwrap(); let signed_tx_into_certificate = |transaction: Transaction| async { let epoch_store = authority.load_epoch_store_one_call_per_task(); diff --git a/crates/sui-data-ingestion-core/src/reader.rs b/crates/sui-data-ingestion-core/src/reader.rs index 50a948b8d85d9..54f4c8540e0e3 100644 --- a/crates/sui-data-ingestion-core/src/reader.rs +++ b/crates/sui-data-ingestion-core/src/reader.rs @@ -46,22 +46,25 @@ pub struct CheckpointReader { #[derive(Clone)] pub struct ReaderOptions { - pub tick_interal_ms: u64, + pub tick_internal_ms: u64, pub timeout_secs: u64, /// number of maximum concurrent requests to the remote store. Increase it for backfills pub batch_size: usize, pub data_limit: usize, pub upper_limit: Option, + /// Whether to delete processed checkpoint files from the local directory. 
+ pub gc_checkpoint_files: bool, } impl Default for ReaderOptions { fn default() -> Self { Self { - tick_interal_ms: 100, + tick_internal_ms: 100, timeout_secs: 5, batch_size: 10, data_limit: 0, upper_limit: None, + gc_checkpoint_files: true, } } } @@ -76,25 +79,19 @@ impl CheckpointReader { /// Represents a single iteration of the reader. /// Reads files in a local directory, validates them, and forwards `CheckpointData` to the executor. async fn read_local_files(&self) -> Result>> { - let mut files = vec![]; - for entry in fs::read_dir(self.path.clone())? { - let entry = entry?; - let filename = entry.file_name(); - if let Some(sequence_number) = Self::checkpoint_number_from_file_path(&filename) { - if sequence_number >= self.current_checkpoint_number { - files.push((sequence_number, entry.path())); - } - } - } - files.sort(); - debug!("unprocessed local files {:?}", files); let mut checkpoints = vec![]; - for (_, filename) in files.iter().take(MAX_CHECKPOINTS_IN_PROGRESS) { - let checkpoint = Blob::from_bytes::>(&fs::read(filename)?)?; - if self.exceeds_capacity(checkpoint.checkpoint_summary.sequence_number) { + for offset in 0..MAX_CHECKPOINTS_IN_PROGRESS { + let sequence_number = self.current_checkpoint_number + offset as u64; + if self.exceeds_capacity(sequence_number) { break; } - checkpoints.push(checkpoint); + match fs::read(self.path.join(format!("{}.chk", sequence_number))) { + Ok(bytes) => checkpoints.push(Blob::from_bytes::>(&bytes)?), + Err(err) => match err.kind() { + std::io::ErrorKind::NotFound => break, + _ => Err(err)?, + }, + } } Ok(checkpoints) } @@ -294,9 +291,12 @@ impl CheckpointReader { /// Cleans the local directory by removing all processed checkpoint files. 
fn gc_processed_files(&mut self, watermark: CheckpointSequenceNumber) -> Result<()> { - info!("cleaning processed files, watermark is {}", watermark); self.data_limiter.gc(watermark); self.last_pruned_watermark = watermark; + if !self.options.gc_checkpoint_files { + return Ok(()); + } + info!("cleaning processed files, watermark is {}", watermark); for entry in fs::read_dir(self.path.clone())? { let entry = entry?; let filename = entry.file_name(); @@ -384,7 +384,7 @@ impl CheckpointReader { Some(gc_checkpoint_number) = self.processed_receiver.recv() => { self.gc_processed_files(gc_checkpoint_number).expect("Failed to clean the directory"); } - Ok(Some(_)) | Err(_) = timeout(Duration::from_millis(self.options.tick_interal_ms), inotify_recv.recv()) => { + Ok(Some(_)) | Err(_) = timeout(Duration::from_millis(self.options.tick_internal_ms), inotify_recv.recv()) => { self.sync().await.expect("Failed to read checkpoint files"); } } diff --git a/crates/sui-data-ingestion-core/src/tests.rs b/crates/sui-data-ingestion-core/src/tests.rs index 4465b0c31c381..4963deeb3b51f 100644 --- a/crates/sui-data-ingestion-core/src/tests.rs +++ b/crates/sui-data-ingestion-core/src/tests.rs @@ -40,7 +40,7 @@ async fn run( duration: Option, ) -> Result { let options = ReaderOptions { - tick_interal_ms: 10, + tick_internal_ms: 10, batch_size: 1, ..Default::default() }; diff --git a/crates/sui-data-ingestion/Cargo.toml b/crates/sui-data-ingestion/Cargo.toml index 2450691f4cdd6..6a80d6c5c6b90 100644 --- a/crates/sui-data-ingestion/Cargo.toml +++ b/crates/sui-data-ingestion/Cargo.toml @@ -31,6 +31,7 @@ tracing.workspace = true sui-archival.workspace = true sui-storage.workspace = true sui-data-ingestion-core.workspace = true +sui-kvstore.workspace = true sui-types.workspace = true tempfile.workspace = true url.workspace = true diff --git a/crates/sui-data-ingestion/src/main.rs b/crates/sui-data-ingestion/src/main.rs index 0a03b9af29591..71e49acb6f154 100644 --- 
a/crates/sui-data-ingestion/src/main.rs +++ b/crates/sui-data-ingestion/src/main.rs @@ -12,6 +12,7 @@ use sui_data_ingestion::{ }; use sui_data_ingestion_core::{DataIngestionMetrics, ReaderOptions}; use sui_data_ingestion_core::{IndexerExecutor, WorkerPool}; +use sui_kvstore::{BigTableClient, KvWorker}; use tokio::signal; use tokio::sync::oneshot; @@ -21,6 +22,7 @@ enum Task { Archival(ArchivalConfig), Blob(BlobTaskConfig), KV(KVStoreTaskConfig), + BigTableKV(BigTableTaskConfig), } #[derive(Serialize, Deserialize, Clone, Debug)] @@ -40,6 +42,11 @@ struct ProgressStoreConfig { pub table_name: String, } +#[derive(Serialize, Deserialize, Clone, Debug)] +struct BigTableTaskConfig { + instance_id: String, +} + #[derive(Debug, Clone, Serialize, Deserialize)] struct IndexerConfig { path: PathBuf, @@ -146,6 +153,15 @@ async fn main() -> Result<()> { ); executor.register(worker_pool).await?; } + Task::BigTableKV(kv_config) => { + let client = BigTableClient::new_remote(kv_config.instance_id, false, None).await?; + let worker_pool = WorkerPool::new( + KvWorker { client }, + task_config.name, + task_config.concurrency, + ); + executor.register(worker_pool).await?; + } }; } let reader_options = ReaderOptions { diff --git a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql index 454dcfecb7003..b86f980c0eacb 100644 --- a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql +++ b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql @@ -3,5 +3,12 @@ DROP TABLE IF EXISTS order_updates; DROP TABLE IF EXISTS order_fills; DROP TABLE IF EXISTS flashloans; DROP TABLE IF EXISTS pool_prices; +DROP TABLE IF EXISTS balances; +DROP TABLE IF EXISTS trade_params_update; +DROP TABLE IF EXISTS stakes; +DROP TABLE IF EXISTS proposals; +DROP TABLE IF EXISTS votes; +DROP TABLE IF EXISTS rebates; DROP 
TABLE IF EXISTS sui_error_transactions; DROP TABLE IF EXISTS progress_store; +DROP TABLE IF EXISTS pools; \ No newline at end of file diff --git a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql index 04d9951f6e66a..20f7d6a5172ca 100644 --- a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql +++ b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql @@ -2,11 +2,12 @@ CREATE TABLE IF NOT EXISTS order_updates ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, status TEXT NOT NULL, pool_id TEXT NOT NULL, @@ -24,11 +25,12 @@ CREATE TABLE IF NOT EXISTS order_updates CREATE TABLE IF NOT EXISTS order_fills ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, maker_order_id TEXT NOT NULL, @@ -50,11 +52,12 @@ CREATE TABLE IF NOT EXISTS order_fills CREATE TABLE IF NOT EXISTS flashloans ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, borrow BOOLEAN NOT NULL, pool_id TEXT NOT NULL, @@ -64,11 +67,12 @@ CREATE TABLE IF NOT EXISTS flashloans CREATE TABLE IF NOT EXISTS pool_prices ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + 
checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, target_pool TEXT NOT NULL, reference_pool TEXT NOT NULL, @@ -77,11 +81,12 @@ CREATE TABLE IF NOT EXISTS pool_prices CREATE TABLE IF NOT EXISTS balances ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, balance_manager_id TEXT NOT NULL, asset TEXT NOT NULL, @@ -91,11 +96,12 @@ CREATE TABLE IF NOT EXISTS balances CREATE TABLE IF NOT EXISTS trade_params_update ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, taker_fee BIGINT NOT NULL, @@ -105,11 +111,12 @@ CREATE TABLE IF NOT EXISTS trade_params_update CREATE TABLE IF NOT EXISTS stakes ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, @@ -120,11 +127,12 @@ CREATE TABLE IF NOT EXISTS stakes CREATE TABLE IF NOT EXISTS proposals ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, @@ -136,11 +144,12 @@ CREATE TABLE IF NOT EXISTS proposals CREATE TABLE IF NOT EXISTS votes ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP 
DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, @@ -152,11 +161,12 @@ CREATE TABLE IF NOT EXISTS votes CREATE TABLE IF NOT EXISTS rebates ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, diff --git a/crates/sui-deepbook-indexer/src/models.rs b/crates/sui-deepbook-indexer/src/models.rs index d0d57671e3bce..a6e30debcef38 100644 --- a/crates/sui-deepbook-indexer/src/models.rs +++ b/crates/sui-deepbook-indexer/src/models.rs @@ -2,22 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 use diesel::data_types::PgTimestamp; -use diesel::{Identifiable, Insertable, Queryable, Selectable}; +use diesel::{Identifiable, Insertable, Queryable, QueryableByName, Selectable}; use serde::Serialize; use sui_indexer_builder::{Task, LIVE_TASK_TARGET_CHECKPOINT}; use crate::schema::{ - balances, flashloans, order_fills, order_updates, pool_prices, pools, progress_store, - proposals, rebates, stakes, sui_error_transactions, trade_params_update, votes, + balances, balances_summary, flashloans, order_fills, order_updates, pool_prices, pools, + progress_store, proposals, rebates, stakes, sui_error_transactions, trade_params_update, votes, }; #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = order_updates, primary_key(digest))] +#[diesel(table_name = order_updates, primary_key(event_digest))] pub struct OrderUpdate { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub status: String, pub pool_id: String, @@ -34,11 +36,13 @@ pub struct OrderUpdate { } #[derive(Queryable, Selectable, Insertable, 
Identifiable, Debug)] -#[diesel(table_name = order_fills, primary_key(digest))] +#[diesel(table_name = order_fills, primary_key(event_digest))] pub struct OrderFill { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub maker_order_id: String, // u128 @@ -65,12 +69,22 @@ pub struct OrderFillSummary { pub base_quantity: i64, } +#[derive(QueryableByName, Debug, Serialize)] +#[diesel(table_name = balances_summary)] +pub struct BalancesSummary { + pub asset: String, + pub amount: i64, + pub deposit: bool, +} + #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = flashloans, primary_key(digest))] +#[diesel(table_name = flashloans, primary_key(event_digest))] pub struct Flashloan { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub borrow_quantity: i64, @@ -79,11 +93,13 @@ pub struct Flashloan { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = pool_prices, primary_key(digest))] +#[diesel(table_name = pool_prices, primary_key(event_digest))] pub struct PoolPrice { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub target_pool: String, pub reference_pool: String, @@ -91,11 +107,13 @@ pub struct PoolPrice { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = balances, primary_key(digest))] +#[diesel(table_name = balances, primary_key(event_digest))] pub struct Balances { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub balance_manager_id: String, pub asset: String, @@ -104,12 +122,15 @@ pub struct Balances { } #[derive(Queryable, 
Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = proposals, primary_key(digest))] +#[diesel(table_name = proposals, primary_key(event_digest))] pub struct Proposals { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, + pub pool_id: String, pub balance_manager_id: String, pub epoch: i64, pub taker_fee: i64, @@ -118,11 +139,13 @@ pub struct Proposals { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = rebates, primary_key(digest))] +#[diesel(table_name = rebates, primary_key(event_digest))] pub struct Rebates { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub balance_manager_id: String, @@ -131,11 +154,13 @@ pub struct Rebates { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = stakes, primary_key(digest))] +#[diesel(table_name = stakes, primary_key(event_digest))] pub struct Stakes { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub balance_manager_id: String, @@ -145,11 +170,13 @@ pub struct Stakes { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = trade_params_update, primary_key(digest))] +#[diesel(table_name = trade_params_update, primary_key(event_digest))] pub struct TradeParamsUpdate { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub taker_fee: i64, @@ -158,11 +185,13 @@ pub struct TradeParamsUpdate { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = votes, primary_key(digest))] +#[diesel(table_name = votes, 
primary_key(event_digest))] pub struct Votes { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub balance_manager_id: String, diff --git a/crates/sui-deepbook-indexer/src/schema.rs b/crates/sui-deepbook-indexer/src/schema.rs index 61abd7987b533..849dd530c28f7 100644 --- a/crates/sui-deepbook-indexer/src/schema.rs +++ b/crates/sui-deepbook-indexer/src/schema.rs @@ -3,12 +3,13 @@ // @generated automatically by Diesel CLI. diesel::table! { - balances (id) { - id -> Int4, + balances (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, balance_manager_id -> Text, asset -> Text, @@ -18,12 +19,13 @@ diesel::table! { } diesel::table! { - flashloans (id) { - id -> Int4, + flashloans (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, borrow -> Bool, pool_id -> Text, @@ -33,12 +35,13 @@ diesel::table! { } diesel::table! { - order_fills (id) { - id -> Int4, + order_fills (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, maker_order_id -> Text, @@ -60,12 +63,13 @@ diesel::table! { } diesel::table! { - order_updates (id) { - id -> Int4, + order_updates (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, status -> Text, pool_id -> Text, @@ -83,12 +87,13 @@ diesel::table! { } diesel::table! 
{ - pool_prices (id) { - id -> Int4, + pool_prices (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, target_pool -> Text, reference_pool -> Text, @@ -124,12 +129,13 @@ diesel::table! { } diesel::table! { - proposals (id) { - id -> Int4, + proposals (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -141,12 +147,13 @@ diesel::table! { } diesel::table! { - rebates (id) { - id -> Int4, + rebates (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -156,12 +163,13 @@ diesel::table! { } diesel::table! { - stakes (id) { - id -> Int4, + stakes (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -184,12 +192,13 @@ diesel::table! { } diesel::table! { - trade_params_update (id) { - id -> Int4, + trade_params_update (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, taker_fee -> Int8, @@ -199,12 +208,13 @@ diesel::table! { } diesel::table! { - votes (id) { - id -> Int4, + votes (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -230,3 +240,11 @@ diesel::allow_tables_to_appear_in_same_query!( trade_params_update, votes, ); + +diesel::table! 
{ + balances_summary (asset) { + asset -> Text, + amount -> Int8, + deposit -> Bool, + } +} diff --git a/crates/sui-deepbook-indexer/src/server.rs b/crates/sui-deepbook-indexer/src/server.rs index 1f2a5a9985be6..0d2b1ff85092f 100644 --- a/crates/sui-deepbook-indexer/src/server.rs +++ b/crates/sui-deepbook-indexer/src/server.rs @@ -3,8 +3,8 @@ use crate::{ error::DeepBookError, - models::{OrderFillSummary, Pools}, - schema, + models::{BalancesSummary, OrderFillSummary, Pools}, + schema::{self}, sui_deepbook_indexer::PgDeepbookPersistent, }; use axum::{ @@ -18,14 +18,18 @@ use diesel::BoolExpressionMethods; use diesel::QueryDsl; use diesel::{ExpressionMethods, SelectableHelper}; use diesel_async::RunQueryDsl; -use std::net::SocketAddr; use std::time::{SystemTime, UNIX_EPOCH}; +use std::{collections::HashMap, net::SocketAddr}; use tokio::{net::TcpListener, task::JoinHandle}; pub const GET_POOLS_PATH: &str = "/get_pools"; -pub const GET_24HR_VOLUME_PATH: &str = "/get_24hr_volume/:pool_id"; +pub const GET_24HR_VOLUME_PATH: &str = "/get_24hr_volume/:pool_ids"; pub const GET_24HR_VOLUME_BY_BALANCE_MANAGER_ID: &str = "/get_24hr_volume_by_balance_manager_id/:pool_id/:balance_manager_id"; +pub const GET_HISTORICAL_VOLUME_PATH: &str = + "/get_historical_volume/:pool_ids/:start_time/:end_time"; +pub const GET_NET_DEPOSITS: &str = "/get_net_deposits/:asset_ids/:timestamp"; +pub const GET_MANAGER_BALANCE: &str = "/get_manager_balance/:manager_id"; pub fn run_server(socket_address: SocketAddr, state: PgDeepbookPersistent) -> JoinHandle<()> { tokio::spawn(async move { @@ -39,10 +43,13 @@ pub(crate) fn make_router(state: PgDeepbookPersistent) -> Router { .route("/", get(health_check)) .route(GET_POOLS_PATH, get(get_pools)) .route(GET_24HR_VOLUME_PATH, get(get_24hr_volume)) + .route(GET_HISTORICAL_VOLUME_PATH, get(get_historical_volume)) .route( GET_24HR_VOLUME_BY_BALANCE_MANAGER_ID, get(get_24hr_volume_by_balance_manager_id), ) + .route(GET_MANAGER_BALANCE, 
get(get_manager_balance)) + .route(GET_NET_DEPOSITS, get(get_net_deposits)) .with_state(state) } @@ -85,22 +92,61 @@ async fn get_pools( } async fn get_24hr_volume( - Path(pool_id): Path, + Path(pool_ids): Path, State(state): State, -) -> Result, DeepBookError> { +) -> Result>, DeepBookError> { let connection = &mut state.pool.get().await?; let unix_ts = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_millis() as i64; let day_ago = unix_ts - 24 * 60 * 60 * 1000; - let vols: Vec = schema::order_fills::table - .select(schema::order_fills::base_quantity) - .filter(schema::order_fills::pool_id.eq(pool_id)) + + let pool_ids_list: Vec = pool_ids.split(',').map(|s| s.to_string()).collect(); + + let results: Vec<(String, i64)> = schema::order_fills::table + .select(( + schema::order_fills::pool_id, + schema::order_fills::base_quantity, + )) + .filter(schema::order_fills::pool_id.eq_any(pool_ids_list)) .filter(schema::order_fills::onchain_timestamp.gt(day_ago)) .load(connection) .await?; - Ok(Json(vols.into_iter().map(|v| v as u64).sum())) + + let mut volume_by_pool = HashMap::new(); + for (pool_id, volume) in results { + *volume_by_pool.entry(pool_id).or_insert(0) += volume as u64; + } + + Ok(Json(volume_by_pool)) +} + +async fn get_historical_volume( + Path((pool_ids, start_time, end_time)): Path<(String, i64, i64)>, + State(state): State, +) -> Result>, DeepBookError> { + let connection = &mut state.pool.get().await?; + + let pool_ids_list: Vec = pool_ids.split(',').map(|s| s.to_string()).collect(); + + let results: Vec<(String, i64)> = schema::order_fills::table + .select(( + schema::order_fills::pool_id, + schema::order_fills::base_quantity, + )) + .filter(schema::order_fills::pool_id.eq_any(pool_ids_list)) + .filter(schema::order_fills::onchain_timestamp.between(start_time, end_time)) + .load(connection) + .await?; + + // Aggregate volume by pool + let mut volume_by_pool = HashMap::new(); + for (pool_id, volume) in results { + 
*volume_by_pool.entry(pool_id).or_insert(0) += volume as u64; + } + + Ok(Json(volume_by_pool)) } async fn get_24hr_volume_by_balance_manager_id( @@ -142,3 +188,71 @@ async fn get_24hr_volume_by_balance_manager_id( Ok(Json(vec![maker_vol, taker_vol])) } + +async fn get_manager_balance( + Path(manager_id): Path, + State(state): State, +) -> Result>, DeepBookError> { + let connection = &mut state.pool.get().await?; + + // Query to get the balance for all assets for the specified manager_id + let query = format!( + "SELECT asset, SUM(CASE WHEN deposit THEN amount ELSE -amount END)::bigint AS amount, deposit FROM balances \ + WHERE balance_manager_id = '{}' GROUP BY asset, deposit", + manager_id + ); + + let results: Vec = diesel::sql_query(query).load(connection).await?; + + // Aggregate results into a HashMap as {asset: balance} + let mut manager_balances = HashMap::new(); + for result in results { + let mut asset = result.asset; + if !asset.starts_with("0x") { + asset.insert_str(0, "0x"); + } + manager_balances.insert(asset, result.amount); + } + + Ok(Json(manager_balances)) +} + +#[debug_handler] +async fn get_net_deposits( + Path((asset_ids, timestamp)): Path<(String, String)>, + State(state): State, +) -> Result>, DeepBookError> { + let connection = &mut state.pool.get().await?; + let mut query = + "SELECT asset, SUM(amount)::bigint AS amount, deposit FROM balances WHERE checkpoint_timestamp_ms < " + .to_string(); + query.push_str(×tamp); + query.push_str("000 AND asset in ("); + for asset in asset_ids.split(",") { + if asset.starts_with("0x") { + let len = asset.len(); + query.push_str(&format!("'{}',", &asset[2..len])); + } else { + query.push_str(&format!("'{}',", asset)); + } + } + query.pop(); + query.push_str(") GROUP BY asset, deposit"); + + let results: Vec = diesel::sql_query(query).load(connection).await?; + let mut net_deposits = HashMap::new(); + for result in results { + let mut asset = result.asset; + if !asset.starts_with("0x") { + 
asset.insert_str(0, "0x"); + } + let amount = result.amount; + if result.deposit { + *net_deposits.entry(asset).or_insert(0) += amount; + } else { + *net_deposits.entry(asset).or_insert(0) -= amount; + } + } + + Ok(Json(net_deposits)) +} diff --git a/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs b/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs index 92eac3b0ad449..0e71f8040ba2a 100644 --- a/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs +++ b/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs @@ -318,19 +318,23 @@ impl DataMapper for SuiDeepBookDataMapper { match &data.events { Some(events) => { let processed_sui_events = - events.data.iter().try_fold(vec![], |mut result, ev| { - if let Some(data) = process_sui_event( - ev, - &data, - checkpoint_num, - // timestamp_ms, - self.package_id, - )? { - result.push(data); - } - Ok::<_, anyhow::Error>(result) - })?; - + events + .data + .iter() + .enumerate() + .try_fold(vec![], |mut result, (i, ev)| { + if let Some(data) = process_sui_event( + ev, + i, + &data, + checkpoint_num, + timestamp_ms, + self.package_id, + )? 
{ + result.push(data); + } + Ok::<_, anyhow::Error>(result) + })?; if !processed_sui_events.is_empty() { info!( "SUI: Extracted {} deepbook data entries for tx {}.", @@ -367,9 +371,10 @@ impl DataMapper for SuiDeepBookDataMapper { fn process_sui_event( ev: &Event, + event_index: usize, tx: &CheckpointTransaction, checkpoint: u64, - // timestamp_ms: u64, + checkpoint_timestamp_ms: u64, package_id: ObjectID, ) -> Result, anyhow::Error> { Ok(if ev.type_.address == *package_id { @@ -383,10 +388,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), sender: tx.transaction.sender_address().to_string(), + event_digest, checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Placed, pool_id: move_event.pool_id.to_string(), @@ -414,10 +423,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Modified, pool_id: move_event.pool_id.to_string(), @@ -445,10 +458,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Canceled, pool_id: move_event.pool_id.to_string(), @@ -477,10 +494,14 @@ fn process_sui_event( } else { 
"".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Expired, pool_id: move_event.pool_id.to_string(), @@ -509,10 +530,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderFill(OrderFill { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), maker_order_id: move_event.maker_order_id, @@ -544,10 +569,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Flashloan(Flashloan { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), borrow_quantity: move_event.borrow_quantity, @@ -567,10 +596,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::PoolPrice(PoolPrice { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, target_pool: move_event.target_pool.to_string(), conversion_rate: move_event.conversion_rate, @@ -589,10 +622,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut 
event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Balances(Balances { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, balance_manager_id: move_event.balance_manager_id.to_string(), asset: move_event.asset.to_string(), @@ -612,11 +649,16 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Proposals(Proposals { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, + pool_id: move_event.pool_id.to_string(), balance_manager_id: move_event.balance_manager_id.to_string(), epoch: move_event.epoch, taker_fee: move_event.taker_fee, @@ -636,10 +678,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Rebates(Rebates { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), balance_manager_id: move_event.balance_manager_id.to_string(), @@ -659,10 +705,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Stakes(Stakes { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), balance_manager_id: 
move_event.balance_manager_id.to_string(), @@ -683,6 +733,8 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let shared_objects = &tx.input_objects; let mut pool_id = "0x0".to_string(); for obj in shared_objects.iter() { @@ -697,8 +749,10 @@ fn process_sui_event( } let txn_data = Some(ProcessedTxnData::TradeParamsUpdate(TradeParamsUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id, taker_fee: move_event.taker_fee, @@ -718,10 +772,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Votes(Votes { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), balance_manager_id: move_event.balance_manager_id.to_string(), diff --git a/crates/sui-deepbook-indexer/src/types.rs b/crates/sui-deepbook-indexer/src/types.rs index 251299f0b4870..c17fbe01dc7af 100644 --- a/crates/sui-deepbook-indexer/src/types.rs +++ b/crates/sui-deepbook-indexer/src/types.rs @@ -55,9 +55,11 @@ impl Display for OrderUpdateStatus { #[derive(Clone, Debug)] pub struct OrderUpdate { - pub(crate) digest: String, + pub digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) status: OrderUpdateStatus, pub(crate) pool_id: String, @@ -77,8 +79,10 @@ impl OrderUpdate { pub(crate) fn to_db(&self) -> DBOrderUpdate { DBOrderUpdate { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: 
self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), status: self.status.clone().to_string(), pool_id: self.pool_id.clone(), @@ -99,8 +103,10 @@ impl OrderUpdate { #[derive(Clone, Debug)] pub struct OrderFill { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) maker_order_id: u128, @@ -124,8 +130,10 @@ impl OrderFill { pub(crate) fn to_db(&self) -> DBOrderFill { DBOrderFill { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), maker_order_id: BigDecimal::from(self.maker_order_id).to_string(), @@ -150,8 +158,10 @@ impl OrderFill { #[derive(Clone, Debug)] pub struct Flashloan { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) borrow: bool, pub(crate) pool_id: String, @@ -163,8 +173,10 @@ impl Flashloan { pub(crate) fn to_db(&self) -> DBFlashloan { DBFlashloan { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), borrow: self.borrow, pool_id: self.pool_id.clone(), @@ -177,8 +189,10 @@ impl Flashloan { #[derive(Clone, Debug)] pub struct PoolPrice { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) target_pool: String, pub(crate) reference_pool: String, 
@@ -189,8 +203,10 @@ impl PoolPrice { pub(crate) fn to_db(&self) -> DBPoolPrice { DBPoolPrice { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), target_pool: self.target_pool.clone(), reference_pool: self.reference_pool.clone(), @@ -201,22 +217,26 @@ impl PoolPrice { #[derive(Clone, Debug)] pub struct Balances { - pub digest: String, - pub sender: String, - pub checkpoint: u64, - pub package: String, - pub balance_manager_id: String, - pub asset: String, - pub amount: u64, - pub deposit: bool, + pub(crate) digest: String, + pub(crate) event_digest: String, + pub(crate) sender: String, + pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, + pub(crate) package: String, + pub(crate) balance_manager_id: String, + pub(crate) asset: String, + pub(crate) amount: u64, + pub(crate) deposit: bool, } impl Balances { pub(crate) fn to_db(&self) -> DBBalances { DBBalances { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), balance_manager_id: self.balance_manager_id.clone(), asset: self.asset.clone(), @@ -229,9 +249,12 @@ impl Balances { #[derive(Clone, Debug)] pub struct Proposals { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, + pub(crate) pool_id: String, pub(crate) balance_manager_id: String, pub(crate) epoch: u64, pub(crate) taker_fee: u64, @@ -243,9 +266,12 @@ impl Proposals { pub(crate) fn to_db(&self) -> DBProposals { DBProposals { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: 
self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), + pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), epoch: self.epoch as i64, taker_fee: self.taker_fee as i64, @@ -258,8 +284,10 @@ impl Proposals { #[derive(Clone, Debug)] pub struct Rebates { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) balance_manager_id: String, @@ -271,8 +299,10 @@ impl Rebates { pub(crate) fn to_db(&self) -> DBRebates { DBRebates { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), @@ -285,8 +315,10 @@ impl Rebates { #[derive(Clone, Debug)] pub struct Stakes { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) balance_manager_id: String, @@ -299,8 +331,10 @@ impl Stakes { pub(crate) fn to_db(&self) -> DBStakes { DBStakes { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), @@ -314,8 +348,10 @@ impl Stakes { #[derive(Clone, Debug)] pub struct TradeParamsUpdate { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, 
pub(crate) package: String, pub(crate) pool_id: String, pub(crate) taker_fee: u64, @@ -327,8 +363,10 @@ impl TradeParamsUpdate { pub(crate) fn to_db(&self) -> DBTradeParamsUpdate { DBTradeParamsUpdate { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), taker_fee: self.taker_fee as i64, @@ -341,8 +379,10 @@ impl TradeParamsUpdate { #[derive(Clone, Debug)] pub struct Votes { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) balance_manager_id: String, @@ -356,8 +396,10 @@ impl Votes { pub(crate) fn to_db(&self) -> DBVotes { DBVotes { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), diff --git a/crates/sui-e2e-tests/Cargo.toml b/crates/sui-e2e-tests/Cargo.toml index 9d4370817302d..7cfa4e5d1353b 100644 --- a/crates/sui-e2e-tests/Cargo.toml +++ b/crates/sui-e2e-tests/Cargo.toml @@ -31,6 +31,8 @@ anyhow.workspace = true async-trait.workspace = true clap.workspace = true serde_json.workspace = true +reqwest.workspace = true +prost.workspace = true move-binary-format.workspace = true move-package.workspace = true @@ -39,7 +41,6 @@ fastcrypto.workspace = true fastcrypto-zkp.workspace = true move-core-types.workspace = true -sui-bridge.workspace = true sui-core.workspace = true sui-framework.workspace = true sui-json-rpc.workspace = true diff --git a/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs 
b/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs index 505f240d82edc..30016d0d3a604 100644 --- a/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs +++ b/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs @@ -1,6 +1,6 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use fastcrypto::{hash::HashFunction, traits::ToFromBytes}; +use fastcrypto::traits::ToFromBytes; use p256::pkcs8::DecodePublicKey; use passkey_authenticator::{Authenticator, UserValidationMethod}; use passkey_client::Client; @@ -15,11 +15,12 @@ use passkey_types::{ }, Bytes, Passkey, }; -use shared_crypto::intent::{Intent, IntentMessage, INTENT_PREFIX_LENGTH}; +use shared_crypto::intent::{Intent, IntentMessage}; use std::net::SocketAddr; use sui_core::authority_client::AuthorityAPI; use sui_macros::sim_test; use sui_test_transaction_builder::TestTransactionBuilder; +use sui_types::crypto::Signature; use sui_types::error::UserInputError; use sui_types::error::{SuiError, SuiResult}; use sui_types::signature::GenericSignature; @@ -30,10 +31,6 @@ use sui_types::{ passkey_authenticator::{to_signing_message, PasskeyAuthenticator}, transaction::TransactionData, }; -use sui_types::{ - crypto::{DefaultHash, Signature}, - passkey_authenticator::to_signing_digest, -}; use test_cluster::TestCluster; use test_cluster::TestClusterBuilder; use url::Url; @@ -159,23 +156,22 @@ async fn create_credential_and_sign_test_tx( // Compute the challenge = blake2b_hash(intent_msg(tx)) for passkey credential request. // If change_intent, mangle the intent bytes. If change_tx, mangle the hashed tx bytes. 
- let mut extended = [0; INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE]; - let passkey_digest = if change_intent { - extended[..INTENT_PREFIX_LENGTH].copy_from_slice(&Intent::personal_message().to_bytes()); - extended[INTENT_PREFIX_LENGTH..].copy_from_slice(&to_signing_digest(&intent_msg)); - extended + let passkey_challenge = if change_intent { + to_signing_message(&IntentMessage::new( + Intent::personal_message(), + intent_msg.value.clone(), + )) + .to_vec() } else if change_tx { - extended[..INTENT_PREFIX_LENGTH].copy_from_slice(&intent_msg.intent.to_bytes()); - extended[INTENT_PREFIX_LENGTH..].copy_from_slice(&random_vec(32)); - extended + random_vec(32) } else { - to_signing_message(&intent_msg) + to_signing_message(&intent_msg).to_vec() }; // Request a signature from passkey with challenge set to passkey_digest. let credential_request = CredentialRequestOptions { public_key: PublicKeyCredentialRequestOptions { - challenge: Bytes::from(passkey_digest.to_vec()), + challenge: Bytes::from(passkey_challenge), timeout: None, rp_id: Some(String::from(origin.domain().unwrap())), allow_credentials: None, diff --git a/crates/sui-e2e-tests/tests/rest/checkpoints.rs b/crates/sui-e2e-tests/tests/rest/checkpoints.rs new file mode 100644 index 0000000000000..d72ac52afdf61 --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/checkpoints.rs @@ -0,0 +1,182 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_rest_api::client::Client as CoreClient; +use sui_rest_api::{CheckpointResponse, ListCheckpointsQueryParameters}; +use sui_sdk_types::types::SignedCheckpointSummary; +use test_cluster::TestClusterBuilder; + +use crate::transfer_coin; + +#[sim_test] +async fn list_checkpoint() { + let test_cluster = TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + let core_client = CoreClient::new(test_cluster.rpc_url()); + + let checkpoints = client + .list_checkpoints(&ListCheckpointsQueryParameters::default()) + .await + .unwrap() + .into_inner(); + + assert!(!checkpoints.is_empty()); + + let _latest = client.get_latest_checkpoint().await.unwrap().into_inner(); + + let _latest = core_client.get_latest_checkpoint().await.unwrap(); + + let client = reqwest::Client::new(); + let url = format!("{}/v2/checkpoints", test_cluster.rpc_url()); + // Make sure list works with json + let _checkpoints = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap(); + + // Make sure list works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = sui_rest_api::proto::ListCheckpointResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + // Make sure list works with BCS and the old format of only a SignedCheckpoint with no contents + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let 
_checkpoints = bcs::from_bytes::>(&bytes).unwrap(); +} + +#[sim_test] +async fn get_checkpoint() { + let test_cluster = TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let latest = client.get_latest_checkpoint().await.unwrap().into_inner(); + let _ = client + .get_checkpoint(latest.checkpoint.sequence_number) + .await + .unwrap(); + + let client = reqwest::Client::new(); + let url = format!( + "{}/v2/checkpoints/{}", + test_cluster.rpc_url(), + latest.checkpoint.sequence_number + ); + // Make sure list works with json + let _checkpoints = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = sui_rest_api::proto::GetCheckpointResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = bcs::from_bytes::(&bytes).unwrap(); +} + +#[sim_test] +async fn get_full_checkpoint() { + let test_cluster = TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + let core_client = CoreClient::new(test_cluster.rpc_url()); + + let latest = client.get_latest_checkpoint().await.unwrap().into_inner(); + let _ = client + .get_full_checkpoint(latest.checkpoint.sequence_number) + .await + .unwrap(); + let _ = core_client + 
.get_full_checkpoint(latest.checkpoint.sequence_number) + .await + .unwrap(); + + let client = reqwest::Client::new(); + let url = format!( + "{}/v2/checkpoints/{}/full", + test_cluster.rpc_url(), + latest.checkpoint.sequence_number + ); + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = sui_rest_api::proto::FullCheckpoint::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = + bcs::from_bytes::(&bytes).unwrap(); +} diff --git a/crates/sui-e2e-tests/tests/rest/committee.rs b/crates/sui-e2e-tests/tests/rest/committee.rs new file mode 100644 index 0000000000000..6e4b2227483fe --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/committee.rs @@ -0,0 +1,65 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_sdk_types::types::ValidatorCommittee; +use test_cluster::TestClusterBuilder; + +#[sim_test] +async fn get_committee() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let _committee = client.get_committee(0).await.unwrap(); + let _committee = client.get_current_committee().await.unwrap(); + + async fn raw_request(url: &str) { + let client = reqwest::Client::new(); + + // Make sure list works with json + let _object = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = sui_rest_api::proto::ValidatorCommittee::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = bcs::from_bytes::(&bytes).unwrap(); + } + + let url = format!("{}/v2/system/committee", test_cluster.rpc_url(),); + + raw_request(&url).await; + + let url = format!("{}/v2/system/committee/0", test_cluster.rpc_url()); + raw_request(&url).await; +} diff --git a/crates/sui-e2e-tests/tests/rest.rs b/crates/sui-e2e-tests/tests/rest/execute.rs similarity index 100% rename from crates/sui-e2e-tests/tests/rest.rs rename to crates/sui-e2e-tests/tests/rest/execute.rs diff --git a/crates/sui-e2e-tests/tests/rest/main.rs b/crates/sui-e2e-tests/tests/rest/main.rs new file mode 100644 index 0000000000000..da5c338f13c82 --- 
/dev/null +++ b/crates/sui-e2e-tests/tests/rest/main.rs @@ -0,0 +1,27 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +mod checkpoints; +mod committee; +mod execute; +mod objects; +mod resolve; +mod transactions; + +async fn transfer_coin( + context: &sui_sdk::wallet_context::WalletContext, +) -> sui_sdk_types::types::TransactionDigest { + let gas_price = context.get_reference_gas_price().await.unwrap(); + let accounts_and_objs = context.get_all_accounts_and_gas_objects().await.unwrap(); + let sender = accounts_and_objs[0].0; + let receiver = accounts_and_objs[1].0; + let gas_object = accounts_and_objs[0].1[0]; + let object_to_send = accounts_and_objs[0].1[1]; + let txn = context.sign_transaction( + &sui_test_transaction_builder::TestTransactionBuilder::new(sender, gas_object, gas_price) + .transfer(object_to_send, receiver) + .build(), + ); + let resp = context.execute_transaction_must_succeed(txn).await; + resp.digest.into() +} diff --git a/crates/sui-e2e-tests/tests/rest/objects.rs b/crates/sui-e2e-tests/tests/rest/objects.rs new file mode 100644 index 0000000000000..afbcddd1f0fae --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/objects.rs @@ -0,0 +1,79 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_rest_api::client::Client as CoreClient; +use sui_rest_api::ObjectResponse; +use sui_sdk_types::types::Object; +use test_cluster::TestClusterBuilder; + +#[sim_test] +async fn get_object() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + let core_client = CoreClient::new(test_cluster.rpc_url()); + + let _object = client.get_object("0x5".parse().unwrap()).await.unwrap(); + let _object = core_client + .get_object("0x5".parse().unwrap()) + .await + .unwrap(); + + let _object = client + .get_object_with_version("0x5".parse().unwrap(), 1) + .await + .unwrap(); + let _object = core_client + .get_object_with_version("0x5".parse().unwrap(), 1.into()) + .await + .unwrap(); + + async fn raw_request(url: &str) { + let client = reqwest::Client::new(); + + // Make sure list works with json + let _object = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = sui_rest_api::proto::GetObjectResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = bcs::from_bytes::(&bytes).unwrap(); + } + + let url = format!("{}/v2/objects/0x5", test_cluster.rpc_url()); + raw_request(&url).await; + + let url = format!("{}/v2/objects/0x5/version/1", test_cluster.rpc_url()); + raw_request(&url).await; 
+} diff --git a/crates/sui-e2e-tests/tests/rest/resolve.rs b/crates/sui-e2e-tests/tests/rest/resolve.rs new file mode 100644 index 0000000000000..482b022e52a6e --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/resolve.rs @@ -0,0 +1,437 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use shared_crypto::intent::Intent; +use sui_keys::keystore::AccountKeystore; +use sui_macros::sim_test; +use sui_rest_api::client::reqwest::StatusCode; +use sui_rest_api::transactions::ResolveTransactionQueryParameters; +use sui_rest_api::Client; +use sui_rest_api::ExecuteTransactionQueryParameters; +use sui_sdk_types::types::Argument; +use sui_sdk_types::types::Command; +use sui_sdk_types::types::TransactionExpiration; +use sui_sdk_types::types::UnresolvedGasPayment; +use sui_sdk_types::types::UnresolvedInputArgument; +use sui_sdk_types::types::UnresolvedProgrammableTransaction; +use sui_sdk_types::types::UnresolvedTransaction; +use sui_sdk_types::types::UnresolvedValue; +use sui_types::base_types::SuiAddress; +use sui_types::effects::TransactionEffectsAPI; +use test_cluster::TestClusterBuilder; + +#[sim_test] +async fn resolve_transaction_simple_transfer() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + let recipient = SuiAddress::random_for_testing_only(); + + let (sender, mut gas) = test_cluster.wallet.get_one_account().await.unwrap(); + gas.sort_by_key(|object_ref| object_ref.0); + let obj_to_send = gas.first().unwrap().0; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![ + UnresolvedInputArgument { + object_id: Some(obj_to_send.into()), + ..Default::default() + }, + UnresolvedInputArgument { + value: Some(UnresolvedValue::String(recipient.to_string())), + ..Default::default() + }, + ], + commands: vec![Command::TransferObjects( + sui_sdk_types::types::TransferObjects { + objects: vec![Argument::Input(0)], + 
address: Argument::Input(1), + }, + )], + }, + sender: sender.into(), + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = test_cluster + .wallet + .sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} + +#[sim_test] +async fn resolve_transaction_transfer_with_sponsor() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + let recipient = SuiAddress::random_for_testing_only(); + + let (sender, gas) = test_cluster.wallet.get_one_account().await.unwrap(); + let obj_to_send = gas.first().unwrap().0; + let sponsor = test_cluster.wallet.get_addresses()[1]; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![ + UnresolvedInputArgument { + object_id: Some(obj_to_send.into()), + ..Default::default() + }, + UnresolvedInputArgument { + value: Some(UnresolvedValue::String(recipient.to_string())), + ..Default::default() + }, + ], + commands: vec![Command::TransferObjects( + sui_sdk_types::types::TransferObjects { + objects: vec![Argument::Input(0)], + address: Argument::Input(1), + }, + )], + }, + sender: sender.into(), + gas_payment: Some(UnresolvedGasPayment { + objects: vec![], + owner: sponsor.into(), + price: None, + budget: None, + }), + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, 
+ &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let transaction_data = resolved.transaction.clone().try_into().unwrap(); + let sender_sig = test_cluster + .wallet + .config + .keystore + .sign_secure(&sender, &transaction_data, Intent::sui_transaction()) + .unwrap(); + let sponsor_sig = test_cluster + .wallet + .config + .keystore + .sign_secure(&sponsor, &transaction_data, Intent::sui_transaction()) + .unwrap(); + + let signed_transaction = sui_types::transaction::Transaction::from_data( + transaction_data, + vec![sender_sig, sponsor_sig], + ); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} + +#[sim_test] +async fn resolve_transaction_borrowed_shared_object() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + + let sender = test_cluster.wallet.get_addresses()[0]; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![UnresolvedInputArgument { + object_id: Some("0x6".parse().unwrap()), + ..Default::default() + }], + commands: vec![Command::MoveCall(sui_sdk_types::types::MoveCall { + package: "0x2".parse().unwrap(), + module: "clock".parse().unwrap(), + function: "timestamp_ms".parse().unwrap(), + type_arguments: vec![], + arguments: vec![Argument::Input(0)], + })], + }, + sender: sender.into(), + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = 
test_cluster + .wallet + .sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); +} + +#[sim_test] +async fn resolve_transaction_mutable_shared_object() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + + let (sender, mut gas) = test_cluster.wallet.get_one_account().await.unwrap(); + gas.sort_by_key(|object_ref| object_ref.0); + let obj_to_stake = gas.first().unwrap().0; + let validator_address = client + .inner() + .get_system_state_summary() + .await + .unwrap() + .inner() + .active_validators + .first() + .unwrap() + .address; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![ + UnresolvedInputArgument { + object_id: Some("0x5".parse().unwrap()), + ..Default::default() + }, + UnresolvedInputArgument { + object_id: Some(obj_to_stake.into()), + ..Default::default() + }, + UnresolvedInputArgument { + value: Some(UnresolvedValue::String(validator_address.to_string())), + ..Default::default() + }, + ], + commands: vec![Command::MoveCall(sui_sdk_types::types::MoveCall { + package: "0x3".parse().unwrap(), + module: "sui_system".parse().unwrap(), + function: "request_add_stake".parse().unwrap(), + type_arguments: vec![], + arguments: vec![Argument::Input(0), Argument::Input(1), Argument::Input(2)], + })], + }, + sender: sender.into(), + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = test_cluster + .wallet + 
.sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} + +#[sim_test] +async fn resolve_transaction_insufficient_gas() { + let test_cluster = TestClusterBuilder::new().build().await; + let client = Client::new(test_cluster.rpc_url()); + + // Test the case where we don't have enough coins/gas for the required budget + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![UnresolvedInputArgument { + object_id: Some("0x6".parse().unwrap()), + ..Default::default() + }], + commands: vec![Command::MoveCall(sui_sdk_types::types::MoveCall { + package: "0x2".parse().unwrap(), + module: "clock".parse().unwrap(), + function: "timestamp_ms".parse().unwrap(), + type_arguments: vec![], + arguments: vec![Argument::Input(0)], + })], + }, + sender: SuiAddress::random_for_testing_only().into(), // random account with no gas + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let error = client + .inner() + .resolve_transaction(&unresolved_transaction) + .await + .unwrap_err(); + + assert_eq!(error.status(), Some(StatusCode::BAD_REQUEST)); + assert_contains( + error.message().unwrap_or_default(), + "unable to select sufficient gas", + ); +} + +fn assert_contains(haystack: &str, needle: &str) { + if !haystack.contains(needle) { + panic!("{haystack:?} does not contain {needle:?}"); + } +} + +#[sim_test] +async fn resolve_transaction_with_raw_json() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + let recipient = SuiAddress::random_for_testing_only(); + + let (sender, mut gas) = test_cluster.wallet.get_one_account().await.unwrap(); + 
gas.sort_by_key(|object_ref| object_ref.0); + let obj_to_send = gas.first().unwrap().0; + + let unresolved_transaction = serde_json::json!({ + "inputs": [ + { + "object_id": obj_to_send + }, + { + "value": 1 + }, + { + "value": recipient + } + ], + + "commands": [ + { + "command": "split_coins", + "coin": { "input": 0 }, + "amounts": [ + { + "input": 1, + }, + { + "input": 1, + } + ] + }, + { + "command": "transfer_objects", + "objects": [ + { "result": [0, 1] }, + { "result": [0, 0] } + ], + "address": { "input": 2 } + } + ], + + "sender": sender + }); + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &serde_json::from_value(unresolved_transaction).unwrap(), + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = test_cluster + .wallet + .sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok(), "{:?}", effects.status()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} diff --git a/crates/sui-e2e-tests/tests/rest/transactions.rs b/crates/sui-e2e-tests/tests/rest/transactions.rs new file mode 100644 index 0000000000000..13b2d9925ff73 --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/transactions.rs @@ -0,0 +1,118 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_rest_api::transactions::{ListTransactionsQueryParameters, TransactionResponse}; +use test_cluster::TestClusterBuilder; + +use crate::transfer_coin; + +#[sim_test] +async fn get_transaction() { + let test_cluster = TestClusterBuilder::new().build().await; + + let transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let _transaction = client.get_transaction(&transaction_digest).await.unwrap(); + + let client = reqwest::Client::new(); + let url = format!( + "{}/v2/transactions/{}", + test_cluster.rpc_url(), + transaction_digest, + ); + // Make sure it works with json + let _transaction = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transaction = sui_rest_api::proto::GetTransactionResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transaction = bcs::from_bytes::(&bytes).unwrap(); +} + +#[sim_test] +async fn list_checkpoint() { + let test_cluster = TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let transactions = client + .list_transactions(&ListTransactionsQueryParameters::default()) + .await + .unwrap() + .into_inner(); + + 
assert!(!transactions.is_empty()); + + let client = reqwest::Client::new(); + let url = format!("{}/v2/transactions", test_cluster.rpc_url()); + // Make sure it works with json + let _transactions = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transactions = sui_rest_api::proto::ListTransactionsResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transactions = bcs::from_bytes::>(&bytes).unwrap(); +} diff --git a/crates/sui-e2e-tests/tests/traffic_control_tests.rs b/crates/sui-e2e-tests/tests/traffic_control_tests.rs index 46d1ab050245d..27985b8b1d589 100644 --- a/crates/sui-e2e-tests/tests/traffic_control_tests.rs +++ b/crates/sui-e2e-tests/tests/traffic_control_tests.rs @@ -608,6 +608,63 @@ async fn test_fullnode_traffic_control_error_blocked() -> Result<(), anyhow::Err panic!("Expected spam policy to trigger within {txn_count} requests"); } +#[tokio::test] +async fn test_fullnode_traffic_control_error_blocked() -> Result<(), anyhow::Error> { + let txn_count = 5; + let policy_config = PolicyConfig { + connection_blocklist_ttl_sec: 3, + error_policy_type: PolicyType::TestNConnIP(txn_count - 1), + dry_run: false, + ..Default::default() + }; + let test_cluster = TestClusterBuilder::new() + .with_fullnode_policy_config(Some(policy_config)) + .build() + .await; + + let jsonrpc_client = &test_cluster.fullnode_handle.rpc_client; + let context = test_cluster.wallet; + + let mut txns = 
batch_make_transfer_transactions(&context, txn_count as usize).await; + assert!( + txns.len() >= txn_count as usize, + "Expect at least {} txns. Do we generate enough gas objects during genesis?", + txn_count, + ); + + // it should take no more than 4 requests to be added to the blocklist + for _ in 0..txn_count { + let txn = txns.swap_remove(0); + let tx_digest = txn.digest(); + let (tx_bytes, _signatures) = txn.to_tx_bytes_and_signatures(); + // create invalid (empty) client signature + let signatures: Vec = vec![]; + let params = rpc_params![ + tx_bytes, + signatures, + SuiTransactionBlockResponseOptions::new(), + ExecuteTransactionRequestType::WaitForLocalExecution + ]; + let response: RpcResult = jsonrpc_client + .request("sui_executeTransactionBlock", params.clone()) + .await; + if let Err(err) = response { + if err.to_string().contains("Too many requests") { + return Ok(()); + } + } else { + let SuiTransactionBlockResponse { + digest, + confirmed_local_execution, + .. + } = response.unwrap(); + assert_eq!(&digest, tx_digest); + assert!(confirmed_local_execution.unwrap()); + } + } + panic!("Expected spam policy to trigger within {txn_count} requests"); +} + #[tokio::test] async fn test_validator_traffic_control_error_delegated() -> Result<(), anyhow::Error> { let n = 5; diff --git a/crates/sui-faucet/Cargo.toml b/crates/sui-faucet/Cargo.toml index fee9e1d93af61..fea887f1371cc 100644 --- a/crates/sui-faucet/Cargo.toml +++ b/crates/sui-faucet/Cargo.toml @@ -10,6 +10,7 @@ publish = false anyhow.workspace = true async-trait.workspace = true axum.workspace = true +bin-version.workspace = true clap.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["full"] } diff --git a/crates/sui-faucet/src/main.rs b/crates/sui-faucet/src/main.rs index 89d49cf34c6a9..210b5ddb07d1f 100644 --- a/crates/sui-faucet/src/main.rs +++ b/crates/sui-faucet/src/main.rs @@ -12,6 +12,9 @@ use tracing::info; const CONCURRENCY_LIMIT: usize = 30; const 
PROM_PORT_ADDR: &str = "0.0.0.0:9184"; +// Define the `GIT_REVISION` and `VERSION` consts +bin_version::bin_version!(); + #[tokio::main] async fn main() -> Result<(), anyhow::Error> { // initialize tracing @@ -38,6 +41,10 @@ async fn main() -> Result<(), anyhow::Error> { info!("Starting Prometheus HTTP endpoint at {}", prom_binding); let registry_service = mysten_metrics::start_prometheus_server(prom_binding); let prometheus_registry = registry_service.default_registry(); + prometheus_registry + .register(mysten_metrics::uptime_metric("faucet", VERSION, "unknown")) + .unwrap(); + let app_state = Arc::new(AppState { faucet: SimpleFaucet::new( context, diff --git a/crates/sui-faucet/src/metrics.rs b/crates/sui-faucet/src/metrics.rs index 9571701835e20..b28902a0854a7 100644 --- a/crates/sui-faucet/src/metrics.rs +++ b/crates/sui-faucet/src/metrics.rs @@ -135,47 +135,83 @@ impl FaucetMetrics { impl MetricsCallbackProvider for RequestMetrics { fn on_request(&self, path: String) { + let normalized_path = normalize_path(&path); + if !is_path_tracked(normalized_path) { + return; + } + self.total_requests_received - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } fn on_response(&self, path: String, latency: Duration, _status: u16, grpc_status_code: Code) { + let normalized_path = normalize_path(&path); + if !is_path_tracked(normalized_path) { + return; + } + self.process_latency - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .observe(latency.as_secs_f64()); match grpc_status_code { Code::Ok => { self.total_requests_succeeded - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } Code::Unavailable | Code::ResourceExhausted => { self.total_requests_shed - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } _ => { self.total_requests_failed - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } } } fn 
on_start(&self, path: &str) { + let normalized_path = normalize_path(path); + if !is_path_tracked(normalized_path) { + return; + } + self.current_requests_in_flight - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); } fn on_drop(&self, path: &str) { + let normalized_path = normalize_path(path); + if !is_path_tracked(normalized_path) { + return; + } + self.total_requests_disconnected - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); self.current_requests_in_flight - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .dec(); } } + +/// Normalizes the given path to handle variations across different deployments. +/// Specifically, it trims dynamic segments from the `/v1/status/` endpoint. +pub fn normalize_path(path: &str) -> &str { + if path.starts_with("/v1/status/") { + return "/v1/status"; + } + + path +} + +/// Determines whether the given path should be tracked for metrics collection. +/// Only specified paths relevant to monitoring are included. 
+pub fn is_path_tracked(path: &str) -> bool { + matches!(path, "/v1/gas" | "/gas" | "/v1/status") +} diff --git a/crates/sui-faucet/src/metrics_layer.rs b/crates/sui-faucet/src/metrics_layer.rs index 7e7dfe3569af9..0b8106603d359 100644 --- a/crates/sui-faucet/src/metrics_layer.rs +++ b/crates/sui-faucet/src/metrics_layer.rs @@ -13,7 +13,7 @@ use prometheus::{HistogramTimer, Registry}; use tower::{load_shed::error::Overloaded, BoxError, Layer, Service, ServiceExt}; use tracing::{error, info, warn}; -use crate::metrics::RequestMetrics; +use crate::metrics::{is_path_tracked, normalize_path, RequestMetrics}; use http::Request; /// Tower Layer for tracking metrics in Prometheus related to number, success-rate and latency of @@ -81,16 +81,19 @@ where let future = Box::pin(async move { let resp = inner.oneshot(req).await; - match &resp { - Ok(resp) if !resp.status().is_success() => { - metrics.failed(None, Some(resp.status())) - } - Ok(_) => metrics.succeeded(), - Err(err) => { - if err.is::() { - metrics.shed(); - } else { - metrics.failed(Some(err), None); + + if let Some(metrics) = metrics { + match &resp { + Ok(resp) if !resp.status().is_success() => { + metrics.failed(None, Some(resp.status())) + } + Ok(_) => metrics.succeeded(), + Err(err) => { + if err.is::() { + metrics.shed(); + } else { + metrics.failed(Some(err), None); + } } } } @@ -110,25 +113,31 @@ impl Future for RequestMetricsFuture { } impl MetricsGuard { - fn new(metrics: Arc, path: &str) -> Self { + fn new(metrics: Arc, path: &str) -> Option { + let normalized_path = normalize_path(path); + + if !is_path_tracked(normalized_path) { + return None; + } + metrics .total_requests_received - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); metrics .current_requests_in_flight - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); - MetricsGuard { + Some(MetricsGuard { timer: Some( metrics .process_latency - .with_label_values(&[path]) + 
.with_label_values(&[normalized_path]) .start_timer(), ), metrics, - path: path.to_string(), - } + path: normalized_path.to_string(), + }) } fn succeeded(mut self) { @@ -183,22 +192,28 @@ impl MetricsGuard { impl Drop for MetricsGuard { fn drop(&mut self) { - self.metrics + if self + .metrics .current_requests_in_flight - .with_label_values(&[&self.path]) - .dec(); - - // Request was still in flight when the guard was dropped, implying the client disconnected. - if let Some(timer) = self.timer.take() { - let elapsed = timer.stop_and_record(); + .get_metric_with_label_values(&[&self.path]) + .is_ok() + { self.metrics - .total_requests_disconnected + .current_requests_in_flight .with_label_values(&[&self.path]) - .inc(); - info!( - "Request disconnected for path {} in {:.2}s", - self.path, elapsed - ); + .dec(); + + if let Some(timer) = self.timer.take() { + let elapsed = timer.stop_and_record(); + self.metrics + .total_requests_disconnected + .with_label_values(&[&self.path]) + .inc(); + info!( + "Request disconnected for path {} in {:.2}s", + self.path, elapsed + ); + } } } } diff --git a/crates/sui-field-count-derive/Cargo.toml b/crates/sui-field-count-derive/Cargo.toml new file mode 100644 index 0000000000000..40f188937500f --- /dev/null +++ b/crates/sui-field-count-derive/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "sui-field-count-derive" +version.workspace = true +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[lib] +proc-macro = true + +[dependencies] +syn.workspace = true +quote.workspace = true diff --git a/crates/sui-field-count-derive/src/lib.rs b/crates/sui-field-count-derive/src/lib.rs new file mode 100644 index 0000000000000..a34c2ef1d3ff6 --- /dev/null +++ b/crates/sui-field-count-derive/src/lib.rs @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(FieldCount)] +pub fn field_count_derive(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let name = input.ident; + let generics = input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let fields_count = if let syn::Data::Struct(data_struct) = input.data { + data_struct.fields.len() + } else { + panic!("FieldCount can only be derived for structs"); + }; + + let expanded = quote! { + impl #impl_generics FieldCount for #name #ty_generics #where_clause { + fn field_count() -> usize { + #fields_count + } + } + }; + + TokenStream::from(expanded) +} diff --git a/crates/sui-field-count-main/Cargo.toml b/crates/sui-field-count-main/Cargo.toml new file mode 100644 index 0000000000000..df609bcd40b0a --- /dev/null +++ b/crates/sui-field-count-main/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "sui-field-count-main" +version.workspace = true +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" diff --git a/crates/sui-field-count-main/src/lib.rs b/crates/sui-field-count-main/src/lib.rs new file mode 100644 index 0000000000000..6476f99af9085 --- /dev/null +++ b/crates/sui-field-count-main/src/lib.rs @@ -0,0 +1,6 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +pub trait FieldCount { + fn field_count() -> usize; +} diff --git a/crates/sui-field-count/Cargo.toml b/crates/sui-field-count/Cargo.toml new file mode 100644 index 0000000000000..5006aa3afb2d9 --- /dev/null +++ b/crates/sui-field-count/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "sui-field-count" +version.workspace = true +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[dependencies] +sui-field-count-derive.workspace = true +sui-field-count-main.workspace = true diff --git a/crates/sui-field-count/src/lib.rs b/crates/sui-field-count/src/lib.rs new file mode 100644 index 0000000000000..9a71ea4aa83c4 --- /dev/null +++ b/crates/sui-field-count/src/lib.rs @@ -0,0 +1,5 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub use sui_field_count_derive::*; +pub use sui_field_count_main::*; diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000001 b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000001 new file mode 100644 index 0000000000000000000000000000000000000000..b9845e76190cee1ca53a19f6efe841a5a9196dda GIT binary patch literal 14381 zcmd5?Ym6M(Rld)9+*?)MHQn>D*Sp)}@z_Z=@p}BCvr3e;*LH00CPXCKNU&1Vo+;Z6 zGt;x(J$@__Xh8v@ED|Jw4N?f~iY$mIFT@H$6p2NE1V7OViOoMiicl00@hiVD-?{ae zp7xB#_9}u>_pMvE&VAIq=Y01&r;4$A9(?MB*AD!^dJytB6H<6O@cmjG9B4%!QYht& zwoyrwq> zz4cM*(#~+yUH-++XwY5G?x#b}(doaavR8vg5<*GuF@0#pf9j>+FL=v)o1ZgZ6gv6? 
z@mUeSp#;NY`t;+BNlLho41+?VB`!UwFfjq+Qb@^h=3FWK6pJqOZe=T<=wrAijyIPF zS$C9mt`7QZok4fh>#uiLr?(g0?A++CZf0hsccZtGbv93*jIUA?osGPW_*ynX-9pUo z4LdJogMPHOxjO1?tlp*y+O@FPT&Vh;;wU9$fMs+SG7t>2ghK|h%BkwEF3Km^Wz{*o z-xsc`pb0)^AoTQ;k+z(KBW2X_vYqmL(DlcUSZ&q*NE*3=#DO3Zx@B24i1|oZp%(O_ zua6i>gMOcWKk`}4FSPNYvMY>H_`(mu7UpTKyFBRc zZFv6`l(n(uFzHpbZuG4e{r}{z^Wbx`<$YC}=uP=~jVIS+-VCOVlq-7l$fGq!mq=|0 z3u2fYq;*Sa&5(QoIKVSRl!hWkDiccmq=q#1sa8;@yQ#~2DQbnrnvh#=3}y%oc$5He zaFPTt!bh$+N$%q&6>!|8vy|>Wm2g*wYH;c9Q3J-wKuY4sE1-ma_a1;!g601DsM}i~#w*!yH0a;%WNRCv+uEi6)z$83l&uH7^_A>ar+?LhU^W;9 zuuCW4t!C@jM%ROl{zm5ta4Ou`9A0-hZ;-9^Z)E-;yO9lsSvc&iUt7&a{dG0G+1;>| zbaD~zMz%ca4|dUrvX}#ZO0z677(!fLNz`#0AX#}8B+~lUPLHt%vS{f4&29}g^>&D5e*$O;;b=z zUd_1ncmPYU27+N}w1|FquAYsBU79*D-qdW-loh)*R<$A$46u19pG#%9QHNE8>OMPS zRW*)TKpAc=sCmOMc)o46W+HB-bu+4AT<47-w`x0bLEWaPGd(k|F&3kZWm_b&En8|z+gP5*;GsyO zt+Z$51ysoos1^HB1vmf!C6p)4E%mEB(Z9`qgZrBdJ^N)4jp1U2GoSKjR*%;Qvu~0 z0l}1S$w*)r$OGDtHaQfFtArrU#+24c)f!?-q(Dj_3itwU5$w><%X6>|mmwNNxRYNI zR=Hx7<{3gQ(r?1C3garYLQMiYk)Is8A@8ut)o}@3qLe`Er#Fa1+zncd0>5tdM%Ozl z+0`!SK?P({31sX2^{l!W_Sdpzd1t(ypWPyIHBwi5tE+XQALDl`q$4Uq4bqv~Qgq?< z-2lhIjcyURMGoQmgAO(8fieQ+B>oS8c0@jjqClfu4r?=wSpf=S9hrPOr!dHV5O=6Z zS=OctT5Y=cDb@y)qYFR*U9{XbpI=$t7H+3!v0-hglnyZcKKKPhD=R-M9H!J7R7#N9 z=735GTmitOKo{U7+IPoUL1aUa8rP?GlCN(QZaQfK>lDU+nh`aE(W*d4&{~2K0OL8~ zFq_7B6ob?{EC+d-ofkRGCOh1#2K5Y(3|&k$cMziIIZuO@)Ded6Fn8Jw1`Lb_W7-N2 znizFnv^5Pll2$os>302QV%bxe7~lj~p_`QmP?67sL&$oP&#vdD7P2B1xDOz?P%R)J zgpiLJ$a+`_kDeawZb>*;W1wnJxlK^8-%3h~gU9oPm&oQ9xcU=7&YS!z+r3ijU zx4duZIrER&MsMr?@#BAv6_$2}Jr27?cOd*&aAaVsjO{1V|CoTT0bdxK^ReO1(uUd8 z*s7GpWfCEIBqqN=C!j*WiyXBYq%kQab*lTQhR6X*;u`8&QV-hhc$7{7ai;3PFFqXr zcKN<3q(V(Y4@5FYX?+%VXXLb;le2OkdxY%=s&l}ZK?-DYsYhN2C>m|{94F8yx&Twc zA3xlOtKu1`Z@@^{Lp?lZo&s~A!qgC`5`;Vn9z~gwVL`Kmqh}?%(&YR}=nEu-hb%6s zqJasaTmm-LGuOL=60$xyf(v(TuDiP0Sw=DN1uUrJJi6|c)vTanL3fzDlDa!w?)4P8 zmD>51ifW~lbbuYJ{hQffxjW3Nl1uxcD`a3!S0xC7s+ zx8h0*YB!E^j^@#ra+*guMOf&8IO1u;;pUY*7mbgZE~??0(cm^*S`vQ%BqSZpkuc9; 
zaR7rnz!^y4@GuAs_*P{*Q@8;cVh<^d4J3>?cMNs~KIRaz@npgx-}q-(#G$kKK^|D- z8Ew1;)q;PcjBnLh)=cA;dc^uX1R5N&ZvS-^W+H+lpTqBhpY;T}Qf**_{y$$EL$Eegyq`+ppyekLf0ruP)2xgr( z&oPJP0f4^7&NDndQG@`SKx)pw+UWv&c8yt{+O`rf8#dDdHDgCpZxK^N{Fe|)q&5OO zXlp!gtj-hWQ!P-pwxHmFr!MDAe$ zpFZZ+UrzWLR~8*7 z^{`hgwGIP~+0rjBf{@z~XqzJ}$<5sE}p1os>O~-W+1fbjP^>2=cQ! z54=eSQD=1S%P$tsIyQCWL=)7yL1PBszAEe(nh;aZPb@@CBziXY82+?!QURoa2ogE- zGFPwiK>s1X;s37qoCy9%wYbL1;g5j{_Q~P#yOnXs=Y<$=#57{ zXku?PGDXRK6F5=`Iwr7)&#n|t7ngBpjtH98RG#Ap%IwG{7^2)r;y;SUy?&bFTE%_Aeq7-Z_93{iflN;7voH8x=3nB$ zUyGLaj_8{2NH2Ozen!Xt;KF3|t_nB=405|SjobyyEN$JIoL64}LIO$*M{GZW<0&M7 zNF)|zTjbIsPteNYaM>NIAM;T4ocq(KSH$1h?{>T^{cVYVT$vmQ+|EiVwB&S?GuXO*p9D69f0CtF2cy zMDb>=Mpy7x(VH7X@pcna_UonuIM4|&AtR?gsj?}3bH)E2|1%zZ2C@3D$T0f4{F;Yd zBw#U{=8pDIU=rwqgEkrZVgTD3B{XO7B9KJ*V{^hH)`)G_!JhKaSV93eKL9`C&@3Qp zbJhnlqM%n4$8nU#Y1D`t(R4gri#gsR;3vp&a|yk}LL37LToRBVyndG?Q>&>OMclCg zIyvBB9)d@3Kc*W9^(S=3YXk{b0QQQ%i@k3rF%~|{&E61z(I2Um-qov(;w61|eWf!X znt?EmTgt z^GMz$M31t>591xbMMmsfV)Gyks5pn(D8Q!zcwYr`oI?x-lC$}>GYR_v8sg>gJ}Eyr zj)>nSGFgTC`WW$~JtYs~3k1Z|ffRfO9vqDfIe1Z4EMW~fp~RxgqfNdfj$tc+19!^B zup(L=pv>_*69YJ;LdOQ-@0=(IA$h&^N`9gO$%7BX}M)e(WDSk}~*{GDwzNM7ag z&*4P?$x~peIWAACRt}5gyGSEA&5BlWa?euZSqp>)C ztn8pTLy!XN9`}{X~@ESmazLPJE|A_Jg`@hiZN&<%Jf zQKFJ0cHo65-xj(#6vH0#e}Q6KAVnPLp)pDWqb}aLlcSPHT69lT+AgsZDtY&bO5S!< zy2t<*6F77c&UJ-6euX?Jem7VXIIOuu=n`?9uu%dto+Hd~R}YB`&pKRixDH&nc!%wR z1aV<`?t#hHz$-Rh0i%nCkp&q`xzc6;r&NJ#=Epm7hZ+4#tN!=-w|MYH zaEHGFcX&&_;ydmTJpgyujW)pW;I#PR7yPf0?(QG^m{!kM&{0fu8_CL-a zaH$-LsMTtcq)>o2kXsf;1#zw554D0n)DTgjnl-MR1=fDj7I+thuq8tvN2AK$2c3XJ z^3bMA5rfMmjo9s+8?xEckhe?hWXR?|4cTlrkeZaQJw6l6_2$%gD=4?Mk-ts(+W)c1iu5(b;AH8~h7A_*<}*e*;_jfp}TL<07`wcrdn7ue`Pd zUK{tZMxZP)QZbck;O!I~K4Yzg8P{S9TuksA$5`SnNs>quD~Yc{9I_^p$eQrjA!zcG zM$kPyv2i+c;`l>jqz8<=MB19;puZ;$ZkN~z2mSlRL4P|AIxcY+)?I>ijFFn4!ZUZ{ z-C$PeXpIZ^P#@c(A^6Ce=uH(1dPs`%oTE5y=K#gIMA$|EK*8?`cPA+T1WS?2On@AL zRJ{O+VxkBLqAe;^P%)69OJwXuULu1gqlK&N@}*(IU!#gdiT=0>oI4{6~iorT-NdcBgp>|*7$O0Ezjxg9J$-IB`*5y?%^7NQ-E 
z0ujb0pHKnYUg;oJcN`7yN*aI<=*#V%5F$bVw!AJ;f?ju_Awr4Q_1N}}*F8#_6Y9f4 zf28Givf%G{%li++u7Aj1k@#RR#K-dVj>oNni>T-}d2kF(PpkSiqYMiV&98o4=@epm^u@80SvmO}rR+Xd{#`!lh#Ggg3rN zFkrjHPS&UH)B4nQ>!Z-&L@Z#5;&Fw&smk7#Tmbc9qmJ4c+C?lHhv!XEfM>~=oEQF3 uvB4A0e7r)9=_yEmi69@$gK0~-f4_7L5mG~{iVxbBOsS&`ItpRZQTD$s_t1X; literal 0 HcmV?d00001 diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000002 b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000002 new file mode 100644 index 0000000000000000000000000000000000000000..0153f6cc52752e4c343b28072be6fdb57101cf96 GIT binary patch literal 66985 zcmeFad3+{YUEp1HYJIBe*^(!FI(s_3XHTB=-Fm(I#$C85Xn2z6N#D@vPE695+lUVP zqPVaqiYQ!01QB0e77;`QQ4~=TcLp3p1eHMrosYi5=AG~FRMqo5NvHc>W=8*bKl60w z)YRY-&Tl`4am}Ba`Ke__D=A#)^prx_XK^Jf@} zz_M(^as$PGWd_RRYR5^PV-~Bm<@F02o3*X2#f?i#=WCb!+H!TPvUq;7b*r|#F*Q4P zesZz4yh*i3YMX^d9;wxp%hwz&L$Lg`hULqcrg=X&NTu9xalE|O-s&G5 zaBu62&e_SJ8-rx_P1C6@KeD+J>F`O9<;q#Kd@GqxnMvbW=>l=}0%0hT*V^G98i;OB z$C0cEJqWK>otmAUT*xKYCpi$^)%Wb|ig)FCt=<07f#@z@FF|=v^17s_Km|){mGiZ! zB^H(aaAoV^%=y~#a^*b9bo^Jj(nh>QjO)`LEgsHOo}U>o40mHw7UxxNPP3Kq`CV_R z(`nLt? 
z4Dr3$mfdD|DW_-1yWxnxdcysA^9^qFG4EHEVVjPjmko^ysE{YNZ2n9E6Uq((Wmy0X zFUT1Dmw6!Rn0%>NE9o1SGUlx}xZ`a(~Mai-ru085LQd{0$f8v*NmFG^f z&Ak8!V|#)7Jf$Wk)WoEkm{JqdYGOuB%&LhwH8HOy%4%XkO;psxlA2gn6IC@)Qxhv{ zazaf`s>vxeIjtsV)a0z1oKutYYO<^*7t~}$O)ja)Wi?qWDK$H-W@pswteTxuv-4`UtY#O~Y(>p3so7;UTUE0)HM^qb zCe+-dnwwH{(`s%;&CRO0IW;%0=E`brLCsav+>)AGR&!M~S5tE1V<`>j_Ma?g%`DHar|z$}_4wtIBh#Jg>@S zRbEi#iYhOu^0F#dRk^0hD{5gvEljF~DYY=I7G~7KtXh~;3-fBBtQHp3LPaessfA^= zP*n>xwXmWp6RI+)DpRU5ttvCBGOH?csxq%CWmQ>Fm5Qn?smijdR8^&>Dl2MfLM=_I zr75*Et(Iog(yUsVQ%mz|sjQY3)KWz)EvcntwNzD0HMO*&mM7Hmq*|U*%hPIkMlH{( zIk~KzGjm3uPMM{dQ%0$5go^DU{9*JSpWpl;1MUjr%f@3YEQoG6;?K$&($C?OZ=u zkt*XH#@D!zcw@v50Dq|n-ic~FxY5ahrGaLwn7i2 zB?4VDvjU^@8Qz=afSB3xCsTB-ACP_^DbEi){sh0q zt`L6&F?JvbBzS=#UqRVW>}H1NuwPomY*F?uk~T@Z80p0sOa{hD*FBN+I%vBNOUd-q zm}f<1eVIoVt3RzYY##}Xz&hoYie987+O*7drk!PlPuk<5-E+nb8{bVk>oG_3p;=^C zrO)tsmg&TPz2Fx{_gL`9DL=ViFZi2B8w-Af4`N7m9_msJY3WGGgzM3|^=s;0%Sl(H>%*g4qgom}_-UvnV%q?wC8Yo5>k4iCxlIJeY7d93zY?W46-SLQ@F@(D&IT<;uhfxMqnmcOR*vX_0C8%8#tl zt4#anOxJn4Iqdy``A*A!o^!(eu=796;C)^g{f75mmHk@A&VLK|*sXE5A$cXNa+W+z zcgSQT0s%xUZNuo5pkD%Cf?-*N`#tK70-KBx0b-p#1);;*FJ>$N z5-^CcHDeOC0JnU@%Xoy~lz=oI%GgI>4pPBT%$i~4A?~a`TN$0I6m$Z`SQ31r4mS~} z1-(p6mIuTrt!E5%+nL4!u^znbObX&!E0?w(u3g+-U9Nyg0&RHh0`M|R>zkYF&t?$? zN$e8qX=!t{damYZ;N&c?tuLPkKV7SB3w|n>#nlTNYplPEmlxMox3;s@%NHvbR+kr7 zR%>h3Jy%;3)Kv7fT1}UsPgM7lClbTj7$|4cv@VsDfX}#$wJRxIVqL5~yU5=n1CJY! 
zb?e59-QBEQtgc@u#FvXgnd?W-2@SqkT}EX_YPBHwTbH-CYZv0% z>yW$Yi;1NF$`-W9#oA_05{>cdvUI>{LRXY^e2pK==$b#%8MtOSP~{10xKCKad%_z2 z6Vt5IwR62!NbP2i&{N(u-oDb>{CJ?v; z1Z$I#x`9#bkVoeU1*~-lXp?EPjljr)XZ08#siSX<%~p2KYPsL8`Obwy-fPXToBk`U z0r#EORPfi9AAZ?V(LY!}Zf0NTsN9Re{R(VbybBO6+1O-$3oxO48aH8~S-ccr9bshT zbR6JEJfy%ke8R}c+5*%BZT5}44nYWsN3Jy6;%C6IGi-s8QW?Y1kPe^+1_0FKiRI#( zmcyUoPXk9sVBvkW<=W~apn3uevuuO4=hTw#x@AX7Lo^^$0yj$V@74sMHQ$j-GJzd5 zT5C;`TcCVtqxmFn8-TO5Qrq;Bx>0h~-L%FDM%4f@QFANB=7he*PaDjp5uLpY%y=+v zwpQ0KJ^@H*VuS3(NNljcEMGnq{Zop`Z56Hv%f@5@hGz1$)f1Y6;tMOA`eQTWs30{u zN(+ssAbMUHn#T?3E8gR=VpbS0WearbY9)eqNoVsc?Tn>B7eV;>i}Y$H_~|+xz!&8z zx83S}v1vbMb~^7j-)MUOMZL`O{|ZFlF?TBXV*tQkx+?lB_it79V;ML1iHx8Bvy5#- zW{=U$t`34{vzBEz$^_Yi#E3I&nr4``k+B`Q^xCdma#Zz;X~J@OVWT-r`i7osls>&oi61YOyM zn51Y*f(UDD#j8CE2D7zTNUqOb-G&o+d7~ESVz%>@^;{z=-4#GEb_}_5cgAAUN>NY| z-P$5}$RZK}K+H~5#s+h$U5UTg7uPS=QY2BG4%xf(o<=W!k7y%xP%l7ipOxIyQAEb;}T{PM!^?>_ofEB~&llLHErzv!WYn<3E&;%Heqc~8g?)WooSfTXxJP4oSO&GlX!(Po zq$w|9EcrH2&EP`0nWVT)S}?eH(i|Ko{sbMN83;EvUgpjc0?uY&lB5xUFHn_+0mAgb z-XdARWEl`iS0{nzG%o{XnOfWikH?mN<4j`E;HFUFKI;j{O%9<+oW@*zQJ%g?S~`e} z=&O)4Ezhf)4lHR#nxKv^H~X%~cHm}CE^mYS-2UwIEhn1^%KL?$?6@-PHX(+r+aJ2a z;g;K~3G8=c_S;TMwNTMbFFxzp=ND_=bE&cxHZrzkr?4t(=X@Zqruz1xXOo?yX*Y^snjAYARSzTMppR2((sSj@p zAnvVhEeeRXR=2h+DrV}#b|to2jp9rEC^~O#+d98mwa#x}4%XHy)r3%NL}hCcZlQT` zFW?_@Xm+59DO)jS9meyrxVBx#6`P1t>(bmz_oa<0&%C$?C*a8U=e_xsj9U$p}{K@HpB zAQ=IaPtmlqLTZK3K}ARgrxlb%hy)F?>A^7JY8MoB)u25_)FNsP+5<#E)(ZBZ-9-Z+ zYXxVJ`;XS|-#f}ipb07&fy@2wpdNt7{hr*vqkkD};X$^X7C=9{P~-*9$=1L-**a{u zhMs(|aIl>F)7%kzAkJm@&Qk!6@h|cNU#bk-cQ^;wKmAj!CAJkWj<|sxxWo4KNqdSt z#vQS5P8w1n04ZswJ@TEQdIzBrCVxyJKaHp1S$H18A-kZdS63#r;6hOAQcst9LyS=> z%M%mErSS|C4%riw5}}kZEtN@U>WAzTx)iV`$+T4718-x}O&=?wRyivvs z6TQ9uYC4APAqS)f7C8c+UHoyxJ%S?@ZM)0aC*y@p-NZj<1qpc!Iu3L4%~CO$AI+6F z#&U2@Sbag1Vd~t-ZA?~_ZH!2E>iEYhbqnFi_Q=j<2m`^`hV28aw?KqLhhld5^G&H0 zIE**SWde_D7(_u>H0=9=s1*5ORESDpNKkCe`gzEhZ0Mh9hfCU6k51?{r97uUhze#9 z%C~HvgJ$HGsPx45(5r1fa<%%rL}X~$FqD>d_otujl@zB>b47O!Pb!aYzmZz;lU~`- 
zE6p$)y7{n;Z^bb7&o|An-I!RjY_nPsD4uK|wr8CPai7V&shkDc*_B060KQMzb&inX zNrGwEAA$>-O|xV=;1|@(T4lct(V&vi?5Y5|8#0b#zi{Ph&}$$F0{w<_o*ikWDE;Om zznqB*z?$5!{hd++0Mjm*yMTen%@xXlJ>?w87DBgF=6OShmwRT^c`%T&XwbeRbj@6N zaz0Pme||K}$*3FSdp)ZO(%8O$@RVq+FFjc62PWUxPH z>d|HxZ8pg2(R#TeqkVR^7(Kndce3)g%oyU0NW|(PeBI7KD`7n@05HX*u26NY!$lQB{P=p~i zNIOU(VoHQUdYb~z5a67dEyp@5BwS$|e98^uY!7Evd*CsX|J+$Ar1PwfdY| z6FR29tg8o23_9Li&NoY1vSx|=wXx>mWD8kr<=CgR;!+th4;au`eaOztHccYStp%ZA zW8R!3z~DdqpL zeZu{O{U$T`DnjP3J9hMM&Nqn#X@9QoZEJt{(#L6inEpIrbkz9W_()UE~*K~~}+WFwR+w5ZDZaOzc| zP&BNTTwdhn5Pq6hCJbpoKlOhdfe@1f)S2+ol9Y~1Lh{lAi6j4wlR}iS>yt$Us*+Dp zNIV#BU@oUBYn6-3wS=~E@4vLMv39xfb@&g!)4Cg8Q>^UZEydN_W1>jJTIYo!y;*y9 zWwW}KO|x|?d9>0H6mQiQw{*IrntUi*$Pz2(YTUI1WwDj3=j&=wntV=ebKQc5%CBDB z(N2JBUn0c|bu^}A>}Rd7UQ9%C&K9KFwuJyLiu;b0E1JHgj4$L$mUd%vmU$9d@E$M1jewkM{WfCPw#C19~= zSOMj(8RfEsb(}9#crmax(q~c?=Sxlh2j+|M?c+z(@1!6lnB+TsBVDDlaF%g_Fb zSPjmbAh$`5G)z?)hC>Y z1#7Hq7W2id=M36tfO|GIXV*C3NCub^c7V>7!Ag{Z(Py~nga zW{x<2Z~mV0-e>=@<^Q06!u?(UbnwF&H+oIxv0V0-Iz;Qnf?hZv96TTvlS-3g8eXv@ zfq(QoAq zW6&x1p!sCI^ft@od80s3tYfLHd3^D3{#ItWqKz8 zdi(bgTBCivhz=dl7~AX382k1OxczpY-S6}{{cfMz@AY~8exKi;>C5y7eL;WN2Zjnz zgfXVMYa)zbHqES=V+L%|<8qh?&^X{*)BU_x?%5iFN{HgB^%Sgr$2E~Qq$t^kc2$4&T*09u_d zj6Dc|B!z-r)(f(6QP$0QK`xHxT+0gt6g*-jjwbbsl7_V4B_KoS{KRadCd?&a zK53Cdv{9?ArAikWomg65Uu)H|SSpd07Mk;<(vFNvlT+n#>BzIXKr&?5ai@}s_4gX3(o;Mk zQc7=4MfBKIG(FXnn41af9b8H#K^oAFlo1e%(6*)pHg&awmIgDeyhe(dqHNU>pB|TGy=hT&mtLS(1V9UhAd1bs-`Fa3WP9!1XCQl75HjT z0%HSfvM_A#4*(};-8@k zSsbDPG}h0WWBkQ1YT~S+M-&Oa))|Jkn9I5>a=4(j)!nP zsRL`5cDkuEx3i94j?y|=U8jdS^E>Ng%B;wEJb7IQtkx*+tP_+AX`Q030|?9gE=Dm8TK#+LwHND%fw=e2pE7KpWExP%dCe^G**1d#;U z3_)IE1qs^WR+y^9XRA6pK>=D@OC2f00xVa>yo-c`A@XQ&1uaekdu}ej_ia>G#oVOP>ei*DV*NhUa#lq!YjID{?nbs(V^K94 zQ(n?cc1xtum>6%?R#qQ{HL~;{i#IkH9)dK2AUeD|q^@w9N#=QP=;g@T&9ge@q08ru zK`}5kEJ%IZ5#c66r$B^HVo(%ewUo%yO2U$$qKMt(HuPgq4maJHtq4( zB)ZZ1ODEa4ZwR1OPdTLsGBD@De1H|J$ni^&g|(OkPXsG5&0*BY0s<5*MHDfPk@IqP zvA{6pd)+bi;(~%-A!bdnmSdJ{jz6)ED)3k2uZ5f1X99KXWGm>5jRsEq8_}pNk7JEt 
zz+*;U9Xk=>4IK9PH*8M^rajVAQ;U$3T|PpCgq-W#1y+y2)x1McgBAB}>ywU?q_YyUC| zl|~=;0^sbfb@Cv`f%iAUGiPdWr-? zXnh-l67;tcj7l(EA{dZhq>bQkdkdV6UUn=q)7Q`AnPI0S_76*VFwQzOqDv0<9b({y zIoAX9pOkwj|?3gIy`uE=+Nka(TUL``w#9vJbHBhp|JyF z6JtjX96WIBV6#2C);fYm%`_sA0be5)hX8n;Eg@fxzQkd3H58qa3Sb#>ufFh^kdTc* zE^*j=7y6$`h330h>Qt9aLN0OGe3vlE8x@i!=u>tN*>G&x9@4!cZiXF2XY&GeJ)O{5 zaebJ0w3t3I&q=+eElFC`PbBJBN($jbp+F8ZPZYI)6S`NG?lbC^Np;JVy5#})ehuVh z->Gjl?dh(wcwzP8;>P*JO1)!8_SWjTBwu!=uzKmjh0C$gOw-=vo|}|CNG?>$a?xfn zTJ0$!b*_d^m00nFBvvt3*pWXN$NrJQIx{cD1#1@W<(Bg z<;KmoeytzK_Ta#^DoELrsKaM7Iq~5qYG_xv-1FLh`s24uzV^>wv;W7IUvbaRzdipO z55MA`*KWVxdF<}vH+A&9{b_3t{@qPqdrtRPzH$7k!Dohk>jQzRJoDS_XIHmRn3O(V z{kg->yzhJ7@XYUf`yapYWn=f$zT^BSKlzgb@9qAjPgg$h7ys$r+otdQrwD6SQ`oeSmU`iUfAYMie(cu=Klr{teZ>B7${2XU5hWF5_xiuT|2qTq8HiU=%e`Oz)c1Tdc-33p z^{L5+Kl$flV@J>QeC4rc+tFTD@A%GtefimA@A!p(dh2U$y!Z4Q_I=084*kS` zdt*nw>z|kB|H%2q&9DEn54QcrpT6;~kG=Fo-+1>Y-}RZ#zWJ`x@A~DNj(niyPha?w zw{F~+f6WW_f8qQ8?x)>vyy?bY{^&chZ+PBoqPKj{dmsPP7k}^D{(0!jKlYYCdfrc0 zKJ>X`J>Tki_;K~s_Z1HP)kpsHPk*7i_2CbkdB$^(4!qX-=Y@w~|JQd~kN(^bZ~ol7 z-_`!<+|Zk?o(KNx;}1=|`p4h>ikJ1?`J$is$c-=UnpN++_?eeI^pUNvd*6BQQx495 z?(*GzwYOBB`=g)w;EMb7{>Q(#?>P_s$_fkPw{>48TtKNKUDtaS047S z3v(a+Sof>nJ@Jp-=e{r7_W1Yy#N97{^eeZ%tZ(Y;zti*T_iRmn{#PEdX7dle<^TES zAOA+rJ-@s5#b@{a)*HX{=(B$5wbKJ1bANHpKUnzQw-!G7#y5QHzx>+JE8qB$@BGO( zneY0Sp<8a~XnkeN)8Bg6ZyL}2#y|g$PhT2;?sn&W);B-?wAcN1*KgeQ_y^whsnh#@ zvA6%1Ui-E;o6q;!?iqbc^@G=6e)V(CzVGbKpI*M>N6!D%?a!-x@fn}^m0Nyr`2YL# z$N%#)Uh$S6{BK`cI&t#yWbe(@#CAM*deeaYtUezN=ffBU+(|LmV1`T5y1 zpMCt{^WVDgFQ@T*!03R=ti#s?u6lI&EmM{V{Jr%h;O?+3Jh`#$r088IauMPIYI`+O=*o0 z;cM5E;7QF%41}7KHcGYN?`{#k57IbPn_y9a4Ku1p!M}&VLh~@AxuG-(9hfOQnw!Z8 z(2(*nGA7IlxrrPEmuZf072`V?ldG*F>q$?WMb?wPHjJz%GuklHcZ;xm^q2}tm}8DD z-IoI;ysdg=gwb&lRz;>9X@(U8u^C7pLyI(fiX5kr1DY-ka0f%nYQ9rFnNz}IsweYG zxJmV7LG2@%b&DOz(7IKhlp`o)XhqfET)9OJkfdvuu0t72s~U`RyhhGYd`oPvQc}as z$u>38oNQO4&B+e6pJa<$97qN;tj6MU=7}fkRK~yADR~Exnv@>5Br6naQ(Q%@$O~5PL=#(OCf!P|t;)bzK)jJeWdg#c0lv 
zNTopV#UKrNsg6rj@({0z>I{@C<*|*Nj%#tWPyk{fr*j|>PH6-AhT)ojUk$@fuh)CHgLPy%#aULU9a}&E;)vNU*1AJQ`$T!)~oFJuB53i$$wGzOWXTD$D0BI6YcG z*rfLI?`O@0%30{Zs*#COe6(5kCD`|}EKz4F$~R-cL=gs0fa=+tFc(w~Yb^P=VfdIR zS72u>;3(G@^6iP7C5Nh6h<^4?J&_Pa`!+Z%ybw{(n3Gi&TWV)`VOVKF#-Sdo#gDQY zj5k^7`l~BK_S@6SneFrLe~$nA=79ST=8*tSYxJ19C;L^j^2NCfum=_@50mIXVwTLq z2tUDwpMV-6{v59FwLUtoEmb)|hSBsMzEY4L~9c==+;82d=*LbDJv zjjc;q2d-bZu(}P4ZhdY29KK*g?PG9_AcTv&)XXj5z9u#om!7twCpC?x7#drw5GZStnx*6a%f z7Lad6UCwC9Te#bQsd*y!ZPSf@-<--m&Xy(mEZi+tWu%2K;^5ye$;W#NeC+t#(lMj12A6a0{u_5|QY03KN_n`jVsJ3nJS z-TRvPUekYvb;A7$p!TGSe}CUB!Y|p4wFE9(K8Ze zmWKXl5s`EyfVqoGGcn;liS`YG2yQ|kp;M{7+EZ)G_t&=lRC8EI*a5b7Kzc{(0LMO| zu=L&X#AM3kv43*C?MB1w^^?!K1!v!?ywk?$$nu=ZnF+fV)mOVie$EsB;bQ( z{i0SX*4qN+Tg!_`1^yk?Mp)z)Gb781w{M-Oe-+7AfElK7LPHQ)k7Vk20drPBzcG;N z@ugt?3<3cf^2Kr&5YG;x!9Tl5tY8@OCS^0wJBzJRROw;fI0@X+>yK@y!M-2VsDxOr zqk|{-sR^4_TkEo@%DxyDV{)H0&Z=^TFB4X~ehMwA5QV_}aDzJz$Gr^WJ9fmjPwa2z z;jx?ou^qHKSwLDI0NS`qTSdfH6qRKUMC=9_ZH(Xcoj?qR6zC&`%6Ng>^f`h@6|BxJ z`v;LB{FwQ$_bu}+rvI}j^#3Lb{hz?!#-D*neaiZxnf;(+=YAc<{#H@+M#T`E+Cx#U zuG}wjJhb)`^?o~6?{{ML{$i}&@5YM#IV8B$%m!M`pAS(|e5Xo2*;vaT#TqhAywb@f zjP?7Av3|cN`u+L3et#~TjkWu=$-k)VCy_+Yzn%h?F~UZ&q4A&O0TI{~uBP_StF@d5 zDobm%o!hS#0LH!nnv6f|pX;9L{dRS8ePgGN{}nC(p1f|qvn``)LeF^3*ZMW>)qE-u zE=~ZB@@a`ueX&qqjmg&~I{tzkv4VK@pji4FkPc$ST>Z7lts-t~((%_ARYS)=Mz9jK z-Fg!{bam^EBs%``(E*YL=8IClbe%rep#IW|DE@+}Y3#32gzza*>d)(MO`1J!Abyke zkH1&G#fQ>QKmfho`)n@?{f>cXKHd8)0J34%z@!|sPBsqS9#Qu3U(V|u@fgDN_TCmK6JNnw1~cg zZ?zV2wKIVEL8qI`w&wD91}Wu=Zim~RZOOK|Nd6bXsnY>uP0Q_TAf>y@PlU zZHjN)P5HY24|@r@s_U}JS8ovfIyDLo0yi#6lu=c8o5n_+@L|l}0ME-OxDL z?XskCcVc!VRuOv{lgGt-c@ok1#4g7nSNYu1>^drtX6(AMz|Af`WKDKup!OnAEo`4v zga-nAeAqb;mmXGtPLlZg2;F0$4IY4#5D#il=*(8A3`Ef;m7yX5g81;zAg{E$0A(VL zSag&ccLdh4K*&>v79AXT&t%zpW@c==aF~;=@z2|#)p|M%?w~dX~f8Y3^xLww2ED9|007(Xi=3bPLe9_seGZQ$<`9U?fN|J30x}3Z#IF$G6HbGWg&Gh; zTxnNK023fHY$PEHdx6i0;USI;<1lN4Ikiy;fUP;^##bm1okrlVuc6D7P(I?t<-<5u zAvv1O**?1eW-tIR%#8Zsc$sbes0o_wgS2aQ56G~3bl5LyXXc?Iq4IlCOM=_kF(Q{p 
zI~`Fi9RPoYe|X93IDCl2{(bEvtgeHH468SfGuwkjp2$87<>WiXQk&6h?larlTRMxK zE#0j>Z9UoUd}pq^tEZ!<)Dw0WI-~BWv$Gf+QGih#gT#;%`bYpOe8d>A3c>^?C&lcU z+V_&cTgfIfp@9J8`1#N`WFV&5v@)O}IDwM5+y`>y$U-%7S1D~_1qV!~n^3pf2+#_l zmpE+X3dIC+C!ua$T4almor4^y2Zjt+2^Vgfx>9UM6beSDD+w})>Y7x;*Q<0Lr|_BH z=;Vs7<0p>0#TbtVj0ZqtG)@!JBhN!zJGF95y;-4G%aci6Jr+GM;_hkfK5DnKDi7*! z8@ek@u4gLSl`B!umI|({R`9NQ5uV?ocEg%*2D=i+RXYm>m^b08iR&sGU9ig4HmKM^ z$yJR}`*-gMyN0ua8TllfQ>9)`QsPwjlp%kZjc8|5;Z73QFO(NuHpODJkD$_M#599Cg)7e*Oe zH~2v?f=}g4h#oMCXlDY*@If4Vm&1H)3HVgQc-~kpQ_k4eJ158#246^{)W#o|3nm$b zG#L*dj2J+GnYR~uT2XK4&d`#v;&oCeXBn#)2eM@g5krP22KmW|GLgqa;ZAA99%jEs z8xSS;slW|UzB}L?6LWgNftV+LpA3pYds0PwW<^m10~$8~VMuEsPaOt&D4~68^6a7M z+|K>LdQlfsDS?EuU6R9x^#ILsMAwP|r^Q(i6en=!&2nyDl?LtJFpgX0JcAoR5|NP+ zpdxpWmn#5O^yKu`zJEA=cAAoxQTBMug6QSZP>6kDi_;DsAu5LUJk4omB03$~a~1+0 z+38hlI<5f}t@Xqugk4q-3CGE@?;pU-vDgiQBF;=&G**LUO0f^Akmw8wnWcq-MYB_N zV!)R-eswh6vN1*=^JeS2Kd0)=Xdq&4X#@y#*hDBm zlw%96XnO3pUe<=~6EiNro{TUjTt(w9A$}i_;BHy`_lTn3Qv?^gSAzTGecdm()3hXu z5_I4c)M)o$-^@$FEh1;UO{^^Ll-L~-yH#4bJxkCl0>$H^=sYk0>R=8I3=&TC!-P^j zrz9MomGHz2rB?n9p~tM&TTjTP?a*Pl%;at}>_RK1_=p2bZ8#m`dY}bvHP@N)0=Ucd zf(Cf@4MTCIo7(i%8M}%7$2okRY;?|%n`qQ`j^0S4zB@9=mgODn?qRBCa(5cuY4c9Z z@y|&Ae(O=`-)}o6{rf#fSz@+7le=3kHy#`3k~ziwesJ>k2_x(|i{WTEYEBX!x|wFf zW0O;aH{L>gu-IQ5D2^5ncMbP;SnXDuHBuZL=^q&w866oO8yVSmaPr9Xk%ftg>FMe2 ziLQyE`LW}Nu0MGE!14VzbWY4q96K^`WNxB7F*$u~`uIfe!SU(wBRvOa?wXpOI-)Hl zaS8AFS+FzYCB$81&L+;vbVA!aim7Ba$)OaXij-;j3cP(U%m}03)t98cBlrPrQB^LW zBnbTQ07WFJPmcbN!XDU+zLz#*?L8(#S`++&?m(0zt}!A(lmIIpcmcfbdbVVO5y8rj zTZ&t5Y89##?*Q=-MKv%UvCzfRCtCviQF(4@S?X~C^ym=eBOOez=lH=uWj4?{6ubT% zBOo6jBmU$l>%bz_7h_Qr|C?)NJ(_4wUW;o_+WS(h)o2fi2{5gTZB3D6*y&roBI z253?mm9ayP=Vb803I`(;mbhk@+Yn9i)VQ7kePxXbO3DMv94dGQQpPw!X%zHU7zR28 z7D%2j|6p;V&Z+tF^vNJR{LR(-7JO0n9Aj=W1{#)n@g4ZpqZ!_iEFk?j(huws4dSKO z;AHF|{EpgX>lAK4VstJOU&zeGv4=KSw`->>8~zzwnBR&2&)h9*>v%-Cw{mH#Mzsbb zqI7Rcc_np>aylR+a%AI@hBi&eDAdHJWaKcuy7vXEfenFQt!f7qleU>~aT(Lbt zD0el}y?3@Cn)b3dRfv1b#217U4IXJMp)8~C{XRnEbLuV|nUqa$dBKG)R7kr2h 
zym~_97tKnorM=iG2|rI?=-eMtdam0!JR^h_xIrM64 zi;ql->-I;c1lvvJwK3|7ad7h5VniE@)<3It=k*xTN6(j^SvgX zn@WNb4;ecU-m7`AkNMMk36))*mIWDdVO3}kIVIvd1}_!5uEUELDa`rNKauoZ>nDP{YyD!irK{J#9^{%o+-_X+hewWU{REl4*6*`l5^Me^ zrTTHu52x5hl{tj)P?fC(a1Ui(=iq>U2*f~5`uN}sV5L&AkL}rkb`{f4*|CLYE!+l! zG=PhX`6uM14G9+H$P3*t3^Q<3#g8Dqn;1yPVFm&Q zz+e=v2BCWc{}}y;@jw_>5Qd%}2s|$6+%VKW4`m<`hhn*UAfcOe!U(RjI31PZ z7hH)VYzh=k9S1X7IYO-5Qf`%(80B;Ms4M6?IniwT$4P5ZXHW<_hwU>#Au0x4Ve!;N zN!DLp1zx!%JdTE#wj;o`AnEk`9b}PHk@wzahK0H)wmm9f7udy0g{{EHWk&c=irSg( zFvuMX+JbhO?(cS<#s@}%#hh`TiHl1I|l5ZGPB)hCs<=0 zK?%2rybKrr0-3M_*Nc1r^3k>gk^bmjiJu+)$>9_uz=L9oA4Kxi1+ZKYF!I&4`eAFE zvl^C!XQ0n(mwmwd%7PPrQ%g<;&l zg4XxI^}=q{D)+Doc4ikOyI1>8Y_6@d_bDOmPnJ*|=56C1vu^-R3t3R2Yyraq#z+B< zn1wfYfxV|^#JL?WomY+G2`||!_m>AGM$JKCr5r<%aR}BL-#oA&g!U@4R&f=q$|D1~ zJLO~tTt?8A{*_1R1pyFm8x~LFZNuYfQ~4)3HaxM@*lA84+dT~2!p8%V$FUgM{<#0$ z-E{<d9qi607t%S9PWOJkliUr9bW?Gq5i6y~Ufb8cW70s7L%jiFVN zKG57gS_gcwjoBQ~Y4Q2TL>`nmRJf8dZqx4UXFb$B6F*)C&BJNKRg9=*7E9$rMVu?6 zS$LS!YUmyQW|2eT%8a^vv;|QQI)M7?=rn$RAmv`6!Up&18P@jiSy<^DbNvXdPhQ6% zM3q|!mQD;9?y|(HHxkrtBDo@Y=cLNR60Dvi`K(g}=c(fLO%Rl4%JATBE=?Hz_&7(h z__v=ffL7cvGXNa83*-KHc|0?2k4NLzkI#+YJwC%R0=LiK-hJo7~5 zk_p2kzW4=rBNEkx!YGkSI!F}WVUmdJY)G_jyf4nNTV?D8(*(CT-p+Y^Q;rScTQ50xkn z2n$q4lPftgD-?>LmC?&H(2yRl9)8ldj6dnmQQ);AW5vOC?8VbQ4Ra@w;I7(sMFfcX z)0*I2zMQbEJmGY~@yaCEr%u&*Ukzqg+kLfjm)2l`RGXXH=}|O%03O)ZrA^&f{VdA5 zcTs(;4|>@xIN#)0!Dd=~r{mEuaX_khQ%jdNFKPx=X_;qOypTLZaw1SZ=1nCu{PN|l zB)yrAwqDYE)e_h_JAIn^_%vO;P+8mB?c6lc79<;9*K!3XA^Ow+t*u`M8lh`b#*sE2JmlXBri#yM2i(=dCc)>@aQ(b?9aiYO``gUruEI$bE}*!4=WT^?Bo`$q_xvit#BOiR2z4G z6}Po}spv<{>BiuVrv?M2#G%b&Ol9j%jFiFcB%_i}99Ae60U)o+N z@4?ah{|_rNHv9Syb!&_|LfOx^>OGB(n|QA}>u2uwy(4~RUT?W@O~~%>bNxQb<5K-W zy$_H&h^CHFEl6N=D^;Fv?7Ve4eo;Gg%$5WUnHCJ%ee)(FTy?wGujMy z7U!79o#JifLUF2i6lRbZMumJD_XR%?rgZTZB5Od)Rwz@7Tu~%clMd&)e>oZF_YgZ0Pcm9%NFj-y{P zDw8iCCWmeBPvcHW+XL%-x1rm;}mA(VBc#}X3 zEMp)H#FI3Br3L7yaSOoX0C>D{_@~#n;$ufVV}@m=$RS9HSHaW>Ou( 
zUKiim+Jkon&wZr!p>n|J@DeSl9=PmWilI&c-6T~ovvNVuE|*fs8M^Xib4)G_O^@aDkmWk3g^I3-TQ#So}ITlOC2T0 zmLu|V9LDb3U8Sz}u1>@5XgBTls4Z$mnc6`}A}tPQy(ry&#s3+49B%lP0HL&4JS455wovLe)ZlqKSDaWoiP z6DF-+VN@tW?kfxnt&R1KU1y3mk0uo}H<=mAP#I&4gJ_}Xh04aiqtRi*S9QbZVB<_; zbeHv)M<7nBlQ#z5(3&y&*yH6u9li#`Iw7n@{Y%WLjFzj}C9)rxXX29*^r@oY15LJx z$7AQ#oQMNbQnm(g8YmnCePY0zakfOcpeI+{dAbBvBDjhiVVX86?hQ|s=3~qygU%n^ zXbfdE4q}&(Dyf+Li%lUefw3|Wv?i}%KOzmWxW`IJo=@O3T=3YYksY(#I16b|BQ`y- z#09Iu5)*ItoQ0sqLi6VOq_A_KAa@|HgAebZj?2)Cd9oBaO~)?TCpZcq{vqqY$KkL& z?6hW0d3f|g@pc$mXWGF#+%uiKEF=+UG&%*3fE`W8=75}hML(UwB+huw7?zu;eU_tg zmi~=0$HI2wwBI<}PA{G1yf|sTxjh{OZPo|G90}L6Z=mS`TW;aXooWTW>1lY4QYSlloqge!Y(vUT5_tkAgOY_=FR2&+>x?y*Q zzqYeIUXK)^=_%-0KDH7~-e$u}v_)fT0~~+>D?BrtE;DN^)&ph0aO&o1o7E#8jR)Ie zJd>&kpIxd7=5)GVl@%!NigA9ZQPL}`mKDpF_aS&VpAJT8udHv8tPn&fBi7U}oA#Fw z8UAj>7!iM?&Gz#MmG$*M^FRWdUxjt#h zV{R37cD1&V&es;sd-cHG__6bP>XsK%ZcHJa%Iad=QL&McKJsS#!h|Q8Pvc3M&pd2z zt}bsUI$_yu{Wejal3C0psibmR7>}gRthokX1}2PNJ!oI)hsf2_dXaz$Y;NJ8Sb*j= zetdA({5^qD#a<`?Xs$A!r_2vO8LbzKV`WEtyPxk#**fmdS)=H`*jW^@eRovh;uh{j zoD$TbwT1l$CoyaTV2lRm1hEmD#ePVSPX4+QUmuEHg3X(vW5q#C76k_vG`1B;Q0&pn zi&ivPWCJe31;BEDP~!>vI;25q3`r`P9Dz=wB1J6?;$EB+FR&~SY*$bRu?#81$bybS zc}OmsKklqFBB0}99FXQiya``J=R>^7Hm~ClZ?g01IE0_xG3&+`nf6D_cIQ*(Z<^lgt-n6Z9M>&25`?4R#y$q5bY zuWp@)XbXcLP7g=G2PMpM5Q$cN@%fCCXk0~B3&pZIva4yzQ1-wqv>5m;wb=?qho0px zA>)>sUa}o>dTd-N*(f#X2PLO7Y1(Jg!~h_rP1#MgObsEME?^{aL~40wRyl00ayge= z6Uo`NcUh5lxu|| zomTdc=g45}8vF4TD%0-DYHh8$wPQJhC+H5G@$O!IUyeKz%&L-)b81@pn3f@|F15 zV4E@KRS{1R5Tv-HVY{Fa>0YP`4!tJXIjV<@;!!%}~M&*9;aLw z=gy;|O?(MDOy;>%xb8fqcKyItOvbOPX z1<%%tOUS?%&rQY0v-znPacAk`)a*=ruti(7wiegH`}E=l$Cq8^49>Q5l?xa2oz1x( z=Ur?+JUIuDXye=n8CBwloKcA*s=hsQ>#(Z>Pj>4oAm=jdCU0te2_734uj*%LD$Z>@ za=s}UNmAsfGSCumf>~KiMkc=U)>roJm02G}8@I=gS6N!tlOzqXaIW=haM<#n5L>e>U*2`#ZnN!G>?t6>>%?6lU7MZDm6GpwWwTa;x{p;| zu0RK3XO(gYZygDEBGJA`%dmNgg7|1cO2^UT1kz>UscB;1D2BS5`GA+hr#&f~UaiPpk z$g+lP5h4&Pf=J;LO3uC#M>C+DKn>YK*(RxhQtMx}Hp!m4Rrdqd#UEGpi>w*veb&b; 
z?|I&v9RE$x3HS5S^})|%ZwNnw!TsN6f6B?eptX>DRcpaGftBwmyiB!oaEkERri>em zn~amjEu#I$KMv9&c!wLy7&WWT7$ihlxL4Y6!64AR3OE!oL+`L=t60 zd2O4GoSq5d02ab74X|$2N~}FNixWF+nm1Z9SSg?xJ%*8iTse$JcJ*RbUk>C%6vio9 zai!Bx4~B83APmWcoanS3EeWMKT@vTZ!xA@p4~oR<$N~7IYQN37Vb;xuD79|02Y>-1 z<3|W*uLGpmHy$SL6y`aA(RSJvj>~0e0j~wl{Ef3D-2N_`XYSs|;r0i|+F+|4>)mH~ zXNzryKiWh4!F5#dgPTSP!{fsvM$|c-H!U>nI|&aV_qU?Mo!C`J^EXNS`l)u)I?yrP z)z{bFZ+49gjSdbBwYImkxAk}R_Z{o#Kh$xgW2_^X>7Fai9_;9u>+J8ne&5{T4ormk z4`KzJ0DXw?w+$R7p&x7A?jUBI?duYG@v&9N{G-86CtxRELmz+#v*+zQj*1WM91- zt&S<0tHNyMOd?=2RwtGo45v7238D;L=2lC!i+xK;nMnUd=2k?o7gvZQGR- z+ZcE3%smrJ7;DvhntvZ>nvvOXL$ib7D>Nz2YJ><4`9l#=b9;RqXK*=*G*sHsT_*8Y zlWaC(n!I^|dv#BBxu;jHtyDPdEaBajQt7AAvgzULJA|Ok`_fLAi&s##sWGrEoZp=) zf0`GZa3Do~sx|vIc^01oVHPFQAb}eOzp3jQ8;kD?vi*twn)H!&s>r3}u+2eh)lAGD zMJ_9vaxhr+{B0$qaTqHy>Ruh`dTTFhTrE14ZBy&W)%VW)cCsQ8X7`?)R}0d*AmlZ2 zUODY;{n?!QvxDAl62sz5$4_VeKU6ntZ)vQ3)DNbuVLzDDpAVQDc7!GUB`?Gulv3o( z@bxP5AbQj~M7V)B{4T4@>{4BJmxHYUn{t9s*L{w656Ett@MPgJyedAVH<-m17O?QRMYA+$BO$C?{WJlg}VrBgsN@k z1=wREb+t#?)#Z3kad3mUbQDpd*1{4mA7i-3v7Li)2_0j4r|DeqY+S>NIO}L1Ol2<@ zw=L#MkQ5v_Zyv|?j2n#O2$`&6Kl)ew=wI1o6u#&$tC_$P)6~O%9t=>a>vB{j{Ns}y zV#G$5s+^Z2>u~6)HM{)!^o-a?v+8UI5?d;vgHy0a90b1FP71D@U_bn5M&BzsUf?W} z5AVQ~ncyHkJrnwHWLT|aVniifK>}N(=d8?vJYx%rMXKD2UXrkNPA{OO|by+vRl`A013lJk;{AR{C!1k(XNbx2!YRdH{y5P><9G}V0!qZ8A_IjDOfEYjA&g$U=to*sps~G}^zvYocr9Ri2t)&pImO;V z$S*S#v1pHcc-Qeze2mbk_lx-90a*}y9Ir7lPD&TXKGbR=1lTVm|@&@ zV2tRkI=uZrKl9OdE9=V~zlD=#)iZD7kPWqDPJmnPpPDAT^Cr+;>!I5v{>&RFwOi3g z^lW?h&Q3nnyJpLhxO;Yvi=&Gor~S@*@c!y_%-$;(UC|l2vr`hfe&h~vaEs;j9^=Iv z8X5zsxXw6cc#FMbM&?0^{LIsi5C+$uJY^c;Qx5GjP98pS^vL)PM~{vdu``{SIX+(s zCTFfYegn)$!0XI)*G-Ji;0EZr6Q_bh{RjIGkB^T}-Z(p7zGGp2>aMA~=jP|O#ZpV-xN*fal{6EAVNw+=3xte8${$FsG%P5pZcSRCQIV_**$yfR zUlD+dRx*zh^W>;5Xr)l_B2bW962Ss9D*^=>3UElb&;pZBsB}pQ?uzlHm{K{ol+XIP zoNhNYztXcNx1y}T&;+p+ul6|PAu8^!B~SFvVwXSK9S#pok75*ryj~D(ZK$OUzBHa` zF+U7S&zMb2`gOSQ5Byk~5xm3iN__LzL{q#gDX;qN&Ld zCfY~{Y}LLEOem7zOif&q)T)6b?$pt&MDM+{xeQKKt2%eAu5X>s>hK;xKtNYLsR-B5 
zzLR4x5>7^#-a+{uIPcYQR&6c>Dq4umh47om`56cgwss+;O>Pp?C^iT!%WLafHLYw= z$9}|sXcvMicq(Jwfs*b-L$BqiMjTJ4Sm_lW!Yw>H?!Zz5!9^>}%jhNe5?y6FThd4@ zf)P^(Fx3btP3_U;wM$!=hGa;{7;DwGdaO~pc3GD*-?eIr&j!*mrbD>p@pN>x>YSIi zmDeFRCsXOi>D6j$9MOoRHf&<-wshJqK|HpNCFDm=FT)mj`Y0mb#_PTC?DTE0*IBl`>DLrL>IgJIHo zDn8Ogt_TA5>`NM;I#@ZLG**(v6ZnGzw~D55h;deDH6A0jTwA1AKbkaHIfqxEzltTr z?FA{!d@H_!$U9gkJ{ml09Q#E4cQ=lIx1s=MDl`|Jj$7E7+fQIxmv;e0; z&x-?F7CG{0V^&Nj3sDJN8KmccF~C4CmBS}}Kz51%^D3Eo>8M0I9~|VCqk1Zc+IBh^ zq9;kB^q<^%B@Yj6%J^*5CWDmbaamB=0~f_Em@U{Nfn9q>qkYh4vZKZhoFf-0&zk`D z;g1XZf!hU|2&_)5gZ_ZM!xRV~nuOuHT4^wqwr~SyrDZqNOUrJUl?RhI{=P2mPc#Jl zsx?zPmKuDYI!;}*L7v+%nNk;fcy;xVtAD9msuG=C&8qW^`70JIYWuT^9GNScH@EpUTf$qf{=@CFwjZP6BVu39vGwebK zL(uomo$fym3b5Q2w1@mHrkMihK8roWECAQV?8H*`9LY#*?-Df>eiAY+r%w{&p__;o zhu9w|WYDr@S3iH=R9m!SDAP}iHC0c5CJID&f8I>Y&@S(J|H)!^r;t1KAnNEp+BPjE z%%2`KuHG7t?CQb;D<T|!_d`I*wwa3LieS#aoeUd5vd=x3hQ~0rIOyV7(R=xU2Dd}0r z3>{7M9KN7P*|!D|KsQju>`eI;b_GiG4xJ}EEno^!5O*Tn84>g-3YIGDm@(W{e@Cge z$G}sQz~15Zuy(fBh>a2;&TV-mNI)ovIc9eCAuCvKey@c}Vc4}6>*GS4vj{1O5w>(g ztCtkD9mX2MkCX^e4?yN7B|%gfj1p!G;HG8!5El33(_ZnJS}H_aCjp5vA=)}`mLMm0 z`S3_U6+%mh=xccAw!oeTXvDAgVUPW#a;V+(im|WwsU=)Qtf_Ll$keW~da}teQoYbb1+X zu9`JsIF{5>_Ngj*hgtNN)`ibl%JwUv%Yl3y@F1GLnK4j%nIG|VlR{pEPJp++S^*v{u6%mHIk|PPydJf@}C7?3X?CEW=sE}bhi3gGM9WFQ2!5?KNmHAqE^CgQSnk- z!F#-YFb3lRGt44)0lEgVEyU9liP7JPR!ZXaM;8+g9P3J8xy1r+-qsfYf(PCrUZ=pD z#fJrgWS9Gy1hWl9|NZ-`LsFcV^c&S;5H%=;mI78HqTwd~E5Uz`s#QG71SxFY8X zT)A)tOUI;Oex-b>5UIi$qzx@l2QvLHJbUGOfCMC^kb+7c$)sYhESCN{}m9mA=~2=FUQny5(wmI@ z(NriHL5V!SiLPpb|5oj18zrUy->yM(`QGL|(>RB;m0XCzt%6E8FH-j29-1P<$JzbmmKECRPPhw_a$V>ZTKG>zMCzSAZFld%Trgro}|< zTa`0IGdIm*t@g?YT{x!AIBF-%-9Ui}!q(IsO~l9zW+9hUh!0S&qO;AVC3_QtQ!`1G z`8l0bl>yi2Qv3i^FN4V`-is(~WKH%3W94x7(XHQZ9tgPjzZ?Wya^lvDq7S-AdXl67 zSC1NlShhW_dg0(#gYmu3H}XLqfelS(Pk7*Ke)MPl?)d%S7ya_r!ru;(e@ZCqHvlz% z8&LC)5eoa`<$oMEey-kZ{z|>+P5Lw7D|dSbfYt#cK;97 z;W?PmB9}}n^5$Kdx;gg)q(a9ade+Zu$Ye#A@3Po*TR1-Kz{WdW)* zLWSzIC);D~(e`+Iq&?BzH8w>|(9BrcuGDvrF@!7A6+=Hp 
zIX*+a;w+-swhqc+?T)$;9BW~JQvt8C>1BId8JLoVgXV0rX(EKPK3HVy6r=rS4$Yz? z-K@c6VtK%rGx$Ym$gfsCWv~Rr`NHNctWB0|FM8?r`bK8ydw6Py-X$m`^BWJ%4H=to z<|Q7U7=~BcTN4RXM`5f?x zIi;AOq?G4bzJ15?EaUUNBe){>01o-7%AE)IZrFjUt=W-mMi@lJ*6)32^TqOCMz8(raOB}5TjjT-D@*ph>TZOzi zN)(jt&227>?0tV~EdQ7TFWM?)aQ+BNFS$At{R;I2UQU8!gbZd+h*Px;RcVAO6pQ?n zeRx7wgDUi=P!Qmi#6KA?$oC52DAMaHLJTSyURc3^DQ<`=(OV_hLRJdhldxJc250vs z2*mDaI6v-(KlGvdAdT`z8%N;`q6&sD z0nWmX$u)a=Ji&y(;!T#;UVKE18ayKOlu@)>cBIvS_1YMBh{sVWBWNY{qzqtgZ-@be z_W;-KTy(Ui3q0*1o0#@Mn<0ntc>I)RM`T%Q&}KH_3Y_9@hL$?55(sP++K8u-!74is zkN41dlv_McCIT2t+*={;R-4iv&?Tfy+v_NDO`5tqjS@+EN9 zIH-hXxr-Hgofy98y({-`gzsJXbciXLxcTpm?tPLhzFJ&g4&Hy>Oa&y@aIRIUEYD5?KK^liWK(*z)mK^##hj*Yk@h7mWJD{eQR&_`5X zXpWWnvgDFt+5}$rI!ZTN2Th1>MRGQ#&^n>Vm~h*Q*H!*!ZwgY)auB1m698RmGv_j) z3$zS;vMrUY+16!2tF7}osR|6MP`-vE_`QQ*rjoX1jt`Q*|(2X0Gem3|E}A3bRv!7vljzuIpRd>6YyeY_>w+W`D}c z9Zsm9xm>oR{m~$cmQ*fVQS%~H0~N>QrIu=c$orVRtfec~#kgtRbfitA^Bv@}wM~ly zEEWOuMV2!MitiVa<+9Doq@}5LvXEd2(U`7a6cv+I#GA8$eU!9SqsnbKJf`(i+m6`a z+rNtp7(DiW?{_sPO;3l5pQfwT<1hQ4EdNOGYku+vbhR(i)xHuXwVxq*;J=Fg&~N-G zYQ#UG%Z>P>fOu1&y$pzDoCe!;JD?i$h3z}uSij@xl$P>5*S)6lJlC1et*1^fNzt#i z?Rj=HjY#z5bT05!=YJ1!4Fv)Dvs2 zCz?$9m+6at@s9WCepb)=nF*MG6+NwykjN>EY@>0fX}i=SNxf#Fr1Lbe|Kj|m$6C(E2uv-6*;qM1hei%8> z{Dg5LgxA>^rKf6kC6a_?BhnHHfF(I1S(cm>A(Cb^S?P_opB{<06%bB~d9a3)vQ~v| z0?7rYE2I{DYRKN8R0LzzDiaHni>+3*4#3-XgOIU)!6~*weS~#!$xvVSqr}RViOKc} zsKOzMD*U-KGGbq5eh!79!8-k)46#hY8nl6v z{K{u`2nuoRkt4*aZVuB^R@^QM?&DqyGfzIy?(kc%eQ6?*?F5OaZU!l&H%v%1G4wMo zcYBJ=Le<~ZZ~KHrHswxr!H*rWvsn0JlW;EV=);kZluRj_Whj@=APBq4GwNtQbS42j%+3a>wH; zgg2zf!}=zK>NfTE3k54kvkG6q+JL@i5#ktZ(GcpWm0M3(uZ=EqwRtJxM8*I~yA}FF!bXVl^Qt(#Gcc?OV-!<+8;$ zP7H7M;U`HRJGeu$$#)l;&miQlHB0^f^oPIwjdP#B^Yfp1_K%Mpc<1dO{K22QaOtZD zzqs;^yDzODQS~**c+v6xHz}itQ6)Ze{GdqC$gibspFC6^=-wmw05Vip3UEJJ34 zZ>POI@BG$wr;A~G@KXlB1_5uAulz_^0PP&bEX%YZ^e(Z};VjXf zxT0l6#^Qu6OR4%0?+3IHi9=Us6 zCe-19P}epX z0e_7oH^ScIN6h|~&h%FcgR$O16&9|2jY3_qBY^kJFI47Q68wOdhun=3uwNB1e6?Us z6^~qdf8qTR0mv)hef3YBD7UPl4{4V%52ytc?2 
zCF(Kq#&HWXLsDV*K$|k@`=u5+3xL0n`h=g6bd;ZaI$*V`DwMvcCSX)uH6Z$V|@x1#7I7!wy$en(EElB;YpPzd2Zy zPqSb|sFA!or}D^#!^sRF>8u9IWuEas6$HC-wkv1-s1VSvXV_S;_t!pz!C}at09`|e z4;)M;Ldwv3NMs$P*rUPMg^dtP03A=Fmr3xIOt05KR4HR&P>b=$Ku-~6p(=!gL)`*o*mG5l#Pv07}Keg&N&+@TeK~Sjf{F`#ZdPhhgbpw zLZ~U8@lK@w1m4F@|fh`y(UFsCM9s(M ze)@t3{VysO@}&rEVehNsf9zCPj8sU+G*uzr-9!a0zOhP5&=SRunzJo0H?O$Sfa$n1v+dDJAuy=m%)W}3SX@(8;Uj1{{Li8}} z+6>MHws)fda7-+S@Ivr|XAqN5Hc;7;7v%LE@4-K}nGL#||oGBM?H9?3WV3*=tlJS);LnC1okO2=j6) zXtm-7cW#tk#kPI>y_WlVW&>RR6~vsH+rD^!cnIAXl&-_>@u`i=mI&fqVMH7uK}}w=%kGk~YZP@OLqaPphDvT+clFu0 z{O;}BH=BbkQGE;!%HZDVAJi6F!e!zruWyhW!k!4S!j)AR0S6oXQwrtKkqQ?aJ6gEl z80kbGE_3yG;gr?m{ZpVM%BeTqr(|QZtr`SoFjQ|D zwHp#~vpe3to5!^BF8IiG_v0v6+HT_(ui3daiaUVO+~0`qZEUP>7s^_`cZ1JNtZlA> zZKu^YhQ32fDqey8Wg_E;DpbPmnF-Gi&lo$8ZFVFVeRn!-Tes=<#4j3e_16eyf!O@{ zpS!Ji*8T3hG=(t;_9o%1)T!tso$XL~P;4>bVdT;V+%dX(7lTtboh_FUEG;_#%Ao`) zPGSF^zND&vDUuZImX?mT&IBiLIRz{ts<*v#QJ0kQ0%r3NY<0|LFj67bFr(F=!<&qE zqccETC8#-99@zQHHgK99u4FLx4qbz#-|Aelq|=9KTIKRED1gc1Q_}(V5~Y^-_Em`&z9OJ(G~1 z@nwWKyRlx3{yIed*5c^ZVnNiB1dDj1Iaf@A@CJy~Pddh7*sju`jdIOwyf>X@RtA%t z?s^u#AyH_i4IIX!3OMyyBj;+x_+Q&*-)qYQ%{&jM)h-i*1(Qs*OVrFF_mGslyix{C zRK+|(;njeG7o#!6W2A_b9G|s-ask}pOZAT%#=E>mKE`IWHbSz8c}^;6U0TiKx~5gW zFmisx#TbJ};faq3W2}q(;|me9Y+8EG-ozso^=Ji21SK7@1<;ks?Jvk$<~C@QgXjo4B$dG~2%1qa6~8T%{LS;zU>(xxRQu(d&SmAvk|)^&HxNP`!^8VYE*;9~6O=~6$-m`C z?*s4s8UKC1{F}kg`WfTe7>?m;?cd_9`uEX43LC$NclN(P+%$%KJ5EF4KTPa2esH=P zc2I!Ow%U?|c(JmiV%1?8m86R`O&5bZo%IG84-qE8IDz_NS%Nc8>myh{5vRsXQ;SG! 
zfQlB6cl1-1Ul{F5KO~z?Mf`$)+LXE0_yk)cEw2$b(iVbNOZi(zTdh{RRc(~XJBifB zyG2-%ItMMYZ6HjctJ|uHRhdN|lS{V7*w?pz$%rNU06(VSVF1gaU2a7LEw+bCaDj1h zzG?ojd_#enLh_pN%e-2NEurox$X{2s)ASO#m9c7VNj+dlDwlGgFHscy!`NGMpi_=) z%2d0`Jt8`9Zrs0pE0S?*$iijrt%@z>CnzX;d5hiQcUv^E`d}nc z$cPiyY}E=#FeQ#NK(p|wbviw@H?YIb*@#|&kiSR6m%&@U5vHlW+ek_rzE% za0m9O7$JaN*q@f2eJjIz!T@I%?qDF>Y75JPlqx_Xi*+`9v7lkvxHiMnAu|{>%ff?LgOb7!PiB8(c5$XRvLD+>9f98E*&i;wZ(m|2 zlI05J6$q|Sq53jkS}2SRP*kmlKk$=L^v~n<_;aP7DwY3T{TGwu|BRg}{hRT>S^d|Y zw`xB`w$&e<`0-lf=jS`k|2*IEu99i>vVWZ*fIZ$J?{V)%Z{54*eUJAEu$@wEVWVZ~ z@~t{$7c&ie5Ck-KXc`Tmdk@L7s?miphEC2*qY1~4Eki5(vL3U#S9RG|O;D<*Mp^Zo zhC1(UYclKDzCG_dN?KlF{6Q0C{PDc=W@P<1Ap+!M#lVBCR96$b0Dl76SDQM2ujOE! zwH&N-BZ6Gdk7^0~oB&MY7+KoS zk;x?;e{<;o%a@l9vYcAd)!(&rh~?AaW4yRs=Z~`7f0Fz7OD7>C@Q+;7a_u~K^UvAx zh43WnAM3IVZhU;d7p{hDcq0~Hr1s?zvaG)O)B&%yaA78(!5j`(^s{>5Y5kmh`wjh^eoHQu z+NHNY#!r3W*%^4d4u@yO-t{tt*PlH549iR3$@)jTmySFaJbV0w;B@!J`tbwDmTTQ( z2aYd4*$-80?g<&)jjm)=@C{Lf;B9xyA!?NK2R*0@W>RhEj zrDcR<>FNpz1gF@Gg80joPy>Zv;BAhGtib|vO70vvtSCuBfbD>?!hTg3qi5j$`e<%1 zN6U8DUF_Cn^J&3BIrNoccWY5nicOGAJF4r^gwj6z$FE##aYf{krP;C*q@wa zZ8$$|{QB)rY}~5Z(lM?z3+ousM~a({Yi)b>&4>8Z;P}pH)k;^ae&Lmj;u~w&-P@SO zl|I8b!IabW+UE7q{Z{)QbH-W~!q zf_8)$?dtvb8k`t69%9z082_8-JzO>|%M_e_2iV}sN}i>#ERk)9kuCcKQ(LwNI82Oh zTj^0VZ}L#K*8!EBBG2pzVN79_n<=(UQ3v*B_@x!x-?$2kM;?k`5F3Y~fn?b-xeljl zbswn6Nw-aaLIscZ=ABLC{Y+dRDbUYl&xx=4z1tXE?a>FVtyXvyt-`u$zcX=N#!=xa zmuHE^-dn#Xt8d0flZ#b!fS)lp#P*$O6RWxC1B9BQOci4n$vC^ zv~@@*Feb9|9ozW^!)Z2vGT}X&lqbVC88+W~K z*p0eUx(>Gpx_h1|5dV}+39J*6Ky-#oVQ5{FouMGYu$aIhfP4bg1Y(po-)>41i{-&r z)JHlmdVyY&DjCOL0M7H2qEB zVWoa~Dbu*l5MVA+4=k1emQqn7MCnWQN^hiIg$e-ifnb=VRy~C=gh9sA(uC3JvfC=;G*A}9) z94MV#t5=$FtcW7#uaqVxeO^KUd|2_Q)Wl8_U?AU1i`X zUBw=RO{k0V8GLVG1Q=)_*I32kedX4jNHIXy%Ii zK0{~G6%S$JA`p^jb}OsuYEyAp1`UW!o$Q0d8!{X56oRwxrXIt9Xve9rqLdEXI~Li) zcbIWuryCxw^EEK3#?7erxY0i>W1ZnE`n@jQRh=-TzljW@*o!$K;3HcvmJa+54*HXk z8+BI2%hHyama`&wE%xYvunAcZm!pYLf3(fcSTL)@vLA7MFqdyAqsF{8!g3`&5iGdX zgjMOw!mhEp&#l&2m3D2VkX>mhUng6VG;P~o84ILu+j?~@kiKo}Fi4_xdxyY7S%<;E 
z!tXMp;t>Eb%DT~8!FS*Xhp`gC`gRJKvro2IoaunS@a@9PfMSD@fdM?Q;XCe@y5(-t zt#qqh@7JT;N`oN@2u(YjJQ_duWclycZ`4+}U6b%uHs{V2T>ooUG6;0^Btf+o}W5 z*=lH$tF1;hcPcfqV&(ZGy@O^-g=FJ(%vQ)zAhVmOKcL|7{#(B#ffGR_X@}Fj=+z*8 zeYO1e{NMGHuhW>m!1(ghVXgiP;b!AYAV5ZFO!7ari_J+y9W*g+V~xn#Pde0oK-4!} zBLeD<`V8d}EQXB3qN*3YW4~F2Epb+cVT_W@bLbb3tBm$WmBCbv4z9NK8ko>q*9%5- znv>Eox#nc8Awey0c}KRiAUis<-OinxRskoKw;u#Nzz@p8ckB8<%)9?-iRbsxY{s{? zC%f;&!)?m;=M7|sUtaMe{|fDD0DuN$L<_6Onax#Hw%auR^?Kuy!OtP?e)4GRwdh?-?{g` z0Dxq-q|)jSJrd`hd-6H=#NY4x`yIo0=+6p1>VBv1M}bSIP#K=-Rjj`4_F3CGe=zeA z^{1xyr`8GgyY^=PpV+zZpWAnH|EXga{=jjKlJZqv6$~ph4BIr6kuj8M867|PAr%2OFXFw7wDs?Z9(tYL?4s9XA0u{4z!n*Qjrv2O2R_A}P{)v_O`OIJV-gio`yB{t8vGV_A<@NA)tG`jm{cj^% z3O_Mw8_%eN>UlFVOXh_6ta;hIYF;yZ21O=A_vdNyrw0iG#LbYYWrXx@TN%_kV>mh6 za6Qw=1fiM1H5)R3_R9b|Bb8?)8pdfCqnhyyqbuth#w!6yzF^_jm&+T*sRG8yBF3Z` zuSR&}OW3-_xKyqf#<6OLVQlWic)H5#Q)28acNxZZF@`&Nz0gg{dwPg*zL$J2_F~)H zhq0}nAdhbvFpR^J)DbZT%7cdS+z__M#MrfoAS0Wxjf$}~PvDMCc|(;??4p`O$Mcw5 zPCu?Jwc|t=_Hk_<$d6$^_c*4x&#M?#@krjx*j8o#0eN&D?ot_Rzp>r0ckih%Bb=Yu zhWYYNvA>qb?(}ZS8_w9S4&^!fjV(r|cuW$=l+T=%N9UO?dF(evd3aY(<&~G+w7X(t zpWJ=YI6ktga%k(pt%uctt$X)PY~5Gev+wB0w#s;AM`dT_acg^JYvr+#V_@%CfXYp!#%Cao_S{y(Jwg^XH)6zlBG$li9k^&D&UgF6^Pl#eN z4`z8-N|Bf(n8V-FHpyS!Op+xa|0NG5NJau3$&x4YDSwuV-?baotdUj9^Ht!gtU-OG z{9?(F6q&|vOtOqcsjfC+s}zYmbqH;E%*vimsH~NQFg?e!DOQFQGg*}cwp^;lFT?z= zV`S3=J%=I^7%xmBcxiJcB^5`T+DOyr5|c3hU2V6>JZmXOuLrRci-Xi(qDinU5he1; z>6@98)AQ2{Gxg}~>cY*r>+|)?E7P~@vzJ%rs_R@%Pp?dOZRmQTexH@cedeyLD%EEf z7w4}m-mWj4d3)y8^uo>hCFX7G^3`+MM&g0k<`!;BQ0K}t`L-`x)#;g)xqC9*GwDmf zsk?+MCaz>MRlmD9bIY2Zowcr0i|cVM-0O==ON(#$@nh=F>dLN}>G_$}c}eL;QbV0& zeRfJ#mbd0sZp|)Dzcn>|XK{65rEBI^edhL5k{V0Rz3HX7>4lYLPu7^}xrOCTSXdxd zrdAd=O0aXbzG2Br4fjUb-P!tleWhNQr6$XBD^pALx2Bh7mxI~WJ9q9+v0%^7I=Yhi zdeV$jtlIVEkd=LDIu5XG-$CH|}ubb0Yk-F{o?V*i@RgF8rnqT^pTzWJB*|y&FsGwA`~) zpRKdm+MwS=OMK<}@``K(<3ZGsx}?!#=P>EZdeFM@M$VQG>MY-#pIhnH=0-UbW1r?X z?a_HFq8V~)k5qjv4bB*Te)Fl6BYl=V9fIitaet97?-Rk^n!fc8i-f*nswAVWDR;5w#v_0 
zk6ET^6XBSbeabo!FWsgUJG^YM1IBtK@o-GluVm9bT=EU}$;vmPBQ;fP?xbsG+9XEM zbZxjbEP)a2FU7m|9lP?XX0cN@#-r+>on@bHsK8R0V9e*tnEXcUiqn&Ll*R%pIzsrG z9hiYCnW62=4n1I3ZxoCw`*zs|k6*~EqN@02bYjHr3{1r?Ka&K_+cm2*4NOXSxuL`M z-=Qe3Z}_fn1w@R(kUc~`Q?oikk4(%;;C1*$!L0IPRRT+1EM6$PeTrK7&KXLXbAn9G zQGufm5PL5Ls%(Wm2Uj~FamqbphfYbVW2%ff7T8fu3LzQxOG^~&Y{{v)p~aEK_pKtO z!gMQ<9eQC#TYbONw+e1GunLk{Cd}s8=NH&&)g)_O)_j(7x0KeG_OYr#mP5B;9Nd#P zd@C9Wvw`VnqurqsWPPWTda8-QndtJZiJk*A25;U(SD@cAdA=)AY#J!j5!j>l4rf=U zIu^J=#&l@(L(fxJlTv1>twX4|wmGNVsI6~~M<@L37Q4q8OdKm?ffF}DmImMumiKB_ zG)h&CqHYRr&^}Jt#m@IU;#19%UCTPpC62U}?c1~vrSffQ7Mo`A^e>P5F-Y z(kZ+O-Is6RIUxm*4`Ar7Lf-TzI*FAxIS5jMfITEltGm`eh;dRL7_iw<4u2CGa&PUfiww8UWRF$nNf+0nvY$gQ4(lLjs%RV96gQ4NMM{n zMrX`Blx&1Ak@3|QyA)8z&d_!$jcn=DGW-}KbRk!_f?La&IcX!RLS*&n7lIDD^8cZM|^=oNb3c(JMoH%x9iX}Did-*ukoo0xvE*9N%J-*@@&N{rgpDl zMyWktF-xhvP_xW(`l2|dr}nUxGpp%Ku9i1D(wAbz>=b*yFO5{JjM^C|Mo{ zG_=Uk8cioava^}z$SY%Pu{)f;qzvUTUx~|*L!0SRl_g=OL*1>J@JPBqdu2+d*`sVD zpA8tZjGFF%fj3zzn7z0<^n~%eiu&xNjdQdyUEI`NTv9NlEB8^rE>8K9wv3x0JEG~y zLuOWJDGrk;^QurF0@Gx1FuYyNo+=u)88Uo$X@;DyYDI>_ZmMV$!&+3Kbu8c9VsGP} z`K?y6N1RP0><3n@T%iv)22N*aIn+o}+C=L5%R?p{NbP3D9F!g-R3P6RN}Z#MxjD6$ zD&{bKx+`MzRdlgpYEWF1K#l0Kk4TVV$`dua_K=;`L2)g$xM~s-DI3tel-9J*qy{wv%5V`r!H z8TaSx%rEyRgZdBy=iut z-R2haX>-YZpZR|COUhU@zN(Cfm8Zy`Oj9z{HL=bzRG2X=M<`u3HV0~#tEpX{oiVad zx`Lc(goTWegDRGDc|s-2K{3mh*s2{D%GWVTX_HXC%3@T7{51t>#ON3D*K0!ldOd^N zOb*-iJhq!+OiSP!Vhj~9UMylvMC`8)MQp8>BX(7_s0txWj6D(5u$RO*BCpShaZQYG zC^27&It}AxF^-DyO(o{(2s+nUF`f|Pf*9wbZo_y|j5A_f6yuqw2Z~(OYZ&*%xGl!p zV*I2M^Oh9xlGrYbu~UpbF|PLY8^+ye6O^|BioJhOCck|{VxHJcoKLA?%ui_ZliK{E zHb1S+4~pk!we`bd{hYRbM4MmG=Et=8c{PIXN45DG?eZZB{*JbOT${tv;y@J@rJPff9dA_qMTiZ-M#1(i<)zVbr&91s;F zp@?~rwimR$pb3f+BKG1{TgpNijH8N0LP6T&C%hBkk%(JfxxNW$ETiO=hrB`H6Y?Wu zK(7cnPptfhNS1*>CcKoyrr#y)n65}%$yllqCsVS8K#6CElh$76u-@=I+&s@W^+Ow8 zw!Hxru8`+Ep>V=AQl6_Qy>5prWhe=;(2HmVs^M!w9)wlgb=yfH2wM_95tKu(U+D|O_&{`!Lt|Uw+-J-OY z(p)M^pzFXP&?`Bu^u^8aq=at95&g>I*|oIBGxzEXE9*&+kD@6KT+*}((%d-32h$wO 
zmt%6lrI@0j^LH<=Kwi1oBt%xPE+kZi)?n7vrTOf&lqnW9Y)>ULuZ=lsUP>0+n6lY3 zJ^RKglod^D&;*&A(?TYjo2k1o5yP2Yo|&6#C3afZXOEsboLTWT*+f^fX9Ma}LdgrqF3k%ua&{G>)C|SW@siLnp;dLZz3FeF$vhjHLH3-YbhU5W!n>EVcyDJ-NHZLCOciJ1IqLR*6q*O087p zVmqgJZZ;u12Q8KQOAwY*>hYrq!-X}?RBR9*lTouVz4%=bgag`H81yn+${Vz~B)z_& z*Ru296Ir=0B`fnO(kXFeDjC$hsV9Wjwf(l*>euYJscVA}t3Q1%p2U(CO32jxI#uuq z0+pz*O~13PByTtG=YE-47sMYf^PR$#y*@yH^)3W>2mCjCt+X=%Q^z z?}GTn1<;%ycUdoILJQXf@T6NmfG++(oW){1!2}7#oIL8K7JE{SjcDivW~rn|nvXTO zdN*#35G74&V|m~HRAMzdmYr98^Qdor zlf%)=zFG6U&7QZ%%fIC14}0Eoo_EdjzQOtC9ESwod{Ucdw0TjRr!YP5tTvy}<^^qz z0r)WeLmq?z>lrWDX)SuCKI^=7(wemJGfn4;7yP(#7MxM5LogBP>l@Ywtmmy+csw>v zZ&-J%?G_9R?AxssOV~VHPX|Vr4^#;0FzgSka8qE11A!9`25vYMWWvn=tA>MYSP1;E z7zAMygkcHBP?^69e^vfEYAWpHuZzDP{(6I4*car3?w~)|6buA|!BDU{7!C?SF^Gav zP!2kRN>B|tgRY<_=neW(KG6sB*~y)mXw*KM_hU;fAHC8y<(=5FJMYGpo%u{` z*_QXTB`i9RWgle(Mf-$vIw*ih8T0d*;;4Pjc}iZrF+Z0n4cnKTYw}uv2n!qriX{X$ zCweA&K5(^}iKYUNzpQ@qwHatL6w@z6FGNNd<)i2Vwa(EVh3IL`wsL@*6@;#LT{Djw zudkanpTx;Q>4T}&WI@3dscP2Cz9TRrgcsJN7>>Z=5Q2NcPP4|NR~wYo4$Wwi(mQa0 z?g-bbXhd~jn`9ro!JAX@ZDANVn!mLl%A#A$RHU+`6WArQW(BV1svPNrl)M<)n0Fh= zn$a{2EXY9nrET(7Gz-fHXq}m%9*_4Cc2)?&QdbDr^O`f~RN%~+LSAhg^<1wX5a{#v>04Mm?65U0yi$Ta4yoOBJ68{@K0Wq{!y}O5CEWj^A-Di zQ+ko(K%AFeL=MZs4w42IvaOKSGkFO6^aWMbOdSVe`FK=JT$e3VBUjSl4&J+zi%bjx=AoO(>yhaeB-x&6i#iT%(~Ir_bD)y2H4FEE-2JqBqU$tuH&0 z2+?+PXX@TxF?YouKt61L)f`WKjOeDhyZLg<+>^e*NZFg(?TWci?DAz~CYm$%r(QdupZ;ACoSCAvIed3 z6b>AD7V*5g&a)JImhimsAkQ+MZ>{qz$1%n8Gql^9OcP&8_Z#bdg7k zDxN=F@7bk2yK27upRf1q(Vjhc{)-2B_Tu?x>pgpQO!5485Au{`KgI;JrlN6G!dhT| zdA(;`l^mYmUhf%KrC1B?UtjMTSEYpK?>)$~jOQP&_l&C|o`3uxPf7Mi?Vi#@;u%${ z^6%Gs3WCEeN>x6~WYp3K60=KtN>zSxy=Ra1lstd&L7r0D-&*h4t7D4iAGCSGPIZec zQSWa3xrlwVdrUswg2mR>8)w=}{WRZXP7a(-ejvu8g`YeAb+1PL|Rbmhjgl zMlCc)xczsQiZKU(Ty&c zk4fN7Yu2+gZb3s**x$u<(J{j`=J9utO*IYt_BtR|=P}Kp32yjpb=tu(O(*PYhz-9tGsRMK-0Sak=P90#lR<>{I09`fN`%7ch zjM#wyymu$=QR?2UBaRaBz7Ein1Sq8edUSv-1o%Q+o8BZunGkwJ_Ew!Q%EV({NLK$^ zHM(eCL=NUDX&z=U1aF>Z5_+yH4*;mbhS-X303(y~M!D;!tcX#`6ia!kijZ2AqpC5J)eIFfhRms(l_M}7n+ 
z7V-;HLhS+@mQ-Ot3M1JBT9p1`xeu$!qAOg}gxP0>E_-g_#$vSQ{o1~A!hLI{2Nh#c zfTjpQd3NsJ+$?lkWY?Au7MPt{z1typ_%^~XaQ7RFE`kE^Iz8ba-MPDR-?}sXwxH9C zZ>}ybt=^ehxdlh)*5dr^I=Zo=3A`H+TU(1J;N)^b`fGJ>63Yq+F%4qb98z>|w~)%9 zRJ->e=Pxd`k-wxsoQj`-Q$ z4B(y&Q^EM8A7vvS3JiPiaR#6Y0W2bFBsf-%sH}2q=f@*gL@bREyjTQGvoRh8uQut2 zP0~S_Ig#R$4UY7tZx7r19X2GsdD4#xa`>Id7@$NwkV*i)f>0Rt*}w&GXn}vVA`7fw zwgddE*+O$LHRLith%Lx;GT0bKea%8J5mAWdETJ)R=CIvGEUM)z$-0A$Ofpjty(3gk zM)q$AQ1MzEZKfs-F`46hZb{)g9iW#q%*h;3Vc?!d1>l#{t&wZ?U<>4o^c`@F9Eyk`aYqxNIYBvs1*^#jn=$=1$kX-Y>*$u!z1+mQX7b2(#;naoQ@ zz$U4t2JOgka_UU3tVDd4j~tk%K-oo1MiN<_ZG2>%PRG4QYSNF|dqg&F$YCd>au%PW z7z(JF%=B*rl%hbiGwxWB4Z;p^#Z4TaG%DRegfX`|J0+Ag)`$s0Rmmq>_FKQJY)^4q zaE``bJ_JK5oi7Z0>41m@V|sxU!wCXRAj!TrV!tAvW{drT^NM5xh!U{yussJM#vQfm z&RpW!-YXBT=~Cl_LblQnles*$u;R#?Q!#$uZeMGPR@WZ;N!NM$K;~oWzgFJAFi*H& zvwZ(smK*+pHJkgaZ5BR{#52HKI$kKfiWLIzTZsG}tBoM>koKbk1AF7!3GThX7%^># zySC|OL(D8YA=1f#kFztjE95Yl=b<(ObK{RSK|m=Knb|&TXg^S-tSus1CukJUY~nYX zOF;2jir07^i4H`Ov)AfN!Z5DSx--Zkt<7BudgU!0z8kz>9y zeg8Vhfx9=Crf2K<)N{2ivSux^$WanP#Kc7SY{{o>dkbR$pbo7i)2iz>#+gh6H8uyagfW2haV9plAGKJ}*md)UIFyZ= zrrv^&LJGicL>~fQi*U%8o=4o61V$brM*NSiwREC`=E1kEUsM0mwLkBDRykkIzT;=U zQ~qb&-oM%Ty8B=5`hDepY4?{@__KTda5VR$tHr{fUv-RMR%${0LuC$`o6X0}dGo&c zb!GmPGJjf`-&N+%Df0)){0(LNxHA5pGXB0YzORfQDx*pVD^kaZzY>3CaAq2L*_1JM zL0x7T9{@+$_y8TH#EfVyYs?zKhAjk-nd{8xwItugt^@jE$UZa!5}qf>%woYdqI?MC z)-*~T6Oa}r|6p@+Fv57)FIAmh_sDgM6JW;}J z5|ZLCC@FbOioU-DKJ!~j%wJO#%wN&wuWR#HRRuie*Hj6p<}ayEAeV1;g0XxRGSIdTiOdyRf_%EF^rzlK|@Wv|9LRGC$5S4`c=rA>bCWM z%rC0vd6+w2z{z~|vtV%z^W*9^!K|B)KWW$pzs-izuxGxbz{T4?roN@V&5UOM1N9Q` z&f(|TOuARzS4Fn>=9^2*eYY0xV!pk+h*(44Nw%eScKG~j@)&t$N*+7U_sHYs<0s_t zarMlzJpJMO?^Cuv^5Kst9!DQPh570|(CB{sUK#VPA6L)w`0!N9^tgq*Fx8wE)cJi7r&|y4xGR7j=J@Zn(umJ z9@*jf^Dmxz@wM!eFV4K9PIq1WMHrDDItLJ%=O}{wy4X1nc`QbGaX{KV4?q$L_Nut@ zCPM7UF99>v(aJhnS%9h9F@Duvg4Q%Vz?bPW*McodUP5Y8Ng3?|L0TS3AX_wRm8ArLocilM^}#(l=d1xLZm3(l(N% zoFl93Z7ojQ{I@N_Q)_KX8>i8g@7NL^Kaskyvhj;pTcfy=T%1(es>xSzNj~**(^RRj 
zWa4?q>BkMpx2<|?l{{%EK3}RffZ8W^k+4@Z8i-i%z+I*zf9M)0Bnk#NnviHCu$gqF zE{o7KIu)l;8iM+wDnM$*7ovr9vK)Y+O3?OvOGkD*nfM`)WlqC_$OYRRqnGJ;Ro#1N zd+D21DY%C8Qb_a=P>C+>ES)j7NVQ_ra8>uiP})$pw>B(acR+CzeG8n|s}wEnM{yMe zJ1w788ba*y)ZJ7p^2e#4I`A2NKM*|xf;z^Orqv%oiL1%xPAr%bN?tVJIinwZ72Q@9 z>M2bv(Zww;hj!7>WZg1Ao29uZG0@Am1RWX%T3~?ED#>@ff98$@iwJeC;A-jN2u#5kPEwJ1H!CS_&of z5UE05>|`vBNKwSF#fdDWk4i2+x?$2bR&nYC(!D(*B%PlBI!WFvn=h44G*uXP(&eeS zK+Wlx@<`rdyC@M#`;C`;Zu!D87OM0_0hm)|%Ag0?$)*Yk=gh*&(*5_2;?6z0G>5LC zmHW@F)>j{?C&JU(BCgeEn*01m={Rbh13DY$fHR^#La0F+`rZ8i_iOt~4WIL3enUwlU~CY=pdAUs-nsajC>p(|6^8cLN~a6RS&TIde?Mu-N)?wAO3c!tWv2 z@!a(6J<;wFqkOkagOf#d#lm7kZGfPuMAJsSiGp4(p@v~;Zuxez1k0_k^YuD5I+{8* zT9ARuTW03#)8Nqo|B>lZeP$8$7E`$06O`h;^h8!zgFPPM{q8E*%a|w!j6A_<&dv#< z@%pOh80kp^r5H{PpT7gNe7WJZJU_jBi|CDcYbk_ui$lkM0@K8Pd{nQ(CzI z)V0bw9~_%U%ohCp#<=<(G+)U_0j^$Os>=s#Kuy!*X_y>SwW5s1i)a>z8Vw~0h*FV9 zLGybBpWke!1QV{2`pb@{V77wy^KHs0+Vzp7SpNJC=#I@4``ce*;JR+CDH`Ts`b*Y> z`Yk~Hdj!~@bX>Y{sssb;Oaoln`0R2te{T_J_O6y3nknHiDjv;TngggAiu{m3Sdo0Y z<(U?*8G>{+luf8g@MbXD5CM?osNI!hd)B#VB_C5WD{t?9uotx8nfX<%2c=Pg<>3c} zhxnki#)*QoH3gvb2vL+%#V*YPFDHcW?A!u$52PBFH$wtnJ81!?jP=gM(^{JdMhD>= z@)*s8C7GtPz_{0?I=nu}x^{06C6k zeck!`!p)Ui52YH3kVjUeIGC-L?r)?>g z6K+#$la}*cNhG5!VM3QV(4Fn>DJ0AlPRdNtBO?uWgE*;4_ch&@9Ruf=UI8@7ttLw zwaoUV-r4RIZ*8E~Zosh9jJ!lTA`zHqEx|?t6OUA2IhHW}gaw&)==sIB5)qk3hkht( zzOOj~k`|E0&@DbUTWNYWX1e7Tdi6S-eMd7wYlXYpouw_499pt(*()aP)RJ0jTdgON z$MAj#!-r*X)Z2$ue4s6W?0uGu3jYO(u z1It#?6DUs2}-YiCZ2(Ke!$-8>`NR%^sf|594Vts0fAuX=o&!&1yr7B7R*x<0&jc0 znI>9iL<0+$IKXYBAd$W39T=j3(m#-@A;K5@xJBB+CN0o9wXDif%kgL`x<_6x(qX2n zh>VbQr+F`(fVKX*;N6RSubPYM7SR-wX5z$|zIOy3yKaOxtEC5zT(Xt|tL7O7=ceF5^a!!E+>0pL& zWf;%br9R+z&1is5U;{o(G}2_payEcL_`F5n`L+BYO@S5`@ZORgEd0}DjqV4!V%7>7 zdkefQQJ5ss1hL>lvW1zN0^JgtU{1w ze9;oU4G^P=5giOGA%y|4P>Ci&LE0Pnd~_rfy;EWv3haD2Ux^NcVHkx$sQU!+GX*4Q zD#+5n5=S8C5FC2Z3x*n?Jp5xmmnTYy7_0lml(OvgMW#dt;9HXNVNe3OXtyx=3B8jy zSm)5%(ZCwP`Uk|nkGQS?&-9QtqMX(mbwh+kWRgKyRAZVMv1br+(D*4mqj%y!lk$huk5FnY8#ui_0gJmf~n}`Ha$%94#)NR9c(^QY#}? 
z3v+0lQl_seOc3yR8OK;fFO*2WFa_`?bHKQC578{u!I)YRt!7D_c$CI*NEHpsX0(;q zOo%P9C2I7(XwmE>Ituvo_ri20%VbluYkeJN7hZ$ts?rr42u-HC(1ceED8qJ77BYY3D~`?A^y!K|WJ*j^2oVTEPJem|P&}8-=vW2Z3uP;DVSkeL-RJznAz6Kb2Ett01 zL|3;l&*xZE^R&7#P-l{l=c^&nb;i6?8{lI!Mj-m|M7l)P2F<}Ftu@tYORLuZ?$R0} ztq+X*YO*VAs*Xf2j)tcC&M?@cP_(R_>=G4ZtxICES_tcBEa$SS44p2ogD=IO#x*z7 zA3n&sHqNv@AiD_G)MV`^wxvP%0<@acipM^LMl`)z4~RBBf);a*EW5fjEdt@qhAJ8i z-MowUV9v#x@uye(%`}@mtd+fqKv>Jy7?EB{2cZz32UTW9!{#VXU9h-h&Bw#c;=K^p_cL|x)58-e{#~?O5~3#7KEe%BKStp0M|M!Q=qx@ey%&0p~UfcafsEZ z5PFXYKH{M;a4UQjef6#4P95>>_Pv0k_p@{a+2hJe$v!m0K9npnbdHMMhm+V?6LE6^GBPKN z8XujAzY1r_hnYg=r_D`3GfNpiflReC7{ zX#GjyxTv){sq4hXZJR(q+1b~&+Y`>HPo}jUfjeH?X|p?E0Fj(?m?aa#C1a+5w$V>( zD|0vHn50Tf#k-{aL=w}kjfZBAc|Kb5Ro-OaV|)?~i#2v6w5oY}Qezj*Jw*PRVjD9) zS!ThbE9f5sTjK`z#CT8hWVyn+jNl_|@+62cg4$9vnl|E~Wsqc;7evYnPrdbNdUrfg zrgyZLU$5ifa*TN-aK<;B2=z9=XBwa`AKoB3y}{f^3SU>78YPp0kl6Y?MUP%Y6uTz5 zokGqB55&Rlmr{w4#fAx0q+RsZWO<{l32-FP^rxEz@@jipw-Z@joxCCa{iu`68T58h zXX?{*oVzBvGFFd1Ncz5eptd*g0{4Jy(coyehRA>r_Wg{6U~@=_EGkyvxT2)jLs}Rt zy(t1v-%wjKRkBmHeS@fo%;Z#UKgSok30=Jl^sT6d1RhHd;_ePQ-2vu3l($Al*zs(! 
zO>%KhcR5=4%hB>uKdpAN8f5rGoiFXfDaUtu!fdm?$WO~kv0VgAeEZ}iJFN-d1n8_L zq-LlCK5)961G>us(d>1s4zlvXB^iFh!O)akM z^2>f@*uE)(F@8DDG3?G;*y+2Sc{g9K9klo8=b;M1_MJhYlNNi-cP4`FF293<6>%&| zJ0gvbfPE00F6ZqyfiBd~Ndi~(lLR6p>!Z%lcU}cBTkQ!;yq7vcHjl_BsHl zM_7(E6^8*^BP#Z@>UP|<+IRWcSQRMaoX;p~cV!!sDYbxnMO>Oq=WFuuPEFEll$5&Mesq(oz@X@(kc7gZrPNy7uGyX?S5WeVf!%I+95b!_}O6;07!{F z1|UZ&$&FG`k}XWh@;p|d^SRC@`YWdkisIn-nB7wUjI*uLs-WUC`p|(A%jiCTpH>*3 z5o3j_dQ?>oUpxY~tlpR_34kN6PeE|(HO%()E9h)Ipx6{(xarUxA2B}Osulr%ew8FkEO3HaJ7;t!gIf* z(Cy5ibZC#}yvs!F$fQI=a!l%D4*_k84o_>^ImYklt~FB<@-$RXu8MMz%0ae9$XW;o zondM)ezSqX4)8bKyj9t0pYWU~&t<->exSVHH&6Hw?gZY07^cy7@u9n>pQB z>_=!J`Jj$15@!j<+CX4amid}&NAxaATcoEY7Dww|5I+|uy?aDQ1k__mNjfE3pCQVk zwVQQ;7wL#->6Tsuw%+)^T5m5yNT$QuyxqQNJI|iad{g~XxxMUl_djD)|A}1*|9ATz ztK1J9h6vIq)WSyG zp~Lvqvn>J-d(FLft?#IBx%NNizJmWd^GBJ?KP`N3!27r3ue<+z_ig`o_WZ62|DU~o zP|p3{neoEEIb$1dsq^YTQf9xo$s90un&-@m<|;A|A2vUt%pWTAx0P|*STNpX=@DUv zUjzEhWR4U9hY==q{t%g4E9u}MBge=SEz=p(@QbmGLl`05(2+=Q4c9p8Rq3auvgZ>d-0<#*JJ^7gyR<>BnU za~$*7aqRBcIc_@fCf^`p@XUe>O?lhiZX=xCJ7}N{`ZP;JZmYXp9{ZlWA&&zWpOVMX z3nCAhyL9mqkNn`%r;!lY>Ym3u`V1-NLD6GgdRFXn=Ws9dzE&^_`(N!gqTXr9-)9S3 zjp%(Z^ZEgd(%F4`jM5XAx$5$jV;J?vFm8zL;|dwV(kHa}No{^go1a$CbT#~_HSzQr^GD($D*#f(lxf(sPv2-G%CHXyGEt& z8phCMgxKR)w^6AbI%-r7i*e)##$%^W8L?PC4e%s`Z zQ#;OWyE6FHwu{@I8GLT=^5CVxr?;Kh@z{>XcO2hw`uh3VBZE)PUfA~R;F%qVyKCJi zcbuI&wgX%`B9l;108|@ZA%lizq`L*Y%TK=vNSFmycD5V-)4@axL4wNzZ5dcKs#>1% z4jf!xFb7syGAQw|I5&_j%Qd2laSYM$9BRn~CS}C2OFD2-+l$Rq9n0$g^PB_moJT4_ zk^&&C^NuM!+ODZE*_X%FxV&6-AFvUQ{gTVB%IFMLe{HzvN2!LBh?5JA` zDiyT6PEHUNT{K|ZK)1beToOrAVCK{+1~;?kpt5eKG^GWYoBe%KySQv9%Eq-!Nz=MW zYCy|96sT?7*uokmRsK4487OdP@-FQxq6-yq6T8@BV!G6gGVqW}NCWNWbLt<8*JDe9K5u;XW+!Oc zB&2@Rh;(1W)or-0xaqRUL))n%O2|{I=%*{B@O7kQIwb;)mYC@`Bymd3$)&74nuZ8d zz!Vdy0O@W?L(MmLj|z+m)HVzLl{RFU0i zQ+#fp0yqBNLGkPNxO&4Y7c;h=p~}2p*0fqw|}ebZVjV;i1p17v*rN#I>V-&+q1_6#4+ zdzVn~M2fg8ks@A>FDwRNn6&Fd0NoAjwp@_cco86XqBdPLNW}Q-C_b3Xu5u~>1V43W zxhpxUfI{W=#<@EP8c*K=SqW@=ou2hdqGuhY#$2eF9ErnNhj@H_W~w*6FFDDyU%Z@% 
zBMbiefk&Ja_pC3EY{27C(mfzBk$lN1JUM-DTIb((a{n6!JSRBjP~(`BmUOSB(?H*UX2({J`@>+baZjnKoW9#1p~C;eqDy!Ig& z+HyOF7HlxrAH*ovVj@P;s1Dd!EmzWTn3}tDcYZy_mh9GCS_v+HUXOvlUGUpRX+SbI z#XTMX%B|4I%=hhAlxOv&0CyH=>-`NI6%}ckc)o~1v&$n*-!(M0#sIC?yPJVh*JP+} z=%eqh{VVBEACr1FP^WF(zU^*;p$CO)h{$A@84=vAyST6srKO^WN`ul8->%W!?#LVS zsh(Tg5X&1Ql+nr_Qxa!xO)uT7x6vlatx2d%CtY%Daa^jSWQd7dq?Z~=#ceIr9j&lE zUQixDt^=Nsb@_|x4ny?ahbO~ zBxH*!_lM=ZMyLD3gSIJme^@cwwV`i)NHNClg_B5A)_ zfR#19h$FoUq1gpnnye>j@82juLz!Gx(>IxR6PeqVTv$6>ge>9-Jy`*pvBkMXWMkj~KB<6C zOATAsi8$HK&B!cF_otc_Ym~dCK&?_Aej{eWn)|DZdo=EULBQv)gVFa$Zb(Dsqg4T3 z$_=r!k|cS1)E(TJBnnz065Dz<3koN;bHBkU>x}g)%K8mu{j#!liTFiUoKLUwFwNpN z>uKw4>o-x>@upY4;2FmKGkqhi?wJo8pIdOuY||G&Pvpb^Au3F0L|rnESM3#?31 zB@U{xM3MlmptyizCa>4D2mu!~Y9~12yn}VoTK(nzWFUYxkpH+PP}KoW3}5(OXf$gi z42D~j_qVvbrS`3HkHfyXbav3g{69|CN0{TZn!ly)_2#FZ*@$rS;;{ZA{gQ)NW z^H%flvB8#0dgDn<%@x&hM1B$}&B)eq(Hx10iXeRui2(X#jUmV=QIqx&FQ9wmbW?G(vb2tr#H=R(z=R-hW?WGr5(uP4KC6E zZ$lIFo$hIrd0VvC&EAco&9_JETU*TQW`C>GeR8^EqPVKPGBf5ttCNIUGzVLqq!sGs z5KdQ7`$@^I=stK>ptkhJ1)4EM{8IMK5;5+UmNXwBX3b%Vy0+4H+e-e@ z(Qda#6Hs$ov}tn;rx!j2!@KBK5EK(Au8X3W17RO1x4u#2ki4L8Gz^`NED|RnXj3g8 z#8nvF=_2DjBh1zK2L#mR=3o6I9aYGPunSn`a62%i?KPw@P;|%ZwwxVo;}zY;+&^96 zPL)O)gz&4#naF+2AkJFd1C4J6M;r>fK8#)rcl*18!yVxvI%qybTnw`xru_~9wmlJU z@kLrnO1Q;-o0%{!Z?4j@C9F8_%Z8PpV*h>N(dVOUZeKD757}VKwzzwe*O9R5d=?$@ z)u3wsyb78gong=@8yETn;Vyr(ziXI4{$`Z}3R}oWSEY)ZTcBr~ogZ)Y%kQATJ86p@ z67bMOSo8ZQxGiY+M6jtVKptg3qJ{ep)C%FgV1JMSR1oxpdvp8zz2YX*ZJ`~SfKG9# zPl4MO{zN%{rcu*;DIdu=avIF#RK+gQ;czb_ol4TC7nsr)dRjo>QF)Sh5i5Z2N2 z;Z|L~P^L&hY4WI4!mePH%aX>!o?!0*ZaQ)R3C15dAM6CU+0U{y6dddTf;j^0vO`~T z)XgPFJ@FMl@fAqmwu{nzMSD1%s_}J(>_0c1|5<<>UM=6^`hPMB7qhKSU^f3h0lj8h zEJ4}+nB{!J5{>s;HLzd9GzwfUB7;fhA&?B)Q+r261LnayS*GYQ)Bz9(5_N{M)M|YG z7nCWQ4QII50C_c4i{uKRT4@kvyFOZH2Q}(M77!7(qqD^P_5=tUgMB{2+gAI;(>OM6>Zn&>WmW zQk3`~h&TaT8#uax7K7EveM0E*#+5d)z>moeS1Uiyz$|ou z|Cq)e)SCa&PzzjU&>Vcpdf%T}_Md=y{$K2m+L`~=`{{!B`L5U9A9Wq{ztio7-|POp zOzvL|+Jzqtn#OT8YIbr1d!IRiZqP~dq+m_JFQITE)n7vwNWkhNV|f 
zFuQ?M7<&bTH!j8=G4_*@aafFfA}(DMV~5Nsy8zV~J4MItfhjg2M72~WJ zFADBvOOgFbxyQ6=-uWi*Hmm$}D%NQ{@uU53@aLqI6iP3-$@yX(+Vd2Bm`T(s?t zA@mS>M+V`rXAf`Tv6WJ!{y=ws?W%6Il%4YTX*BJox68( zJGOGM{myL|Ulow!ND%_KjlfAHg`VhZqONttM`V#qC4~>7ABT&Tw^4?yMM3 zi1DNt=k{Zq7uy9fE{gG#7*C7wj2O>~@thc!#JDUmu88fb7}vyjzV|S9rXPrm@&0`S zdo#PX@7=YhfBeYqBZ#;ol}Kk7ZGc`;*z{a-4xaI05CIRCGCW}cWw3SN{%)T9d7=># zOta?#x`4x+<6)q&8V7c3g2d1 z1|X$waf~4_@CncrlDHT+Fi_q6)l+UEXLxHJfldsr5SRhA zM?h-he>!z)50*s=#ZBWK4)#X`a}nnUk~-1;jzJXtofg;2bFzq&Hh7Z4fD|;0rwJsRTuW{yk};tV&VLfBpRY zd^^AX-Lv%9aqri%>aDBiPQQD0eZq_X%QN9E2wd%K_Y~CaUte5WLQx5-t2A5v5%-fP z-1OY+eW;UjGgCL_>hrVjol}jHsxGaeTWj)0Q%H;I?nryIGES=NT%@?|{LhUTp6`Lb z`5<2Ad*WI)RQS-^G20h~ehaZOsRj>G{2KJL|4<5r8l@4AU4uLK0FLHh%BO7*>0;_+ zqi!3rJQI%NUmx!OttWC{uVMsI-3P;>bP}N;UOC=L%<<@PlbZ}$-X~3`oH6&;kZP5Ll4?+@Lt33ki^EZ^!K0bgcyxvvSV^rJ=NM`{ z+8oZK#RG@5t2nX2-b6T@#$%Npgp|1M5L#lNhxEK z@&A@mN(F+Ls7pCwUAb-lBb9glP<`Lb{Dt+|toNzh>+b)^t*-w*_iFgx=Knh@_x)0r zF`#y;C(RO)LOUQ*oHPzXoH${eGR|@egEGM%_kkc(myjxuBZ5f5fb0Oa8$mj* zCxOf4Lz;l7%duU^4+SAV-M?u7 z5t7M4#PWKf-+CQU$;ftaABNvqfXtQSq#bNW3^zE?uZ(c8x6cSS=knZ*;$@tw-D!1Z zGj7K9MAX8xVDxVBjnJ}H<;B)r!y$}b{*dKQDj)rguLhBEQ$T;XI+CU)Q3yC1X zI1Zxtoy5Rc3!qwy{Bj5r3_WzQ^(nYW0z^-Wi`?bXE{jTDI4}XS_AeSGE0woT{BWaIhd?LBnVGvzg@HT}MC&Mv~n*f(f={vZYdCt3N z5*8?L``I&SLCh4^pkjg+3`!aM;te3zsN6)~`?6dmC2oz4V3NZ+pf0D-nYp`jA{?8P zt1ms1HIJI;hEKrZ8(}NgNUF9;v3ADixm2oFW5A{3Y-@Ly8x`l!x7sE^8~3v{skh0& zJ{Z8MF`KA?*IJ_+8%jC91^lH|eKg8)qXSzL=JQv??zBU6-UVnTxx+EvzH5(~XQJdQ z|Bb+0HrgK>W&z%Jmv0UsA>^CKolREN+UuKpd~=&`p7+hIzPSq)w{M02% z9kmWyN327+QELl8mM6fiAc9*%86>l~X>mEDbxdYzmQ2I`3WUw^sL9zIvNI4mnWWiQ z*x)j@kDC3Yu)&jorfppYau8F1BB*bh9U2ry(5 zz>qQs5L8)&-GD&2$I))t$q^9pqrx@_U8NYy-|74zUVwF zWm03gzM%Is0FTDTrGs^(kDZU3>m!QF^+jlAL(UjOiD8OiiD8T3h~ZWUnuqt+#Y;w9 zJTbE39Lm+sL0`~`11eZdn6fb4swln4$zH*z?1a~24x(vGu1l6Lq~ literal 0 HcmV?d00001 diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000000b 
b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000000b new file mode 100644 index 0000000000000000000000000000000000000000..48d06a9c45a896ff74d828c894dc4e28ded40ddc GIT binary patch literal 19826 zcmdsfX^wbFNA>hEN6*nS12k^pW)TEQU;vN=36LNT9(y$#Jw>1wbFsUJ zB-WO+UfHsC*S5U2thFRdme%yjYuP{K2>)FFkR6VY74rUZ*rAQ}4~Ih$-tZ9)9})H{ z9P;piUb~xNXWoBi*JS&g)zV~t%+w!x^*k4zpQ+1k}zQY21KZ&vuxv z0~Qnmmd^=R2pxe8jJ?1({8>hCXN-H^k_b;wB{RsyrZl~ua6PzJeZ(H1zVTzF2`1)G zzK{m$}oHjSBq=<^R@1T)d3p>qcBM0J8yGY9VMUfl1icE8i@T6f#`yt|vd-sXp`_3eRu*NlO@yWQK6 z9rV-gpc#YMS&cj0_4ZmR^+CHf=x(ibvmf2_2T$TaVw#% z_RO4!d#(49iIUW2_u}=<58`5fduwa6H;AcE=tW;AeeSLXgUtbIr}VXl8gpu+|6bhl z2Fax8#-L2+hcoCsY+<;(!AGr5GD}=D7#o--*7n)cV!Jc5ZDvcW-S1;uhkY|$Eo>)q zrj>TZ`5^AlKn5SSy8{eatGBtizUR4tZTu6`vG?xz!}gGoSX!i{mc{{LewN0*o&1Dc z(fQ}GhH3ja0v~~E8e4-rBxbEM)=4ge^`h1<>q4D&nHR)qOA7UZK-@`A6w0L61vY)u z88|+Ra3bV+k(IIW?ug-Kfh~EQ21lEPR^^|~UOLq6@g67ga}li0|#X$KVOAKQ-ksxPk6Q`J*&hcje2an zJCW<_$nyo&=G(q3$;ekH(M+LGr_>$`=b|9W`7E3$F=3lXYGfp$JZda(L5I1d0@FBO zL`6X#@EDp^v@u*EjX_kBu0lEB1Dc)~>qJRaDb{NGGMWe!3Q~C$l>^HYzEuzvpZlU@ zMHQSDjGZlFjJeN>3guP$3mEAkbgt}Q!T z-{<e88=_x6L{p978NnV z%vJd6T7k%NZH~1?Tnq6o;7~j;CulP`1||@OYkMxkQSK+l@XgKbL0q9ttT&*I8JmCS ze!IKT>aMz3ps#5YL}cHm6RAsk{N{!i5AL@*+x@}jx=+D%5NzNaqHsUn+Fa|lT{?1l zR6`zzL{hqW4ULN-41Wa zd=?fBS;@jyL)NpfY{(@O_L-v^SiD>wQb!!SDO@Uj!2#D_!s9Tw;QfuO;0h;kDMmWn zxB{*+VdWrv!O1o{L0rD}eCC5NutJyihP)LqfvF9S_|XKjZP-oUqEm;n-&+`43xNoZ zSkXTe@;{2A`d{KdaOa2eR|5UB;k)*~4O`ytmhJ{0PyCZ0_wVKpu_LU^j^PyftSEw~ z#$JerPBN@7%xPK$nEH6I9WfhQljAT&!q74;mL-Z^r2T3?Es=d#J*7s;SQ z9WIs{^5Q?DEVNr&<_Dis0(BvGadq`Jv5K#6+}iG1E0=D$E7{4DOLocY$&pjs@%C!` z(7%!$Os{WE-)Q&xaZ=Lll(|gvrD93h)RLv$Xg^%rY_Ap*spBHCkw)qWlZd3MYjkWq zdPv&s_08>#f!*HPiZ@mhvE%WZxOKPF&kqk$b7sztUd`^=#-5om|1pS-xPmSG{np0z z`sfgN+`+t%h%%IW=P3>m!yM9+DZ!}9G+`!97(dywOkSGyAbyyUnNz-%HZ3ipk&-=n z_Gbkx?xaYw8&)7A*}+a$4Fp1M`D;V8iK9l z%h$6!I||&LxTEK3;&hht(XP+zN*eET*+^{=lWLyScgMn-Juz 
z*4@}i!cky+5~L8H5SW*!Fic3_dvPc3en4WDVPGn~`2Fo}5Axs#5I_+NxiaoKJqXcQ zuesDu1|#3ca>B@9K9fA3qt{_UPP!yQ@Jd~9N)Sl1;Ys+Fq&_k?D;~4Wsns0oz4of#P@$g<0yT16ry=ZrQ z(%NVPB(^pE?D0>ElLT+F*N^yeOP5oU5tI38C3j1MsWCrGXL28yS2_a09DxZE>Zo{1 ztXP-D8N3VlN5upVWp%+5FNqgD@rDR+0`gNhzC|cLgPaiM&x?!VFphC?O`NnGA&>Lx zp16tg^)(Mjf*KZh8q63fS=dh;Z8^7f85KV4JWqVF&KP077zj$hX2XgbYLs zQu{n4dtX92CYfB6XgB~ommu7-i=knbsdM0pl}~~!h>?a^QEwBTjW`TtBC3z?(DBI=Gw}(|%16$4J(6-Ck zw@&a{V5zb%hdp>kJxh&3>r3E*(2(VMYC8>u$O$w=!aWFiTcYD)n60cPhuz^Q&$mj- z2$9Q(e@ynVtR+=W#>^_&M%>(Yc09||rp9v^01-YLXt>V=_He)g>q66X8xD;|USHHp zvZ=uM=D|#@=yG`CGT8R9?pvxdR{Jq^JgJ?&mtArMQ64D!Y=VKWmna`9i{uyZ9EXs= z!vm4XabI2z;fe?{gAsl7ao`_^JS#i|_}M}I;$nCTG#F~o12Um-US^R1KoF@+q@5I+ zcQ8dRq_s8Bi-V5S439yl0T2Tpgc-SN)}wmDqiM9b5TeEA2Lh6k4nyz?59O_k8UTs# z)Tyy9IHQrX0r03LxnG5O%d;_31iV}fNldG&IY2CE9vZo8>IC7DvGy1_YQpqe(0mxR z1z$~OgB2c6vp_gYB&^WmAZJe?e+uNF1Hi<9-#|c`G#wI=g(YaCR2n-QD3LNgK(9!I zhtoQ2WP-Ye%>o=UXb=~9di61lc^Qtt$1PgrFeeHW4-~B^%K}=RBn;?|2Gqn`Wl_q) z0Qn}eFd)Ek7FK0Z$-*`?T3OgBi&_@eWigqB-LjaeTq=vjEId&bOIf&F7RwYCjw|*h0Veh%aO5@y zx`=h;gvT0^pi0}$C36eA#!W;YZAg5#kOw-ii*DI*F2MOONeoS#HvgIiMjAhz3I zoygLB9w>DKWullP4S8ymB-Lw(d#WMNjp8hI4ROaC^5Q5?s8@U^Jlv2ks+S!#R<`i5 z=lfP5eTgZr0^7Gm6OH0Daf;LzTS3}~6b2k3L_6UY@Pv3ugjNCv(Bc4EIxBN50@f=I z;^kOjUqJXOV3irIu$0r8wrMVuuBypcpn zu%U!bOhMGQnGKm9@CCu4-z;QM0;YOV6!%+aKj!lLd_nz){|DCxQ22gcum$F^lYE(<1%gu*XP5*mKq^f@uTVAuenUGf!ex}u%yCj} z=rkR14Z_=eN%-DrAPp=}WYCP!ynr5TLQFDDc{mkvre^0DKL8=0lcdiNQ``}9j=~Xz z8csQ}M6Ok=Uw?~sga9;o?@CXp0>HuM1+j0vTaX3>Ed4Y-Q?3i?s#9Fth1bIe{S8#IhWVswV^ z@WG8s1p)_15dlc-!iecO&IxiDhbDJv3)Ll(%u{V&ArqyIlpOc++51hX&w8t=KJO;s zkA}kN^HL>Vt-bn1V3(~S!wnu1dHSigZY1_LI``wwgI0TEwUv-SGERhoEUW5Jn}!(a zWM{kAgMO}cA6RCq-`(iMEx@6j`;SrdJxRm&DX`T<2}qj1Ou;uoTMI+nHNm}I)0F#h zdo>9G%U!E=H=v7xJ}m1-LfTK2eVJCzG2D0D8|w1R`y;wMqq!??L3A=zC6Y?NfDs!P z#ZndBC!oJOrLIo{0uQ>7SqAO(t=2lpLZszOT2R>4x#I9W{oIeJ{4&WWful`lGwt0^ z2wqP1_$RsmzOpn+|4I5o+`swnhS~Rs&lKW#vP@4H535qh8LMvX^Q@-`b2}vVi$&Vj zfc6uA2z?T1;;@Mc8G?bru}?c%;XOL%!6S$Ti7mbW0B*R64+G994*D&N$cU>5u&;+W 
zK|B|lFt86rhFBGNc`veYo*neD?y*Zid*nsV84sGVGm`z93+^IK@O0Jp zm*uNTCRg|xE!JnIPvdxX3)U4JgJ8ue=-ZdM$>}>)--QwjHWKK`gr39GaBdie_H-7B-vek*nG}w9l%|GxVm?sVD@PM%4=aQYn|<9(8H2o1!l zqJ?zauy#fSN=6dO5iAE~UrdW#k~J0NtURQO6KdK2(ZoA~7d3nrSFS zpk5U851JSYh&>Xae}SbBjR$iB2YEafTh|J0JCn4%oFv8Qv&1wtsOMEuygW}r7Zw%c z_0s{Eq2thB6px=^;#9uC#97#)S<^@NgDE?pN`1OMQ!nkCtxwkH4&@8E!c4Vv zY_>X8EuASnZP~E4a)6=`d*i|e3U*>(qyw7Qh%o#TAQtFIj0qMgRgeSbR79Y`Mrm3H zNJA{CD!?OAiUor((q6#f#x~68OrV(|b#$uUu&{jaUtq&03pL`suxFuAM0h*6IL8nm zUBO7rqzI)Iv@wo`uNCI7X%X>q)OhZJ3x}%2=u*bY0&54pFDb#$LZ1UI4Gc>VNj0NL z%Lhwo4V!=vl|wcV94Uq?ga@wBxEg{i^#a`lCyw!;Ex~jP9mY^03|q6LMpG1Vm!Kit zHqnE11ec3yNBW!EN~XzqlXTmp>M&ejVZ~@UE@id`JM}z;Vf^)Y{chZw+4Vkdls3C7 z?M`p)jH~I9Cor11Y%7?HL?{* z&D_%Mdk3fcDR=I1i#KAR6P7oOKZ-kG=||=v!~_?KzjyVZ88Bd3OZaz-6p* z?*3q~1%4g^z;PZaT{M4w8Mp9?CY>(GcO!VW<7EY}PCt5LdSsmE7_3T7P@u(I;_MwuP10Vb#CZ)ol z+v#o@ON|!z@(#uZMl(LyPNeh4aMpb@0jFr(Qf1i&G)SXPKgJXe)@@`b37faon7Wnm z$Hs>iGv$**w6*&lj2B_b*&b|Tu!C_0OnS;a$@`>(7{|z-KRO%t`dQ;_c2?NlhH{UC z+i?`8wZIy87$Soz`gvlabnDL5*2>MRmtVTlx_;%>t&1;Rv7nAmerh2NJkg`Q5X=T( zwi6T-*3ooxJpvz_^&t|Ov$zObtvC#Hh1But88zXFOV)l5nStt?SG>^i?7uZ+M}fQs zfg4yb*n@>|2s9Q@xymWxlE=*P0uLH?cS&AXH83>RN|0A4fC&Y5Ugv=*F34qdfIuS{ z;E^a`N0vNh%-sMOr!g{+9t^_E3-Xfh`4W6@HSp9GU-F!Wg`V04JOgXO`Q$tMO3 zZRn$A}IlA3CsLPczL6GSk4qG(eHiW*Ldp_OayVjoB-3 zS`950j(S~r2LM3L;X+tKP%^l z!8gkKM+=Y>e$@D~@P4%PrQp9Uf2)xDrPGrzf;z>XWnH$(dhDB=ALbv41L6qopqvx0 ziZ6-}#Y6E+T>LT@f18W%aO=;5d&gEt&}tQ9TEs)z$tn2Lcxrei@ytNVgB2ERBbk8F zl{JgTLcV|^l1W@Lk+9~H1%xN?)bTX%ED8ogTjrDvD=r1BG%wi1aw4yk_XVs{K`SrNcG#vp_IUzUzD4D+pH`4M*w5JH9-?Z_ zf|-Zm7g%)|eNjXU2s!(L2f6J*F3;E+8H_!xNf+`ODM8*K?Z*vLdt4)Z$7iZQ4nF4r z0sj2S7UTC$9%16S0|%M7Fg?e__17T>|I{%!f1MwN`z`(y+~49CF^;F-Jpy-|FEQ(H z@%#9^7jHkyTxD?;-q~*qhyxbwnWjn~fKdFjUaw;Ok^ z=9{m%*WJ&$*W5Q7t5fmRoiDuZ&Nc6}UvA#%+;H#2U#Pe1@77=6zOilT1witPSPHmY zw3LXAi*)nI(fdfkvh^at_arbr9m=0?~#}k$wPY0=ZS=R*ah`7pF7j*AsNv2@S&*gfh|UL^AYI z(jwP2p+ksi{(Kspvn`GgmbO^W)l^a6p6uOKd z^v&pUT}CBkNU2E|QG=0DO%agSAqfu?G4L863Yaln!qqLbhz>zr 
z<+_49T&M}P0v8~d*C=I71OWzQTDhE9JkpDU7srKSUceyJv=yHo_YRUNfp*f(Fb{D# zj0$E1EC66Q3CbECd@F(`&;Zm!_LsVfSWtq<))k^cp#(7Cn1sq%Pa+yj3VbSN62;au zuow$kI}-nsc>v)b!keuEO}dw^NLN z#9T8-O1NoMJ)fv$l8@X>+l>p1n|~Isj7&j|=Q8T{orJW#@}cVhw7&R zeOEt8FyZo8xCJ}SxQCl^j){y{6Ah!eJm?PLh{xIEJ_^TFhx9uzilMeLMQJ{z$?Xuu z+1T9Jc|GwbWJmkREk~HoCTLH4w~6tiI8ZsG9XF`0W|0Zfkr`IPgf=NR1v8Rse8z|o zCbT<}QsB$z0*BVTk2GhPGQ`(11IGXBcD`dbLv36V{ZhQqATTV`OQ&Y6W%}E zBQxa;rrUM5h^&`0t#zmpPkmA_ z{TRm3lJ3Nk>Ri%-Nw2)_y60*e1M<~EiV9|1guAufzmIFNuq?Ch)9sGaZl)1WV5~j-iy)2xE_kV{L z08hq1$F2eIb?Y}X9T?k6{=YPu-i`le{T?%!P-X#T!0?1Yg1fN5A8iYdqQjsz2d$g> z@oI|eJjwe%4iN)Mfw-4k2mMLgY3~gj0EeV3tW90~b}D2>kPrD51R3K3lR+5w!{EBc zrKV0Tdg564d1opy00(sDH68k+B`{{86d1OJFTuKlhx2X;cQkPe65uQu5ChsnohYKX z025@w zwn7NC@8DMFBCzx%t_I3*CB*G* zy$x)ihRe4bvYUjde$Oo`+QKGY>PsL+b20-~xaAyhMl z=-1ZJ(PO5&X5i8*{b4#=$3`I?V;95kDNGFuX~zjNC$$9z&QHG!ln%2XxCm2WP#l2f z0#_}_{YMEY(Rst2(6 zdZ8ijA#y*I5K#ArB}AfX)r7$lW4EZ$0X`-VSI~+E7Mq3PmQlDMOY{dFEG&{D43MKq zvY5ellk>0(b%gMh`Dpuq3jf zXjM^1Q_w4Lm}C36R!u_*pbIHn(3bj{C+95aX!;2`X$$`Rcat^Z8sjut>>pih@#kpaT~?wg1`%k%FG04lfo5zHYblxoK>U|8RdX-DllVp zMZLZw!KMU-#a7H0!?WQVP{IT>!rgHy-z({8CaU|s0M*k06kliZSfj$B zfutz=Vyz+P)Z!>=sB>DDQBGd*agR!0*~GON`$`S#1C`Pr5y3oH;T4ll&=LNA9c}k< zlWGZLf|kLmXQ&B_mWO3x5fzAPAN7_DB*(N;KK=%XCFuUyo7mmm(?!C{R_x}C%Z%@DPzLs^fzu)--@Atefj(j7^5zAMW?b!R= z{Wb6IH~qh^#&iEn?TtTR|Ds#`B`;^WR%j{9iY(i*a^mvD<%=s2mlnfzupCD@wsMqK zwuPx}+twG>HqLuuEWL81dST=0`u5UVYk6gL{e`7BTALeN-i6K8?enYK!OH3@OPhqR z=*!F7FFKd6t_A!{y~3;OTgw+)OT;O4o&8}%&cZtBZq@~s^*evy_}1U~(K{rTE@iGP zjYGGz{~XIwUSvh8tYTY5Hf^LUh9c{>*oo9_vG!v-bVA+q)ZmdUQD#r*N)X$T9XXLi z7`m0@plig1Wz7u%5>$$y7fkz?y_<=nJ#u2gR1n#y_BZOu^c zRJ&@sb`m4fR4>;p*^O^fvPU`IrO?`;#opn!V&$$Lwy1YE{EymyBRiyMMAqRG2V*O; z51**_rIL=3ggd3$L#pvK)%f+PB;8J_ZXs1EO;y?{m9=B41ycEGDu2gR-`uIJM@gmA zRC>o$e{-i)&yvbcQ`w}7x_y}PiS^b_sa~M-lti~<6)^pJ&Zo5hP47hbRex{vk9{5g6aP2u{Le&w;VV(D`16r%)m2f|)TEkH zdl)B*OjV2{##Xj`+m5&$*Rvd7TRIop9wNZ?sKK~MiO?|fgrv1*Tw$+aPX<>b2r)+@ zaW`Xej|JkMj>QaY>Pv;itQ3lrFgk==?H(nj+Sd^Gejm3zI7pt(gjKSfy~87x&X4)F 
zr*)}T7puH$T&#iGfLIe&6RS}js1_^Ds#_^n;!3_!sN^bMMOR9dTD4wPp6@GP*}2G8 zf#tg**Njtp#&UALi4tjooz%~FBZsf-lNh8CpLRGNy`R1-qB2V=!iA{H&pA2cqCi9| zd5Smn@a(_}B!*JCfPW?{)%HfR5Ja8HPh4qW&d({To_aadBFiICcxE5U3zWBT!N}#X zZJ%1c;+h@6Axs>JDj%+3%eSvoP7pU^`>iSl2A#2dCmDYvL*78FQQ%^ZNRa^oRFX zGQPOEaXG88y}Gh%b^Yd<^@sOoreC6lvG`><7WyNUF0V3mW{tbBe8pLBy_(xN|5EG1 zw!3v@ZFSq-maY%CUt5xfLYw&N>dMmg#?tEgdTXT}!eh+C3G;AhA5)3V1Z*F(v9-*vOq*nx3&}FF66d%q zN$`-l+a?up64%U_zArOtk~_-4PCRAHoaqPRS8~eLw!d%Z#5?TqMz%_Hnfgqwl7;yB zwvSgBhSZ)26$TNemy0odW0q7N zPAFn(t7>8%EQq^Nq%4&m9I|Y;nMbPIIV0okU}Q|bie=+j5-Vrdt6Drqs)$`}PgrHK zrbkAxMoXiWk;-tz4jYv)tc8t|A9`WEQVHv&YM2Y%aIB)jW~o^jD;2__(okivGFV~? z(CM^*JeG+9iO*E%w5JUbx#H^}z&oN|#4{q7j%2FHW)o(DGy~GI=!9#Gw6|d)iH@*O zzcYtPPRkv~$xCr0VYYJ8=y+0annLCbS&N7G5iWC*)TSG(2xkI-?WABa+Y=5kqf_a+ z{K?H$D}DfZvbnyz_Q=LcOP_5mU%YM_SYBCai{)Bd7PHE~xODz?q|Zw0waPV~ms+p) zi0@{P`0h*gws#NtmDbkw=Emzwt;<)oU)L*}8&^sfDS3&V#}+|L6c`u}$$#{P)^;bv z6lf|bwIyD}6w%r=+mwq1gsy4%(mGAw@>jRg67o3KS{KRX#nrVHhg3Q8d8M@#c6|%R zd3l4zJw~?lHSYGF6kc^9>_9 zorq2iIciz;heI=C#f}OcFE`0fku9WT?RlZoh+Qc`=C@viOY^}6F`_3~>;cwN1ORh(*M)(Bp1w=TLgg@!Oq(-6vJ6@lcog^_L) z&+myRGkD{hN@jL1cU;9WxzTgX#8_rx6h^EwG3N%tyvM>@zWHnH)=ih4xbiF*fN47) z#Yq%e^D=*@M4Hg2!hEOOC-gy=-Kp}FP&MBmkw=zTY9Nxi-Wlk`gjvv&d ze$;(JQrpvqPegc(-c}wse4+t!bgC4&hgIWXTnG!o*m^j0H*(ika1C zXs7&7cQ+->hm&O8=CDX3iEQOMEhZt0vvxxGPX-Q4RyM#tDZMATiWeOy)eY5zhUD{M zE-5n1p`g&6Ajbs3Li15BDa9;1G(nC~N3tm24iV!>dM&AR%AU{a83m?sh{@}Q>+)!{(+Z$L$nofk37 z4%sYuYexf{m96nGOKA64U~^1hIl>Dp%wt>UTo&&lBUz2xN!>1y!7j4Aoh7S6KE$lH zRMH@L7$bXvnM|fKn-#vTWId~FJI9Vr zCLjf?Q4YtPEQkZfY9wPSvSRC7_h&l|&Sb{z@sB9yOX@RP|FZiN%KhJRUk$vEmVUk9 z|Gly2b6?)|(<=N2x)_T;~D-@d{R-9Kc_S+`oZS|=#(o3m2$EhMV4YACXtknnulpZ%tMa}>>f!KSLV zD@Rq@_h^ti;8{7=$09I&%QuHuAycKtK~*Y&*41La5psZLS?__iSntjk#Clh1JzX?kYbDCCth1G>SeMws zV69f`mUUOdV%k-tIb*IoB<^)%zH-;B z(stvWRSw^p@0`Ru`q&}NW2eM?=rG$*``lw*MMcj`WJUNXvkOdXStPb$Ti_m(C zXm#6!Z`7bYLQ@668$(HNVj5HBI$_3B43ZQj->wJe z!QC8+CJqt0o>ZnlI$KQ3IJ-rfAm)fHgS7CTa_nw7-B_}bPV4FfouqQkO5M@`dcf?` 
zea;=UYs_@R+-)iq4Cpg2Dzle&9KWJ`4i4HCA(u`^aUtV#b<+(|p|2@`A~H{M(3xBI z-$fChI7|sd?1YgM9f;VWP>SQ9IL-duoB#-s(oYT!6m>G4MpYsL{EGRaFX=$>X!nBd zB}Uq#rkkmsU!;!G?`iKtmk~|^OtsRSQ{qTYnqv-A)mRQ3(y~nP3^(abPmZ#4ewBX} zGL=(mx=So4J?Y3Al8a&q%eBALQ;bd%%r67-D*?oEKad6IaDXPCbXN2m-AitpH55ri?L31Pa&7J+dQYYL?j@qLtLt1KnA?fmqgS?9H`d)pH&AUbL$h`~)SMA%AsQq~b zlE%gjw=B`LjxIu{ao^bm(Ms#e#@6aKH#%D_PAJolm#=PY%U92NWwq~%vpN_`w=S=4 zSH(!1+97qh$1kO@OZ@fiQtP!VtDCK*?bXZVy?ps*2cUgdS1$Kh>)UlX6E!6tSvtS5 zacPM>wy$pO+SwbHB*yAW>qg;lfC(>2`pe6km$0sEu3l&v4@0hY32)=(tg^Z;*IY|$ ztDWxXJ*15}E#1hj&2pT~Qh4X2*5iA9W4+bBU^7x;$=sBc)>b*1-MG?P&zPzAg3Mpr z*ro+>+;*7WaW`J&O0aCCKyR;B zQlB=xObSm%$Kp$&AlOMIT9ps(gW?%wxQ>Rjmwu;x3(Dh zh17%bZ>Sq=BhwA#DV>(H?0nl;!);4iHs@y&(n@)oD^*I&5ys4!*D zoivt;SGP8Md2+{l+I;8QR+`nl#5K+RZd~HB$GzzvE7UCf+nw8IhbNA|(+NJ;VCK)^ zFfVgO9hcmF%AeyGQU@+M5|O{I>K5u$99GyIRj@9}>h%_@(93>h)X#7F`8S=XocBBL z;3h!)UUVK+FZq>mXTv$cjXZ*`=G zT-@$Z9W`gqIdjqt`#5i`a;PEaSkevb?>WGjrcY9bTt=u`fgAIT5Y{BeVsga9^`ysX zlANY-(M) zdDG~$#Y7L#BeJYp5APJ+E@wtm9vk(|Wu7a4oKLFEPUTuFr)>lS4d*BW`a#Dd|C}zo)2J(-xm{ zJQj}!@u&$M*Zafq(B2cf?tvh7BUk@Ck;1X067LFkm2^@~YPER4I0we{7i@8b14&&@ z`b#dC6+t|RD_0AhJ^DHKQJ+sJkC#_7F_K1D4+}}t{XgWayue9h`EXng%hX%f%D2Wt zdLtU@-VVe=VL2MA$E9#2EKTV*qf#73We)xGap3+1D(V zCf5!@>U|5TxD-i?8PUxsDGd8 z=iG154;`+UfSGdolXf@|*V7FWY+vTI!bcNL|*@BR@kGCk@?QgAXJ){+So?kei2 zmP{)5M`X7xpAcquT;xKhSab)%QJq9W!U6piA$&BROsRN0^rEq3Pno06`EWeh8;>a> zj`#8iqyOv=boiJ!8W zhhVDVa5!VCs&T?p-%{eD&Am(o&$OB98TVm7mRqBKrlNOYs$WoHBg{|f3-CwIZ9zBs z6QYJG3mk5;qY5JhQ(j1j`98I|CIy8b#`;JJUJp9|2O(+rSmTxs{kn2ZtOg5ToAM#f;({fD^v zxS%3jFbo$|gbV(?oSw)hJ=wzrjV`b0qmlIffc~%$EE*AhfMU^*;Rk;{9DyH(=+T}e zW5OA2D$4}=@Mn8C171j`4Oax2v_aptEp6Zmq|H9V6(Wen^iKkq&yTplj2BVpyohWvxW;xsRGnxNb`sB+oF)801%tnD{jn#$7rXG4Zc=WRTzPFvvgdFo+a^ znCNGaZ*mFQWspJ?M+L-$bWgV&eQTRVzR!q^k>r4|h}UKjL`KRY*Cdjw=yZx1;Ofh= z9y!OAj>zP}o}2@>EYWf@nKJ$LOpTMe?ixPJ!Xle`r1PN5JgAum=b*lqcOs8V%zQF0 zlWbCel^%~26RFyxZ+B02vzO%u7cG+5Vfk?onMJbLL6^dpgvlYJ)t85%7q`(^upz0V z$GC=4%uaG~NG+o(J=k%d&rW%6O6<8pf|(@>N~j`|}-evVGMR zhq|Jv6dkGva 
zoE#h1t5gFgjZ2c_?j`AFnJ;3v0L8cD_9(s7d(z)9d;LzgLCOXL(YSx?_}wL>Nj0L@~8l3utX??4DvXt6iUfm4MeAmvnm9X`)nppM6c)mR66`T zNOiZ6w4Fl$dx%y6+NDX%*e(>4yE-{Uog7da2wkrz`ULIjvZjioa`zV3CcHGE|5i6# zNHw8(2`tK>1QJhi)a`b%MzC3KRMd zl>4oo1d;T8JcWdp@dKm84dHZIm-eO2O1P#@MFxaI`N*s6woBr!MsyR}>ljrrSkx&* zF6yj)4K>jdc3EPvMBXWu5J@824HI7~a@vUxpeS945zfU{?2Dh-Ltc z946y3b<6NLiLT2S+ZuWW^_A4`lngUfE~4O&({e)|aJ_v9+d-Ge;aY!h?CYcMo!w+P zeUglscieli9hrZk2w{LWLAlD>I;Wp0cvKd{OeSawz&N5QFymgpjTVRr=Tbiv*#(cg zec3cVb1RuRG8Lo2U?-)92WW>)z!mTh`QdM%>Y3N8xCD(kg~UW>zcJ~=b4m&CqL zRt_F6h7^51ESp+Jzb^2eFG` z8VyTy67pKVrw~aeMLir8mJA1dAQe3&6M-t_^dt%&O?q!>kTX^oLkBkD1#;BA2fm7%i7nGe(Jq z)@UnLcI*8xj||s2VIm{!wN#tR*KLx6JI!%+#CemeQO?QbaskSSg&c~CV4&n0(E`{w zm!5M4bKzVx7tiJA3UkG|(p-7Y`g%T7IYI>tJ+1>z^0SIbdd&TR{eLO%|91YV_Wu#! zhR>p?^pgmNUkBXqcY+^t^1sI#_=mA=HIx9`_NaYo=BB_lkII0p^w58C(T1{i ziw;m-j@X7p3z)eIv|3Kis-^@>Yt6x-VA)_m!LJUFMBMWZZhK^mJe{GMV>we+&2sh+xB%TI1k9jIgTrE#CwGfA zFgPUElqJxH;pSko(imy_jasAFC^gEBfGw8Ss5Y9-p(YC#&j8&7&*q`G2$aC(4q0#u zH$c6ynCd~jrJy(NsU?Nr3uM%pi|w{Y?`IF>jhzJ}6m06xJHWTLTdvuˈp`w;Dcw~u(su6h5*!rk&2tjk=>$W8Ef3H+7; zuluhn0TCS>9ikgH=Xzt*clUDXAT}b~ZV!@%)ez7^6;0Q2J!XkjZZLH-XA7tf zx#DmpSF*|+mX%a(g7teb;m{A|)DRo?z=3q`k;Ifz>(JAoBzk;-tttz!NE8&o`hthZeJ8F}{Mq1D7X|I+JG? zmnW*(92&|lvLBZxYS^8CUR<7N8A~QTxV%EYm*Dbb3nnVxMu{7|BFiS$*C=~mgUdsL zLkA)&yWuo+17zM;wt6P7efGYeP|mNbk9+#J{NGpZ*TUZ}dEY<$F9!T?9C|+Y8;3t) zhre~p|6)f!a=qOBmajlpDcWB^FZ-mZ8~Vop0rL`o9g}QD@y&cLV@=-(2$R! 
zH2XgLrL{L{bglUxO*$Lbz(CeTD$#?_N(Z~TE(_4y0OX{GG<$U79DFQJoIbJ z%$gEfjhQSuI<=nBsf}d#IigeRYu^ftwk->Iqf;9Po)r&jD~}#+VIZAPOAY4J zUnIKds(y`mPV8S$23KhPN;=(I_j%mvJrrOZhve4Xu~D(TD{rjZ3&uJT7bWRE`I1<7 z7L0XA!Fb+ZHc=j^80)XAswnbWzpAP-nOk2}19h=~S`A2LtUsfg=KIg8rup7rKE-=I zwJr~vukTYJ2l4&50&9rwDBBo%HZNQ;|nz zjCFXwv3^9&9&o%l#d`UUJH^`N+$Glj)cQ-xG1|eGQtL0YKR?m7-@N-C%X&*Cla}=tjrk#Ce%hGJ z33dN4hY1|UNsQ076CF(suvU3vljYHzF%Pb9;C}oKQ5b&WjjNbTAEx12oxdb_#H~x` zFt?5QYWPmv?|DOfKB}9B(aA-CS7Kad`z9g8hz%3WuN|#x*zww+hRWZ zEGsVig%7cevFxj`hy7~!E}VZ=ZHv#>)iN7%=aqAlmh*E;%)e&LpI7IuT6*^7O;J3) zxQKb-WrU@EufK-*6KWC5eTV-f>c{8bLx8vP3f1s_Qr$ z@ZI1AVt>Bl`yNMp2T@+a5_?4GgmqoK%r^T9o{WYtoW~} zdrw$}v5%Tqh1pY5Okw_2DW;H|k{k=KhEHP^e?UFztGM`MihB8_FQ`XOSRYU)-%=;< zS-5!Sr8C=S!-Wqi@58DTEL?ibTX^BI3yT-dY&`W~@IiI*sl}(xytw${nRsD+@zUax ziyP0apSg7A$uk>E8_z$zxN-jJGlL7m3rnr~!V8NpE#4O_w;l*i1y|2LH~Grhm(RX) z@rlLOnbzXu!oh{t&OSf+J?i8e=iWHGdG?{;;o#n2B_p2doiQ4n|kp*$kZx+ zs48;elzMyqkcw*D*>mWRaAT8$hhNBvR%3*YaFcJ8Dv4s8sek*!Zs);Px$RCaR@qO82BDjY&(w zPPS5$v_Ujf+C}~tqJDi{CTUy~2aHrZXX9x)s<&cxPi4B!EJv;_(rWFHSh~5VSv~dW zq>>7QyC$)86E_c8BT?lry5H3_2wnAqV*JJ`}Le3h<>|^EhTnJ zA#;?)^RR?y>q-zi8Z;Mr+LXCPW4udP`X6m@*=}`O&yh+X%GQpkw~O|+sE@ZvM%rqo zJ}#vZs}n}Hut1PQy*{lTHwA{`Fmvh%sJe2EGA@3So&@}pD*FMOKH-lx+f$=K^PQ?_ z6R`mc&>~?6c6egh#*_{xFwMkdgi~@N%@7Wkm}Web{f6t;0g*NQGKESG$``e6B7#id zuqH2F?Q%g6%2&>?3Nd7Kh>x2l*&{q)TP}&Qc1ShDYc{+A&7ZTpwscDA@aTeWX!b3v;J!<*_%d*0ZluUPL=Aj z6eLbF&xKN=HYM(r{r#V`;eGAixz(bTZ#vk6z4v{mg^31#+Q~bYW{T=7vbS-*sc&i+ zM(Szz&Q7Vh$&+5qSWos5;szx#1Ry}h9aK+wL|9gM+pr5!BvyxF!rCbw0MCp@fht|o z{YE;chKP%-p*uT~Os0Na13{BZN0yI#NST*yk|B0peB_T~8xe<`pvuS&Qt6Sl1t38{ zSSt9y5zx3=J0#7FOCFOdXb1mm@#_882V6)~w1p<3Su)Sk_uv-F3u|kSiXQydsZ2fpI-gS; zm#?6eyuO`1we)R44)N(!fqX$;>ft4(Uc^J~VbMWut(a0DZf!5~PQ&usghBKIAFhKs z!~%PGRFs8J@#4_i$35hqYCkMwYWwydh`RnH?0;e+UX>TFZumsXx4}d-F7O!0`qEX= zl@@qFTYG$~cwuc7?(681XD5Xzd?ErwvEE7p`_B%qfl}17qeOX8>oVH%D@&p;KDe_t zr;3v|2r+8%o2~7uo9oxZGg>CorA9lg(Maz}kez2<>%X)kxZ}Hi$T`qHJ11SjPnqlZh$Yj{sp-|Ip9PKq~?!{I*AO10|-TH^Yx-}Z*@^k8@xlb 
z$`ex^gvE_8Dg)P{R%rD0uIqh`gvk192V_hAda#S+G`@p(Usf3+CNKkDAVND~V~SU< zo?lzNAh?(Ht&6SAKPgsb>_#A(CG&#cpBPHh+-j{|1ewLULEhq(x1*R3fWzdrTD;h` z8E?JFt5Qqp1B2#0D803Dx%GAsIMX-8=A>w-j3zfm>h$K&kKyUhAw~4$QvgpkFx&W^ z{`u|Op?*Sx`Z4&o8vua(jFN8$`jbAq*sjqvmq!`=hsTUF=i5O74P1i^VnPSMHhitG zfSuaqrr4p}D?sNqHoGVxgU#xP5Ou!{z*E@WybAfJ2`G{~68Y=ifc@z}i$u9p{(3=1 zqEjb-ppsJuu1tBw@#WsdC-yILz7>f7Tgth`Symrb?uQhwnSDw*ivXDX>wH0vQ;-qh zU*O`3*L>R-dqSq3Ph~DZee=SZx&55vd2j9(Lv>7*-|SqQf(YU%FEx)k6^c7N{fXML z+$$Q~l;mBFRrx)(JX`k=SBTXxMP_lzCxwxF`5oMaSymAp>lEe0Q%){#6?v%dfgGAI ze?%o{F35wTlEPeudPCl@=xE7fqT-oCnk2nWMDr2#QL=penSxGYZ)C!}dW42WaT--> z(Y{7!I<(8rbo8U9^ng1k+TG=86nWxFJ?2izXF+rz!BS1Tv+{{Xo?Ct|+UHUEeWJ%H zA08iXf?YySwL-q4-i=}%sZjlLDppeDu`V|$$v-Xvo7kO(;;vBHDwVD!1IJO+Q3P%em4=}1RqogSaJs(ZS<4;kl@gDF@# zkAj_K7pmZJl_B-GUH)UH*U1Fw(XcYdZkS?WDb@}&A9d-eWhVq(?RH@vPuG`MVXqN% ziqTGlQ9U3p0R7O7+h@Q88r6Eyb6vgtyI7j2;rar3FvW=`t`sL4of;^dG*CqROJ6_} z*U&oV^{#j>5g1@DM&a|Q8xNegoharn-W?9iCo|fBEO_yZL%@18lk5j}HlNJK2jjy* zJb&VHCwkLFAJ>P&!=c_2`|d3PB=hwL9bgX!lLPUg@KDK1=92kZyw^Anj_cpI#TD*N z4#i{P7&@i#Xb|tm71Tm^kA9kK&p7WskJ|QQc!sqJRN3?TbKHCFH{(Y9Ogc~(VC5hy?Qqn@ zDZeY82q)+*pkU9&v-)RLG~1(oI~z_!vvtsMbK&@uW|A0>hoTAKYNPR>`@7U}KAiJJ zEi?!0@ZD*SXg>2Yo4^jeS`&2GVCwyhfZVcz4rrbvJJ3_mfp9oFPzU8GQBbCSCZ3E& z!@)*88qI}6jTmKiWQggD!Q@ChlAi$~<;44pR1oj{zHl-eu7heB#m|oRCAX-29Wd9e zqAx1~N7U}6kEsyFG{Q;XBcA@Uo!^~BFu~DaG!zXtqRHgw>D#0I`sZvI&izF?tRq<_ zVDv!$6Fb}+?`aeFSUeR@CCRw{U+rkh(BJ(HT$GK9ywoo_9_~qQSK*(4sT?&M?~xAIW^BA)*tpSV<1E77XgH0~&(}lY3@L@F-?H;ErVZDpOg$9M0CW|Oo(2Tt z!w(?YdN~>psQy_yoI)n1v^W+|GDP6$FW8I)B-)hw>J;LRN?Fxg!<-co3d!j+Z z<$_Gz3#WVf8+J4Smq+fuw*`0z@({yr!sTy%s52Mr0ozzl1`LM>ee=MC5$xLF^|E=u za5(e8srbN-lfYa!&m0NT?I6<;Rrq%d*YE%wj`nny!^H`Q|E4XThQmk0QQ^Ol08SVc z4%d71Irnjy1fDS*J_-+{lfdWUaESyCVH9SC<8~P?kr>gg)~K>CIN-qI~tffLK#Q50fhR&YeoZ@^Jss2H0;x#GxGgM?Xc&O zP$S=eB^?bZ-8!S;XFH?e%bn36)j+8sHB3xEzN7PC~H2`;rHc&H=yh!1W)m7q5j5tJB6F zecFAbFUQ<8DI8Kfh(It+@CHq;F^D2Rc_=P`hm435rMdF1ZAtP?&PD~^29U*e(H^0F zA5^c0e)%z;n|49I%9i6@f~;hv&ocvwz@Zpk?7^_Q>`ZtY`5GGJs}u2}WAX^G7@!h) 
zC*-Ru$g6ZKXC8UYqtcS&$E#?#Am;9gi{O|*W!d}}B#&0YWRaTk>T)777Ahx?k+Tu< zyhaQ*Eia%Gwt3!hFA)?Em}>cHk9BG%Z(#??6UmeA6(N$WO7gHQK+NP^6x*Z17uS)V z76c*i1yYs>LqTqqQfqNQkr3G*NGlVpQD1a^B%}hh_epI8}$P-@pr0sqV zfSaKwtdNHly%joqZ4B^|06JB9Tu@_u7+`)}juwW< zm7oU$OtO9qumBVV0}PyyK{919z|R`^F9wT{XJgl_0O%z*Ih}$&`Smiv2PeSgj67m@ z{=7X!@BM&EQNSz{4GMUfMUa!6&rrY=E-2sw0;Xf$bO5vq3YgVR9}3tJ6tFBCQxq`! zpbQ1PhyMCO^%)tbW+TI|5YTfeOrS!~wQ<1ho~zJv13jCSSeAVk#`X6qajeK9w}cA} zEuB~fzdFv?%Dap(DSF+!-z!S=haMaVF{OF3729CmtqN*VhNXG3729SF74-0o|VeOQfyWRWgp0o z9LBOj{vkp3$Ff4U_tHO~Ry?ZG?jN3BH}Xw5tKR9K&H(Q&P2y(9GadwKJQo<|yDF$0qxc8^7<^MZka;Kq;st%d{iZz-&XgOmE% zsLCp{#B`U!7$3zp{L0$yp18sS`Q7atLQbH0$f?BBHYslXBDUdK7Mo38l&C`msG;91 z)PxGPlnR22g$i|EemL!ZSUwQUCPs1?3o^J^kRgW!89Xe=;A26C01GmNSdf9`6@r2z z+4jJ7<2=CS{OQY4vAZH*tumQbnK=QX{HkJ}C>WwlLzEv!A?ogC^$Gy)a2PygUScc26A1L7WqRw9@K;;^+H{CJGI^Vb@zSJv=VcU{v?ZAAyd7VOg#A_WcsWjQ`9suAX7ah zQ#~b9Jtb2;B~v{mljH@NB)7LsCeD&oN~6EaB}kh_`=HTh)I}psyb~Zr(m$pYUWiIs zhb;}PD zm`I#|r08=_Cp3-4D2h=K!+_EA;^I6|)0NBh8FnV$vRKGhq?K;ZWq2)seQAL>EAU+y5^FU#~VK=x(H3#U_FEzePP-5E6e;D&*< zPqH7-rza&IcM}{&^uyYpOAB|GnYoqATQOAhz_)E0{c~;8q^D>a&r9vr=HNeTwQ13w zHmKjFw`q4@)8xk@(x%A|N%S?1^~~F9nq={|nl=ZiKChU=N$^668?sJ+!S?dhki4w` zcW01ySPq6Yyx4`j`~8r2;vDOPyz|oo){n3%ka8VpPGV?(xt*L~VS|AdWGx ztYeHb>lh=>I#v>+ECvrjSjTw&!8%rxpOG4<@td!t;j-+QYahv+fqjcOS%|Vhv2V@X ze$BoubLVRIQE>~0yU98+gJiwVk1D3Q0T&d!mt;YW?c-25WO9hsY~@66mF z{O-!!Va>ifa|bp1p0=CcY<${20j!cMGARH9`C^HDu|&RDB3~@J2?P0JiF~n)eBnVI zhCYv=n~NldR-gkf;Ac@$Pj&+*u+5^MUZ-c-BL_ae?Ex6d=ao(#I?wt1P^!g+uv`qg z3%P~TLV2OGP+h1k3@p?anhS#qLm-z%7DgAw7IrO+FMx@%ey;P{05_J5hkeeGuPWYw z8+Sio^Aoz?az5qwZ+Xw>{v~gg{9CUY|99`d>imC>eO?NPtb&^0;Ex|lL?PW$tkwiU z33`&>BM}58*YtFW=D7{|-4UyvLLmoIt06x$!o3}@1BkE~)7%}8iZSW)a_Te&GjiSr zT*`5sSg~KU*hk<$&hH{BiKZkPKW*3qO*c5e&wAKH#vEygdykk-vCM6yQ?0UGaB9Zf zRp9>7*;~TY>F+7({0Kh^#xnz>Rk4Og#0sOqpvj)R5ex*CTDexL4G&jqwc5z=(D3MR zJ;?ExfYAWwLK&&v{RfW9B2X(hx5r!D3Lzv25)dHb;1Wzc8 zyH9hZI~437F$1fbs)CEAzsWqc<);_;%|mh}69HJHSQT-}`vyQ$P>nY)9SD|!coJfg 
zNHW4Y?+Ij1;;G)}>3j9F`&6+5qouvL{6fp9TL({OZ^*Y*ox5+R@9?XMnVxR1hHmGF zZL{~w27to!+V%Khcwt z7mm~4Cydv69@O@)`$0o31$3XO&YSX5Ex^^4qWO~i+Ap4qPT|d-u zD0&R)v3zuIkxe*z)v#Pl6lQ^WhU+O&266YY<4xr0t+oeJ&8+Y%sp5*sxvebl-OCvf zVpaYc8z%EZX8brAHyCveo;Ytr1%rnUdBw=$_qlLUpHQ);jpF7B!NQwAj-tBzv`G|6 zb`=`Z+U_=FH-}9#ft(TPud=fjEP`3JY zPKS9TSWYH9Zs)iitCyqGMRtgF#j{(k`K3J51W4)rC0fl#By^P>n`wir>yY(uOo55K zWEpj$f=s=ns< ze8!EC*H-)9pxe53XOQ;qS**<`7#oZi9_UEBQ;&J48sR&=qHrei8kk;7ukBpd+r7@# zAhBVqKWMN1Qfts_ucr4?A?K*{JF2MFpi4q1={%;7&A3l}!21i{aK6ninokKG{(<^w?&;fs2c7O#dt-V$@mA|*dt)axYn_{&wY0T!?o6~!MYOiFGNSA00C{sUx6^NZ zAnkR-&7F-wXM5ugWze>SgT_Kt@8(w_DLqW1eUP4D7$scNlVwbmZ*`uAA;5d&(e;3x zjd_*T&(1A1PKCkjsqti%`*4wUP9JuK z%_?YsPZJp$LRp(2K4< zVI(#Bjrs}119@f&bBelM88Ac@MODnuN1-Y1*!Zv@8n9n1mQ|)HMu*a^%zDQg;iYUb zLx$v_De6YMf5YqFXfL;x&z_^mX^Iq`+2@WK5?Ed+<1Iy1Xrpk2>-i0g(`I|M*F9MA z{?;#Ym+SvI{ok0!?hRd+7IO zI_Ru6y4z`nYDQEmD;I%aeL&oWJMY7g7?D(ql*Y9?c9;Mkk%bTUl;Kz%*>zND?LvPT zdxEiFJu#{H52xJ@!Pr?_o?M;;KCAPN#`1nMNo1-|h~+r}I|I0Z`qQHKr>)B8KsQ5B zwMSV1J;w#WJZsAm1&nxgo7uwLapmj43cWx=!xytT(0Hy*3wYuph2UnG;vt2GCr<&~ zJ}+Eq-EIuNN9($I>jXfb`FkNVHgu2Owqlb2y+^sgYulj@7UlZXL~q!&MFFT8OIZ-X zM9OgyPNWC|AHYjC)Rxh=UvmG0zrwwb$%gZJX~M6{mo?y9 z6Gvg-CkVNs0Sz4jGvFq|z^AqW;Sp8?3M+=>V{jT&6jJEhmdKde=c_;^$_8k>qtPG?i zt}Fvm=!d`4-ri2P)|_kIUJvS;-CCPF18=pvHE4IX`q5h2AN0C+TIuHY;EuMTyS~vL z4AL#Hv$dAqZgtlk2&TP(2XbKroQ-tr`rwAQ-Q8|o1KIMocltMM%;}|@-J7Y~O92u6 z)bDq;u5YA+?w0D`YHtS=v@#LrX1WUO%v5~ekb^`O+#=uT1nv@lg}=t%!QaK7=8d~g zo1(0cM?7E`O~3^$y{SdRPM$YJ14?9>R{SfP8@`~98mtq;pEZG1 zGh04%J>a9FI-!zi{#g?7CX0EKH&m!y!y>+7E*dQ$5b#T7`|M(8I$-Bb*?@Vw-fQz^ z0w4wN(P&}hf_g+_4H{nP> ziXfG%AIrv48E({Z6(YOKP6VoK$1M05UM;A313qB**l5j!JdlAMQ4QrtOb{^#9Jm?Cl7gZ`^7DdpoJKAJTvPF)x3ZohDK%fY!HbI{0nb8#! 
zptX@~idZ&fLrv)*k}GID7LsTx?F8}?vSb&Q75i}sH~;}9lq1b;^=mxVzs-M>yPp)_ z=iYw-c3xI>^Qv-#FDMayN!^vv7d;Vw8I-!hW`G@3WNhImv9vgpL@gs5Dot^2SEba{ z6XTMZ;lri?^!c<42@6(J9>r@O!IT@2l0Y#K2ecuqa{wt!4Cbf&(RIKVIySA5$MQ0twI zjVjqtqi;*3BYTPpq&tHKg1z31HVMx+T!t2`ZI)e?*V7%vKo+0@39Xr$JD(a3XbMPx7=@A0nez%xKHR54ZGLx}pb zLXDPG5t>dMqc&|D12d8uW10#bjg49nO-&6BWuUCIw7Y&i4%kx|7~lj~z8#eiP?3#< zMaXKLjjm&d7P2A|cn=`CR8AlugpiLJ$U2w_@Fh&cQ*0<LC2K{vS8`=SX2{XV~kYx2O(Y3rsj7 z&{f6`ljq_wfwcyFVQkLDhC53eW?f^eQUM<05Wyp0=CIUaP$A$&hFUdRF$o1#%KNB> z$Po&n3i1ZDK4`n+VKN28nW_T6__PPu<%gya3N?*&AmTX+tFw4JBd6t@oRx>zBkV9x zodeDcLLie39df0BqS0p0aRZH_2QVf4@xy!gTO5Ps>oF2`kPqF=LSPP-Fck!<1R+O) zM^U6;SXf!Y(zAkHY19r8LmW&vh~kkl8mJJ`VfhO6%#C)hfUI{7;D@?C2Wzdhie%sm zSWwHlrP|jv(wvTY?SAGD)9wChr=!R>rmcfaQLco77O(>rR@z%__tP>3re2mZ25^%+ z&A_D1J#HnCB>=Fp9ky`=q(yiG2T^CursmXc6z43>qc&wUk7A0j&;fBoZ++O&Su7lF zGd+~eHG|$A_~l6a0g#Z^XoiGY43h&GA*$Pjx-pll#v%=(m| zSKwn7A!|>@EOd=~o`pjbliWz5W3+J=R0B>LW!ykrVD%(ws7C^q`#^(Zf!%*?n`T0S zB$vZ^1ZUCX%T-tv#u={)I1*~3j3#!Z07jq{R8`ziM*_!I zwKzhhWr2Hpq|w1&LPlrhX&hTGa-$V;Q?9aL@Qrw zCA3R|Bkx}c0F^!Rc7Sen(R?M2;RzZ{OO9686z$WPIR}KAQf3#`v%?J7=}73L2Jf!I zH)qrgCd9yHYiQ-e4XT%D*)L&%Hz(HP;Q)E()rGtQIche|U?B`^uAzBHW-v2t%nWWH z7|smZhY6cS74TD#9*KG(d_EI?-=r{^H-#{^6~tE{(!1vOPHAAt?t_^{znfcsL2RW!VHo9=o^#?5^;s4T3y0x0p zaYb(cU~IJqax1+h@zV5fbk+y0jdXpWcVK!CvPs@I9I=v#H)aHbV%deAF{hFkBL$ZM zHct!y?AmfU93vkaRMudNT!7gcBVN7?#nTv^?6O8NTTqWh+`p(NOi#>cz@Y(z!5-EU zt*rA{WI4_dyCEog29(MR#I(iZ1LS;?BRKWnYs=&H<78A)xgl|myj9quX*EBl6T&Sc zsS3(EV$ajUBuiDl2lgnfemI}&3+iq9jX*=A5~a?c723pzVJkQSoO>&%v4%==>F5 zQ_oLKL_{QdHnSN1v@}uyq@M6%IrAZ|KFK}(hy14dyW(TQ`yW|9a5;cRjd)e-EF#6zePF90E%wxnkE3Cj=xBQQJuSk++y zq9j(cxC>kJ>rkyO12o{MK90tq0c4q?_zK32w9H5Oge+AU*dl@|#rRk72bE&PO=ye~ zC5S;}{U-KDQ9`UfW%ROBaW5y|mxYmsBye``m3NOh_Yt zFW65?JiNB>~mWOU#T90CT}-J4o& z9cBS--I|P7R{%mh3Ji`Ec?iQ(NC1(D1Ei4!N#-DJ;~Yk1jn<*H<0b+O{_ZyrBKSu}{gLvU&HlO$IJKO0C?aj0po(Uv~a^F>E1W7oY0&+U#V`(V9*4_VK)gXzh^Xg|u(<^Qq#z7^H=T&Po zJ#s5y7({V6uQo=~&wEax^T*X!(eUDlUGVBIeDq(YH1yhDlqTlY=P9i~L`p-WU!^oG zmH@VL1r 
z61QyY9ELcXI^|YJEhge6V2#HjxcqTC;ngpK-Id)Y(moBO0>1jz^B6vPk2=cQK-cox zG-`t;gZ^kEFJO%r(1fGv^$~E7su~{I5XGAfG^&EVirQ=&@}okG*>9Kv;6Nw9gp8be zmrAGftu^=i{Li@eVfgAlC;jk?@(T`j5syV|np@gKj!B>oF4|=1vnOn86wsK#ia-$J zkIe~_SRuAu1$)X|V=+10To3$+V`TwZ>$5JH5jnlWD2l=)O2S%H3#X&$O2l!BfS(}4 z%?5Oa1wRG?m?R)U_=ZT5Or@eKw6z3 zF#&9rTb(`tqdQP*o%Qt^KAE5|jqtTWt4A~gZXCJUzAf>!!>C(GZg+1L=X{n+sQyd6 zL5V@#f{hS)fWwQRqlf}Nis-pBeh~$|0Fpci$AIUKpRzcEX&@uNJclk$^dGghh>s&8 zEGMGuyunU>J#MQ4kdkKQa=%b169mk@Pji66&@N&zC`&=Q*mX+Xv~j@C)>$##T%m&jxms_X5;Pq!?@lz}nDQ+fguQb*Cxm46R!jMb z3?vVdgsB(L!Bs~D(w+?u*7TG1?$cQW}iP1nkk_iE-C z4fr14pUuP3$UyJ7KI)Qvx7QbHk49qoR8c{GhafrDN3TOOr!Qhn?nG~Ei?eC2)`Wc2;G2(5-BQ|#Z2EdpX<8PPd3%%;gw& zA8TQPZ5)`x;^Wmx_A%KmBd<=fkx7x9og64vATPePE7-~>-Cb7O74`E*r&rxC@lSB? zZ@^am6>Q~);zJ4+7qOMvgRzxrX|=_$+PI530%eI1;#+DByq$uJGGX8k;~6!arAqPj4!mf|WyGL&Iq~=|v$mwHI8&){jC2 z2;UNu+|Yp{a{?rJY}J+gz5j0f`_z4EpBmRbave_i0+z@hm+PA<^=-*I zP#@OosI8)1#-wpr-sA;%j+Dt-;Sc2-JZ;s-$(J$pg4Qn)W=8*bKl60w z)YRY-&Tl`4am}Ba`Ke__D=A#)^prx_XK^Jf@} zz_M(^as$PGWd_RRYR5^PV-~Bm<@F02o3*X2#f?i#=WCb!+H!TPvUq;7b*r|#F*Q4P zesZz4yh*i3YMX^d9;wxp%hwz&L$Lg`hULqcrg=X&NTu9xalE|O-s&G5 zaBu62&e_SJ8-rx_P1C6@KeD+J>F`O9<;q#Kd@GqxnMvbW=>l=}0%0hT*V^G98i;OB z$C0cEJqWK>otmAUT*xKYCpi$^)%Wb|ig)FCt=<07f#@z@FF|=v^17s_Km|){mGiZ! zB^H(aaAoV^%=y~#a^*b9bo^Jj(nh>QjO)`LEgsHOo}U>o40mHw7UxxNPP3Kq`CV_R z(`nLt? 
z4Dr3$mfdD|DW_-1yWxnxdcysA^9^qFG4EHEVVjPjmko^ysE{YNZ2n9E6Uq((Wmy0X zFUT1Dmw6!Rn0%>NE9o1SGUlx}xZ`a(~Mai-ru085LQd{0$f8v*NmFG^f z&Ak8!V|#)7Jf$Wk)WoEkm{JqdYGOuB%&LhwH8HOy%4%XkO;psxlA2gn6IC@)Qxhv{ zazaf`s>vxeIjtsV)a0z1oKutYYO<^*7t~}$O)ja)Wi?qWDK$H-W@pswteTxuv-4`UtY#O~Y(>p3so7;UTUE0)HM^qb zCe+-dnwwH{(`s%;&CRO0IW;%0=E`brLCsav+>)AGR&!M~S5tE1V<`>j_Ma?g%`DHar|z$}_4wtIBh#Jg>@S zRbEi#iYhOu^0F#dRk^0hD{5gvEljF~DYY=I7G~7KtXh~;3-fBBtQHp3LPaessfA^= zP*n>xwXmWp6RI+)DpRU5ttvCBGOH?csxq%CWmQ>Fm5Qn?smijdR8^&>Dl2MfLM=_I zr75*Et(Iog(yUsVQ%mz|sjQY3)KWz)EvcntwNzD0HMO*&mM7Hmq*|U*%hPIkMlH{( zIk~KzGjm3uPMM{dQ%0$5go^DU{9*JSpWpl;1MUjr%f@3YEQoG6;?K$&($C?OZ=u zkt*XH#@D!zcw@v50Dq|n-ic~FxY5ahrGaLwn7i2 zB?4VDvjU^@8Qz=afSB3xCsTB-ACP_^DbEi){sh0q zt`L6&F?JvbBzS=#UqRVW>}H1NuwPomY*F?uk~T@Z80p0sOa{hD*FBN+I%vBNOUd-q zm}f<1eVIoVt3RzYY##}Xz&hoYie987+O*7drk!PlPuk<5-E+nb8{bVk>oG_3p;=^C zrO)tsmg&TPz2Fx{_gL`9DL=ViFZi2B8w-Af4`N7m9_msJY3WGGgzM3|^=s;0%Sl(H>%*g4qgom}_-UvnV%q?wC8Yo5>k4iCxlIJeY7d93zY?W46-SLQ@F@(D&IT<;uhfxMqnmcOR*vX_0C8%8#tl zt4#anOxJn4Iqdy``A*A!o^!(eu=796;C)^g{f75mmHk@A&VLK|*sXE5A$cXNa+W+z zcgSQT0s%xUZNuo5pkD%Cf?-*N`#tK70-KBx0b-p#1);;*FJ>$N z5-^CcHDeOC0JnU@%Xoy~lz=oI%GgI>4pPBT%$i~4A?~a`TN$0I6m$Z`SQ31r4mS~} z1-(p6mIuTrt!E5%+nL4!u^znbObX&!E0?w(u3g+-U9Nyg0&RHh0`M|R>zkYF&t?$? zN$e8qX=!t{damYZ;N&c?tuLPkKV7SB3w|n>#nlTNYplPEmlxMox3;s@%NHvbR+kr7 zR%>h3Jy%;3)Kv7fT1}UsPgM7lClbTj7$|4cv@VsDfX}#$wJRxIVqL5~yU5=n1CJY! 
zb?e59-QBEQtgc@u#FvXgnd?W-2@SqkT}EX_YPBHwTbH-CYZv0% z>yW$Yi;1NF$`-W9#oA_05{>cdvUI>{LRXY^e2pK==$b#%8MtOSP~{10xKCKad%_z2 z6Vt5IwR62!NbP2i&{N(u-oDb>{CJ?v; z1Z$I#x`9#bkVoeU1*~-lXp?EPjljr)XZ08#siSX<%~p2KYPsL8`Obwy-fPXToBk`U z0r#EORPfi9AAZ?V(LY!}Zf0NTsN9Re{R(VbybBO6+1O-$3oxO48aH8~S-ccr9bshT zbR6JEJfy%ke8R}c+5*%BZT5}44nYWsN3Jy6;%C6IGi-s8QW?Y1kPe^+1_0FKiRI#( zmcyUoPXk9sVBvkW<=W~apn3uevuuO4=hTw#x@AX7Lo^^$0yj$V@74sMHQ$j-GJzd5 zT5C;`TcCVtqxmFn8-TO5Qrq;Bx>0h~-L%FDM%4f@QFANB=7he*PaDjp5uLpY%y=+v zwpQ0KJ^@H*VuS3(NNljcEMGnq{Zop`Z56Hv%f@5@hGz1$)f1Y6;tMOA`eQTWs30{u zN(+ssAbMUHn#T?3E8gR=VpbS0WearbY9)eqNoVsc?Tn>B7eV;>i}Y$H_~|+xz!&8z zx83S}v1vbMb~^7j-)MUOMZL`O{|ZFlF?TBXV*tQkx+?lB_it79V;ML1iHx8Bvy5#- zW{=U$t`34{vzBEz$^_Yi#E3I&nr4``k+B`Q^xCdma#Zz;X~J@OVWT-r`i7osls>&oi61YOyM zn51Y*f(UDD#j8CE2D7zTNUqOb-G&o+d7~ESVz%>@^;{z=-4#GEb_}_5cgAAUN>NY| z-P$5}$RZK}K+H~5#s+h$U5UTg7uPS=QY2BG4%xf(o<=W!k7y%xP%l7ipOxIyQAEb;}T{PM!^?>_ofEB~&llLHErzv!WYn<3E&;%Heqc~8g?)WooSfTXxJP4oSO&GlX!(Po zq$w|9EcrH2&EP`0nWVT)S}?eH(i|Ko{sbMN83;EvUgpjc0?uY&lB5xUFHn_+0mAgb z-XdARWEl`iS0{nzG%o{XnOfWikH?mN<4j`E;HFUFKI;j{O%9<+oW@*zQJ%g?S~`e} z=&O)4Ezhf)4lHR#nxKv^H~X%~cHm}CE^mYS-2UwIEhn1^%KL?$?6@-PHX(+r+aJ2a z;g;K~3G8=c_S;TMwNTMbFFxzp=ND_=bE&cxHZrzkr?4t(=X@Zqruz1xXOo?yX*Y^snjAYARSzTMppR2((sSj@p zAnvVhEeeRXR=2h+DrV}#b|to2jp9rEC^~O#+d98mwa#x}4%XHy)r3%NL}hCcZlQT` zFW?_@Xm+59DO)jS9meyrxVBx#6`P1t>(bmz_oa<0&%C$?C*a8U=e_xsj9U$p}{K@HpB zAQ=IaPtmlqLTZK3K}ARgrxlb%hy)F?>A^7JY8MoB)u25_)FNsP+5<#E)(ZBZ-9-Z+ zYXxVJ`;XS|-#f}ipb07&fy@2wpdNt7{hr*vqkkD};X$^X7C=9{P~-*9$=1L-**a{u zhMs(|aIl>F)7%kzAkJm@&Qk!6@h|cNU#bk-cQ^;wKmAj!CAJkWj<|sxxWo4KNqdSt z#vQS5P8w1n04ZswJ@TEQdIzBrCVxyJKaHp1S$H18A-kZdS63#r;6hOAQcst9LyS=> z%M%mErSS|C4%riw5}}kZEtN@U>WAzTx)iV`$+T4718-x}O&=?wRyivvs z6TQ9uYC4APAqS)f7C8c+UHoyxJ%S?@ZM)0aC*y@p-NZj<1qpc!Iu3L4%~CO$AI+6F z#&U2@Sbag1Vd~t-ZA?~_ZH!2E>iEYhbqnFi_Q=j<2m`^`hV28aw?KqLhhld5^G&H0 zIE**SWde_D7(_u>H0=9=s1*5ORESDpNKkCe`gzEhZ0Mh9hfCU6k51?{r97uUhze#9 z%C~HvgJ$HGsPx45(5r1fa<%%rL}X~$FqD>d_otujl@zB>b47O!Pb!aYzmZz;lU~`- 
zE6p$)y7{n;Z^bb7&o|An-I!RjY_nPsD4uK|wr8CPai7V&shkDc*_B060KQMzb&inX zNrGwEAA$>-O|xV=;1|@(T4lct(V&vi?5Y5|8#0b#zi{Ph&}$$F0{w<_o*ikWDE;Om zznqB*z?$5!{hd++0Mjm*yMTen%@xXlJ>?w87DBgF=6OShmwRT^c`%T&XwbeRbj@6N zaz0Pme||K}$*3FSdp)ZO(%8O$@RVq+FFjc62PWUxPH z>d|HxZ8pg2(R#TeqkVR^7(Kndce3)g%oyU0NW|(PeBI7KD`7n@05HX*u26NY!$lQB{P=p~i zNIOU(VoHQUdYb~z5a67dEyp@5BwS$|e98^uY!7Evd*CsX|J+$Ar1PwfdY| z6FR29tg8o23_9Li&NoY1vSx|=wXx>mWD8kr<=CgR;!+th4;au`eaOztHccYStp%ZA zW8R!3z~DdqpL zeZu{O{U$T`DnjP3J9hMM&Nqn#X@9QoZEJt{(#L6inEpIrbkz9W_()UE~*K~~}+WFwR+w5ZDZaOzc| zP&BNTTwdhn5Pq6hCJbpoKlOhdfe@1f)S2+ol9Y~1Lh{lAi6j4wlR}iS>yt$Us*+Dp zNIV#BU@oUBYn6-3wS=~E@4vLMv39xfb@&g!)4Cg8Q>^UZEydN_W1>jJTIYo!y;*y9 zWwW}KO|x|?d9>0H6mQiQw{*IrntUi*$Pz2(YTUI1WwDj3=j&=wntV=ebKQc5%CBDB z(N2JBUn0c|bu^}A>}Rd7UQ9%C&K9KFwuJyLiu;b0E1JHgj4$L$mUd%vmU$9d@E$M1jewkM{WfCPw#C19~= zSOMj(8RfEsb(}9#crmax(q~c?=Sxlh2j+|M?c+z(@1!6lnB+TsBVDDlaF%g_Fb zSPjmbAh$`5G)z?)hC>Y z1#7Hq7W2id=M36tfO|GIXV*C3NCub^c7V>7!Ag{Z(Py~nga zW{x<2Z~mV0-e>=@<^Q06!u?(UbnwF&H+oIxv0V0-Iz;Qnf?hZv96TTvlS-3g8eXv@ zfq(QoAq zW6&x1p!sCI^ft@od80s3tYfLHd3^D3{#ItWqKz8 zdi(bgTBCivhz=dl7~AX382k1OxczpY-S6}{{cfMz@AY~8exKi;>C5y7eL;WN2Zjnz zgfXVMYa)zbHqES=V+L%|<8qh?&^X{*)BU_x?%5iFN{HgB^%Sgr$2E~Qq$t^kc2$4&T*09u_d zj6Dc|B!z-r)(f(6QP$0QK`xHxT+0gt6g*-jjwbbsl7_V4B_KoS{KRadCd?&a zK53Cdv{9?ArAikWomg65Uu)H|SSpd07Mk;<(vFNvlT+n#>BzIXKr&?5ai@}s_4gX3(o;Mk zQc7=4MfBKIG(FXnn41af9b8H#K^oAFlo1e%(6*)pHg&awmIgDeyhe(dqHNU>pB|TGy=hT&mtLS(1V9UhAd1bs-`Fa3WP9!1XCQl75HjT z0%HSfvM_A#4*(};-8@k zSsbDPG}h0WWBkQ1YT~S+M-&Oa))|Jkn9I5>a=4(j)!nP zsRL`5cDkuEx3i94j?y|=U8jdS^E>Ng%B;wEJb7IQtkx*+tP_+AX`Q030|?9gE=Dm8TK#+LwHND%fw=e2pE7KpWExP%dCe^G**1d#;U z3_)IE1qs^WR+y^9XRA6pK>=D@OC2f00xVa>yo-c`A@XQ&1uaekdu}ej_ia>G#oVOP>ei*DV*NhUa#lq!YjID{?nbs(V^K94 zQ(n?cc1xtum>6%?R#qQ{HL~;{i#IkH9)dK2AUeD|q^@w9N#=QP=;g@T&9ge@q08ru zK`}5kEJ%IZ5#c66r$B^HVo(%ewUo%yO2U$$qKMt(HuPgq4maJHtq4( zB)ZZ1ODEa4ZwR1OPdTLsGBD@De1H|J$ni^&g|(OkPXsG5&0*BY0s<5*MHDfPk@IqP zvA{6pd)+bi;(~%-A!bdnmSdJ{jz6)ED)3k2uZ5f1X99KXWGm>5jRsEq8_}pNk7JEt 
zz+*;U9Xk=>4IK9PH*8M^rajVAQ;U$3T|PpCgq-W#1y+y2)x1McgBAB}>ywU?q_YyUC| zl|~=;0^sbfb@Cv`f%iAUGiPdWr-? zXnh-l67;tcj7l(EA{dZhq>bQkdkdV6UUn=q)7Q`AnPI0S_76*VFwQzOqDv0<9b({y zIoAX9pOkwj|?3gIy`uE=+Nka(TUL``w#9vJbHBhp|JyF z6JtjX96WIBV6#2C);fYm%`_sA0be5)hX8n;Eg@fxzQkd3H58qa3Sb#>ufFh^kdTc* zE^*j=7y6$`h330h>Qt9aLN0OGe3vlE8x@i!=u>tN*>G&x9@4!cZiXF2XY&GeJ)O{5 zaebJ0w3t3I&q=+eElFC`PbBJBN($jbp+F8ZPZYI)6S`NG?lbC^Np;JVy5#})ehuVh z->Gjl?dh(wcwzP8;>P*JO1)!8_SWjTBwu!=uzKmjh0C$gOw-=vo|}|CNG?>$a?xfn zTJ0$!b*_d^m00nFBvvt3*pWXN$NrJQIx{cD1#1@W<(Bg z<;KmoeytzK_Ta#^DoELrsKaM7Iq~5qYG_xv-1FLh`s24uzV^>wv;W7IUvbaRzdipO z55MA`*KWVxdF<}vH+A&9{b_3t{@qPqdrtRPzH$7k!Dohk>jQzRJoDS_XIHmRn3O(V z{kg->yzhJ7@XYUf`yapYWn=f$zT^BSKlzgb@9qAjPgg$h7ys$r+otdQrwD6SQ`oeSmU`iUfAYMie(cu=Klr{teZ>B7${2XU5hWF5_xiuT|2qTq8HiU=%e`Oz)c1Tdc-33p z^{L5+Kl$flV@J>QeC4rc+tFTD@A%GtefimA@A!p(dh2U$y!Z4Q_I=084*kS` zdt*nw>z|kB|H%2q&9DEn54QcrpT6;~kG=Fo-+1>Y-}RZ#zWJ`x@A~DNj(niyPha?w zw{F~+f6WW_f8qQ8?x)>vyy?bY{^&chZ+PBoqPKj{dmsPP7k}^D{(0!jKlYYCdfrc0 zKJ>X`J>Tki_;K~s_Z1HP)kpsHPk*7i_2CbkdB$^(4!qX-=Y@w~|JQd~kN(^bZ~ol7 z-_`!<+|Zk?o(KNx;}1=|`p4h>ikJ1?`J$is$c-=UnpN++_?eeI^pUNvd*6BQQx495 z?(*GzwYOBB`=g)w;EMb7{>Q(#?>P_s$_fkPw{>48TtKNKUDtaS047S z3v(a+Sof>nJ@Jp-=e{r7_W1Yy#N97{^eeZ%tZ(Y;zti*T_iRmn{#PEdX7dle<^TES zAOA+rJ-@s5#b@{a)*HX{=(B$5wbKJ1bANHpKUnzQw-!G7#y5QHzx>+JE8qB$@BGO( zneY0Sp<8a~XnkeN)8Bg6ZyL}2#y|g$PhT2;?sn&W);B-?wAcN1*KgeQ_y^whsnh#@ zvA6%1Ui-E;o6q;!?iqbc^@G=6e)V(CzVGbKpI*M>N6!D%?a!-x@fn}^m0Nyr`2YL# z$N%#)Uh$S6{BK`cI&t#yWbe(@#CAM*deeaYtUezN=ffBU+(|LmV1`T5y1 zpMCt{^WVDgFQ@T*!03R=ti#s?u6lI&EmM{V{Jr%h;O?+3Jh`#$r088IauMPIYI`+O=*o0 z;cM5E;7QF%41}7KHcGYN?`{#k57IbPn_y9a4Ku1p!M}&VLh~@AxuG-(9hfOQnw!Z8 z(2(*nGA7IlxrrPEmuZf072`V?ldG*F>q$?WMb?wPHjJz%GuklHcZ;xm^q2}tm}8DD z-IoI;ysdg=gwb&lRz;>9X@(U8u^C7pLyI(fiX5kr1DY-ka0f%nYQ9rFnNz}IsweYG zxJmV7LG2@%b&DOz(7IKhlp`o)XhqfET)9OJkfdvuu0t72s~U`RyhhGYd`oPvQc}as z$u>38oNQO4&B+e6pJa<$97qN;tj6MU=7}fkRK~yADR~Exnv@>5Br6naQ(Q%@$O~5PL=#(OCf!P|t;)bzK)jJeWdg#c0lv 
zNTopV#UKrNsg6rj@({0z>I{@C<*|*Nj%#tWPyk{fr*j|>PH6-AhT)ojUk$@fuh)CHgLPy%#aULU9a}&E;)vNU*1AJQ`$T!)~oFJuB53i$$wGzOWXTD$D0BI6YcG z*rfLI?`O@0%30{Zs*#COe6(5kCD`|}EKz4F$~R-cL=gs0fa=+tFc(w~Yb^P=VfdIR zS72u>;3(G@^6iP7C5Nh6h<^4?J&_Pa`!+Z%ybw{(n3Gi&TWV)`VOVKF#-Sdo#gDQY zj5k^7`l~BK_S@6SneFrLe~$nA=79ST=8*tSYxJ19C;L^j^2NCfum=_@50mIXVwTLq z2tUDwpMV-6{v59FwLUtoEmb)|hSBsMzEY4L~9c==+;82d=*LbDJv zjjc;q2d-bZu(}P4ZhdY29KK*g?PG9_AcTv&)XXj5z9u#om!7twCpC?x7#drw5GZStnx*6a%f z7Lad6UCwC9Te#bQsd*y!ZPSf@-<--m&Xy(mEZi+tWu%2K;^5ye$;W#NeC+t#(lMj12A6a0{u_5|QY03KN_n`jVsJ3nJS z-TRvPUekYvb;A7$p!TGSe}CUB!Y|p4wFE9(K8Ze zmWKXl5s`EyfVqoGGcn;liS`YG2yQ|kp;M{7+EZ)G_t&=lRC8EI*a5b7Kzc{(0LMO| zu=L&X#AM3kv43*C?MB1w^^?!K1!v!?ywk?$$nu=ZnF+fV)mOVie$EsB;bQ( z{i0SX*4qN+Tg!_`1^yk?Mp)z)Gb781w{M-Oe-+7AfElK7LPHQ)k7Vk20drPBzcG;N z@ugt?3<3cf^2Kr&5YG;x!9Tl5tY8@OCS^0wJBzJRROw;fI0@X+>yK@y!M-2VsDxOr zqk|{-sR^4_TkEo@%DxyDV{)H0&Z=^TFB4X~ehMwA5QV_}aDzJz$Gr^WJ9fmjPwa2z z;jx?ou^qHKSwLDI0NS`qTSdfH6qRKUMC=9_ZH(Xcoj?qR6zC&`%6Ng>^f`h@6|BxJ z`v;LB{FwQ$_bu}+rvI}j^#3Lb{hz?!#-D*neaiZxnf;(+=YAc<{#H@+M#T`E+Cx#U zuG}wjJhb)`^?o~6?{{ML{$i}&@5YM#IV8B$%m!M`pAS(|e5Xo2*;vaT#TqhAywb@f zjP?7Av3|cN`u+L3et#~TjkWu=$-k)VCy_+Yzn%h?F~UZ&q4A&O0TI{~uBP_StF@d5 zDobm%o!hS#0LH!nnv6f|pX;9L{dRS8ePgGN{}nC(p1f|qvn``)LeF^3*ZMW>)qE-u zE=~ZB@@a`ueX&qqjmg&~I{tzkv4VK@pji4FkPc$ST>Z7lts-t~((%_ARYS)=Mz9jK z-Fg!{bam^EBs%``(E*YL=8IClbe%rep#IW|DE@+}Y3#32gzza*>d)(MO`1J!Abyke zkH1&G#fQ>QKmfho`)n@?{f>cXKHd8)0J34%z@!|sPBsqS9#Qu3U(V|u@fgDN_TCmK6JNnw1~cg zZ?zV2wKIVEL8qI`w&wD91}Wu=Zim~RZOOK|Nd6bXsnY>uP0Q_TAf>y@PlU zZHjN)P5HY24|@r@s_U}JS8ovfIyDLo0yi#6lu=c8o5n_+@L|l}0ME-OxDL z?XskCcVc!VRuOv{lgGt-c@ok1#4g7nSNYu1>^drtX6(AMz|Af`WKDKup!OnAEo`4v zga-nAeAqb;mmXGtPLlZg2;F0$4IY4#5D#il=*(8A3`Ef;m7yX5g81;zAg{E$0A(VL zSag&ccLdh4K*&>v79AXT&t%zpW@c==aF~;=@z2|#)p|M%?w~dX~f8Y3^xLww2ED9|007(Xi=3bPLe9_seGZQ$<`9U?fN|J30x}3Z#IF$G6HbGWg&Gh; zTxnNK023fHY$PEHdx6i0;USI;<1lN4Ikiy;fUP;^##bm1okrlVuc6D7P(I?t<-<5u zAvv1O**?1eW-tIR%#8Zsc$sbes0o_wgS2aQ56G~3bl5LyXXc?Iq4IlCOM=_kF(Q{p 
zI~`Fi9RPoYe|X93IDCl2{(bEvtgeHH468SfGuwkjp2$87<>WiXQk&6h?larlTRMxK zE#0j>Z9UoUd}pq^tEZ!<)Dw0WI-~BWv$Gf+QGih#gT#;%`bYpOe8d>A3c>^?C&lcU z+V_&cTgfIfp@9J8`1#N`WFV&5v@)O}IDwM5+y`>y$U-%7S1D~_1qV!~n^3pf2+#_l zmpE+X3dIC+C!ua$T4almor4^y2Zjt+2^Vgfx>9UM6beSDD+w})>Y7x;*Q<0Lr|_BH z=;Vs7<0p>0#TbtVj0ZqtG)@!JBhN!zJGF95y;-4G%aci6Jr+GM;_hkfK5DnKDi7*! z8@ek@u4gLSl`B!umI|({R`9NQ5uV?ocEg%*2D=i+RXYm>m^b08iR&sGU9ig4HmKM^ z$yJR}`*-gMyN0ua8TllfQ>9)`QsPwjlp%kZjc8|5;Z73QFO(NuHpODJkD$_M#599Cg)7e*Oe zH~2v?f=}g4h#oMCXlDY*@If4Vm&1H)3HVgQc-~kpQ_k4eJ158#246^{)W#o|3nm$b zG#L*dj2J+GnYR~uT2XK4&d`#v;&oCeXBn#)2eM@g5krP22KmW|GLgqa;ZAA99%jEs z8xSS;slW|UzB}L?6LWgNftV+LpA3pYds0PwW<^m10~$8~VMuEsPaOt&D4~68^6a7M z+|K>LdQlfsDS?EuU6R9x^#ILsMAwP|r^Q(i6en=!&2nyDl?LtJFpgX0JcAoR5|NP+ zpdxpWmn#5O^yKu`zJEA=cAAoxQTBMug6QSZP>6kDi_;DsAu5LUJk4omB03$~a~1+0 z+38hlI<5f}t@Xqugk4q-3CGE@?;pU-vDgiQBF;=&G**LUO0f^Akmw8wnWcq-MYB_N zV!)R-eswh6vN1*=^JeS2Kd0)=Xdq&4X#@y#*hDBm zlw%96XnO3pUe<=~6EiNro{TUjTt(w9A$}i_;BHy`_lTn3Qv?^gSAzTGecdm()3hXu z5_I4c)M)o$-^@$FEh1;UO{^^Ll-L~-yH#4bJxkCl0>$H^=sYk0>R=8I3=&TC!-P^j zrz9MomGHz2rB?n9p~tM&TTjTP?a*Pl%;at}>_RK1_=p2bZ8#m`dY}bvHP@N)0=Ucd zf(Cf@4MTCIo7(i%8M}%7$2okRY;?|%n`qQ`j^0S4zB@9=mgODn?qRBCa(5cuY4c9Z z@y|&Ae(O=`-)}o6{rf#fSz@+7le=3kHy#`3k~ziwesJ>k2_x(|i{WTEYEBX!x|wFf zW0O;aH{L>gu-IQ5D2^5ncMbP;SnXDuHBuZL=^q&w866oO8yVSmaPr9Xk%ftg>FMe2 ziLQyE`LW}Nu0MGE!14VzbWY4q96K^`WNxB7F*$u~`uIfe!SU(wBRvOa?wXpOI-)Hl zaS8AFS+FzYCB$81&L+;vbVA!aim7Ba$)OaXij-;j3cP(U%m}03)t98cBlrPrQB^LW zBnbTQ07WFJPmcbN!XDU+zLz#*?L8(#S`++&?m(0zt}!A(lmIIpcmcfbdbVVO5y8rj zTZ&t5Y89##?*Q=-MKv%UvCzfRCtCviQF(4@S?X~C^ym=eBOOez=lH=uWj4?{6ubT% zBOo6jBmU$l>%bz_7h_Qr|C?)NJ(_4wUW;o_+WS(h)o2fi2{5gTZB3D6*y&roBI z253?mm9ayP=Vb803I`(;mbhk@+Yn9i)VQ7kePxXbO3DMv94dGQQpPw!X%zHU7zR28 z7D%2j|6p;V&Z+tF^vNJR{LR(-7JO0n9Aj=W1{#)n@g4ZpqZ!_iEFk?j(huws4dSKO z;AHF|{EpgX>lAK4VstJOU&zeGv4=KSw`->>8~zzwnBR&2&)h9*>v%-Cw{mH#Mzsbb zqI7Rcc_np>aylR+a%AI@hBi&eDAdHJWaKcuy7vXEfenFQt!f7qleU>~aT(Lbt zD0el}y?3@Cn)b3dRfv1b#217U4IXJMp)8~C{XRnEbLuV|nUqa$dBKG)R7kr2h 
zym~_97tKnorM=iG2|rI?=-eMtdam0!JR^h_xIrM64 zi;ql->-I;c1lvvJwK3|7ad7h5VniE@)<3It=k*xTN6(j^SvgX zn@WNb4;ecU-m7`AkNMMk36))*mIWDdVO3}kIVIvd1}_!5uEUELDa`rNKauoZ>nDP{YyD!irK{J#9^{%o+-_X+hewWU{REl4*6*`l5^Me^ zrTTHu52x5hl{tj)P?fC(a1Ui(=iq>U2*f~5`uN}sV5L&AkL}rkb`{f4*|CLYE!+l! zG=PhX`6uM14G9+H$P3*t3^Q<3#g8Dqn;1yPVFm&Q zz+e=v2BCWc{}}y;@jw_>5Qd%}2s|$6+%VKW4`m<`hhn*UAfcOe!U(RjI31PZ z7hH)VYzh=k9S1X7IYO-5Qf`%(80B;Ms4M6?IniwT$4P5ZXHW<_hwU>#Au0x4Ve!;N zN!DLp1zx!%JdTE#wj;o`AnEk`9b}PHk@wzahK0H)wmm9f7udy0g{{EHWk&c=irSg( zFvuMX+JbhO?(cS<#s@}%#hh`TiHl1I|l5ZGPB)hCs<=0 zK?%2rybKrr0-3M_*Nc1r^3k>gk^bmjiJu+)$>9_uz=L9oA4Kxi1+ZKYF!I&4`eAFE zvl^C!XQ0n(mwmwd%7PPrQ%g<;&l zg4XxI^}=q{D)+Doc4ikOyI1>8Y_6@d_bDOmPnJ*|=56C1vu^-R3t3R2Yyraq#z+B< zn1wfYfxV|^#JL?WomY+G2`||!_m>AGM$JKCr5r<%aR}BL-#oA&g!U@4R&f=q$|D1~ zJLO~tTt?8A{*_1R1pyFm8x~LFZNuYfQ~4)3HaxM@*lA84+dT~2!p8%V$FUgM{<#0$ z-E{<d9qi607t%S9PWOJkliUr9bW?Gq5i6y~Ufb8cW70s7L%jiFVN zKG57gS_gcwjoBQ~Y4Q2TL>`nmRJf8dZqx4UXFb$B6F*)C&BJNKRg9=*7E9$rMVu?6 zS$LS!YUmyQW|2eT%8a^vv;|QQI)M7?=rn$RAmv`6!Up&18P@jiSy<^DbNvXdPhQ6% zM3q|!mQD;9?y|(HHxkrtBDo@Y=cLNR60Dvi`K(g}=c(fLO%Rl4%JATBE=?Hz_&7(h z__v=ffL7cvGXNa83*-KHc|0?2k4NLzkI#+YJwC%R0=LiK-hJo7~5 zk_p2kzW4=rBNEkx!YGkSI!F}WVUmdJY)G_jyf4nNTV?D8(*(CT-p+Y^Q;rScTQ50xkn z2n$q4lPftgD-?>LmC?&H(2yRl9)8ldj6dnmQQ);AW5vOC?8VbQ4Ra@w;I7(sMFfcX z)0*I2zMQbEJmGY~@yaCEr%u&*Ukzqg+kLfjm)2l`RGXXH=}|O%03O)ZrA^&f{VdA5 zcTs(;4|>@xIN#)0!Dd=~r{mEuaX_khQ%jdNFKPx=X_;qOypTLZaw1SZ=1nCu{PN|l zB)yrAwqDYE)e_h_JAIn^_%vO;P+8mB?c6lc79<;9*K!3XA^Ow+t*u`M8lh`b#*sE2JmlXBri#yM2i(=dCc)>@aQ(b?9aiYO``gUruEI$bE}*!4=WT^?Bo`$q_xvit#BOiR2z4G z6}Po}spv<{>BiuVrv?M2#G%b&Ol9j%jFiFcB%_i}99Ae60U)o+N z@4?ah{|_rNHv9Syb!&_|LfOx^>OGB(n|QA}>u2uwy(4~RUT?W@O~~%>bNxQb<5K-W zy$_H&h^CHFEl6N=D^;Fv?7Ve4eo;Gg%$5WUnHCJ%ee)(FTy?wGujMy z7U!79o#JifLUF2i6lRbZMumJD_XR%?rgZTZB5Od)Rwz@7Tu~%clMd&)e>oZF_YgZ0Pcm9%NFj-y{P zDw8iCCWmeBPvcHW+XL%-x1rm;}mA(VBc#}X3 zEMp)H#FI3Br3L7yaSOoX0C>D{_@~#n;$ufVV}@m=$RS9HSHaW>Ou( 
zUKiim+Jkon&wZr!p>n|J@DeSl9=PmWilI&c-6T~ovvNVuE|*fs8M^Xib4)G_O^@aDkmWk3g^I3-TQ#So}ITlOC2T0 zmLu|V9LDb3U8Sz}u1>@5XgBTls4Z$mnc6`}A}tPQy(ry&#s3+49B%lP0HL&4JS455wovLe)ZlqKSDaWoiP z6DF-+VN@tW?kfxnt&R1KU1y3mk0uo}H<=mAP#I&4gJ_}Xh04aiqtRi*S9QbZVB<_; zbeHv)M<7nBlQ#z5(3&y&*yH6u9li#`Iw7n@{Y%WLjFzj}C9)rxXX29*^r@oY15LJx z$7AQ#oQMNbQnm(g8YmnCePY0zakfOcpeI+{dAbBvBDjhiVVX86?hQ|s=3~qygU%n^ zXbfdE4q}&(Dyf+Li%lUefw3|Wv?i}%KOzmWxW`IJo=@O3T=3YYksY(#I16b|BQ`y- z#09Iu5)*ItoQ0sqLi6VOq_A_KAa@|HgAebZj?2)Cd9oBaO~)?TCpZcq{vqqY$KkL& z?6hW0d3f|g@pc$mXWGF#+%uiKEF=+UG&%*3fE`W8=75}hML(UwB+huw7?zu;eU_tg zmi~=0$HI2wwBI<}PA{G1yf|sTxjh{OZPo|G90}L6Z=mS`TW;aXooWTW>1lY4QYSlloqge!Y(vUT5_tkAgOY_=FR2&+>x?y*Q zzqYeIUXK)^=_%-0KDH7~-e$u}v_)fT0~~+>D?BrtE;DN^)&ph0aO&o1o7E#8jR)Ie zJd>&kpIxd7=5)GVl@%!NigA9ZQPL}`mKDpF_aS&VpAJT8udHv8tPn&fBi7U}oA#Fw z8UAj>7!iM?&Gz#MmG$*M^FRWdUxjt#h zV{R37cD1&V&es;sd-cHG__6bP>XsK%ZcHJa%Iad=QL&McKJsS#!h|Q8Pvc3M&pd2z zt}bsUI$_yu{Wejal3C0psibmR7>}gRthokX1}2PNJ!oI)hsf2_dXaz$Y;NJ8Sb*j= zetdA({5^qD#a<`?Xs$A!r_2vO8LbzKV`WEtyPxk#**fmdS)=H`*jW^@eRovh;uh{j zoD$TbwT1l$CoyaTV2lRm1hEmD#ePVSPX4+QUmuEHg3X(vW5q#C76k_vG`1B;Q0&pn zi&ivPWCJe31;BEDP~!>vI;25q3`r`P9Dz=wB1J6?;$EB+FR&~SY*$bRu?#81$bybS zc}OmsKklqFBB0}99FXQiya``J=R>^7Hm~ClZ?g01IE0_xG3&+`nf6D_cIQ*(Z<^lgt-n6Z9M>&25`?4R#y$q5bY zuWp@)XbXcLP7g=G2PMpM5Q$cN@%fCCXk0~B3&pZIva4yzQ1-wqv>5m;wb=?qho0px zA>)>sUa}o>dTd-N*(f#X2PLO7Y1(Jg!~h_rP1#MgObsEME?^{aL~40wRyl00ayge= z6Uo`NcUh5lxu|| zomTdc=g45}8vF4TD%0-DYHh8$wPQJhC+H5G@$O!IUyeKz%&L-)b81@pn3f@|F15 zV4E@KRS{1R5Tv-HVY{Fa>0YP`4!tJXIjV<@;!!%}~M&*9;aLw z=gy;|O?(MDOy;>%xb8fqcKyItOvbOPX z1<%%tOUS?%&rQY0v-znPacAk`)a*=ruti(7wiegH`}E=l$Cq8^49>Q5l?xa2oz1x( z=Ur?+JUIuDXye=n8CBwloKcA*s=hsQ>#(Z>Pj>4oAm=jdCU0te2_734uj*%LD$Z>@ za=s}UNmAsfGSCumf>~KiMkc=U)>roJm02G}8@I=gS6N!tlOzqXaIW=haM<#n5L>e>U*2`#ZnN!G>?t6>>%?6lU7MZDm6GpwWwTa;x{p;| zu0RK3XO(gYZygDEBGJA`%dmNgg7|1cO2^UT1kz>UscB;1D2BS5`GA+hr#&f~UaiPpk z$g+lP5h4&Pf=J;LO3uC#M>C+DKn>YK*(RxhQtMx}Hp!m4Rrdqd#UEGpi>w*veb&b; 
z?|I&v9RE$x3HS5S^})|%ZwNnw!TsN6f6B?eptX>DRcpaGftBwmyiB!oaEkERri>em zn~amjEu#I$KMv9&c!wLy7&WWT7$ihlxL4Y6!64AR3OE!oL+`L=t60 zd2O4GoSq5d02ab74X|$2N~}FNixWF+nm1Z9SSg?xJ%*8iTse$JcJ*RbUk>C%6vio9 zai!Bx4~B83APmWcoanS3EeWMKT@vTZ!xA@p4~oR<$N~7IYQN37Vb;xuD79|02Y>-1 z<3|W*uLGpmHy$SL6y`aA(RSJvj>~0e0j~wl{Ef3D-2N_`XYSs|;r0i|+F+|4>)mH~ zXNzryKiWh4!F5#dgPTSP!{fsvM$|c-H!U>nI|&aV_qU?Mo!C`J^EXNS`l)u)I?yrP z)z{bFZ+49gjSdbBwYImkxAk}R_Z{o#Kh$xgW2_^X>7Fai9_;9u>+J8ne&5{T4ormk z4`KzJ0DXw?w+$R7p&x7A?jUBI?duYG@v&9N{G-86CtxRELmz+#v*+zQj*1WM91- zt&S<0tHNyMOd?=2RwtGo45v7238D;L=2lC!i+xK;nMnUd=2k?o7gvZQGR- z+ZcE3%smrJ7;DvhntvZ>nvvOXL$ib7D>Nz2YJ><4`9l#=b9;RqXK*=*G*sHsT_*8Y zlWaC(n!I^|dv#BBxu;jHtyDPdEaBajQt7AAvgzULJA|Ok`_fLAi&s##sWGrEoZp=) zf0`GZa3Do~sx|vIc^01oVHPFQAb}eOzp3jQ8;kD?vi*twn)H!&s>r3}u+2eh)lAGD zMJ_9vaxhr+{B0$qaTqHy>Ruh`dTTFhTrE14ZBy&W)%VW)cCsQ8X7`?)R}0d*AmlZ2 zUODY;{n?!QvxDAl62sz5$4_VeKU6ntZ)vQ3)DNbuVLzDDpAVQDc7!GUB`?Gulv3o( z@bxP5AbQj~M7V)B{4T4@>{4BJmxHYUn{t9s*L{w656Ett@MPgJyedAVH<-m17O?QRMYA+$BO$C?{WJlg}VrBgsN@k z1=wREb+t#?)#Z3kad3mUbQDpd*1{4mA7i-3v7Li)2_0j4r|DeqY+S>NIO}L1Ol2<@ zw=L#MkQ5v_Zyv|?j2n#O2$`&6Kl)ew=wI1o6u#&$tC_$P)6~O%9t=>a>vB{j{Ns}y zV#G$5s+^Z2>u~6)HM{)!^o-a?v+8UI5?d;vgHy0a90b1FP71D@U_bn5M&BzsUf?W} z5AVQ~ncyHkJrnwHWLT|aVniifK>}N(=d8?vJYx%rMXKD2UXrkNPA{OO|by+vRl`A013lJk;{AR{C!1k(XNbx2!YRdH{y5P><9G}V0!qZ8A_IjDOfEYjA&g$U=to*sps~G}^zvYocr9Ri2t)&pImO;V z$S*S#v1pHcc-Qeze2mbk_lx-90a*}y9Ir7lPD&TXKGbR=1lTVm|@&@ zV2tRkI=uZrKl9OdE9=V~zlD=#)iZD7kPWqDPJmnPpPDAT^Cr+;>!I5v{>&RFwOi3g z^lW?h&Q3nnyJpLhxO;Yvi=&Gor~S@*@c!y_%-$;(UC|l2vr`hfe&h~vaEs;j9^=Iv z8X5zsxXw6cc#FMbM&?0^{LIsi5C+$uJY^c;Qx5GjP98pS^vL)PM~{vdu``{SIX+(s zCTFfYegn)$!0XI)*G-Ji;0EZr6Q_bh{RjIGkB^T}-Z(p7zGGp2>aMA~=jP|O#ZpV-xN*fal{6EAVNw+=3xte8${$FsG%P5pZcSRCQIV_**$yfR zUlD+dRx*zh^W>;5Xr)l_B2bW962Ss9D*^=>3UElb&;pZBsB}pQ?uzlHm{K{ol+XIP zoNhNYztXcNx1y}T&;+p+ul6|PAu8^!B~SFvVwXSK9S#pok75*ryj~D(ZK$OUzBHa` zF+U7S&zMb2`gOSQ5Byk~5xm3iN__LzL{q#gDX;qN&Ld zCfY~{Y}LLEOem7zOif&q)T)6b?$pt&MDM+{xeQKKt2%eAu5X>s>hK;xKtNYLsR-B5 
zzLR4x5>7^#-a+{uIPcYQR&6c>Dq4umh47om`56cgwss+;O>Pp?C^iT!%WLafHLYw= z$9}|sXcvMicq(Jwfs*b-L$BqiMjTJ4Sm_lW!Yw>H?!Zz5!9^>}%jhNe5?y6FThd4@ zf)P^(Fx3btP3_U;wM$!=hGa;{7;DwGdaO~pc3GD*-?eIr&j!*mrbD>p@pN>x>YSIi zmDeFRCsXOi>D6j$9MOoRHf&<-wshJqK|HpNCFDm=FT)mj`Y0mb#_PTC?DTE0*IBl`>DLrL>IgJIHo zDn8Ogt_TA5>`NM;I#@ZLG**(v6ZnGzw~D55h;deDH6A0jTwA1AKbkaHIfqxEzltTr z?FA{!d@H_!$U9gkJ{ml09Q#E4cQ=lIx1s=MDl`|Jj$7E7+fQIxmv;e0; z&x-?F7CG{0V^&Nj3sDJN8KmccF~C4CmBS}}Kz51%^D3Eo>8M0I9~|VCqk1Zc+IBh^ zq9;kB^q<^%B@Yj6%J^*5CWDmbaamB=0~f_Em@U{Nfn9q>qkYh4vZKZhoFf-0&zk`D z;g1XZf!hU|2&_)5gZ_ZM!xRV~nuOuHT4^wqwr~SyrDZqNOUrJUl?RhI{=P2mPc#Jl zsx?zPmKuDYI!;}*L7v+%nNk;fcy;xVtAD9msuG=C&8qW^`70JIYWuT^9GNScH@EpUTf$qf{=@CFwjZP6BVu39vGwebK zL(uomo$fym3b5Q2w1@mHrkMihK8roWECAQV?8H*`9LY#*?-Df>eiAY+r%w{&p__;o zhu9w|WYDr@S3iH=R9m!SDAP}iHC0c5CJID&f8I>Y&@S(J|H)!^r;t1KAnNEp+BPjE z%%2`KuHG7t?CQb;D<T|!_d`I*wwa3LieS#aoeUd5vd=x3hQ~0rIOyV7(R=xU2Dd}0r z3>{7M9KN7P*|!D|KsQju>`eI;b_GiG4xJ}EEno^!5O*Tn84>g-3YIGDm@(W{e@Cge z$G}sQz~15Zuy(fBh>a2;&TV-mNI)ovIc9eCAuCvKey@c}Vc4}6>*GS4vj{1O5w>(g ztCtkD9mX2MkCX^e4?yN7B|%gfj1p!G;HG8!5El33(_ZnJS}H_aCjp5vA=)}`mLMm0 z`S3_U6+%mh=xccAw!oeTXvDAgVUPW#a;V+(im|WwsU=)Qtf_Ll$keW~da}teQoYbb1+X zu9`JsIF{5>_Ngj*hgtNN)`ibl%JwUv%Yl3y@F1GLnK4j%nIG|VlR{pEPJp++S^*v{u6%mHIk|PPydJf@}C7?3X?CEW=sE}bhi3gGM9WFQ2!5?KNmHAqE^CgQSnk- z!F#-YFb3lRGt44)0lEgVEyU9liP7JPR!ZXaM;8+g9P3J8xy1r+-qsfYf(PCrUZ=pD z#fJrgWS9Gy1hWl9|NZ-`LsFcV^c&S;5H%=;mI78HqTwd~E5Uz`s#QG71SxFY8X zT)A)tOUI;Oex-b>5UIi$qzx@l2QvLHJbUGOfCMC^kb+7c$)sYhESCN{}m9mA=~2=FUQny5(wmI@ z(NriHL5V!SiLPpb|5oj18zrUy->yM(`QGL|(>RB;m0XCzt%6E8FH-j29-1P<$JzbmmKECRPPhw_a$V>ZTKG>zMCzSAZFld%Trgro}|< zTa`0IGdIm*t@g?YT{x!AIBF-%-9Ui}!q(IsO~l9zW+9hUh!0S&qO;AVC3_QtQ!`1G z`8l0bl>yi2Qv3i^FN4V`-is(~WKH%3W94x7(XHQZ9tgPjzZ?Wya^lvDq7S-AdXl67 zSC1NlShhW_dg0(#gYmu3H}XLqfelS(Pk7*Ke)MPl?)d%S7ya_r!ru;(e@ZCqHvlz% z8&LC)5eoa`<$oMEey-kZ{z|>+P5Lw7D|dSbfYt#cK;97 z;W?PmB9}}n^5$Kdx;gg)q(a9ade+Zu$Ye#A@3Po*TR1-Kz{WdW)* zLWSzIC);D~(e`+Iq&?BzH8w>|(9BrcuGDvrF@!7A6+=Hp 
zIX*+a;w+-swhqc+?T)$;9BW~JQvt8C>1BId8JLoVgXV0rX(EKPK3HVy6r=rS4$Yz? z-K@c6VtK%rGx$Ym$gfsCWv~Rr`NHNctWB0|FM8?r`bK8ydw6Py-X$m`^BWJ%4H=to z<|Q7U7=~BcTN4RXM`5f?x zIi;AOq?G4bzJ15?EaUUNBe){>01o-7%AE)IZrFjUt=W-mMi@lJ*6)32^TqOCMz8(raOB}5TjjT-D@*ph>TZOzi zN)(jt&227>?0tV~EdQ7TFWM?)aQ+BNFS$At{R;I2UQU8!gbZd+h*Px;RcVAO6pQ?n zeRx7wgDUi=P!Qmi#6KA?$oC52DAMaHLJTSyURc3^DQ<`=(OV_hLRJdhldxJc250vs z2*mDaI6v-(KlGvdAdT`z8%N;`q6&sD z0nWmX$u)a=Ji&y(;!T#;UVKE18ayKOlu@)>cBIvS_1YMBh{sVWBWNY{qzqtgZ-@be z_W;-KTy(Ui3q0*1o0#@Mn<0ntc>I)RM`T%Q&}KH_3Y_9@hL$?55(sP++K8u-!74is zkN41dlv_McCIT2t+*={;R-4iv&?Tfy+v_NDO`5tqjS@+EN9 zIH-hXxr-Hgofy98y({-`gzsJXbciXLxcTpm?tPLhzFJ&g4&Hy>Oa&y@aIRIUEYD5?KK^liWK(*z)mK^##hj*Yk@h7mWJD{eQR&_`5X zXpWWnvgDFt+5}$rI!ZTN2Th1>MRGQ#&^n>Vm~h*Q*H!*!ZwgY)auB1m698RmGv_j) z3$zS;vMrUY+16!2tF7}osR|6MP`-vE_`QQ*rjoX1jt`Q*|(2X0Gem3|E}A3bRv!7vljzuIpRd>6YyeY_>w+W`D}c z9Zsm9xm>oR{m~$cmQ*fVQS%~H0~N>QrIu=c$orVRtfec~#kgtRbfitA^Bv@}wM~ly zEEWOuMV2!MitiVa<+9Doq@}5LvXEd2(U`7a6cv+I#GA8$eU!9SqsnbKJf`(i+m6`a z+rNtp7(DiW?{_sPO;3l5pQfwT<1hQ4EdNOGYku+vbhR(i)xHuXwVxq*;J=Fg&~N-G zYQ#UG%Z>P>fOu1&y$pzDoCe!;JD?i$h3z}uSij@xl$P>5*S)6lJlC1et*1^fNzt#i z?Rj=HjY#z5bT05!=YJ1!4Fv)Dvs2 zCz?$9m+6at@s9WCepb)=nF*MG6+NwykjN>EY@>0fX}i=SNxf#Fr1Lbe|Kj|m$6C(E2uv-6*;qM1hei%8> z{Dg5LgxA>^rKf6kC6a_?BhnHHfF(I1S(cm>A(Cb^S?P_opB{<06%bB~d9a3)vQ~v| z0?7rYE2I{DYRKN8R0LzzDiaHni>+3*4#3-XgOIU)!6~*weS~#!$xvVSqr}RViOKc} zsKOzMD*U-KGGbq5eh!79!8-k)46#hY8nl6v z{K{u`2nuoRkt4*aZVuB^R@^QM?&DqyGfzIy?(kc%eQ6?*?F5OaZU!l&H%v%1G4wMo zcYBJ=Le<~ZZ~KHrHswxr!H*rWvsn0JlW;EV=);kZluRj_Whj@=APBq4GwNtQbS42j%+3a>wH; zgg2zf!}=zK>NfTE3k54kvkG6q+JL@i5#ktZ(GcpWm0M3(uZ=EqwRtJxM8*I~yA}FF!bXVl^Qt(#Gcc?OV-!<+8;$ zP7H7M;U`HRJGeu$$#)l;&miQlHB0^f^oPIwjdP#B^Yfp1_K%Mpc<1dO{K22QaOtZD zzqs;^yDzODQS~**c+v6xHz}itQ6)Ze{GdqC$gibspFC6^=-wmw05Vip3UEJJ34 zZ>POI@BG$wr;A~G@KXlB1_5uAulz_^0PP&bEX%YZ^e(Z};VjXf zxT0l6#^Qu6OR4%0?+3IHi9=Us6 zCe-19P}epX z0e_7oH^ScIN6h|~&h%FcgR$O16&9|2jY3_qBY^kJFI47Q68wOdhun=3uwNB1e6?Us z6^~qdf8qTR0mv)hef3YBD7UPl4{4V%52ytc?2 
zCF(Kq#&HWXLsDV*K$|k@`=u5+3xL0n`h=g6bd;ZaI$*V`DwMvcCSX)uH6Z$V|@x1#7I7!wy$en(EElB;YpPzd2Zy zPqSb|sFA!or}D^#!^sRF>8u9IWuEas6$HC-wkv1-s1VSvXV_S;_t!pz!C}at09`|e z4;)M;Ldwv3NMs$P*rUPMg^dtP03A=Fmr3xIOt05KR4HR&P>b=$Ku-~6p(=!gL)`*o*mG5l#Pv07}Keg&N&+@TeK~Sjf{F`#ZdPhhgbpw zLZ~U8@lK@w1m4F@|fh`y(UFsCM9s(M ze)@t3{VysO@}&rEVehNsf9zCPj8sU+G*uzr-9!a0zOhP5&=SRunzJo0H?O$Sfa$n1v+dDJAuy=m%)W}3SX@(8;Uj1{{Li8}} z+6>MHws)fda7-+S@Ivr|XAqN5Hc;7;7v%LE@4-K}nGL#||oGBM?H9?3WV3*=tlJS);LnC1okO2=j6) zXtm-7cW#tk#kPI>y_WlVW&>RR6~vsH+rD^!cnIAXl&-_>@u`i=mI&fqVMH7uK}}w=%kGk~YZP@OLqaPphDvT+clFu0 z{O;}BH=BbkQGE;!%HZDVAJi6F!e!zruWyhW!k!4S!j)AR0S6oXQwrtKkqQ?aJ6gEl z80kbGE_3yG;gr?m{ZpVM%BeTqr(|QZtr`SoFjQ|D zwHp#~vpe3to5!^BF8IiG_v0v6+HT_(ui3daiaUVO+~0`qZEUP>7s^_`cZ1JNtZlA> zZKu^YhQ32fDqey8Wg_E;DpbPmnF-Gi&lo$8ZFVFVeRn!-Tes=<#4j3e_16eyf!O@{ zpS!Ji*8T3hG=(t;_9o%1)T!tso$XL~P;4>bVdT;V+%dX(7lTtboh_FUEG;_#%Ao`) zPGSF^zND&vDUuZImX?mT&IBiLIRz{ts<*v#QJ0kQ0%r3NY<0|LFj67bFr(F=!<&qE zqccETC8#-99@zQHHgK99u4FLx4qbz#-|Aelq|=9KTIKRED1gc1Q_}(V5~Y^-_Em`&z9OJ(G~1 z@nwWKyRlx3{yIed*5c^ZVnNiB1dDj1Iaf@A@CJy~Pddh7*sju`jdIOwyf>X@RtA%t z?s^u#AyH_i4IIX!3OMyyBj;+x_+Q&*-)qYQ%{&jM)h-i*1(Qs*OVrFF_mGslyix{C zRK+|(;njeG7o#!6W2A_b9G|s-ask}pOZAT%#=E>mKE`IWHbSz8c}^;6U0TiKx~5gW zFmisx#TbJ};faq3W2}q(;|me9Y+8EG-ozso^=Ji21SK7@1<;ks?Jvk$<~C@QgXjo4B$dG~2%1qa6~8T%{LS;zU>(xxRQu(d&SmAvk|)^&HxNP`!^8VYE*;9~6O=~6$-m`C z?*s4s8UKC1{F}kg`WfTe7>?m;?cd_9`uEX43LC$NclN(P+%$%KJ5EF4KTPa2esH=P zc2I!Ow%U?|c(JmiV%1?8m86R`O&5bZo%IG84-qE8IDz_NS%Nc8>myh{5vRsXQ;SG! 
zfQlB6cl1-1Ul{F5KO~z?Mf`$)+LXE0_yk)cEw2$b(iVbNOZi(zTdh{RRc(~XJBifB zyG2-%ItMMYZ6HjctJ|uHRhdN|lS{V7*w?pz$%rNU06(VSVF1gaU2a7LEw+bCaDj1h zzG?ojd_#enLh_pN%e-2NEurox$X{2s)ASO#m9c7VNj+dlDwlGgFHscy!`NGMpi_=) z%2d0`Jt8`9Zrs0pE0S?*$iijrt%@z>CnzX;d5hiQcUv^E`d}nc z$cPiyY}E=#FeQ#NK(p|wbviw@H?YIb*@#|&kiSR6m%&@U5vHlW+ek_rzE% za0m9O7$JaN*q@f2eJjIz!T@I%?qDF>Y75JPlqx_Xi*+`9v7lkvxHiMnAu|{>%ff?LgOb7!PiB8(c5$XRvLD+>9f98E*&i;wZ(m|2 zlI05J6$q|Sq53jkS}2SRP*kmlKk$=L^v~n<_;aP7DwY3T{TGwu|BRg}{hRT>S^d|Y zw`xB`w$&e<`0-lf=jS`k|2*IEu99i>vVWZ*fIZ$J?{V)%Z{54*eUJAEu$@wEVWVZ~ z@~t{$7c&ie5Ck-KXc`Tmdk@L7s?miphEC2*qY1~4Eki5(vL3U#S9RG|O;D<*Mp^Zo zhC1(UYclKDzCG_dN?KlF{6Q0C{PDc=W@P<1Ap+!M#lVBCR96$b0Dl76SDQM2ujOE! zwH&N-BZ6Gdk7^0~oB&MY7+KoS zk;x?;e{<;o%a@l9vYcAd)!(&rh~?AaW4yRs=Z~`7f0Fz7OD7>C@Q+;7a_u~K^UvAx zh43WnAM3IVZhU;d7p{hDcq0~Hr1s?zvaG)O)B&%yaA78(!5j`(^s{>5Y5kmh`wjh^eoHQu z+NHNY#!r3W*%^4d4u@yO-t{tt*PlH549iR3$@)jTmySFaJbV0w;B@!J`tbwDmTTQ( z2aYd4*$-80?g<&)jjm)=@C{Lf;B9xyA!?NK2R*0@W>RhEj zrDcR<>FNpz1gF@Gg80joPy>Zv;BAhGtib|vO70vvtSCuBfbD>?!hTg3qi5j$`e<%1 zN6U8DUF_Cn^J&3BIrNoccWY5nicOGAJF4r^gwj6z$FE##aYf{krP;C*q@wa zZ8$$|{QB)rY}~5Z(lM?z3+ousM~a({Yi)b>&4>8Z;P}pH)k;^ae&Lmj;u~w&-P@SO zl|I8b!IabW+UE7q{Z{)QbH-W~!q zf_8)$?dtvb8k`t69%9z082_8-JzO>|%M_e_2iV}sN}i>#ERk)9kuCcKQ(LwNI82Oh zTj^0VZ}L#K*8!EBBG2pzVN79_n<=(UQ3v*B_@x!x-?$2kM;?k`5F3Y~fn?b-xeljl zbswn6Nw-aaLIscZ=ABLC{Y+dRDbUYl&xx=4z1tXE?a>FVtyXvyt-`u$zcX=N#!=xa zmuHE^-dn#Xt8d0flZ#b!fS)lp#P*$O6RWxC1B9BQOci4n$vC^ zv~@@*Feb9|9ozW^!)Z2vGT}X&lqbVC88+W~K z*p0eUx(>Gpx_h1|5dV}+39J*6Ky-#oVQ5{FouMGYu$aIhfP4bg1Y(po-)>41i{-&r z)JHlmdVyY&DjCOL0M7H2qEB zVWoa~Dbu*l5MVA+4=k1emQqn7MCnWQN^hiIg$e-ifnb=VRy~C=gh9sA(uC3JvfC=;G*A}9) z94MV#t5=$FtcW7#uaqVxeO^KUd|2_Q)Wl8_U?AU1i`X zUBw=RO{k0V8GLVG1Q=)_*I32kedX4jNHIXy%Ii zK0{~G6%S$JA`p^jb}OsuYEyAp1`UW!o$Q0d8!{X56oRwxrXIt9Xve9rqLdEXI~Li) zcbIWuryCxw^EEK3#?7erxY0i>W1ZnE`n@jQRh=-TzljW@*o!$K;3HcvmJa+54*HXk z8+BI2%hHyama`&wE%xYvunAcZm!pYLf3(fcSTL)@vLA7MFqdyAqsF{8!g3`&5iGdX zgjMOw!mhEp&#l&2m3D2VkX>mhUng6VG;P~o84ILu+j?~@kiKo}Fi4_xdxyY7S%<;E 
z!tXMp;t>Eb%DT~8!FS*Xhp`gC`gRJKvro2IoaunS@a@9PfMSD@fdM?Q;XCe@y5(-t zt#qqh@7JT;N`oN@2u(YjJQ_duWclycZ`4+}U6b%uHs{V2T>ooUG6;0^Btf+o}W5 z*=lH$tF1;hcPcfqV&(ZGy@O^-g=FJ(%vQ)zAhVmOKcL|7{#(B#ffGR_X@}Fj=+z*8 zeYO1e{NMGHuhW>m!1(ghVXgiP;b!AYAV5ZFO!7ari_J+y9W*g+V~xn#Pde0oK-4!} zBLeD<`V8d}EQXB3qN*3YW4~F2Epb+cVT_W@bLbb3tBm$WmBCbv4z9NK8ko>q*9%5- znv>Eox#nc8Awey0c}KRiAUis<-OinxRskoKw;u#Nzz@p8ckB8<%)9?-iRbsxY{s{? zC%f;&!)?m;=M7|sUtaMe{|fDD0DuN$L<_6Onax#Hw%auR^?Kuy!OtP?e)4GRwdh?-?{g` z0Dxq-q|)jSJrd`hd-6H=#NY4x`yIo0=+6p1>VBv1M}bSIP#K=-Rjj`4_F3CGe=zeA z^{1xyr`8GgyY^=PpV+zZpWAnH|EXga{=jjKlJZqv6$~ph4BIr6kuj8M867|PAr%2OFXFw7wDs?Z9(tYL?4s9XA0u{4z!n*Qjrv2O2R_A}P{)v_O`OIJV-gio`yB{t8vGV_A<@NA)tG`jm{cj^% z3O_Mw8_%eN>UlFVOXh_6ta;hIYF;yZ21O=A_vdNyrw0iG#LbYYWrXx@TN%_kV>mh6 za6Qw=1fiM1H5)R3_R9b|Bb8?)8pdfCqnhyyqbuth#w!6yzF^_jm&+T*sRG8yBF3Z` zuSR&}OW3-_xKyqf#<6OLVQlWic)H5#Q)28acNxZZF@`&Nz0gg{dwPg*zL$J2_F~)H zhq0}nAdhbvFpR^J)DbZT%7cdS+z__M#MrfoAS0Wxjf$}~PvDMCc|(;??4p`O$Mcw5 zPCu?Jwc|t=_Hk_<$d6$^_c*4x&#M?#@krjx*j8o#0eN&D?ot_Rzp>r0ckih%Bb=Yu zhWYYNvA>qb?(}ZS8_w9S4&^!fjV(r|cuW$=l+T=%N9UO?dF(evd3aY(<&~G+w7X(t zpWJ=YI6ktga%k(pt%uctt$X)PY~5Gev+wB0w#s;AM`dT_acg^JYvr+#V_@%CfXYp!#%Cao_S{y(Jwg^XH)6zlBG$li9k^&D&UgF6^Pl#eN z4`z8-N|Bf(n8V-FHpyS!Op+xa|0NG5NJau3$&x4YDSwuV-?baotdUj9^Ht!gtU-OG z{9?(F6q&|vOtOqcsjfC+s}zYmbqH;E%*vimsH~NQFg?e!DOQFQGg*}cwp^;lFT?z= zV`S3=J%=I^7%xmBcxiJcB^5`T+DOyr5|c3hU2V6>JZmXOuLrRci-Xi(qDinU5he1; z>6@98)AQ2{Gxg}~>cY*r>+|)?E7P~@vzJ%rs_R@%Pp?dOZRmQTexH@cedeyLD%EEf z7w4}m-mWj4d3)y8^uo>hCFX7G^3`+MM&g0k<`!;BQ0K}t`L-`x)#;g)xqC9*GwDmf zsk?+MCaz>MRlmD9bIY2Zowcr0i|cVM-0O==ON(#$@nh=F>dLN}>G_$}c}eL;QbV0& zeRfJ#mbd0sZp|)Dzcn>|XK{65rEBI^edhL5k{V0Rz3HX7>4lYLPu7^}xrOCTSXdxd zrdAd=O0aXbzG2Br4fjUb-P!tleWhNQr6$XBD^pALx2Bh7mxI~WJ9q9+v0%^7I=Yhi zdeV$jtlIVEkd=LDIu5XG-$CH|}ubb0Yk-F{o?V*i@RgF8rnqT^pTzWJB*|y&FsGwA`~) zpRKdm+MwS=OMK<}@``K(<3ZGsx}?!#=P>EZdeFM@M$VQG>MY-#pIhnH=0-UbW1r?X z?a_HFq8V~)k5qjv4bB*Te)Fl6BYl=V9fIitaet97?-Rk^n!fc8i-f*nswAVWDR;5w#v_0 
zk6ET^6XBSbeabo!FWsgUJG^YM1IBtK@o-GluVm9bT=EU}$;vmPBQ;fP?xbsG+9XEM zbZxjbEP)a2FU7m|9lP?XX0cN@#-r+>on@bHsK8R0V9e*tnEXcUiqn&Ll*R%pIzsrG z9hiYCnW62=4n1I3ZxoCw`*zs|k6*~EqN@02bYjHr3{1r?Ka&K_+cm2*4NOXSxuL`M z-=Qe3Z}_fn1w@R(kUc~`Q?oikk4(%;;C1*$!L0IPRRT+1EM6$PeTrK7&KXLXbAn9G zQGufm5PL5Ls%(Wm2Uj~FamqbphfYbVW2%ff7T8fu3LzQxOG^~&Y{{v)p~aEK_pKtO z!gMQ<9eQC#TYbONw+e1GunLk{Cd}s8=NH&&)g)_O)_j(7x0KeG_OYr#mP5B;9Nd#P zd@C9Wvw`VnqurqsWPPWTda8-QndtJZiJk*A25;U(SD@cAdA=)AY#J!j5!j>l4rf=U zIu^J=#&l@(L(fxJlTv1>twX4|wmGNVsI6~~M<@L37Q4q8OdKm?ffF}DmImMumiKB_ zG)h&CqHYRr&^}Jt#m@IU;#19%UCTPpC62U}?c1~vrSffQ7Mo`A^e>P5F-Y z(kZ+O-Is6RIUxm*4`Ar7Lf-TzI*FAxIS5jMfITEltGm`eh;dRL7_iw<4u2CGa&PUfiww8UWRF$nNf+0nvY$gQ4(lLjs%RV96gQ4NMM{n zMrX`Blx&1Ak@3|QyA)8z&d_!$jcn=DGW-}KbRk!_f?La&IcX!RLS*&n7lIDD^8cZM|^=oNb3c(JMoH%x9iX}Did-*ukoo0xvE*9N%J-*@@&N{rgpDl zMyWktF-xhvP_xW(`l2|dr}nUxGpp%Ku9i1D(wAbz>=b*yFO5{JjM^C|Mo{ zG_=Uk8cioava^}z$SY%Pu{)f;qzvUTUx~|*L!0SRl_g=OL*1>J@JPBqdu2+d*`sVD zpA8tZjGFF%fj3zzn7z0<^n~%eiu&xNjdQdyUEI`NTv9NlEB8^rE>8K9wv3x0JEG~y zLuOWJDGrk;^QurF0@Gx1FuYyNo+=u)88Uo$X@;DyYDI>_ZmMV$!&+3Kbu8c9VsGP} z`K?y6N1RP0><3n@T%iv)22N*aIn+o}+C=L5%R?p{NbP3D9F!g-R3P6RN}Z#MxjD6$ zD&{bKx+`MzRdlgpYEWF1K#l0Kk4TVV$`dua_K=;`L2)g$xM~s-DI3tel-9J*qy{wv%5V`r!H z8TaSx%rEyRgZdBy=iut z-R2haX>-YZpZR|COUhU@zN(Cfm8Zy`Oj9z{HL=bzRG2X=M<`u3HV0~#tEpX{oiVad zx`Lc(goTWegDRGDc|s-2K{3mh*s2{D%GWVTX_HXC%3@T7{51t>#ON3D*K0!ldOd^N zOb*-iJhq!+OiSP!Vhj~9UMylvMC`8)MQp8>BX(7_s0txWj6D(5u$RO*BCpShaZQYG zC^27&It}AxF^-DyO(o{(2s+nUF`f|Pf*9wbZo_y|j5A_f6yuqw2Z~(OYZ&*%xGl!p zV*I2M^Oh9xlGrYbu~UpbF|PLY8^+ye6O^|BioJhOCck|{VxHJcoKLA?%ui_ZliK{E zHb1S+4~pk!we`bd{hYRbM4MmG=Et=8c{PIXN45DG?eZZB{*JbOT${tv;y@J@rJPff9dA_qMTiZ-M#1(i<)zVbr&91s;F zp@?~rwimR$pb3f+BKG1{TgpNijH8N0LP6T&C%hBkk%(JfxxNW$ETiO=hrB`H6Y?Wu zK(7cnPptfhNS1*>CcKoyrr#y)n65}%$yllqCsVS8K#6CElh$76u-@=I+&s@W^+Ow8 zw!Hxru8`+Ep>V=AQl6_Qy>5prWhe=;(2HmVs^M!w9)wlgb=yfH2wM_95tKu(U+D|O_&{`!Lt|Uw+-J-OY z(p)M^pzFXP&?`Bu^u^8aq=at95&g>I*|oIBGxzEXE9*&+kD@6KT+*}((%d-32h$wO 
zmt%6lrI@0j^LH<=Kwi1oBt%xPE+kZi)?n7vrTOf&lqnW9Y)>ULuZ=lsUP>0+n6lY3 zJ^RKglod^D&;*&A(?TYjo2k1o5yP2Yo|&6#C3afZXOEsboLTWT*+f^fX9Ma}LdgrqF3k%ua&{G>)C|SW@siLnp;dLZz3FeF$vhjHLH3-YbhU5W!n>EVcyDJ-NHZLCOciJ1IqLR*6q*O087p zVmqgJZZ;u12Q8KQOAwY*>hYrq!-X}?RBR9*lTouVz4%=bgag`H81yn+${Vz~B)z_& z*Ru296Ir=0B`fnO(kXFeDjC$hsV9Wjwf(l*>euYJscVA}t3Q1%p2U(CO32jxI#uuq z0+pz*O~13PByTtG=YE-47sMYf^PR$#y*@yH^)3W>2mCjCt+X=%Q^z z?}GTn1<;%ycUdoILJQXf@T6NmfG++(oW){1!2}7#oIL8K7JE{SjcDivW~rn|nvXTO zdN*#35G74&V|m~HRAMzdmYr98^Qdor zlf%)=zFG6U&7QZ%%fIC14}0Eoo_EdjzQOtC9ESwod{Ucdw0TjRr!YP5tTvy}<^^qz z0r)WeLmq?z>lrWDX)SuCKI^=7(wemJGfn4;7yP(#7MxM5LogBP>l@Ywtmmy+csw>v zZ&-J%?G_9R?AxssOV~VHPX|Vr4^#;0FzgSka8qE11A!9`25vYMWWvn=tA>MYSP1;E z7zAMygkcHBP?^69e^vfEYAWpHuZzDP{(6I4*car3?w~)|6buA|!BDU{7!C?SF^Gav zP!2kRN>B|tgRY<_=neW(KG6sB*~y)mXw*KM_hU;fAHC8y<(=5FJMYGpo%u{` z*_QXTB`i9RWgle(Mf-$vIw*ih8T0d*;;4Pjc}iZrF+Z0n4cnKTYw}uv2n!qriX{X$ zCweA&K5(^}iKYUNzpQ@qwHatL6w@z6FGNNd<)i2Vwa(EVh3IL`wsL@*6@;#LT{Djw zudkanpTx;Q>4T}&WI@3dscP2Cz9TRrgcsJN7>>Z=5Q2NcPP4|NR~wYo4$Wwi(mQa0 z?g-bbXhd~jn`9ro!JAX@ZDANVn!mLl%A#A$RHU+`6WArQW(BV1svPNrl)M<)n0Fh= zn$a{2EXY9nrET(7Gz-fHXq}m%9*_4Cc2)?&QdbDr^O`f~RN%~+LSAhg^<1wX5a{#v>04Mm?65U0yi$Ta4yoOBJ68{@K0Wq{!y}O5CEWj^A-Di zQ+ko(K%AFeL=MZs4w42IvaOKSGkFO6^aWMbOdSVe`FK=JT$e3VBUjSl4&J+zi%bjx=AoO(>yhaeB-x&6i#iT%(~Ir_bD)y2H4FEE-2JqBqU$tuH&0 z2+?+PXX@TxF?YouKt61L)f`WKjOeDhyZLg<+>^e*NZFg(?TWci?DAz~CYm$%r(QdupZ;ACoSCAvIed3 z6b>AD7V*5g&a)JImhimsAkQ+MZ>{qz$1%n8Gql^9OcP&8_Z#bdg7k zDxN=F@7bk2yK27upRf1q(Vjhc{)-2B_Tu?x>pgpQO!5485Au{`KgI;JrlN6G!dhT| zdA(;`l^mYmUhf%KrC1B?UtjMTSEYpK?>)$~jOQP&_l&C|o`3uxPf7Mi?Vi#@;u%${ z^6%Gs3WCEeN>x6~WYp3K60=KtN>zSxy=Ra1lstd&L7r0D-&*h4t7D4iAGCSGPIZec zQSWa3xrlwVdrUswg2mR>8)w=}{WRZXP7a(-ejvu8g`YeAb+1PL|Rbmhjgl zMlCc)xczsQiZKU(Ty&c zk4fN7Yu2+gZb3s**x$u<(J{j`=J9utO*IYt_BtR|=P}Kp32yjpb=tu(O(*PYhz-9tGsRMK-0Sak=P90#lR<>{I09`fN`%7ch zjM#wyymu$=QR?2UBaRaBz7Ein1Sq8edUSv-1o%Q+o8BZunGkwJ_Ew!Q%EV({NLK$^ zHM(eCL=NUDX&z=U1aF>Z5_+yH4*;mbhS-X303(y~M!D;!tcX#`6ia!kijZ2AqpC5J)eIFfhRms(l_M}7n+ 
z7V-;HLhS+@mQ-Ot3M1JBT9p1`xeu$!qAOg}gxP0>E_-g_#$vSQ{o1~A!hLI{2Nh#c zfTjpQd3NsJ+$?lkWY?Au7MPt{z1typ_%^~XaQ7RFE`kE^Iz8ba-MPDR-?}sXwxH9C zZ>}ybt=^ehxdlh)*5dr^I=Zo=3A`H+TU(1J;N)^b`fGJ>63Yq+F%4qb98z>|w~)%9 zRJ->e=Pxd`k-wxsoQj`-Q$ z4B(y&Q^EM8A7vvS3JiPiaR#6Y0W2bFBsf-%sH}2q=f@*gL@bREyjTQGvoRh8uQut2 zP0~S_Ig#R$4UY7tZx7r19X2GsdD4#xa`>Id7@$NwkV*i)f>0Rt*}w&GXn}vVA`7fw zwgddE*+O$LHRLith%Lx;GT0bKea%8J5mAWdETJ)R=CIvGEUM)z$-0A$Ofpjty(3gk zM)q$AQ1MzEZKfs-F`46hZb{)g9iW#q%*h;3Vc?!d1>l#{t&wZ?U<>4o^c`@F9Eyk`aYqxNIYBvs1*^#jn=$=1$kX-Y>*$u!z1+mQX7b2(#;naoQ@ zz$U4t2JOgka_UU3tVDd4j~tk%K-oo1MiN<_ZG2>%PRG4QYSNF|dqg&F$YCd>au%PW z7z(JF%=B*rl%hbiGwxWB4Z;p^#Z4TaG%DRegfX`|J0+Ag)`$s0Rmmq>_FKQJY)^4q zaE``bJ_JK5oi7Z0>41m@V|sxU!wCXRAj!TrV!tAvW{drT^NM5xh!U{yussJM#vQfm z&RpW!-YXBT=~Cl_LblQnles*$u;R#?Q!#$uZeMGPR@WZ;N!NM$K;~oWzgFJAFi*H& zvwZ(smK*+pHJkgaZ5BR{#52HKI$kKfiWLIzTZsG}tBoM>koKbk1AF7!3GThX7%^># zySC|OL(D8YA=1f#kFztjE95Yl=b<(ObK{RSK|m=Knb|&TXg^S-tSus1CukJUY~nYX zOF;2jir07^i4H`Ov)AfN!Z5DSx--Zkt<7BudgU!0z8kz>9y zeg8Vhfx9=Crf2K<)N{2ivSux^$WanP#Kc7SY{{o>dkbR$pbo7i)2iz>#+gh6H8uyagfW2haV9plAGKJ}*md)UIFyZ= zrrv^&LJGicL>~fQi*U%8o=4o61V$brM*NSiwREC`=E1kEUsM0mwLkBDRykkIzT;=U zQ~qb&-oM%Ty8B=5`hDepY4?{@__KTda5VR$tHr{fUv-RMR%${0LuC$`o6X0}dGo&c zb!GmPGJjf`-&N+%Df0)){0(LNxHA5pGXB0YzORfQDx*pVD^kaZzY>3CaAq2L*_1JM zL0x7T9{@+$_y8TH#EfVyYs?zKhAjk-nd{8xwItugt^@jE$UZa!5}qf>%woYdqI?MC z)-*~T6Oa}r|6p@+Fv57)FIAmh_sDgM6JW;}J z5|ZLCC@FbOioU-DKJ!~j%wJO#%wN&wuWR#HRRuie*Hj6p<}ayEAeV1;g0XxRGSIdTiOdyRf_%EF^rzlK|@Wv|9LRGC$5S4`c=rA>bCWM z%rC0vd6+w2z{z~|vtV%z^W*9^!K|B)KWW$pzs-izuxGxbz{T4?roN@V&5UOM1N9Q` z&f(|TOuARzS4Fn>=9^2*eYY0xV!pk+h*(44Nw%eScKG~j@)&t$N*+7U_sHYs<0s_t zarMlzJpJMO?^Cuv^5Kst9!DQPh570|(CB{sUK#VPA6L)w`0!N9^tgq*Fx8wE)cJi7r&|y4xGR7j=J@Zn(umJ z9@*jf^Dmxz@wM!eFV4K9PIq1WMHrDDItLJ%=O}{wy4X1nc`QbGaX{KV4?q$L_Nut@ zCPM7UF99>v(aJhnS%9h9F@Duvg4Q%Vz?bPW*McodUP5Y8Ng3?|L0TS3AX_wRm8ArLocilM^}#(l=d1xLZm3(l(N% zoFl93Z7ojQ{I@N_Q)_KX8>i8g@7NL^Kaskyvhj;pTcfy=T%1(es>xSzNj~**(^RRj 
zWa4?q>BkMpx2<|?l{{%EK3}RffZ8W^k+4@Z8i-i%z+I*zf9M)0Bnk#NnviHCu$gqF zE{o7KIu)l;8iM+wDnM$*7ovr9vK)Y+O3?OvOGkD*nfM`)WlqC_$OYRRqnGJ;Ro#1N zd+D21DY%C8Qb_a=P>C+>ES)j7NVQ_ra8>uiP})$pw>B(acR+CzeG8n|s}wEnM{yMe zJ1w788ba*y)ZJ7p^2e#4I`A2NKM*|xf;z^Orqv%oiL1%xPAr%bN?tVJIinwZ72Q@9 z>M2bv(Zww;hj!7>WZg1Ao29uZG0@Am1RWX%T3~?ED#>@ff98$@iwJeC;A-jN2u#5kPEwJ1H!CS_&of z5UE05>|`vBNKwSF#fdDWk4i2+x?$2bR&nYC(!D(*B%PlBI!WFvn=h44G*uXP(&eeS zK+Wlx@<`rdyC@M#`;C`;Zu!D87OM0_0hm)|%Ag0?$)*Yk=gh*&(*5_2;?6z0G>5LC zmHW@F)>j{?C&JU(BCgeEn*01m={Rbh13DY$fHR^#La0F+`rZ8i_iOt~4WIL3enUwlU~CY=pdAUs-nsajC>p(|6^8cLN~a6RS&TIde?Mu-N)?wAO3c!tWv2 z@!a(6J<;wFqkOkagOf#d#lm7kZGfPuMAJsSiGp4(p@v~;Zuxez1k0_k^YuD5I+{8* zT9ARuTW03#)8Nqo|B>lZeP$8$7E`$06O`h;^h8!zgFPPM{q8E*%a|w!j6A_<&dv#< z@%pOh80kp^r5H{PpT7gNe7WJZJU_jBi|CDcYbk_ui$lkM0@K8Pd{nQ(CzI z)V0bw9~_%U%ohCp#<=<(G+)U_0j^$Os>=s#Kuy!*X_y>SwW5s1i)a>z8Vw~0h*FV9 zLGybBpWke!1QV{2`pb@{V77wy^KHs0+Vzp7SpNJC=#I@4``ce*;JR+CDH`Ts`b*Y> z`Yk~Hdj!~@bX>Y{sssb;Oaoln`0R2te{T_J_O6y3nknHiDjv;TngggAiu{m3Sdo0Y z<(U?*8G>{+luf8g@MbXD5CM?osNI!hd)B#VB_C5WD{t?9uotx8nfX<%2c=Pg<>3c} zhxnki#)*QoH3gvb2vL+%#V*YPFDHcW?A!u$52PBFH$wtnJ81!?jP=gM(^{JdMhD>= z@)*s8C7GtPz_{0?I=nu}x^{06C6k zeck!`!p)Ui52YH3kVjUeIGC-L?r)?>g z6K+#$la}*cNhG5!VM3QV(4Fn>DJ0AlPRdNtBO?uWgE*;4_ch&@9Ruf=UI8@7ttLw zwaoUV-r4RIZ*8E~Zosh9jJ!lTA`zHqEx|?t6OUA2IhHW}gaw&)==sIB5)qk3hkht( zzOOj~k`|E0&@DbUTWNYWX1e7Tdi6S-eMd7wYlXYpouw_499pt(*()aP)RJ0jTdgON z$MAj#!-r*X)Z2$ue4s6W?0uGu3jYO(u z1It#?6DUs2}-YiCZ2(Ke!$-8>`NR%^sf|594Vts0fAuX=o&!&1yr7B7R*x<0&jc0 znI>9iL<0+$IKXYBAd$W39T=j3(m#-@A;K5@xJBB+CN0o9wXDif%kgL`x<_6x(qX2n zh>VbQr+F`(fVKX*;N6RSubPYM7SR-wX5z$|zIOy3yKaOxtEC5zT(Xt|tL7O7=ceF5^a!!E+>0pL& zWf;%br9R+z&1is5U;{o(G}2_payEcL_`F5n`L+BYO@S5`@ZORgEd0}DjqV4!V%7>7 zdkefQQJ5ss1hL>lvW1zN0^JgtU{1w ze9;oU4G^P=5giOGA%y|4P>Ci&LE0Pnd~_rfy;EWv3haD2Ux^NcVHkx$sQU!+GX*4Q zD#+5n5=S8C5FC2Z3x*n?Jp5xmmnTYy7_0lml(OvgMW#dt;9HXNVNe3OXtyx=3B8jy zSm)5%(ZCwP`Uk|nkGQS?&-9QtqMX(mbwh+kWRgKyRAZVMv1br+(D*4mqj%y!lk$huk5FnY8#ui_0gJmf~n}`Ha$%94#)NR9c(^QY#}? 
z3v+0lQl_seOc3yR8OK;fFO*2WFa_`?bHKQC578{u!I)YRt!7D_c$CI*NEHpsX0(;q zOo%P9C2I7(XwmE>Ituvo_ri20%VbluYkeJN7hZ$ts?rr42u-HC(1ceED8qJ77BYY3D~`?A^y!K|WJ*j^2oVTEPJem|P&}8-=vW2Z3uP;DVSkeL-RJznAz6Kb2Ett01 zL|3;l&*xZE^R&7#P-l{l=c^&nb;i6?8{lI!Mj-m|M7l)P2F<}Ftu@tYORLuZ?$R0} ztq+X*YO*VAs*Xf2j)tcC&M?@cP_(R_>=G4ZtxICES_tcBEa$SS44p2ogD=IO#x*z7 zA3n&sHqNv@AiD_G)MV`^wxvP%0<@acipM^LMl`)z4~RBBf);a*EW5fjEdt@qhAJ8i z-MowUV9v#x@uye(%`}@mtd+fqKv>Jy7?EB{2cZz32UTW9!{#VXU9h-h&Bw#c;=K^p_cL|x)58-e{#~?O5~3#7KEe%BKStp0M|M!Q=qx@ey%&0p~UfcafsEZ z5PFXYKH{M;a4UQjef6#4P95>>_Pv0k_p@{a+2hJe$v!m0K9npnbdHMMhm+V?6LE6^GBPKN z8XujAzY1r_hnYg=r_D`3GfNpiflReC7{ zX#GjyxTv){sq4hXZJR(q+1b~&+Y`>HPo}jUfjeH?X|p?E0Fj(?m?aa#C1a+5w$V>( zD|0vHn50Tf#k-{aL=w}kjfZBAc|Kb5Ro-OaV|)?~i#2v6w5oY}Qezj*Jw*PRVjD9) zS!ThbE9f5sTjK`z#CT8hWVyn+jNl_|@+62cg4$9vnl|E~Wsqc;7evYnPrdbNdUrfg zrgyZLU$5ifa*TN-aK<;B2=z9=XBwa`AKoB3y}{f^3SU>78YPp0kl6Y?MUP%Y6uTz5 zokGqB55&Rlmr{w4#fAx0q+RsZWO<{l32-FP^rxEz@@jipw-Z@joxCCa{iu`68T58h zXX?{*oVzBvGFFd1Ncz5eptd*g0{4Jy(coyehRA>r_Wg{6U~@=_EGkyvxT2)jLs}Rt zy(t1v-%wjKRkBmHeS@fo%;Z#UKgSok30=Jl^sT6d1RhHd;_ePQ-2vu3l($Al*zs(! 
zO>%KhcR5=4%hB>uKdpAN8f5rGoiFXfDaUtu!fdm?$WO~kv0VgAeEZ}iJFN-d1n8_L zq-LlCK5)961G>us(d>1s4zlvXB^iFh!O)akM z^2>f@*uE)(F@8DDG3?G;*y+2Sc{g9K9klo8=b;M1_MJhYlNNi-cP4`FF293<6>%&| zJ0gvbfPE00F6ZqyfiBd~Ndi~(lLR6p>!Z%lcU}cBTkQ!;yq7vcHjl_BsHl zM_7(E6^8*^BP#Z@>UP|<+IRWcSQRMaoX;p~cV!!sDYbxnMO>Oq=WFuuPEFEll$5&Mesq(oz@X@(kc7gZrPNy7uGyX?S5WeVf!%I+95b!_}O6;07!{F z1|UZ&$&FG`k}XWh@;p|d^SRC@`YWdkisIn-nB7wUjI*uLs-WUC`p|(A%jiCTpH>*3 z5o3j_dQ?>oUpxY~tlpR_34kN6PeE|(HO%()E9h)Ipx6{(xarUxA2B}Osulr%ew8FkEO3HaJ7;t!gIf* z(Cy5ibZC#}yvs!F$fQI=a!l%D4*_k84o_>^ImYklt~FB<@-$RXu8MMz%0ae9$XW;o zondM)ezSqX4)8bKyj9t0pYWU~&t<->exSVHH&6Hw?gZY07^cy7@u9n>pQB z>_=!J`Jj$15@!j<+CX4amid}&NAxaATcoEY7Dww|5I+|uy?aDQ1k__mNjfE3pCQVk zwVQQ;7wL#->6Tsuw%+)^T5m5yNT$QuyxqQNJI|iad{g~XxxMUl_djD)|A}1*|9ATz ztK1J9h6vIq)WSyG zp~Lvqvn>J-d(FLft?#IBx%NNizJmWd^GBJ?KP`N3!27r3ue<+z_ig`o_WZ62|DU~o zP|p3{neoEEIb$1dsq^YTQf9xo$s90un&-@m<|;A|A2vUt%pWTAx0P|*STNpX=@DUv zUjzEhWR4U9hY==q{t%g4E9u}MBge=SEz=p(@QbmGLl`05(2+=Q4c9p8Rq3auvgZ>d-0<#*JJ^7gyR<>BnU za~$*7aqRBcIc_@fCf^`p@XUe>O?lhiZX=xCJ7}N{`ZP;JZmYXp9{ZlWA&&zWpOVMX z3nCAhyL9mqkNn`%r;!lY>Ym3u`V1-NLD6GgdRFXn=Ws9dzE&^_`(N!gqTXr9-)9S3 zjp%(Z^ZEgd(%F4`jM5XAx$5$jV;J?vFm8zL;|dwV(kHa}No{^go1a$CbT#~_HSzQr^GD($D*#f(lxf(sPv2-G%CHXyGEt& z8phCMgxKR)w^6AbI%-r7i*e)##$%^W8L?PC4e%s`Z zQ#;OWyE6FHwu{@I8GLT=^5CVxr?;Kh@z{>XcO2hw`uh3VBZE)PUfA~R;F%qVyKCJi zcbuI&wgX%`B9l;108|@ZA%lizq`L*Y%TK=vNSFmycD5V-)4@axL4wNzZ5dcKs#>1% z4jf!xFb7syGAQw|I5&_j%Qd2laSYM$9BRn~CS}C2OFD2-+l$Rq9n0$g^PB_moJT4_ zk^&&C^NuM!+ODZE*_X%FxV&6-AFvUQ{gTVB%IFMLe{HzvN2!LBh?5JA` zDiyT6PEHUNT{K|ZK)1beToOrAVCK{+1~;?kpt5eKG^GWYoBe%KySQv9%Eq-!Nz=MW zYCy|96sT?7*uokmRsK4487OdP@-FQxq6-yq6T8@BV!G6gGVqW}NCWNWbLt<8*JDe9K5u;XW+!Oc zB&2@Rh;(1W)or-0xaqRUL))n%O2|{I=%*{B@O7kQIwb;)mYC@`Bymd3$)&74nuZ8d zz!Vdy0O@W?L(MmLj|z+m)HVzLl{RFU0i zQ+#fp0yqBNLGkPNxO&4Y7c;h=p~}2p*0fqw|}ebZVjV;i1p17v*rN#I>V-&+q1_6#4+ zdzVn~M2fg8ks@A>FDwRNn6&Fd0NoAjwp@_cco86XqBdPLNW}Q-C_b3Xu5u~>1V43W zxhpxUfI{W=#<@EP8c*K=SqW@=ou2hdqGuhY#$2eF9ErnNhj@H_W~w*6FFDDyU%Z@% 
zBMbiefk&Ja_pC3EY{27C(mfzBk$lN1JUM-DTIb((a{n6!JSRBjP~(`BmUOSB(?H*UX2({J`@>+baZjnKoW9#1p~C;eqDy!Ig& z+HyOF7HlxrAH*ovVj@P;s1Dd!EmzWTn3}tDcYZy_mh9GCS_v+HUXOvlUGUpRX+SbI z#XTMX%B|4I%=hhAlxOv&0CyH=>-`NI6%}ckc)o~1v&$n*-!(M0#sIC?yPJVh*JP+} z=%eqh{VVBEACr1FP^WF(zU^*;p$CO)h{$A@84=vAyST6srKO^WN`ul8->%W!?#LVS zsh(Tg5X&1Ql+nr_Qxa!xO)uT7x6vlatx2d%CtY%Daa^jSWQd7dq?Z~=#ceIr9j&lE zUQixDt^=Nsb@_|x4ny?ahbO~ zBxH*!_lM=ZMyLD3gSIJme^@cwwV`i)NHNClg_B5A)_ zfR#19h$FoUq1gpnnye>j@82juLz!Gx(>IxR6PeqVTv$6>ge>9-Jy`*pvBkMXWMkj~KB<6C zOATAsi8$HK&B!cF_otc_Ym~dCK&?_Aej{eWn)|DZdo=EULBQv)gVFa$Zb(Dsqg4T3 z$_=r!k|cS1)E(TJBnnz065Dz<3koN;bHBkU>x}g)%K8mu{j#!liTFiUoKLUwFwNpN z>uKw4>o-x>@upY4;2FmKGkqhi?wJo8pIdOuY||G&Pvpb^Au3F0L|rnESM3#?31 zB@U{xM3MlmptyizCa>4D2mu!~Y9~12yn}VoTK(nzWFUYxkpH+PP}KoW3}5(OXf$gi z42D~j_qVvbrS`3HkHfyXbav3g{69|CN0{TZn!ly)_2#FZ*@$rS;;{ZA{gQ)NW z^H%flvB8#0dgDn<%@x&hM1B$}&B)eq(Hx10iXeRui2(X#jUmV=QIqx&FQ9wmbW?G(vb2tr#H=R(z=R-hW?WGr5(uP4KC6E zZ$lIFo$hIrd0VvC&EAco&9_JETU*TQW`C>GeR8^EqPVKPGBf5ttCNIUGzVLqq!sGs z5KdQ7`$@^I=stK>ptkhJ1)4EM{8IMK5;5+UmNXwBX3b%Vy0+4H+e-e@ z(Qda#6Hs$ov}tn;rx!j2!@KBK5EK(Au8X3W17RO1x4u#2ki4L8Gz^`NED|RnXj3g8 z#8nvF=_2DjBh1zK2L#mR=3o6I9aYGPunSn`a62%i?KPw@P;|%ZwwxVo;}zY;+&^96 zPL)O)gz&4#naF+2AkJFd1C4J6M;r>fK8#)rcl*18!yVxvI%qybTnw`xru_~9wmlJU z@kLrnO1Q;-o0%{!Z?4j@C9F8_%Z8PpV*h>N(dVOUZeKD757}VKwzzwe*O9R5d=?$@ z)u3wsyb78gong=@8yETn;Vyr(ziXI4{$`Z}3R}oWSEY)ZTcBr~ogZ)Y%kQATJ86p@ z67bMOSo8ZQxGiY+M6jtVKptg3qJ{ep)C%FgV1JMSR1oxpdvp8zz2YX*ZJ`~SfKG9# zPl4MO{zN%{rcu*;DIdu=avIF#RK+gQ;czb_ol4TC7nsr)dRjo>QF)Sh5i5Z2N2 z;Z|L~P^L&hY4WI4!mePH%aX>!o?!0*ZaQ)R3C15dAM6CU+0U{y6dddTf;j^0vO`~T z)XgPFJ@FMl@fAqmwu{nzMSD1%s_}J(>_0c1|5<<>UM=6^`hPMB7qhKSU^f3h0lj8h zEJ4}+nB{!J5{>s;HLzd9GzwfUB7;fhA&?B)Q+r261LnayS*GYQ)Bz9(5_N{M)M|YG z7nCWQ4QII50C_c4i{uKRT4@kvyFOZH2Q}(M77!7(qqD^P_5=tUgMB{2+gAI;(>OM6>Zn&>WmW zQk3`~h&TaT8#uax7K7EveM0E*#+5d)z>moeS1Uiyz$|ou z|Cq)e)SCa&PzzjU&>Vcpdf%T}_Md=y{$K2m+L`~=`{{!B`L5U9A9Wq{ztio7-|POp zOzvL|+Jzqtn#OT8YIbr1d!IRiZqP~dq+m_JFQITE)n7vwNWkhNV|f 
zFuQ?M7<&bTH!j8=G4_*@aafFfA}(DMV~5Nsy8zV~J4MItfhjg2M72~WJ zFADBvOOgFbxyQ6=-uWi*Hmm$}D%NQ{@uU53@aLqI6iP3-$@yX(+Vd2Bm`T(s?t zA@mS>M+V`rXAf`Tv6WJ!{y=ws?W%6Il%4YTX*BJox68( zJGOGM{myL|Ulow!ND%_KjlfAHg`VhZqONttM`V#qC4~>7ABT&Tw^4?yMM3 zi1DNt=k{Zq7uy9fE{gG#7*C7wj2O>~@thc!#JDUmu88fb7}vyjzV|S9rXPrm@&0`S zdo#PX@7=YhfBeYqBZ#;ol}Kk7ZGc`;*z{a-4xaI05CIRCGCW}cWw3SN{%)T9d7=># zOta?#x`4x+<6)q&8V7c3g2d1 z1|X$waf~4_@CncrlDHT+Fi_q6)l+UEXLxHJfldsr5SRhA zM?h-he>!z)50*s=#ZBWK4)#X`a}nnUk~-1;jzJXtofg;2bFzq&Hh7Z4fD|;0rwJsRTuW{yk};tV&VLfBpRY zd^^AX-Lv%9aqri%>aDBiPQQD0eZq_X%QN9E2wd%K_Y~CaUte5WLQx5-t2A5v5%-fP z-1OY+eW;UjGgCL_>hrVjol}jHsxGaeTWj)0Q%H;I?nryIGES=NT%@?|{LhUTp6`Lb z`5<2Ad*WI)RQS-^G20h~ehaZOsRj>G{2KJL|4<5r8l@4AU4uLK0FLHh%BO7*>0;_+ zqi!3rJQI%NUmx!OttWC{uVMsI-3P;>bP}N;UOC=L%<<@PlbZ}$-X~3`oH6&;kZP5Ll4?+@Lt33ki^EZ^!K0bgcyxvvSV^rJ=NM`{ z+8oZK#RG@5t2nX2-b6T@#$%Npgp|1M5L#lNhxEK z@&A@mN(F+Ls7pCwUAb-lBb9glP<`Lb{Dt+|toNzh>+b)^t*-w*_iFgx=Knh@_x)0r zF`#y;C(RO)LOUQ*oHPzXoH${eGR|@egEGM%_kkc(myjxuBZ5f5fb0Oa8$mj* zCxOf4Lz;l7%duU^4+SAV-M?u7 z5t7M4#PWKf-+CQU$;ftaABNvqfXtQSq#bNW3^zE?uZ(c8x6cSS=knZ*;$@tw-D!1Z zGj7K9MAX8xVDxVBjnJ}H<;B)r!y$}b{*dKQDj)rguLhBEQ$T;XI+CU)Q3yC1X zI1Zxtoy5Rc3!qwy{Bj5r3_WzQ^(nYW0z^-Wi`?bXE{jTDI4}XS_AeSGE0woT{BWaIhd?LBnVGvzg@HT}MC&Mv~n*f(f={vZYdCt3N z5*8?L``I&SLCh4^pkjg+3`!aM;te3zsN6)~`?6dmC2oz4V3NZ+pf0D-nYp`jA{?8P zt1ms1HIJI;hEKrZ8(}NgNUF9;v3ADixm2oFW5A{3Y-@Ly8x`l!x7sE^8~3v{skh0& zJ{Z8MF`KA?*IJ_+8%jC91^lH|eKg8)qXSzL=JQv??zBU6-UVnTxx+EvzH5(~XQJdQ z|Bb+0HrgK>W&z%Jmv0UsA>^CKolREN+UuKpd~=&`p7+hIzPSq)w{M02% z9kmWyN327+QELl8mM6fiAc9*%86>l~X>mEDbxdYzmQ2I`3WUw^sL9zIvNI4mnWWiQ z*x)j@kDC3Yu)&jorfppYau8F1BB*bh9U2ry(5 zz>qQs5L8)&-GD&2$I))t$q^9pqrx@_U8NYy-|74zUVwF zWm03gzM%Is0FTDTrGs^(kDZU3>m!QF^+jlAL(UjOiD8OiiD8T3h~ZWUnuqt+#Y;w9 zJTbE39Lm+sL0`~`11eZdn6fb4swln4$zH*z?1a~24x(vGu1l6Lq~ literal 0 HcmV?d00001 diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000000b 
b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000000b new file mode 100644 index 0000000000000000000000000000000000000000..48d06a9c45a896ff74d828c894dc4e28ded40ddc GIT binary patch literal 19826 zcmdsfX^wbFNA>hEN6*nS12k^pW)TEQU;vN=36LNT9(y$#Jw>1wbFsUJ zB-WO+UfHsC*S5U2thFRdme%yjYuP{K2>)FFkR6VY74rUZ*rAQ}4~Ih$-tZ9)9})H{ z9P;piUb~xNXWoBi*JS&g)zV~t%+w!x^*k4zpQ+1k}zQY21KZ&vuxv z0~Qnmmd^=R2pxe8jJ?1({8>hCXN-H^k_b;wB{RsyrZl~ua6PzJeZ(H1zVTzF2`1)G zzK{m$}oHjSBq=<^R@1T)d3p>qcBM0J8yGY9VMUfl1icE8i@T6f#`yt|vd-sXp`_3eRu*NlO@yWQK6 z9rV-gpc#YMS&cj0_4ZmR^+CHf=x(ibvmf2_2T$TaVw#% z_RO4!d#(49iIUW2_u}=<58`5fduwa6H;AcE=tW;AeeSLXgUtbIr}VXl8gpu+|6bhl z2Fax8#-L2+hcoCsY+<;(!AGr5GD}=D7#o--*7n)cV!Jc5ZDvcW-S1;uhkY|$Eo>)q zrj>TZ`5^AlKn5SSy8{eatGBtizUR4tZTu6`vG?xz!}gGoSX!i{mc{{LewN0*o&1Dc z(fQ}GhH3ja0v~~E8e4-rBxbEM)=4ge^`h1<>q4D&nHR)qOA7UZK-@`A6w0L61vY)u z88|+Ra3bV+k(IIW?ug-Kfh~EQ21lEPR^^|~UOLq6@g67ga}li0|#X$KVOAKQ-ksxPk6Q`J*&hcje2an zJCW<_$nyo&=G(q3$;ekH(M+LGr_>$`=b|9W`7E3$F=3lXYGfp$JZda(L5I1d0@FBO zL`6X#@EDp^v@u*EjX_kBu0lEB1Dc)~>qJRaDb{NGGMWe!3Q~C$l>^HYzEuzvpZlU@ zMHQSDjGZlFjJeN>3guP$3mEAkbgt}Q!T z-{<e88=_x6L{p978NnV z%vJd6T7k%NZH~1?Tnq6o;7~j;CulP`1||@OYkMxkQSK+l@XgKbL0q9ttT&*I8JmCS ze!IKT>aMz3ps#5YL}cHm6RAsk{N{!i5AL@*+x@}jx=+D%5NzNaqHsUn+Fa|lT{?1l zR6`zzL{hqW4ULN-41Wa zd=?fBS;@jyL)NpfY{(@O_L-v^SiD>wQb!!SDO@Uj!2#D_!s9Tw;QfuO;0h;kDMmWn zxB{*+VdWrv!O1o{L0rD}eCC5NutJyihP)LqfvF9S_|XKjZP-oUqEm;n-&+`43xNoZ zSkXTe@;{2A`d{KdaOa2eR|5UB;k)*~4O`ytmhJ{0PyCZ0_wVKpu_LU^j^PyftSEw~ z#$JerPBN@7%xPK$nEH6I9WfhQljAT&!q74;mL-Z^r2T3?Es=d#J*7s;SQ z9WIs{^5Q?DEVNr&<_Dis0(BvGadq`Jv5K#6+}iG1E0=D$E7{4DOLocY$&pjs@%C!` z(7%!$Os{WE-)Q&xaZ=Lll(|gvrD93h)RLv$Xg^%rY_Ap*spBHCkw)qWlZd3MYjkWq zdPv&s_08>#f!*HPiZ@mhvE%WZxOKPF&kqk$b7sztUd`^=#-5om|1pS-xPmSG{np0z z`sfgN+`+t%h%%IW=P3>m!yM9+DZ!}9G+`!97(dywOkSGyAbyyUnNz-%HZ3ipk&-=n z_Gbkx?xaYw8&)7A*}+a$4Fp1M`D;V8iK9l z%h$6!I||&LxTEK3;&hht(XP+zN*eET*+^{=lWLyScgMn-Juz 
z*4@}i!cky+5~L8H5SW*!Fic3_dvPc3en4WDVPGn~`2Fo}5Axs#5I_+NxiaoKJqXcQ zuesDu1|#3ca>B@9K9fA3qt{_UPP!yQ@Jd~9N)Sl1;Ys+Fq&_k?D;~4Wsns0oz4of#P@$g<0yT16ry=ZrQ z(%NVPB(^pE?D0>ElLT+F*N^yeOP5oU5tI38C3j1MsWCrGXL28yS2_a09DxZE>Zo{1 ztXP-D8N3VlN5upVWp%+5FNqgD@rDR+0`gNhzC|cLgPaiM&x?!VFphC?O`NnGA&>Lx zp16tg^)(Mjf*KZh8q63fS=dh;Z8^7f85KV4JWqVF&KP077zj$hX2XgbYLs zQu{n4dtX92CYfB6XgB~ommu7-i=knbsdM0pl}~~!h>?a^QEwBTjW`TtBC3z?(DBI=Gw}(|%16$4J(6-Ck zw@&a{V5zb%hdp>kJxh&3>r3E*(2(VMYC8>u$O$w=!aWFiTcYD)n60cPhuz^Q&$mj- z2$9Q(e@ynVtR+=W#>^_&M%>(Yc09||rp9v^01-YLXt>V=_He)g>q66X8xD;|USHHp zvZ=uM=D|#@=yG`CGT8R9?pvxdR{Jq^JgJ?&mtArMQ64D!Y=VKWmna`9i{uyZ9EXs= z!vm4XabI2z;fe?{gAsl7ao`_^JS#i|_}M}I;$nCTG#F~o12Um-US^R1KoF@+q@5I+ zcQ8dRq_s8Bi-V5S439yl0T2Tpgc-SN)}wmDqiM9b5TeEA2Lh6k4nyz?59O_k8UTs# z)Tyy9IHQrX0r03LxnG5O%d;_31iV}fNldG&IY2CE9vZo8>IC7DvGy1_YQpqe(0mxR z1z$~OgB2c6vp_gYB&^WmAZJe?e+uNF1Hi<9-#|c`G#wI=g(YaCR2n-QD3LNgK(9!I zhtoQ2WP-Ye%>o=UXb=~9di61lc^Qtt$1PgrFeeHW4-~B^%K}=RBn;?|2Gqn`Wl_q) z0Qn}eFd)Ek7FK0Z$-*`?T3OgBi&_@eWigqB-LjaeTq=vjEId&bOIf&F7RwYCjw|*h0Veh%aO5@y zx`=h;gvT0^pi0}$C36eA#!W;YZAg5#kOw-ii*DI*F2MOONeoS#HvgIiMjAhz3I zoygLB9w>DKWullP4S8ymB-Lw(d#WMNjp8hI4ROaC^5Q5?s8@U^Jlv2ks+S!#R<`i5 z=lfP5eTgZr0^7Gm6OH0Daf;LzTS3}~6b2k3L_6UY@Pv3ugjNCv(Bc4EIxBN50@f=I z;^kOjUqJXOV3irIu$0r8wrMVuuBypcpn zu%U!bOhMGQnGKm9@CCu4-z;QM0;YOV6!%+aKj!lLd_nz){|DCxQ22gcum$F^lYE(<1%gu*XP5*mKq^f@uTVAuenUGf!ex}u%yCj} z=rkR14Z_=eN%-DrAPp=}WYCP!ynr5TLQFDDc{mkvre^0DKL8=0lcdiNQ``}9j=~Xz z8csQ}M6Ok=Uw?~sga9;o?@CXp0>HuM1+j0vTaX3>Ed4Y-Q?3i?s#9Fth1bIe{S8#IhWVswV^ z@WG8s1p)_15dlc-!iecO&IxiDhbDJv3)Ll(%u{V&ArqyIlpOc++51hX&w8t=KJO;s zkA}kN^HL>Vt-bn1V3(~S!wnu1dHSigZY1_LI``wwgI0TEwUv-SGERhoEUW5Jn}!(a zWM{kAgMO}cA6RCq-`(iMEx@6j`;SrdJxRm&DX`T<2}qj1Ou;uoTMI+nHNm}I)0F#h zdo>9G%U!E=H=v7xJ}m1-LfTK2eVJCzG2D0D8|w1R`y;wMqq!??L3A=zC6Y?NfDs!P z#ZndBC!oJOrLIo{0uQ>7SqAO(t=2lpLZszOT2R>4x#I9W{oIeJ{4&WWful`lGwt0^ z2wqP1_$RsmzOpn+|4I5o+`swnhS~Rs&lKW#vP@4H535qh8LMvX^Q@-`b2}vVi$&Vj zfc6uA2z?T1;;@Mc8G?bru}?c%;XOL%!6S$Ti7mbW0B*R64+G994*D&N$cU>5u&;+W 
zK|B|lFt86rhFBGNc`veYo*neD?y*Zid*nsV84sGVGm`z93+^IK@O0Jp zm*uNTCRg|xE!JnIPvdxX3)U4JgJ8ue=-ZdM$>}>)--QwjHWKK`gr39GaBdie_H-7B-vek*nG}w9l%|GxVm?sVD@PM%4=aQYn|<9(8H2o1!l zqJ?zauy#fSN=6dO5iAE~UrdW#k~J0NtURQO6KdK2(ZoA~7d3nrSFS zpk5U851JSYh&>Xae}SbBjR$iB2YEafTh|J0JCn4%oFv8Qv&1wtsOMEuygW}r7Zw%c z_0s{Eq2thB6px=^;#9uC#97#)S<^@NgDE?pN`1OMQ!nkCtxwkH4&@8E!c4Vv zY_>X8EuASnZP~E4a)6=`d*i|e3U*>(qyw7Qh%o#TAQtFIj0qMgRgeSbR79Y`Mrm3H zNJA{CD!?OAiUor((q6#f#x~68OrV(|b#$uUu&{jaUtq&03pL`suxFuAM0h*6IL8nm zUBO7rqzI)Iv@wo`uNCI7X%X>q)OhZJ3x}%2=u*bY0&54pFDb#$LZ1UI4Gc>VNj0NL z%Lhwo4V!=vl|wcV94Uq?ga@wBxEg{i^#a`lCyw!;Ex~jP9mY^03|q6LMpG1Vm!Kit zHqnE11ec3yNBW!EN~XzqlXTmp>M&ejVZ~@UE@id`JM}z;Vf^)Y{chZw+4Vkdls3C7 z?M`p)jH~I9Cor11Y%7?HL?{* z&D_%Mdk3fcDR=I1i#KAR6P7oOKZ-kG=||=v!~_?KzjyVZ88Bd3OZaz-6p* z?*3q~1%4g^z;PZaT{M4w8Mp9?CY>(GcO!VW<7EY}PCt5LdSsmE7_3T7P@u(I;_MwuP10Vb#CZ)ol z+v#o@ON|!z@(#uZMl(LyPNeh4aMpb@0jFr(Qf1i&G)SXPKgJXe)@@`b37faon7Wnm z$Hs>iGv$**w6*&lj2B_b*&b|Tu!C_0OnS;a$@`>(7{|z-KRO%t`dQ;_c2?NlhH{UC z+i?`8wZIy87$Soz`gvlabnDL5*2>MRmtVTlx_;%>t&1;Rv7nAmerh2NJkg`Q5X=T( zwi6T-*3ooxJpvz_^&t|Ov$zObtvC#Hh1But88zXFOV)l5nStt?SG>^i?7uZ+M}fQs zfg4yb*n@>|2s9Q@xymWxlE=*P0uLH?cS&AXH83>RN|0A4fC&Y5Ugv=*F34qdfIuS{ z;E^a`N0vNh%-sMOr!g{+9t^_E3-Xfh`4W6@HSp9GU-F!Wg`V04JOgXO`Q$tMO3 zZRn$A}IlA3CsLPczL6GSk4qG(eHiW*Ldp_OayVjoB-3 zS`950j(S~r2LM3L;X+tKP%^l z!8gkKM+=Y>e$@D~@P4%PrQp9Uf2)xDrPGrzf;z>XWnH$(dhDB=ALbv41L6qopqvx0 ziZ6-}#Y6E+T>LT@f18W%aO=;5d&gEt&}tQ9TEs)z$tn2Lcxrei@ytNVgB2ERBbk8F zl{JgTLcV|^l1W@Lk+9~H1%xN?)bTX%ED8ogTjrDvD=r1BG%wi1aw4yk_XVs{K`SrNcG#vp_IUzUzD4D+pH`4M*w5JH9-?Z_ zf|-Zm7g%)|eNjXU2s!(L2f6J*F3;E+8H_!xNf+`ODM8*K?Z*vLdt4)Z$7iZQ4nF4r z0sj2S7UTC$9%16S0|%M7Fg?e__17T>|I{%!f1MwN`z`(y+~49CF^;F-Jpy-|FEQ(H z@%#9^7jHkyTxD?;-q~*qhyxbwnWjn~fKdFjUaw;Ok^ z=9{m%*WJ&$*W5Q7t5fmRoiDuZ&Nc6}UvA#%+;H#2U#Pe1@77=6zOilT1witPSPHmY zw3LXAi*)nI(fdfkvh^at_arbr9m=0?~#}k$wPY0=ZS=R*ah`7pF7j*AsNv2@S&*gfh|UL^AYI z(jwP2p+ksi{(Kspvn`GgmbO^W)l^a6p6uOKd z^v&pUT}CBkNU2E|QG=0DO%agSAqfu?G4L863Yaln!qqLbhz>zr 
z<+_49T&M}P0v8~d*C=I71OWzQTDhE9JkpDU7srKSUceyJv=yHo_YRUNfp*f(Fb{D# zj0$E1EC66Q3CbECd@F(`&;Zm!_LsVfSWtq<))k^cp#(7Cn1sq%Pa+yj3VbSN62;au zuow$kI}-nsc>v)b!keuEO}dw^NLN z#9T8-O1NoMJ)fv$l8@X>+l>p1n|~Isj7&j|=Q8T{orJW#@}cVhw7&R zeOEt8FyZo8xCJ}SxQCl^j){y{6Ah!eJm?PLh{xIEJ_^TFhx9uzilMeLMQJ{z$?Xuu z+1T9Jc|GwbWJmkREk~HoCTLH4w~6tiI8ZsG9XF`0W|0Zfkr`IPgf=NR1v8Rse8z|o zCbT<}QsB$z0*BVTk2GhPGQ`(11IGXBcD`dbLv36V{ZhQqATTV`OQ&Y6W%}E zBQxa;rrUM5h^&`0t#zmpPkmA_ z{TRm3lJ3Nk>Ri%-Nw2)_y60*e1M<~EiV9|1guAufzmIFNuq?Ch)9sGaZl)1WV5~j-iy)2xE_kV{L z08hq1$F2eIb?Y}X9T?k6{=YPu-i`le{T?%!P-X#T!0?1Yg1fN5A8iYdqQjsz2d$g> z@oI|eJjwe%4iN)Mfw-4k2mMLgY3~gj0EeV3tW90~b}D2>kPrD51R3K3lR+5w!{EBc zrKV0Tdg564d1opy00(sDH68k+B`{{86d1OJFTuKlhx2X;cQkPe65uQu5ChsnohYKX z025@w zwn7NC@8DMFBCzx%t_I3*CB*G* zy$x)ihRe4bvYUjde$Oo`+QKGY>PsL+b20-~xaAyhMl z=-1ZJ(PO5&X5i8*{b4#=$3`I?V;95kDNGFuX~zjNC$$9z&QHG!ln%2XxCm2WP#l2f z0#_}_{YMEY(Rst2(6 zdZ8ijA#y*I5K#ArB}AfX)r7$lW4EZ$0X`-VSI~+E7Mq3PmQlDMOY{dFEG&{D43MKq zvY5ellk>0(b%gMh`Dpuq3jf zXjM^1Q_w4Lm}C36R!u_*pbIHn(3bj{C+95aX!;2`X$$`Rcat^Z8sjut>>pih@#kpaT~?wg1`%k%FG04lfo5zHYblxoK>U|8RdX-DllVp zMZLZw!KMU-#a7H0!?WQVP{IT>!rgHy-z({8CaU|s0M*k06kliZSfj$B zfutz=Vyz+P)Z!>=sB>DDQBGd*agR!0*~GON`$`S#1C`Pr5y3oH;T4ll&=LNA9c}k< zlWGZLf|kLmXQ&B_mWO3x5fzAPAN7_DB*(N;KK=%XCFuUyo7mmm(?!C{R_x}C%Z%@DPzLs^fzu)--@Atefj(j7^5zAMW?b!R= z{Wb6IH~qh^#&iEn?TtTR|Ds#`B`;^WR%j{9iY(i*a^mvD<%=s2mlnfzupCD@wsMqK zwuPx}+twG>HqLuuEWL81dST=0`u5UVYk6gL{e`7BTALeN-i6K8?enYK!OH3@OPhqR z=*!F7FFKd6t_A!{y~3;OTgw+)OT;O4o&8}%&cZtBZq@~s^*evy_}1U~(K{rTE@iGP zjYGGz{~XIwUSvh8tYTY5Hf^LUh9c{>*oo9_vG!v-bVA+q)ZmdUQD#r*N)X$T9XXLi z7`m0@plig1Wz7u%5>$$y7fkz?y_<=nJ#u2gR1n#y_BZOu^c zRJ&@sb`m4fR4>;p*^O^fvPU`IrO?`;#opn!V&$$Lwy1YE{EymyBRiyMMAqRG2V*O; z51**_rIL=3ggd3$L#pvK)%f+PB;8J_ZXs1EO;y?{m9=B41ycEGDu2gR-`uIJM@gmA zRC>o$e{-i)&yvbcQ`w}7x_y}PiS^b_sa~M-lti~<6)^pJ&Zo5hP47hbRex{vk9{5g6aP2u{Le&w;VV(D`16r%)m2f|)TEkH zdl)B*OjV2{##Xj`+m5&$*Rvd7TRIop9wNZ?sKK~MiO?|fgrv1*Tw$+aPX<>b2r)+@ zaW`Xej|JkMj>QaY>Pv;itQ3lrFgk==?H(nj+Sd^Gejm3zI7pt(gjKSfy~87x&X4)F 
zr*)}T7puH$T&#iGfLIe&6RS}js1_^Ds#_^n;!3_!sN^bMMOR9dTD4wPp6@GP*}2G8 zf#tg**Njtp#&UALi4tjooz%~FBZsf-lNh8CpLRGNy`R1-qB2V=!iA{H&pA2cqCi9| zd5Smn@a(_}B!*JCfPW?{)%HfR5Ja8HPh4qW&d({To_aadBFiICcxE5U3zWBT!N}#X zZJ%1c;+h@6Axs>JDj%+3%eSvoP7pU^`>iSl2A#2dCmDYvL*78FQQ%^ZNRa^oRFX zGQPOEaXG88y}Gh%b^Yd<^@sOoreC6lvG`><7WyNUF0V3mW{tbBe8pLBy_(xN|5EG1 zw!3v@ZFSq-maY%CUt5xfLYw&N>dMmg#?tEgdTXT}!eh+C3G;AhA5)3V1Z*F(v9-*vOq*nx3&}FF66d%q zN$`-l+a?up64%U_zArOtk~_-4PCRAHoaqPRS8~eLw!d%Z#5?TqMz%_Hnfgqwl7;yB zwvSgBhSZ)26$TNemy0odW0q7N zPAFn(t7>8%EQq^Nq%4&m9I|Y;nMbPIIV0okU}Q|bie=+j5-Vrdt6Drqs)$`}PgrHK zrbkAxMoXiWk;-tz4jYv)tc8t|A9`WEQVHv&YM2Y%aIB)jW~o^jD;2__(okivGFV~? z(CM^*JeG+9iO*E%w5JUbx#H^}z&oN|#4{q7j%2FHW)o(DGy~GI=!9#Gw6|d)iH@*O zzcYtPPRkv~$xCr0VYYJ8=y+0annLCbS&N7G5iWC*)TSG(2xkI-?WABa+Y=5kqf_a+ z{K?H$D}DfZvbnyz_Q=LcOP_5mU%YM_SYBCai{)Bd7PHE~xODz?q|Zw0waPV~ms+p) zi0@{P`0h*gws#NtmDbkw=Emzwt;<)oU)L*}8&^sfDS3&V#}+|L6c`u}$$#{P)^;bv z6lf|bwIyD}6w%r=+mwq1gsy4%(mGAw@>jRg67o3KS{KRX#nrVHhg3Q8d8M@#c6|%R zd3l4zJw~?lHSYGF6kc^9>_9 zorq2iIciz;heI=C#f}OcFE`0fku9WT?RlZoh+Qc`=C@viOY^}6F`_3~>;cwN1ORh(*M)(Bp1w=TLgg@!Oq(-6vJ6@lcog^_L) z&+myRGkD{hN@jL1cU;9WxzTgX#8_rx6h^EwG3N%tyvM>@zWHnH)=ih4xbiF*fN47) z#Yq%e^D=*@M4Hg2!hEOOC-gy=-Kp}FP&MBmkw=zTY9Nxi-Wlk`gjvv&d ze$;(JQrpvqPegc(-c}wse4+t!bgC4&hgIWXTnG!o*m^j0H*(ika1C zXs7&7cQ+->hm&O8=CDX3iEQOMEhZt0vvxxGPX-Q4RyM#tDZMATiWeOy)eY5zhUD{M zE-5n1p`g&6Ajbs3Li15BDa9;1G(nC~N3tm24iV!>dM&AR%AU{a83m?sh{@}Q>+)!{(+Z$L$nofk37 z4%sYuYexf{m96nGOKA64U~^1hIl>Dp%wt>UTo&&lBUz2xN!>1y!7j4Aoh7S6KE$lH zRMH@L7$bXvnM|fKn-#vTWId~FJI9Vr zCLjf?Q4YtPEQkZfY9wPSvSRC7_h&l|&Sb{z@sB9yOX@RP|FZiN%KhJRUk$vEmVUk9 z|Gly2b6?)|(<=N2x)_T;~D-@d{R-9Kc_S+`oZS|=#(o3m2$EhMV4YACXtknnulpZ%tMa}>>f!KSLV zD@Rq@_h^ti;8{7=$09I&%QuHuAycKtK~*Y&*41La5psZLS?__iSntjk#Clh1JzX?kYbDCCth1G>SeMws zV69f`mUUOdV%k-tIb*IoB<^)%zH-;B z(stvWRSw^p@0`Ru`q&}NW2eM?=rG$*``lw*MMcj`WJUNXvkOdXStPb$Ti_m(C zXm#6!Z`7bYLQ@668$(HNVj5HBI$_3B43ZQj->wJe z!QC8+CJqt0o>ZnlI$KQ3IJ-rfAm)fHgS7CTa_nw7-B_}bPV4FfouqQkO5M@`dcf?` 
zea;=UYs_@R+-)iq4Cpg2Dzle&9KWJ`4i4HCA(u`^aUtV#b<+(|p|2@`A~H{M(3xBI z-$fChI7|sd?1YgM9f;VWP>SQ9IL-duoB#-s(oYT!6m>G4MpYsL{EGRaFX=$>X!nBd zB}Uq#rkkmsU!;!G?`iKtmk~|^OtsRSQ{qTYnqv-A)mRQ3(y~nP3^(abPmZ#4ewBX} zGL=(mx=So4J?Y3Al8a&q%eBALQ;bd%%r67-D*?oEKad6IaDXPCbXN2m-AitpH55ri?L31Pa&7J+dQYYL?j@qLtLt1KnA?fmqgS?9H`d)pH&AUbL$h`~)SMA%AsQq~b zlE%gjw=B`LjxIu{ao^bm(Ms#e#@6aKH#%D_PAJolm#=PY%U92NWwq~%vpN_`w=S=4 zSH(!1+97qh$1kO@OZ@fiQtP!VtDCK*?bXZVy?ps*2cUgdS1$Kh>)UlX6E!6tSvtS5 zacPM>wy$pO+SwbHB*yAW>qg;lfC(>2`pe6km$0sEu3l&v4@0hY32)=(tg^Z;*IY|$ ztDWxXJ*15}E#1hj&2pT~Qh4X2*5iA9W4+bBU^7x;$=sBc)>b*1-MG?P&zPzAg3Mpr z*ro+>+;*7WaW`J&O0aCCKyR;B zQlB=xObSm%$Kp$&AlOMIT9ps(gW?%wxQ>Rjmwu;x3(Dh zh17%bZ>Sq=BhwA#DV>(H?0nl;!);4iHs@y&(n@)oD^*I&5ys4!*D zoivt;SGP8Md2+{l+I;8QR+`nl#5K+RZd~HB$GzzvE7UCf+nw8IhbNA|(+NJ;VCK)^ zFfVgO9hcmF%AeyGQU@+M5|O{I>K5u$99GyIRj@9}>h%_@(93>h)X#7F`8S=XocBBL z;3h!)UUVK+FZq>mXTv$cjXZ*`=G zT-@$Z9W`gqIdjqt`#5i`a;PEaSkevb?>WGjrcY9bTt=u`fgAIT5Y{BeVsga9^`ysX zlANY-(M) zdDG~$#Y7L#BeJYp5APJ+E@wtm9vk(|Wu7a4oKLFEPUTuFr)>lS4d*BW`a#Dd|C}zo)2J(-xm{ zJQj}!@u&$M*Zafq(B2cf?tvh7BUk@Ck;1X067LFkm2^@~YPER4I0we{7i@8b14&&@ z`b#dC6+t|RD_0AhJ^DHKQJ+sJkC#_7F_K1D4+}}t{XgWayue9h`EXng%hX%f%D2Wt zdLtU@-VVe=VL2MA$E9#2EKTV*qf#73We)xGap3+1D(V zCf5!@>U|5TxD-i?8PUxsDGd8 z=iG154;`+UfSGdolXf@|*V7FWY+vTI!bcNL|*@BR@kGCk@?QgAXJ){+So?kei2 zmP{)5M`X7xpAcquT;xKhSab)%QJq9W!U6piA$&BROsRN0^rEq3Pno06`EWeh8;>a> zj`#8iqyOv=boiJ!8W zhhVDVa5!VCs&T?p-%{eD&Am(o&$OB98TVm7mRqBKrlNOYs$WoHBg{|f3-CwIZ9zBs z6QYJG3mk5;qY5JhQ(j1j`98I|CIy8b#`;JJUJp9|2O(+rSmTxs{kn2ZtOg5ToAM#f;({fD^v zxS%3jFbo$|gbV(?oSw)hJ=wzrjV`b0qmlIffc~%$EE*AhfMU^*;Rk;{9DyH(=+T}e zW5OA2D$4}=@Mn8C171j`4Oax2v_aptEp6Zmq|H9V6(Wen^iKkq&yTplj2BVpyohWvxW;xsRGnxNb`sB+oF)801%tnD{jn#$7rXG4Zc=WRTzPFvvgdFo+a^ znCNGaZ*mFQWspJ?M+L-$bWgV&eQTRVzR!q^k>r4|h}UKjL`KRY*Cdjw=yZx1;Ofh= z9y!OAj>zP}o}2@>EYWf@nKJ$LOpTMe?ixPJ!Xle`r1PN5JgAum=b*lqcOs8V%zQF0 zlWbCel^%~26RFyxZ+B02vzO%u7cG+5Vfk?onMJbLL6^dpgvlYJ)t85%7q`(^upz0V z$GC=4%uaG~NG+o(J=k%d&rW%6O6<8pf|(@>N~j`|}-evVGMR zhq|Jv6dkGva 
zoE#h1t5gFgjZ2c_?j`AFnJ;3v0L8cD_9(s7d(z)9d;LzgLCOXL(YSx?_}wL>Nj0L@~8l3utX??4DvXt6iUfm4MeAmvnm9X`)nppM6c)mR66`T zNOiZ6w4Fl$dx%y6+NDX%*e(>4yE-{Uog7da2wkrz`ULIjvZjioa`zV3CcHGE|5i6# zNHw8(2`tK>1QJhi)a`b%MzC3KRMd zl>4oo1d;T8JcWdp@dKm84dHZIm-eO2O1P#@MFxaI`N*s6woBr!MsyR}>ljrrSkx&* zF6yj)4K>jdc3EPvMBXWu5J@824HI7~a@vUxpeS945zfU{?2Dh-Ltc z946y3b<6NLiLT2S+ZuWW^_A4`lngUfE~4O&({e)|aJ_v9+d-Ge;aY!h?CYcMo!w+P zeUglscieli9hrZk2w{LWLAlD>I;Wp0cvKd{OeSawz&N5QFymgpjTVRr=Tbiv*#(cg zec3cVb1RuRG8Lo2U?-)92WW>)z!mTh`QdM%>Y3N8xCD(kg~UW>zcJ~=b4m&CqL zRt_F6h7^51ESp+Jzb^2eFG` z8VyTy67pKVrw~aeMLir8mJA1dAQe3&6M-t_^dt%&O?q!>kTX^oLkBkD1#;BA2fm7%i7nGe(Jq z)@UnLcI*8xj||s2VIm{!wN#tR*KLx6JI!%+#CemeQO?QbaskSSg&c~CV4&n0(E`{w zm!5M4bKzVx7tiJA3UkG|(p-7Y`g%T7IYI>tJ+1>z^0SIbdd&TR{eLO%|91YV_Wu#! zhR>p?^pgmNUkBXqcY+^t^1sI#_=mA=HIx9`_NaYo=BB_lkII0p^w58C(T1{i ziw;m-j@X7p3z)eIv|3Kis-^@>Yt6x-VA)_m!LJUFMBMWZZhK^mJe{GMV>we+&2sh+xB%TI1k9jIgTrE#CwGfA zFgPUElqJxH;pSko(imy_jasAFC^gEBfGw8Ss5Y9-p(YC#&j8&7&*q`G2$aC(4q0#u zH$c6ynCd~jrJy(NsU?Nr3uM%pi|w{Y?`IF>jhzJ}6m06xJHWTLTdvuˈp`w;Dcw~u(su6h5*!rk&2tjk=>$W8Ef3H+7; zuluhn0TCS>9ikgH=Xzt*clUDXAT}b~ZV!@%)ez7^6;0Q2J!XkjZZLH-XA7tf zx#DmpSF*|+mX%a(g7teb;m{A|)DRo?z=3q`k;Ifz>(JAoBzk;-tttz!NE8&o`hthZeJ8F}{Mq1D7X|I+JG? zmnW*(92&|lvLBZxYS^8CUR<7N8A~QTxV%EYm*Dbb3nnVxMu{7|BFiS$*C=~mgUdsL zLkA)&yWuo+17zM;wt6P7efGYeP|mNbk9+#J{NGpZ*TUZ}dEY<$F9!T?9C|+Y8;3t) zhre~p|6)f!a=qOBmajlpDcWB^FZ-mZ8~Vop0rL`o9g}QD@y&cLV@=-(2$R! 
zH2XgLrL{L{bglUxO*$Lbz(CeTD$#?_N(Z~TE(_4y0OX{GG<$U79DFQJoIbJ z%$gEfjhQSuI<=nBsf}d#IigeRYu^ftwk->Iqf;9Po)r&jD~}#+VIZAPOAY4J zUnIKds(y`mPV8S$23KhPN;=(I_j%mvJrrOZhve4Xu~D(TD{rjZ3&uJT7bWRE`I1<7 z7L0XA!Fb+ZHc=j^80)XAswnbWzpAP-nOk2}19h=~S`A2LtUsfg=KIg8rup7rKE-=I zwJr~vukTYJ2l4&50&9rwDBBo%HZNQ;|nz zjCFXwv3^9&9&o%l#d`UUJH^`N+$Glj)cQ-xG1|eGQtL0YKR?m7-@N-C%X&*Cla}=tjrk#Ce%hGJ z33dN4hY1|UNsQ076CF(suvU3vljYHzF%Pb9;C}oKQ5b&WjjNbTAEx12oxdb_#H~x` zFt?5QYWPmv?|DOfKB}9B(aA-CS7Kad`z9g8hz%3WuN|#x*zww+hRWZ zEGsVig%7cevFxj`hy7~!E}VZ=ZHv#>)iN7%=aqAlmh*E;%)e&LpI7IuT6*^7O;J3) zxQKb-WrU@EufK-*6KWC5eTV-f>c{8bLx8vP3f1s_Qr$ z@ZI1AVt>Bl`yNMp2T@+a5_?4GgmqoK%r^T9o{WYtoW~} zdrw$}v5%Tqh1pY5Okw_2DW;H|k{k=KhEHP^e?UFztGM`MihB8_FQ`XOSRYU)-%=;< zS-5!Sr8C=S!-Wqi@58DTEL?ibTX^BI3yT-dY&`W~@IiI*sl}(xytw${nRsD+@zUax ziyP0apSg7A$uk>E8_z$zxN-jJGlL7m3rnr~!V8NpE#4O_w;l*i1y|2LH~Grhm(RX) z@rlLOnbzXu!oh{t&OSf+J?i8e=iWHGdG?{;;o#n2B_p2doiQ4n|kp*$kZx+ zs48;elzMyqkcw*D*>mWRaAT8$hhNBvR%3*YaFcJ8Dv4s8sek*!Zs);Px$RCaR@qO82BDjY&(w zPPS5$v_Ujf+C}~tqJDi{CTUy~2aHrZXX9x)s<&cxPi4B!EJv;_(rWFHSh~5VSv~dW zq>>7QyC$)86E_c8BT?lry5H3_2wnAqV*JJ`}Le3h<>|^EhTnJ zA#;?)^RR?y>q-zi8Z;Mr+LXCPW4udP`X6m@*=}`O&yh+X%GQpkw~O|+sE@ZvM%rqo zJ}#vZs}n}Hut1PQy*{lTHwA{`Fmvh%sJe2EGA@3So&@}pD*FMOKH-lx+f$=K^PQ?_ z6R`mc&>~?6c6egh#*_{xFwMkdgi~@N%@7Wkm}Web{f6t;0g*NQGKESG$``e6B7#id zuqH2F?Q%g6%2&>?3Nd7Kh>x2l*&{q)TP}&Qc1ShDYc{+A&7ZTpwscDA@aTeWX!b3v;J!<*_%d*0ZluUPL=Aj z6eLbF&xKN=HYM(r{r#V`;eGAixz(bTZ#vk6z4v{mg^31#+Q~bYW{T=7vbS-*sc&i+ zM(Szz&Q7Vh$&+5qSWos5;szx#1Ry}h9aK+wL|9gM+pr5!BvyxF!rCbw0MCp@fht|o z{YE;chKP%-p*uT~Os0Na13{BZN0yI#NST*yk|B0peB_T~8xe<`pvuS&Qt6Sl1t38{ zSSt9y5zx3=J0#7FOCFOdXb1mm@#_882V6)~w1p<3Su)Sk_uv-F3u|kSiXQydsZ2fpI-gS; zm#?6eyuO`1we)R44)N(!fqX$;>ft4(Uc^J~VbMWut(a0DZf!5~PQ&usghBKIAFhKs z!~%PGRFs8J@#4_i$35hqYCkMwYWwydh`RnH?0;e+UX>TFZumsXx4}d-F7O!0`qEX= zl@@qFTYG$~cwuc7?(681XD5Xzd?ErwvEE7p`_B%qfl}17qeOX8>oVH%D@&p;KDe_t zr;3v|2r+8%o2~7uo9oxZGg>CorA9lg(Maz}kez2<>%X)kxZ}Hi$T`qHJ11SjPnqlZh$Yj{sp-|Ip9PKq~?!{I*AO10|-TH^Yx-}Z*@^k8@xlb 
z$`ex^gvE_8Dg)P{R%rD0uIqh`gvk192V_hAda#S+G`@p(Usf3+CNKkDAVND~V~SU< zo?lzNAh?(Ht&6SAKPgsb>_#A(CG&#cpBPHh+-j{|1ewLULEhq(x1*R3fWzdrTD;h` z8E?JFt5Qqp1B2#0D803Dx%GAsIMX-8=A>w-j3zfm>h$K&kKyUhAw~4$QvgpkFx&W^ z{`u|Op?*Sx`Z4&o8vua(jFN8$`jbAq*sjqvmq!`=hsTUF=i5O74P1i^VnPSMHhitG zfSuaqrr4p}D?sNqHoGVxgU#xP5Ou!{z*E@WybAfJ2`G{~68Y=ifc@z}i$u9p{(3=1 zqEjb-ppsJuu1tBw@#WsdC-yILz7>f7Tgth`Symrb?uQhwnSDw*ivXDX>wH0vQ;-qh zU*O`3*L>R-dqSq3Ph~DZee=SZx&55vd2j9(Lv>7*-|SqQf(YU%FEx)k6^c7N{fXML z+$$Q~l;mBFRrx)(JX`k=SBTXxMP_lzCxwxF`5oMaSymAp>lEe0Q%){#6?v%dfgGAI ze?%o{F35wTlEPeudPCl@=xE7fqT-oCnk2nWMDr2#QL=penSxGYZ)C!}dW42WaT--> z(Y{7!I<(8rbo8U9^ng1k+TG=86nWxFJ?2izXF+rz!BS1Tv+{{Xo?Ct|+UHUEeWJ%H zA08iXf?YySwL-q4-i=}%sZjlLDppeDu`V|$$v-Xvo7kO(;;vBHDwVD!1IJO+Q3P%em4=}1RqogSaJs(ZS<4;kl@gDF@# zkAj_K7pmZJl_B-GUH)UH*U1Fw(XcYdZkS?WDb@}&A9d-eWhVq(?RH@vPuG`MVXqN% ziqTGlQ9U3p0R7O7+h@Q88r6Eyb6vgtyI7j2;rar3FvW=`t`sL4of;^dG*CqROJ6_} z*U&oV^{#j>5g1@DM&a|Q8xNegoharn-W?9iCo|fBEO_yZL%@18lk5j}HlNJK2jjy* zJb&VHCwkLFAJ>P&!=c_2`|d3PB=hwL9bgX!lLPUg@KDK1=92kZyw^Anj_cpI#TD*N z4#i{P7&@i#Xb|tm71Tm^kA9kK&p7WskJ|QQc!sqJRN3?TbKHCFH{(Y9Ogc~(VC5hy?Qqn@ zDZeY82q)+*pkU9&v-)RLG~1(oI~z_!vvtsMbK&@uW|A0>hoTAKYNPR>`@7U}KAiJJ zEi?!0@ZD*SXg>2Yo4^jeS`&2GVCwyhfZVcz4rrbvJJ3_mfp9oFPzU8GQBbCSCZ3E& z!@)*88qI}6jTmKiWQggD!Q@ChlAi$~<;44pR1oj{zHl-eu7heB#m|oRCAX-29Wd9e zqAx1~N7U}6kEsyFG{Q;XBcA@Uo!^~BFu~DaG!zXtqRHgw>D#0I`sZvI&izF?tRq<_ zVDv!$6Fb}+?`aeFSUeR@CCRw{U+rkh(BJ(HT$GK9ywoo_9_~qQSK*(4sT?&M?~xAIW^BA)*tpSV<1E77XgH0~&(}lY3@L@F-?H;ErVZDpOg$9M0CW|Oo(2Tt z!w(?YdN~>psQy_yoI)n1v^W+|GDP6$FW8I)B-)hw>J;LRN?Fxg!<-co3d!j+Z z<$_Gz3#WVf8+J4Smq+fuw*`0z@({yr!sTy%s52Mr0ozzl1`LM>ee=MC5$xLF^|E=u za5(e8srbN-lfYa!&m0NT?I6<;Rrq%d*YE%wj`nny!^H`Q|E4XThQmk0QQ^Ol08SVc z4%d71Irnjy1fDS*J_-+{lfdWUaESyCVH9SC<8~P?kr>gg)~K>CIN-qI~tffLK#Q50fhR&YeoZ@^Jss2H0;x#GxGgM?Xc&O zP$S=eB^?bZ-8!S;XFH?e%bn36)j+8sHB3xEzN7PC~H2`;rHc&H=yh!1W)m7q5j5tJB6F zecFAbFUQ<8DI8Kfh(It+@CHq;F^D2Rc_=P`hm435rMdF1ZAtP?&PD~^29U*e(H^0F zA5^c0e)%z;n|49I%9i6@f~;hv&ocvwz@Zpk?7^_Q>`ZtY`5GGJs}u2}WAX^G7@!h) 
zC*-Ru$g6ZKXC8UYqtcS&$E#?#Am;9gi{O|*W!d}}B#&0YWRaTk>T)777Ahx?k+Tu< zyhaQ*Eia%Gwt3!hFA)?Em}>cHk9BG%Z(#??6UmeA6(N$WO7gHQK+NP^6x*Z17uS)V z76c*i1yYs>LqTqqQfqNQkr3G*NGlVpQD1a^B%}hh_epI8}$P-@pr0sqV zfSaKwtdNHly%joqZ4B^|06JB9Tu@_u7+`)}juwW< zm7oU$OtO9qumBVV0}PyyK{919z|R`^F9wT{XJgl_0O%z*Ih}$&`Smiv2PeSgj67m@ z{=7X!@BM&EQNSz{4GMUfMUa!6&rrY=E-2sw0;Xf$bO5vq3YgVR9}3tJ6tFBCQxq`! zpbQ1PhyMCO^%)tbW+TI|5YTfeOrS!~wQ<1ho~zJv13jCSSeAVk#`X6qajeK9w}cA} zEuB~fzdFv?%Dap(DSF+!-z!S=haMaVF{OF3729CmtqN*VhNXG3729SF74-0o|VeOQfyWRWgp0o z9LBOj{vkp3$Ff4U_tHO~Ry?ZG?jN3BH}Xw5tKR9K&H(Q&P2y(9GadwKJQo<|yDF$0qxc8^7<^MZka;Kq;st%d{iZz-&XgOmE% zsLCp{#B`U!7$3zp{L0$yp18sS`Q7atLQbH0$f?BBHYslXBDUdK7Mo38l&C`msG;91 z)PxGPlnR22g$i|EemL!ZSUwQUCPs1?3o^J^kRgW!89Xe=;A26C01GmNSdf9`6@r2z z+4jJ7<2=CS{OQY4vAZH*tumQbnK=QX{HkJ}C>WwlLzEv!A?ogC^$Gy)a2PygUScc26A1L7WqRw9@K;;^+H{CJGI^Vb@zSJv=VcU{v?ZAAyd7VOg#A_WcsWjQ`9suAX7ah zQ#~b9Jtb2;B~v{mljH@NB)7LsCeD&oN~6EaB}kh_`=HTh)I}psyb~Zr(m$pYUWiIs zhb;}PD zm`I#|r08=_Cp3-4D2h=K!+_EA;^I6|)0NBh8FnV$vRKGhq?K;ZWq2)seQAL>EAU+y5^FU#~VK=x(H3#U_FEzePP-5E6e;D&*< zPqH7-rza&IcM}{&^uyYpOAB|GnYoqATQOAhz_)E0{c~;8q^D>a&r9vr=HNeTwQ13w zHmKjFw`q4@)8xk@(x%A|N%S?1^~~F9nq={|nl=ZiKChU=N$^668?sJ+!S?dhki4w` zcW01ySPq6Yyx4`j`~8r2;vDOPyz|oo){n3%ka8VpPGV?(xt*L~VS|AdWGx ztYeHb>lh=>I#v>+ECvrjSjTw&!8%rxpOG4<@td!t;j-+QYahv+fqjcOS%|Vhv2V@X ze$BoubLVRIQE>~0yU98+gJiwVk1D3Q0T&d!mt;YW?c-25WO9hsY~@66mF z{O-!!Va>ifa|bp1p0=CcY<${20j!cMGARH9`C^HDu|&RDB3~@J2?P0JiF~n)eBnVI zhCYv=n~NldR-gkf;Ac@$Pj&+*u+5^MUZ-c-BL_ae?Ex6d=ao(#I?wt1P^!g+uv`qg z3%P~TLV2OGP+h1k3@p?anhS#qLm-z%7DgAw7IrO+FMx@%ey;P{05_J5hkeeGuPWYw z8+Sio^Aoz?az5qwZ+Xw>{v~gg{9CUY|99`d>imC>eO?NPtb&^0;Ex|lL?PW$tkwiU z33`&>BM}58*YtFW=D7{|-4UyvLLmoIt06x$!o3}@1BkE~)7%}8iZSW)a_Te&GjiSr zT*`5sSg~KU*hk<$&hH{BiKZkPKW*3qO*c5e&wAKH#vEygdykk-vCM6yQ?0UGaB9Zf zRp9>7*;~TY>F+7({0Kh^#xnz>Rk4Og#0sOqpvj)R5ex*CTDexL4G&jqwc5z=(D3MR zJ;?ExfYAWwLK&&v{RfW9B2X(hx5r!D3Lzv25)dHb;1Wzc8 zyH9hZI~437F$1fbs)CEAzsWqc<);_;%|mh}69HJHSQT-}`vyQ$P>nY)9SD|!coJfg 
zNHW4Y?+Ij1;;G)}>3j9F`&6+5qouvL{6fp9TL({OZ^*Y*ox5+R@9?XMnVxR1hHmGF zZL{~w27to!+V%Khcwt z7mm~4Cydv69@O@)`$0o31$3XO&YSX5Ex^^4qWO~i+Ap4qPT|d-u zD0&R)v3zuIkxe*z)v#Pl6lQ^WhU+O&266YY<4xr0t+oeJ&8+Y%sp5*sxvebl-OCvf zVpaYc8z%EZX8brAHyCveo;Ytr1%rnUdBw=$_qlLUpHQ);jpF7B!NQwAj-tBzv`G|6 zb`=`Z+U_=FH-}9#ft(TPud=fjEP`3JY zPKS9TSWYH9Zs)iitCyqGMRtgF#j{(k`K3J51W4)rC0fl#By^P>n`wir>yY(uOo55K zWEpj$f=s=SLyNW)D)7G|T0Zwk1>C<&l+O!&-_@Jt2U?h#1>guY$j;^ZztMC7-8N2JnYajl?k)H$(gdEO<6pr>>w-R|r8sR$>QaR)J zIX>e?$ZM-Tuit52yWLNFcdgcz6O8poD-U#}-HAuMQ;qO_UQsy{c?(RhrPp?@>+NoP ztDo4g+3UAff1}y&wpP=7X^?X?`a@N;YVb-zDCs<+k4?Exz3lxpZ#X~T7tCjc4*ytu zQbb=?g5fn?`f{u&q~y4Bt`vTXg%{f|rEBl&pu49R>#NMFTdj@B(ZpNLo2`wV)U35{w%5|;&bc$uI#to!&gzJ+r+t*o#oTtU z`EuIrgqu4X{r2|8Z7QH`4F`>ds@}=3LQ;B|M*AQ=!7xg=q$kUmDsy$7h9N*c%IJE) z&c?jT>St#b8mB^Ggv;htMJKL25r{K1e@s7YC1@ep>!@a-{WkqL;(3fJ=+7^ltFYPWN+ z;{AhPlrGo*bNc-cyIURa-?0AwhyO^JA1WQbp}wu6Px*o+wECe2cyUP+!f9k;3PK9( zKvk$)zS4|&6c~xI2`F$Va4D#AWM~Xut|zhZn3t;#bjGR-SMIO8|MX(>^rCF7tw|_? zXB7r26xqm&JDq+z-Rft_&;@Zl71#SN1e%bxcj$xINX159BBr)Gy;HKcbIk!VL5glD z=I*>`o7rcmi)74I6s}xIw>q2ctyaI&HR7?iy+7|mqqm)|w%6O~TH{u`f1}ZAkUBQj zhNUNxl|uZ5PUrc}*2abr&t5FjhN@`y8vRbE(Ib(^8?CLi22wgZ{T`s8(OGZwZ={Xh z&h~bv+ed@l*4FjZh|%h=wz`yfx9T@YpoArrq_8&MhO3&lm9qO$EBxXm#=4 zn`ytj+URVj8LAmkt*l%Cg7pA#=Wl-qLt;cyEm9iSZrfo3d_)#L+*5{Qb!693rL_zF zVeE0ne*46@;y;*nI|O5AZGCca9QdrxI~vRTNs`D^pAgG)0(J&)1NEmx?@wBl&w*}= zplXl00D6uKfO*!|B?=hv>b9_jx#P;$ffafW2@Nu)bD;5Dn-=iIMGC>qAjLxp4Nsl| zxVfLS(zDw^qdG7>3pZR+sGd6UOy=2A40eX*mf!FqhK2p@{Qy0C#tIZ2Q%}C0e z2*y&5i*PLEn20DP$K9CX860DIe5?d9yaRa2hT3v;|3&w|_!qhNaoKRbB2D;B`Be@0 z*2Ga5_z6O;Xh1`Uzzn#FFz~5wKzM}JfWnF)`52rA4TTi?wkI;C{!OLDQ!Nba9f5-aB1_X0%K($C2?g1 zkU~HFo!0hty0zw9>vX$N)$G>X-06F(ovnVWz154>(q6yYx!p`RxBIuX4c+yPR==Na zdF`#W^rdEJ-GN}*?Ry{=R>0Xvx32eZc-x)r<~0y4e|x8Q!^WI$y4kszy4@7;&`bSZ zd+Yi}+V5cEJQ((6XDDH|*rHAqr5U%B%`7v7&s&wl6KnP#x1DK;G~j@lK^bijs6%-Tg?n=PvV zt{@-17e>yhhcp(G5$BEJvueuL$31YCvPbBjMvZ8PXKS=Jw@+0^hO3&+s|v(^m6f#! 
z(!2VxY*3ZqMjclnio5JYpvrzsgOnj_PR$yS0>ej!Z7Sq}4D8TqXxNGoq(So0VxsV> z$BH@)3&6j#w?AShM2=60uAq5Qr7C7@6=h$ERMJeNZ&?OS6J66ZtN{-M>Z0}&WK2#E zub}|lj^q-LQO;Kl2JQ1#D14InunOeuu~U_xZ=U z`ZVeE=aD)?|KR zMcEkUW)hjGO-$7H z5Tf^4p-xL`2wf-YQJb`#fptlpF-r5sEYP4b!3aV80 zVGWTZ6hsx24QPGP2FSx?0*EtF1%B~K5BSRuO&}C%66-+3(-c+@A$Lkn${9H=53$4S zA>cd*oEd~bCL22BVF7a^9>8$}!=eX}DE#q5KHM;l!SeMO2|Fl<*UUm-4wf)g1QrD$ zM}lHeq#$2dS)$-v8Vw`Y5;;TkOgMN(8Wl89A!NcJ7V4QBt!@EX&-dY;x;_IVuDOb2 z;0p*+(|W5~*EZ6esd=ql<|fmv-fFw8$YrLj-%L@dgn}lp1BO@HU2XN!G6fc3mNEix z;}p)o(#;)lC6FZmu(BVva|NVD$bsLey=GH$);ElEmH|?qG6qO7MOf&7Q{ruXnAce> z9PTqcl-)J`?rpg1Nc=vKkk)91gjozL0Wiot+<_Dp4?W+oBr0`h0vV7YK9NA#VsJF; zYJy&IfTG0rv(qrT%S^`Gsw=YQ`1!y`tQHL&9VQd&@v?<_7sEsn3_>uw`fmTpcaYG#m z9NX0V5V@KPl{QGXO<5rKu;*65J*&KaAvX~L^cA+k@cKj%JZu7qS%$XL1NQ6+gFP86 zDQogg&4R44lZi855GH`XNP-aTV0o|w992gmwL;)e#lh;9p!`7LKwY#$K!FA68JO@! z%q!w;47FkyLZ3zfPC4u!w70k-v~LX@buq~9oO%p{>_p07gaQX|e}RpS?jhS03|?z# z0RgKZ81y=b)|iEC8|Ar~7_f!knzwY-!KSg$fT4|~kIaW$2R>jogIXGyD!|s_>S4@V zVnQFib0K34hHQ9EIG8BUIL4b($BlzIGl4$OLKV5R8`P7U-9iMq?$D^)YbtawN`?ny zy%W)VvyISp3HrQ$BS2R6(7S=Y*+uh}IEE+aFs(gWW)rkwW9A%iZAzJ*SI-U#V9O(+ zlNyw~3g4YkQ&9ht(+ zv@kQcy*!v1^bZpTmQKE8QYcX`gqJhn4~+|xb_e_`y4PO&dyy<~3vIsMi-_v)ti z9k?}r#NT3G5nlL7@naeNOv7hS+aMM@in_D``V@E+fNcOkXm}t--cEQaX?FqnMF{^s zZdjDC;o+{NO~60j?tj3Vx&F0wAH*-+SThvMY~K*@nwi+Ww4bAEW1|BHUa#Lo68g9cOg=K*(mRFSpWL5}BrVqrKj5Zlvpdy#w>SpH1?<;gBUw1Tte96w5AbkvXTt z7%8|Fu!~{nQ7L7`jIt$wqChc^>^;1Puj4k~+TP)yY z{1#Qbps$j6GA_G<)+}_(WY4hx2=enfJ6(_#qLy`L=FjKvIy7`-UlY{YPGbaM&?;;l znh;aZj!i^FYHeYkJ>mVSYB+xfYxM`p313&A zfv#Yit;2^l0YV0H4&sPKWy#~AOm{FL)37z!3^asc9|l`Q6048~RTXqhc(7e088Ci; z0PX-Ri3k~nW-W{nL=}_>s-%!q!6ccgG3w-XG$s-s5GQIxA*GlEGnJJ(QAIw=`*#!=4{1^3M4XhG0Lhl}||qia{C z@5dtqF2f9;7)%WSLX3?bYAmT+?U$jIhtUekh3#6KCAc=Qy_28B&v*hkc_|8>KpQia z$EKf|sM*7EE%SaCOp4W)q$M-(&J$N+iD!w&)FL5F=H6Pa&8rCe9ZzBaND?SeHDWn+ zJp;|Nka;q3L2%f~5{@7oU>9a5;eK)v)e#Jm#6zePF90)}w4`SGaZ42eB+E7M3=!yg z^)NIatm-fUQ4*_Z+=XrVQ8cT|03A50kE1i_0GX($zJhThE%Q-7AxjmOwuqogG5!_& 
zL8TaR6B?sL31Sf0dlL<$DN2jY6vGGa5OxGuMHTN*BYd)oY`bf4>Ee*CoA!G7k}9Tl zpSvxD32Ef{g8jV2BkV)4Wjz_6(dR$xex84hdw(k$&g-IOzAK&Z`|_hY`e*AeqXYR2 zgY522JqIWOJ!tFJWW~Ay5aLl_aOB8CSfD}zh(sJ9jZ8`CzTL(-tjij$L#z2?0S13} z4hdtW<)8r$06WkT0#+e1oXq(1UbwxTzNfXB7Q-`!1X1q9%B&#?&r?9&r+h38#Wy=^ zI~(a_>6Gep+t+abxzTK|^#W_#<@Q!)(`~lB;-8;5aF ziQ`$-98QniN*D%F9L}nZq4Z_XDRln0`W8A~JhAg$-G%%98P+Gy;NDeRkIUdw-ZaI*1X2U>#2dO}%`-=pD#ZT)n%<$7f>04n( zh(_Uhb5A60+1D8iaW-|ztqxmE#7n>$k3~2Qz`=!AzW{buzBZBeNgx&Q)weFl@QJ&$ zQMLxUmba$S8Z;U7M;mz_Ys7#i993_LfP2)`;8=$!-gKbR6zo;BX8VvIC1T7zW(t4< zod6Rua^iWFPUu@}?i>6sxc5;w?Y|`b@N4p`4t5cbMQoB=+Cv^npeHWcWazUfY-<$I zn8Aub5aN%`36oeMwp|5#%6w!odEQ(P{D@;^0a@$QE|}2_^dgL+C`_UxtVOkOGMcPJ z9LEXx2{PPlK<8X=Xdr+|0uqF8j3miaDyl-RcWi)G4tSXP;^F0wC<8wKnC>`FkT3nR zXt{*yz1Z!S7}P1)2$2VPz6d&sDBz=ro;%|gQP2w@$^CE)xbXNXi!+!8GV;rF>f*%z zVQY){I6cC0BH9l-_Za!Ku_&>tTeH)WjS-2tF$rsbz^O4@M+Ey^k@X5n5J*H|z*-H$ zIom+)(5_FCW>tks;=3nk1w|HV2@PL#{+K!I&AhdYc_?=Y(W@-*f)8*HYq;dkC5*Y881R@y_Z(u?`$lzGZ{3cTVSo zkgVQnDL+<#lRvZM zT6ywbO&y~F-{bqVS$G^7=pENbU9#`?`hxA@NGzTz8p!VuBoF)Wb!h7J1+2+k>9i2S zNGt)8sUf@p`>WDxQsUqR^#MnJ_?_+_@lSB?3m{?N7k>D!;(HQ6MmPwvNao+Y;$Vev zFt(%(E>-|vh!E{70AB?V>ci_68UR&1Z1Kp&}~y)7<-eu$6xYTltB2MZw}Cwom8`QqY`^* zuXm5y>y2t}sVciPc-V`WJ4O6bi_6S6qshieYCt%%H@J2w}+-D1G$V4k=z(&;Wd7 zUu^e;5Fs3}#dCoYblioC2qnI#hqiAV_b6#jt9J_hu7=x8y?^8l=Re`Q{waS|;>$rF z-_g?-8g>?}Z`Jx+$wQaPYQ(E25UoIBGNGW&kv Y6g;GcmMT7ITQJ2M#o-|d(;8*}3l!7lRcAJvoBBPY_pzy<18EmG(1V3%!J8IVv;%M45Am1 zO~r*p5Jg#BK*X1qRTOzu5JeHi>&68yB7(>j1@zT><$&k?{;Ip5=Se0rbG-NJAMfWr zGpVksuCA)CuKLw)FT=RzPfdT;d~^5ThAyQ%(@>VmJH>AA=yCr-pZkO6Q1mV{GRWu8 zFcg7h*@oo?ivP+Cl*!eOlRC#NE?1YI!r@Hj$=1Qc)r`(q-N73@FWISagjpwEl#MKjou0&pQn}4V; zx-;!ZG9z>+yz27g%*@1mF1bF%j_9Vob7xb0E6;20^pEyM_xO4W%6F2_B`pOiSX`@| zuTCy9sqAx=t#g_4)upA%d6H@WuX3f1c#0S|rY%~$oTofL(`Oj&#->cpt2~@$E93LK zzEa6)ZSfA@?r-(EuQr3|C(R*xE7ME5%sgd=88$)E{rcCln6z1WgN5z&h1H82m$qAX zq{!Q@ZeFacS&uHS1pFm~+FXy;ilp7U%C%tyNuO*hG>WFoKE1qcH#9;sRH0Rr-qQQG z2k8j0-fYWmu{)I0HQ?QJ#9uw`{=E4{H~N_OtIDuV$I#P;N(EHN8(TJirho}$2Z6FI 
zfQA=j4F1bFkaSEIs&XqE8`X=;uE37vY8YP@7B6pC-Recjvd&c>cOR`TZLeSdqg>^^ zlWcP@07Bnh>x-R^u~jd{&LmsqwNJpI74*HNL3Em(=*O8n3GH z6*V!gCMMLxq?(vg6Vqy9MorABi8(b5$JOM7nw(UV zQ)+TrP0pyvSv5JQCd+DaUQJfi))%2X2F01KzHC<8D zi)wmFO)snIs+wL=GvjJzLd{I7nJG0ht!8G_%&eN3Q!`~XGp}YUYGzT*EUB4gHB(hH zD{6LJ%}%J;tY+ucY(>p3s@WwqyR2rbYIa4xTvg>2H9xNAC)E6;nx9hh(`tT3&Cja&IW=Ea^Yd!HqUIOX z{F0hqR`XRgzoIJRsxqM}ld3YMD$}YmqbjqiGN&qKRhd_nimEKC%95%qt4dW>R@CCS zTAWaelWK8FEl#V&8MQd87U$GrSuM`1#fn;7REtY$aak=^)#8d;8dpmbYH3m}O{t}6 zwKSuaX4TT1S}Lohd9_qgON(l0Ni8j_rK(z5QOo0Mc|t8us^uxQJgt^z)bgxao>R+Z zwLGtuD{6UBEib9%Wwl&Y%PXonuBsENI;pBtsyeNzGpahPs&lGZR@He`t*GjvsxGPO zvZ_{9bw#a=tCb10GO1Rk)XKD4nNcgVYGqEXl-0_-TB)d&MYXb|R+iODRjsVBPBpMn zy@C$tpJ(Na6J}&Zb_8^c1m;?U_C6<zl-~~4f5c=Or%0(@1F=lqjlm$ zTVy*oPE@4G7)8zwMwT;?iLB7Fr=3~P9ErPq16FSOx!bxFe#-zi6Z#CU!f17fi zwcmT8`DW99i#qOpULA|xV7*swKc;174T^d|PR}rk0+2OU+#N>cz9$=d25r$K5U6T%HY1*SQdrl?HWa&=;W_M=Rxw+Yy^Ewx(k@1Nat0HDal&)b9{O}$V{uuX9?AH_i*5UeuA7Vkw$!srkzp~#x%4KDk+Q;JA zKIu&J3KV*zKGkKGf-M<}*((tS>YJ)z@++lm%N=wMwtFYm{r}tS3qEP)qR*P=v%kYj zGJW}ao=kVp4iF=m3mIcqVoYN2B&M*VXRu{5=@mGHp&Tpo(Br4p!H6E;&MZSU zJX&2?1$kIKTix1rsvGM|=bCi5u)TVLtjdLrg$rAy6=wOlh4sbff<`PPlRjKs-dcbb zsccu5-7Qd%>SnOb1dc2Bhzc5(e9x<0cm>CEGJ@am&R}=E0n?&jlr#i%!LSM-Icc^k zDVbBch*gRwRa0OT?2>%{f>{bw(FIa9z`M}M+byA~1On*i?2>Y2-~?PVMH#yf*U+@c zG<@YpR_Rry{d1=4yu%#y{=j^f<-fo=?*6*-Kg{3*UKoAEd%w#5OUBNB3;5Wnakl~a zB+PQAJXN>Jd$dX;Ehe9XP8&v_O%DPAL`-eN=$4>Y0$+kbnS}d2$_)dXj3EJH?LId` zhq+(OSO6qo5Mgu1By0k1`G%MA2*D`RksB{mCM5Fg^e}l-^I%dYpYw^+2zX@ zD;HLm7FJfPYs-7iwg#xFXlr#jorXS9-A3UgRE=c~? z<*n`Nh4}FrICpi5=tjzZIiHcLYM?3Y@L9hlZ2xaLDCHCH%z-? zmcSkwGX0zESwgL&_N6RMgQ57Wxgq4(PYPeNas#%f{ zv2sr!a0v+3B0Y5jqu3^|&Kn9?YZK5W!)6S04q{6IhsK8>~LBnyl-l9VrdbfJ_P8D8av56M)wE zL@vn$c2H@xIZ1AT@}-sLle}#J&elqG(@V-m$yGPg94F{i9mGV9wG=HK@Gfa=j;Woo6zC!dKYx*)%>+ML z!vk1Ru5w$g?w6SMo6UCTm(4es-hWXqxBR~Z5qPsZ8T=6d;4fSi{iXXQmHk-8&HaAH z&;M!0HX^gj=ww$1!Lymm(j8@j>_KA088%hZP20%Wj$FEJS1vh>G zzNW~?6oQtpC2GmG*afRpbPI02CG3bgEYG(xLQpAJBH#mbUdKET3jUj5%UtC){g%L? 
z7#Jp@2j<{7tRlV{3oK)jpq$C#l$4p|T8<+LssRbodA@QeC%8zGq`V*_zOucv0Bx~W zom&MrU`@LBZEQ0~WkFh%<>dl@P))0A%gJJJV?3leK}Q;Iwe69&aN*K+in<=>w3i8&FTeMvp^5I?C3xt$y;4n zh+o6rTE9@;1sxnsmZl5(D%a~G9Q955slyJd#kblr2AHi;68KTaLT?gOf;Rc z*Yo9!5`>|>wbo@;7H<+j=cqZ8H&%1388v6YJ+VWl*sKJn=x(pSN&G-456tQZX61p| z>EhSRHObV*7-Vbgas zpmc$7>=~YrKY$g(=#lT!C!bBA_hS_zHd0-xJ4*=z|#U{hrKQwIEVTS-6yEGu$Ix zuIYP+off0i-rm{T-qKziDU9StvLm^X;#grUKb9TKjR6sD{+j|C<-b5k{&R8p4=jV) zqPP6PP|}o_FqV89sAh2CzL}(WOlmN=cv2l4C;kK-p&AG`HeSZg5(3VqW0Is2fG<#$ ziUGp3!QLWSz+~wVNmnO<=Tt8pWtv(%29L*N9(BP&};Xd;T$W0ERNu0`DeNmpi zNLm_*Z_!pEsaoDwR~=YVjZ{GyUmo^dkL|$CoLJfh^||x0^Gzoj3CahBo@~1^>kc7? ztUDjM%i)nbmJ`_T#_YG9lxm@(t6q5SW9JvD-+8IB7S=PiWT&tyYiE5RucrF;qGywx zqhU9Qmd;g|&M$1OZwiwmW_CcrE??RJxueHB_Xo2Yb5O$S;~T4>bo6A)URhmR%b%^n zHmP-Q3n1>TZY>CiwpO>cEDC07-F79mx*Wxq_*Hb?+O~Cmb=f+CVa1N?ekSD zkQ3Cf1rCxCK=~9+J1eAC2pv>}WN?~6S%gT?Ae$Bp6RvhaQC9WaBSbBtR=?dx6lASn z_uCy*0J2ta`g#6n?fJc-f`x@n_;eQQ#Y3IRw-J*|=N4AdG3l`#2Z3i%nl4e!GH5DwS{O})A@s09~-THp2b zU2lLsN@00p!uW1H!h{3%ICqI~moU96gU-+o*vIuw z<xu`Pghk5pFW{ZQ}pU;;o`%5?Yh zE*W7#g2VUQ8Hd?pBIF1Riprg`NPo;iPzsqZT~064f!i=8W%KL3>eGefX&B&pKFtnj z3=G%5D1C;B-dg)KZG-lJ1JVPF9D&a+{y5?u!I6r#-Qnz${=%ki;GZ*tgggcvhdKGP zQZN}G&6PJsa&S(VeL<9A=-kMy4_1_|_egf?_{S`D3*m{@$j)U51Hsq^?E}oWK!ihw zVs`lpO(_*P^f$_70*`ALL_t_I><5CV6!~FPh)Q8dP;AcndB~V-=$~$dOIn|gPUtnH zJf}B^3T6<>TDH$ZGxA7Oy8bh?YTJ)qtvnwQ8Co_BrKa8O>34f2x6`+|qB(~*l~=dl zM5*{qudL{mW|$4#eAvQTF^v6l4P$KA2i7dx%vJ=7Ct3&X87D&AXEJUoXMuKhWKk4= z?^AZ2BV>4zU>Np?;DTnuD47oU1?93<*>6EKsH8W$3V`m0^keiFuAB{e4g^7L|lp&Zzg&Vg(pbW3I4H+1;8r-z+~1GyLV z+joVonF~)0*LsU2g<|%p;ixHSD(UjY=CG;7d6+7)g>tr~R?99p)z&bAdAe1sk7A}C zje?AH_UBC9+YG(U23g%(FIS|uk7bLYM_-+n^9{*~2JDxbGhf^cGYwT_oW(F71~LgE#*^7#?pleA zy;lfD7-EC8gCruRL@1=UDX?7=_WxIE%sDXkSirGizu1hNA2A>BK5zcA>Hh@mxeu~i ze$?`#Pgs9#X5Zr=5=9=`WVApav8N`Y4J~Jd8w!q$z)3SOTvszInn8WZ^laf-_%;L# z3=E)!kr$Pf06~apL0xz#yX56L(=EtzW|ZdvMnE1gn+sBj0vZ^qNV`Ec%tqOKHe1YP zwU`3*7qmHLR0xp}Gb$ujV^j#14Yn!J3#=DZSFVB##~3n2iGToGCo5-nB9R(TT&$eE 
z5*74CuvJ;TxYd+KQX+k$TH_WVG22|fyimQcv3+?bZg^ET0l1(KE~p<#MZQQCQqnyp z%`9yBoM_;NYlENRJ_CGyw$oQIPwWU-ZFpVEp;Wym~WKz;ThJ2P7} zi7by6go2HEbCQ&2U>XZq=I7%o%JAZLRsq+TD!^}qcUP;#)^u+z$!w6jBKLzsEo?dq z?vV5(eZ_t)D{Vq#+sqcM1$?zz?6)C%HtV9GktpM%nUqe2jmKE-qCQ+RAsslTEI7Bjda z;k_+yRvufZtgYG7bUM|pa+O(kt>1O1n!VWNfx7F?@&C2oab}$Z+>r7y+-PhQFfPxt4JOaS7*}w~W{sA(!vact zGT`#FzamzH^Crk`k|PyUR5~K-&-8UZ5JjL_tdikpbMkcUJ^QI7BM`Y<7$*)$ExZy5bn0Sd$_!l#2af=}0Mz(g#ed1Zk$lq?9Sg4G7c&T0n`S{>j57P66|)n%Bv zM=H}j0nppOkI)+K=|*(ufX3KfcgEPaug~qZd+c7P$LV!@++MH8>-Bs5-b_!XH|Pm^ z!yYhHfFg`B&0P~=1fywY%^V|OiyoK5K!C;p-?Aps64(QQZ3cM(n~nrp7$K-I4-w=f z&_LCeSY88MQM!e)L(B;D_l>N|$|a|Aasd;P3yfg7ayLm6j$N6~y>C&9^ znZ6QAgL0MXsn)wNXVBBK+F>+5UHIu=VM(z}JmJSnszGd(+<+fhL|?vK>$>l;*=WKma=H8q(;W~aTlP&sZEh3d5DS$#t?q}y?$l8W{B z>UX83ctGSXtu@@D#fDo`lMRX4>9E$orDPDK0$oWN0kH^et6E@FQ!Dv*)c3Bl<5*S( zD-6`0Kb=7I&nko8z#m|qg(kvJWEFx#bpuZlk#Dz?MarvnDFhgSo(NvY8YDADFnb5U zBT)z}4%9$6Y?=DrBS;{+UIXaUva-QN&$uY~HK={T>k=^w;J07zIDKpnMMqdP6Gp!s zpw0?cO-;bA5s+<;_@dvcvD9vmS!ue41~>FTu--FBhUs+wp^z>MlZm1!3V;G3%pbuN zM{Wnc+7rOoz?vuw+WP~*$r<-9xGqrFfi*P-DxjohKg(3U9IxtwaTZjRk>&}4mDQJo}hmXl}{#FwM(s$ z7t|V=xmF{h7Bk$~NMBM_LnC+AYQIV(vgM=jL2fr7#gi{`&#q=~_l<^6B04zx$7^FV zfuc(CV+Nwx4d6~c(lD|PnQ|J}kZ(P`QxQ?x0aN-<$@ATTHP*!xFx?lVIkmcBDmzD* z9G>(V0!mo7(;VtH}E|VNCs5Fj-H~Q7JiOh^cx19aW z)e1xhu2P$GWo>p+8?wf|Y7+&dgIB4|y0SJWsmRY2%sV%s&Hqq#p+Je1nD^c6nRceFDwR_xxq&66Hd-bh_+QyLp2mpqy8o*Dsgewl( zuT0nm0(@I3Gr6;j4R13kBa_BdD76?HV4OM*r3Q?{@Z&LzVl} z(9VB4M5oE@5e0B|5>wRNXxZ0_4CopWr!K-S9k!xVL#tkV2~maw{JH_>&Yay`zqGNizOm)S z8bRw(YSk!MXrY!!DyiS4w))*Gp1p8st)uoViEY%BmtgE{B1>MYKetg?6?2z*ty`BC z8xt3bwMx(tS`{g=MFm~EYusLq<>shwc}blq{+itq5jl3xo7I)o$Kls3@^|4s7NM@*7xQPsf)QamB6>w67MR6I^Y$Zb zsuF2mNf;&+Y(}gnkDmmT`)Z$;f#*yVyiG1%B-4d-J>kQy~-dUqq3N zMoIW4WOW{7t3XDJ#LvTyIrO7UHd8hc9oAn8nXyyMX3l`FWIe{(je=Z2=fJa2(mI`o z@sqh#q`UeTdf-|PU1?#6MB_8dUyeVq6f5voEe8o{_pMP3ba6TueJhxj*WPXs2t?2yf}t+p+;F+~P2&qh#={0mOO$u@Xk zp?j*WW!E4!F@getW%}91NB5;01lTYfhfb^KrZO+V4d;E1|IV<_{e$q; 
zQSkY;j_8TD*E+C(`gs=E&!5!AE3q?>)O!Rlk1axi37Nx_G8M}*^=2g)lle9$!L$V9 z63obSo06cQc+n<@peR8uL(nQgb3l-npd}@Oiaq)r984_K)#8hB@EEmU&D+dc!2eMmrC8PIr#?9qk((Iy^Kz zG(LQEcx-55_{f3LLx&Gd9~wV=^zhiBiNm8u4j-95GJf>v(aF&x<1-VD7X4apOH?up zn0jnT5{pB&K%FfiD_dXUu(24bVo8BO7iTMz@>!LrWiysc6Fi?sOlxV>2wG?Gry++jiA$6Y91} zb=yPkgL-SrJ&aj=>}8|6eA|TV%C)VyHQkdJF05W$*f^iK3TWGry|sEa$#2}V!{tjC zE?kZ+m>PC&_w0o1&~jlb&T`QbH;hvq0o1cuEpWM;>WMlJ_4b4x+&Fjn6o1cuEpOoh7 z7>avVn+VEb=@s|={5$d=IrobDUbp?r z&YSN&c1v5=JD#!j@Za3>FVE}z>J!Jl7JO#lw>}uC%5(m$?d)pyx=Cr{)t@{3oCm)1 zjnDb+cl^rKtC`SwS?_>S*(zTH0j zJ?hT4y!&fE9K3SB?~EM%XyIwU^@ZpC@i&g&px*nmaQS(E^@opz@38j2&H3P0|M&$@ z|JZNzf9L~&`mp`$DW~VUJxU75mivEx|7QehBM{G`ru+Z)Q{VZ`;8kyZ_opV#ee%yn zMvk8D`s$mXeg93L`f~K;AAHZ3-v5WM`DcIrqyF1YIdA{nw(ont|Kr`yy7i02@THe> zzxA_|(=R{ww&Q1RzwuSS^v-Ag^Y@(@edjNH{cW$g`TkRH-1luSKlEe&?M-d@j(=F1 z`_Il3x4z*|Kh*M(KY7zVAA8x0pLowF-~E}-{_H)c-u+9r9Qk0=pM392-?njc{xvV! z|Ap`Qo1b)_c+1Vd^wD=_-}r*pL~s4h_doe3FZr&w|HHuF{^(o(^9z2m@@tN zxhK`vK2SLHmmmIRT(% z|B+99XvKY2?~`BL_q<15zxA$(`}@CgW5=uSD}La;L!Y?)*UI1g>N)?0F!#}qb-wyN zoQO#b1w{$JnxqmOjm_wlta zKGywPZ~F4%&wbzPrushS{^G2Eu<%`PD}3}#Z~WGO`F{pp`KAv)^Yw2r-~CSmx82m% z{K}?hz3ragG@k#&Km3nRUmAPU#an;i zzkPY}_?h><-~Q+e2k-Hp`+7C>6U&=F{H^{sf8XC+p85HQobKm;=B@8}+5Uq+^Nnvh z?&6t`ztaEO(9!%~eKmjLi!WXJHUAIXmu`Oilbzr5+c&)9Xa4NS&(ECx?33rtf9t|O zo%-V+di&5jzwkXh-}=HGq z)RJIejY%xO8k08K(hwhR5r!UOR&>_jJ%Yj0t4JZ3f&N3KG^5$vR0#>4DLa}?O%EX0 zyo~e-dr=-D2Z?l=Bkbb%3Hsz}bJtqZ)Ap{lq^~VrYsrkZc=g>Pygc2ff)cK$V@vbp zKnc^bRv0aVB1mhhmHP;U=}VL9Yb@NX`bpBXOVgorrlbbq9Iu`;7(Wu* zCAFxb#$>A+ZcMhR{f)_XHA1r4E%qm!8Bz!0`wSaz)}ao@$w-f5r#d7_TMt{8GXB+0 z$ymgNv3k}G_Ok+~`k?m#^QTPzPt|evwN@#3yVV?i2utB#v%Wj~tG1o{bsLRCglIWP zbTqC+8xopL3(ds9(n8OXxhsaj4wF1W+1nw+aE7JeiAtsEixQTZ$uqCbCe#KIeMcBl zS^-jU&xLMxT^D0aICf}8CCog15u-$8<~lA>;r$_E7xgJ9bkQXgi%Cz%wMv2zC}Jh4 zbD(BUY9|SG<3<00D#pDa`*r=QMB~s)be?LuPhyCN@*rlrOWWF*WKZ`9Xc3AZ18aTp zxvA!0EME8Vhqka-SvqgQSkpq{2Gys3KZ`!p4nqfqm<*I+eP`iHVdKp_K&7oH%gcZd zBWyI7HfZFOQ3lFkktc8fwwj584YuY2xpF-r%T#1-In)h9bo6)X3WXS3w7|z={6$4) 
zR%T#qt)Jn8VW|hriHfuqLCb6~-eRTmuci#!YfmX>rpJ5mdHyHNKKJj;Bf;x&1M+5d zU-oNCe1v#t!ZP6?;!{0XOw)N8;hEU*Oi(4or$|Oyyke?~0*kivf|M2y9851X@3h~I z#7@gOB|b6gA76}FV?P-kaBwlz*}8;<==y~VtJ`oy*Voq1;txnv-?poK>GTWlVbT*d zx^3CZkf(-=>>X*)9CiBkF4B+-t&QzktXnGDo8q;F+ZOf@MGX2y-0GbVz zM+B6V5gxkM;1uzCi+riKH~PbY)#Y1JhcjIA=I`}iW*!fI+jOH(n3LHj+1*5^iKoSL z0DLa}Q3!<3iYhYv9Fffn!-Q`KzXINz=)|yu6Fo=k;mx&JqcNlXX)IjYTnn^UkBeKC zm1;{p+CVuchEr~A5M}X5!=UUHl{dcDPj+<70Co4_FejS1SdC)*O+xSB$pFj#?Z6sM z#>z0xrPCSUCZZ4rQ@dQJf1oeAU5|(002(Nn&FC3Jx@y-xqP#5bOdQdE+`3lZxW@?d z20!SfEdjU@P(@bLn*BZP&TGwQd0#i*Z~AYwj=Nt1az6p6d=V1s7u&z1vY&8O?hp6~ zE$Cyl@};zTp&$-gK4lqAKmiNi9l{?g&|z^>TC5&Db( z%TgRRn0A5jF)=$K%%L`HpsJ`Co09 z$ZcSX37{7hGvUH%ppOC>Nml|m=V-GN6NZ^6UTMjXNRxCbHC23ib?L$Cw$HNG+T%I` z;NRK-;2o|09lHa=$a~8ZgTdA1$HU}`i!GsS6Sm|ws$V4* z->HlxvxN``_dDwswLZDt4Y1r>T0r*l@2D}v+_o4dnMHhc>v(N3Bs%~WxW;h}IAj`< zspADKnSuC5U#h~HLif{1Td2qvqirBN8;6GaY#y<0X{f~sqKeLjVrwK;T9`9V0Iu}B zWA~|x5&AU-A&v`Bzhns-W)bBiG1L~58g1Vt-7*GH6hezIYn)MKpOuL~L%)ue2Z>T> zey}bDKufd?gC8l0_V|Yl)*Nh^li-lef5!bfg_dW!NW#xQy_i(ka{ zoj{DnR2ea^4Cag6hUEwzRj}H(>>of<^P}cD?_1_uP5);w>iA8JI({FYD}M?m^(pI% zX7)plo%;=pJDNq=AISpfw}-N1&A8G@ZUzzg}`hl)u5$ zH1=1&RX6O&>$N7;9%FgtW&Y#ym$mpvx&-J*y!Q#2;kWfgbLrk^0htZE3MS>CDS2@_ zZTTwKc&qzj%@a(FBbKc0Uo`FCF*}{VGT&r+-)p_j@_*bt9()=-jL+c(=JW3FTKRV% zD2~i-P{l5AK7k}!_gb(=Z4pb46a<*|{(>JCSy_>>PrSe2zlhQC#0QKdniv!uzDRK& ze24{wPMQ?=jdm&S8*Nh1H#<#eNV7K}1TU3!tDGiK9p#}P4@hWfCDfJLDzEaWrWNcK z!eTp)Vw7&$Rz2wN<7^Y17vE|w;%=!A6O?v0mqpJ5?=YoY(QR{EvrX9+H;S?m*et}3 z5Ks6xkiAe=;}w2lK!OwZNj75xZ>HwLW&ct*Jkqn4XDc%w7%!;@f+9lhKI9F6S{< zS#D`|4JSx5c3qinW(NydlU*68y$GBO4FP2Dp@W|nJLlon#R||%6MrwEI~v*`1UQQE zpa!+N22=(jcay?eK>)YN$O>7drQJ7BDAO25TdDp;V2uVso;uX%fE&;XaMTyYOyQg5 zFtE}Ui(8qD#yL8qlb{LWga-M)RnGL;dCa}U3cn2HAdgw>YTiuB>b~fX5qD2%7h<~|mU)nY+Yn%3ay?zyu3U+NHdSzE zwt|P%i}3swv@70(GuV+hFWY%jfO!MHnz%2sQNgQRZG(!P)Lhjdxp()Du&X;en2}Gx zL0RhQCZ$q^PZ{!u>5z6PCcewkc>KbRyG?$5O^l3mNlgXEY)$+j;sFP`RYtU{F|n(+ z#U1y=?QB96x5WF`uHugNoVdeFPGVV;IAr`cpy#``u-(b)n1!<8wX=8>-UC=qn#-)L 
z9&4YT)%`j&iijQBMWaix(G=l`~;aN(fm+oj~07y{GzGRvg)%tLwuQ( zt+G9jkFt3cfro?Nt1t%vj6R_AKtfu3Q9j7e;_PT>9Li(pFd4WZDvk%7QE{mN zI1m%n@0L!{X-|rX-?S(atwCHtgdw$symc7pu7vjO$-9T9b0^OO>qR9_r34ZVmq`vD z)*Up<5lt&PoZilYpg4g$hoEOpmHO@OFpgX0Je})95|NP}pbB`9k1GIG^a1tOzJD-& zcdC+)QTBMvg6JPoQHYIclhX zmvEjf`~Cq;U5lL{DB{qjNn;l|Clw%96XnO3pUe<>H6EiNr zo{TUjY(?X4A$}i};2xR$_lh#({UXG?PlBh(_j*8Zr%6c`C1}GjtkLShzL}L9w~9#e zcCoa%OJa9Q>^7<8jx0g9$R6j>aX0(=Kpo8fzJ9{7Ubs=J>x6{2Op|Zr?-WYRYQF6j zxwITQESIU=?S@@w#t{QT!cq&GrCbj*!KdarHCF~}xj_&B&%SXWE_8E?zB(f}v-3EI zj|rLO9J!fFedp*+RO-7!{cKj=!OkuQX)1S@;hi+^!WqLUY2R-?D((9%qtd?Lb(G0v z`%}4l}cZ?fh*J;d#!(nrR@X#5m4M!&?32(ZUcwe!n*jpSb9_kqA zZnIjg7HhEBH`FuKJ2W&jFfusQeQ^B9+QS2$IA#CL{MVLXn3 zu`)RY3*}v9_!Mjc84S7*N_}9o!XwZr5I^!X8b&vS=^kTawC~|&u-2@gh`Q1AHWf*? zq#u%nJ4LGDS`ALbe&O$`Ubar+{UyfWGVz7XTpW93b9K9VswyG4X^Bzj~S2x9BM*Q3oLT(bVqyL)FLtG!$5+b{3RG%&F#X(6}c;ZlsSL{r?XTsRhn&AQ%2$`+>|{ z^LIU#92n6xf7jENt(oRrrIwl?;41e8RlL>@l=7NCFu!a5M5cGGpNQzL^@|mhu3iFD zm}~wpe7WWi3(IT$1Z}<6@3UVP-wjVm$>X3OPOy6_a{w_RE~k-C!Y@1Kpk+7!Dxk)F zJgx>XO)1#JuIxaiilMI{3PPP0?tm2q`g>Hxi7<^Ol8i}VaI?{HSjDBQpwrTsAO2;B<8(9<1(n*~K1hT6lW zbR^;kG*@>dbhAzv!Cw{^v((y-Zy_w?lvE-dTVe7F&laj3Lw)#9JnaizN{u#RVK?WN zUArG*6Ef5p=IUpJDAvY-?Su8ogm7!935FIm$$NPatWFL-wY@LqMmzmnsYN(a^5F;1 z@S+)BG~z|G0Z%blIMU3!@sh!;t9lg|Al|-*Qqmcl7}g%5Hz})rOU(ge1?|jAjSl z2eCB%P;D7aA;^W8KJe4p#Rz`f3ycVE+TcGFVMw5b>NusqmjmOm z6XT7hl$@&<2Z$Fcqf_(QD zGc44EuB}l4JIfAcDhvb`ml@$PD{5u9!yq>rv;?g(+~46mg9S!{$((VXgF{mmSWDQ- zfS1C2)S2xpx8^!e)SX-m*l#eiooB|GY;8daZY!1 zq}Lw4Py!rhHu*s$%P)Y-f=rR+hs|emi?bS*gv%Os-a4MX74!qfPkmmF3Y*eLA6w%@{P=F{S1t*JRhmt$#mG6U6AZ9?J~2mw9f8U zLd_p9p;pXi$1Cc-K9nnDW{G|UJPvpv1(;wKPW%N3pPUw-hJ1a#IZ7vdZL{1{W4%%f;s+>%kX_{g9s&lI2mA1H%Yhzv3ZX9jD-Y2MG9NxWT%G!7hpp41 z@=vjEIN+zUlN>m=yBnMa5O+jg$D(J$ar?WQYYR9SjpmbH;TGpm(yta1_an%Yi_({- z`Z#G;lB_-L3KA!>j8PNE8zJfj2BxnYgRLYz(pW!=1}xqBX!hx}xO!wD56BoQ%s7qP z^(h9^9-5tr`!<8>i~GbwkZ58SOXY*bBWQ>kbB4L z*}v~Cm{V6>u%dWU(h`E97o^clAsPdA=nJkwY}&;$1eTVe?_TJD5z2;%6n6QTx2 
zh_btZ12WvO#at|6fDJU?5E|lYKtg=x!vcb*l}C9d3onaPJdKBW$}k?$V#xcYu<ONwe7GP{gu)Q=;#H;iNGF&oB1IG%uY#epz}aZ6FcdzvI?v`JipTqsHWD87){ zvd|1rMrT_);qea9OF!8qcTaaqJuUmB^`Raqd020o zYFP5WeZPd?F(To!bo|*mK5;;l!lw_2F8IuW!-SJMK6T&-@%!}Ig&i$N=}-TtJUghv z0cp{^;qZ)M-aB*HP0}?3?fGGn}q5R-Uu(Cc5q%#a*xA z%-_h%IJZnn&OH+ne?Yu#I%_bC4Cg|D9QVWxoIO>!onY~JpW!Y^Z24w_>MbN!B=4*g zIVZvD36jq}NpPMbUe7o|dAbbq?bgz`;g5}R5|4l9sRH=LP1AkAf!hwBHja(mFg8Et zkCn$VWA<1ycH`LW*u7)ZoE~xK+?}0w&rgiajm?ZTkKGfEoxbz7L~4cHOLO?)Itwoa zW{X@B!4`~#e zL_B84HTngSd!fq4KXw|w$W0c$JR({huvQvx!eA31D|bPI;v(?m@MBF%Zff*MIwNJI zA==l`bhC-A^S3D{Kyn0Q&Zf(6)E662xdraVoK+~NX5PFEB^qT{jOQ$GVcPi`ei zl(Ti$!DxMWPlD?4T1l^^pZadx2T4kWTuip$WDJvQJfL{`h=&SZ6PSQie{tQYmJRYS z7A9I9-WMN!;+8vpclnmEX7xAK_0gK#l}3dXDp6h#?x&6>S8`-jxGAz#dN1!lMS7TZ z_mj4z|4Dm}0>KrrDt^^t=dJcBm^+aK_f)qlA~VdN(v; zsx!M<9;lwZv<5F^xv{F9j%34!;DK#j+SHZRjv%c$mDQ&?p^Dvv&s9#7Y$VEe`kM_C zzq6V*wRmasqGnK)mUxGS3&~3)2SMdy-c&-&FJ10P(wk{-Yj?V@dS~I$$(^ojeO%eD zUZ|{X?e=M#C3-Vxo*arAO+$Y~E2mW~^}X%GT26YV29E z(Km1w^X}DUP8Ai^=!-j#>5F1}?|8yTqEB7>f^ni=rB=3PRK*6smxQQ31rpi1d|`2Y ztwo~xWUlqi)w8P?D{HVqQNvCi(Rx`s9TW@45eLO_<5%%kyq8vf#F%buuA;QaVL95J zabL=AymI!6+5e-HSMVckTo`N@CD3PwrRV-dIw$FpK0HhqmklNlyLp)LL5At$7WPD1 zA|5+dPAwr~-^!_r<<$$Km5}&bCYt7JDZU#Irj5hsYP7w+%_zntYy8I2b^i_?ZWN?}(q7(_1d=6SCX9P!RM6H2N%2PID6+T0bMI%_92M zMd()&okc7qQS3^JcWM94f@5bPK~Z^#D;9l!PN-3GA9_XMt4J{L0%b^|L;=Q!Ic^qC zm-wcpGx)HE$|GNJYbi%sa$>{Qsd?N%LcQqUkd09fXaqO{WE&*4F%jHEM`hVN9ql^YWgu<~u9HZ$d2OS3SJ~&4 z#GDrNs`lEbGYURl=)luafH{B|QKHKcxH1goa2~BVVxX(VafSx9ORlJrXv5ptu9yvm zvLtLB_#Bbe9r1X;o!3mU;$5hSWOIHwHxC;|_()+!8|lvA6ZM!=yrWzwP8N^C6%rdf z-mrKRE#1Z0ewH@g<%k^pJ+xMOk?xBDue?p1fe9}k1a?8~gNQ;!lQ!j@33t}5so4|S ztar~_w`&{U+AXFPJwk7S4xEhr3F6gHcNPMfTTF)HDeXjX2DMIfHyXY$F2m!ZX)UUF zM%x2FrJ|>`t?fGUt#YC{QJ3}a=z3c~d9`_PqxOjJr?Ff_FuP`UHX!;71LTh7vBat_! 
zs{f6>Nu|vV28PmA`WRy%S~hy2vhnX|Zdi9vUiar%KZqJ-WxehQ#4~yN%{lx$WAw1Y z%PC5%1;aWnj79xR%@muiW|7ElWS)+X$IwT~f(_)RCAaY3#y#>~*=j2ska-dY9L63;A#$9Fgd=j=$pHzbMZI(uzPmBQZovV~B*%c}#zFkz7>6V{ zismWm)#Tyz7u9JA2H?&cgA(*$c4CBBVi;_ohJhi5q1xIz*c~P45X{tSl9O0ETd;tz zy1M9NcQ00feXZ<``@0q5>?56BhP$r?;$WcESu#p}W_ONOP)|cIjeQU0GlB77)rm6-c!Z)Ii2fgqKv!W1@1q+@ zmJ1^F_HuP29j`6iChLy7 z@oVSx&@C;b+?hf;l*#vDcm5gF8 zNhO8Tn{iKS%$sZQX<*9e*@Ie^eu-QyttSb%!R8jOp#^AO$GAfy5zVN!$WB~@D}eESzeX7L zwMm6i8In~pZRFNPHV+Qa>&MGH2cKYEAo#D~5Mmrsh_MD8g%Ximh$x(2X^cR}#Xca- zhnN#Sht7wXll@-DA?9QY)^P|y*)w(AFX@d=SD(=v6#Bf5-iwI1daEJgv60}sOvpF4 z*apl@5*~??BtTZ01?}hE#S>T0LC;4948Mb(2G&682Z*D+*xw3vX}_KL0g8pu?A8e4 zA*L9BQIu~mkrAZU zIRtLVW?-;M5Q-Nf6zJJ=#N3hdNo>5OxsDPsv&<>PzsxA#=%8Od{ud~24B8TAP}ufi z=t!8?@qKb?yO>E34udWG*pSkcfv*NabjI0v3LI2uS?$?w!y0I5HS7|l4X3*qn+>;z zlMOk}xrcVW)?Tr)^!9ajaq)9_Cd);+qM2=L!Rd2$Uo*5L(ma&Y5y&xRp|sH5mviXZ z`wnGjPjteCl zrKSj>?Q|wp`)r!n0;JR`yQ!A3A#~F>=t&%rQr_uR&K#^<&L!7Gl6LJ~R^(qU>fca` zxu?fq@L_%0`m)8Y5b-Cx@aiTq2c6KK^*7e4sf?>~aT%X5)r~Zr7WR87-MH%jQlWHU)b1$%82(WTX8 zkz!~MOzaABfPlDImfaww82>$Y2cTk$&v{VsBp4QsNsD-rCf6R$rujfMrmO&T3p%S2 zpbJr^8PWMekRk=N-Q1iyQIMm+^&wPDK69jw_Jh5{hC}-ZEjkWaKY$N?aj;(~<;1Ig zp;WeZ`GH4Ghur|DEDbwE6Sin@{+ly*5ey}aIE6r6<8L8D2I(2M&i7y7yL#Bh082o>5uPij^u zMwVi&HQdbT<(uBnyN$(ygcOy{Mh<}GP4s3X*-#~-*3}LDSwypiU5_IW%Iu-#%1)imjxzSnz|`FF~H64y9yv?qfCvVq(Hf*Hvk5)_fZ&o)do7H6i z!|KXq3|{Z5UJmcyTwhc`w!yf`^C9UnZ>vRqw@%i!U9VS`ibE^`!X%h}3>3;M~%T#s`|w$Dw>0wh{EsY7~| zxI(8_;*_dq&)gd9YQve``U=Robi2W!T2F$<#qQHX$FEmeT+)Li6)|zH^=t69gQ-9oh=F2G zaCi*HUaVsnA8dsB$Qepm6REp5qNucKv=86-dX1v-n{bbEcs4Ui;@(UGC4p5s4&j@$ zBowaE2E!xk_9b~8wH4g2c>;1+MZ@h*&ldR-YLF25;6;KR%J$gAJe4S20H*;@#nlW8 zOTROUg&NN(fT)T}B#J;L%zT9)!78B7%Wf!3FvfRSLZbcsshXcE)*%aX+`9Q@`}sNN z`^}e_-Vdu6D*s0~j`@@JO7MQii9YCD5RE$wxB{aMJJC+D3r)N@z)t|ah^iQf4yY$6 zl@2-o32i!$Q=7pP7$$n(#&R`_ux(M$D}doDj#!2$$6A^3Y!?kVyBV}4vP&gG)J;*! 
zYhht4@v@bb)2+Zenl;YACzL}n<;(;0_VmdG=+=NHNj}0b2z8A}2=av@uINOO*Pt^6 zrsRWsaKZ<-^jQe{1O`D@(4N+zFr+~Y;nYIa$NJ?S~lY2 zFg>qXtIMQVLTu);ba~gg%Z;{Jv9o~uu7j6_bZvArS4!6J%4W3+bswv~T!9Y6{wn1x z<{A=kJyE|%=acDtwafn`J!t&ykKZ6wa_H?B!iS_1IsW!7D|pl6JIl+oInlPLfIyTiK~piY;KT6b*j#Xtc$Nz_KU4)=L6QqEbj&0TO9u_ z(Q)_l(T%}RWp4^UgU$VyvOnQuU({U4y{frj9LL=EB#x$9jULTsn>21RZZS?6w~00& z|L}oF$kAzp`12qPbco+hx)Qrq$wFi3g^5dEN(ivv#2?O##>EwVL^5SWd2ODJ+@1j% z)r4$^rd)5=O08Y^i(|#Ij@U>um@l9yJ%aUtT$>}7KZo;V_2ocLL}MJL78g1N^Fg!LmWJK*#dDB7#zn$<9dH_~*xE%xQXzmt?-#FQ7S_j$&J9>Ird(Doa zf#LqXf#%kx)|TFm-k#C6-a~Ci+D6)f>CV~G%)z#<+4kP<8~4o~Zo^ub|1psS`Virr z8#oNm(2xUlKb?rhS^yZxCv*>?;DkG!mXc>;El5oe+0ZQwPig-uiISv?mzMOdt_5T* zCbm*FN|Cln#fcydt6cym2V3IxL3oe40R~0@B&1zlpc`-vV-dM8iXd4Zs~w;yj+(_8 zIoPR|(20$(Xf>t~QG-^O5}Jo8N301+MguVr)gj~_PY6M#FY%iT*;i{ut7nQPt1w$R zehJu&If|t_!(rZ9rYKzpBFC2+*@swBz&Da(hE!WC^Wsuail(N@LM@V|jhpq!G}vBy zQj8rSwrY-_q1GLcy{=AKXU2LDz_0|_6Ht?TDDgmYzh;&dcP3%Tw(QD@ZI0V^=AMpa zj@9LSn*RW2&5>F6M6-k8D>NzoYJ><4`9m>Lb9;Rqr*k=p2r5=tt6W^Frn^kyvL@MV z#8i3n0?+D}YLZZIxw=x}%(;YjTT11jLd%Ml-STGTu^nR3#(in0*TpL++tfJN7S8WZ z6+q1kPB@SvQPrA$o4kw9p)(5-sguB08UWi^fM1a9Ph8lfue4KTE+vO;j<{RS#OzTd zwW2u(gH?~;Rzezwv0|fc)sd#R_A=Ae!c^Hdwa#2^@62x}Ga_Mj@5y@A zm3a`oY8@inz;}L!)nRt14!gs_c7RPeL8xnP$2e-2MsSE&>~&Y8!b0 z_6XuvI1B9Ra>}dtxIu0@j3`lSWr@3w5j^DB&cXN&4P%q1>0Iz^9Ks2*f3ydtvX_hN z7V{-Y3M9$qF^tl9z&M7G$tw1ukJXDlmR&{xjP|k`%^#nl94_==fJ#}H6HDPAbI6@Q zLO2HHyqpb)PfxAsRTl^k*+;YLOdFzG3Zauzu!kHZzuImJuA5*#{HR7hD>`4`ERqlJ zz?6~Tj6XdR`iyK?tz=?UB~3vBTcl^L%)Go~6N*Nv+>D-*u%|`mVovUqTQt+a&gmyn zs6w?%RO2Wu9F11ZN3hZ1o3-`B!nc-BA3xltdDk~}@BAv7-63pW-FCU1eRURRdzkZ~ zl*EVX&|Gd?7!sCda@*pXxZ$%V74TRr;nhX)FfDBhO9P&{Xwd06JafTWbR3>JSd_4a z!95u_Fcd>iT1%#Fhuh(G7$5CVQ9RW2uwMEO>(Q54_P4Cl7=Zt)<^7ufM&JKz;kf%# z#TS{uo0_WOOPc-Y2b({g&3>t`DfcgZZvM$WWlWkkbAn)}(QEV>1K@aK!t`wzG5K&% zGW>D=g~svd7zC7vLq!G(8<<>nL_!$7IMR=_tUzOXG3n*ODDj%W_7I2$oOX*AjgVhv zC}PnrS$H?_QY=R3)CYyYd`M=)ajcjkB+?Q-f)!I_%<>r{_gEeGH%P}2G zBIB;2#P7qiX=GHIscOC?(UUs-4p`2{fwp$TIEXn@#7Tfek4SJ-f{_k_Z||VY17ha% 
ztgdduc(y!pOV>WEr@DF!<4jktVcgo$hq$+sG4y^KfC?aVK8>^9Mkdd?jj$e|X^ad6B1lT(Cu-vYX8J#weSpK}v;?Pl~7J=-3< zyPc(a&rDeo_s-06arCXoX}$YC9KbqlGxy6y7j%a1ZkL2EAG!U3Al}`he3(N6BOnzw z7^8-_&^=;g9_E&xdBzdK;KmatO(T5Tp?$`Q!^d&JbJNkIV?_*5r>Bq2m4b=s8;;!s z^AYemeZvjoW79W}j@@wlWN@hWVDI6vv9XDpXXeUx&CgBVGkNdq+}s>!8H@>Wn-7g7 zK4_Z6FbSnx{KjY|R#Ora+Z@T!HYfU4D3&vVkm-Cdy4o$!Q4CY!((v-BIIAm#<_(J! z!vIEZQcSleM6s08IBr}pO(oTYV3-sI%c4O#DSsfjQZb8+G_^?$Ttu=iWIL!Nd_@2* zTFSgm%#)+Ipp`8kiFsV)bSxLZfF621S@<`Otnb=kRVb$#o6R)_Zq0s^|~NkO=V_T8L> zk#I7?^a<|of%9G+XVvCIprVD?dd zud}=Q?w+G(I2@95@my{XhsUtFqINkvc9(l{@0IP!}D$gy)cc3{Ur5;%|(BoL4Y0%98q0waJvBrxLq{?A)gT|J!PN=5*C zc3EBLTW?jp$8-Fj|8wX5hJg)o=|=%YTLe_esnmH$Bpnt*A4gmx?&(Y_eS{=Dgw%0J zO07y3UAuY}TtcGNOdNH`0Ul25APEyQei`N+*lva*9J##eqhIW z-y7>0p-ojK6G{Y4PuW&C*W*v=Yqf02#p$a|-1g@Bs9P~X(gK~Rd!(CowQzO2n|^s6 zkCf`9p+#*_|+miCc!pL8?xbH!F#8RPG@%?J0J>F?^= zEj?H*+e8K?AC7C3AS2w0J%hd9p*{AL-JxB1tq0dyG!PFd;Xak=+o?>yd5xDxP49z~ zeBfX%gBw=UeYCz*;vYl!UftS!E81x%^-gKnMl`IH7X^+h35n5D$ap>^$8UFrjndvZ{xTP>#IL(+H#Z11v?7 z3v*dIVCaxKDXx$ZfHYt{tcPeX&nPQW2j~Y%frl<(snlT6708aij82{;RyUKG&6 zr}`a%OFNT=CU4-cQp_)OF$iInDXSy0(CF=K3(?2*8(J)_Zn7&O^YV+YQW%arF$0Q>OAh5f+o z0&Ro<1;S85^?{35cZ4=!xUN#hI(n~4b$=<^TyxT!~KbdfM2y{YRAC_ z-=~gK7j2N|HcY0}MKZlf4e^)%RJT+mx|y0)=NYqSLeOwkKuMJw2L6k_9rg9ZFq*gx zJ+fhDhF=ZN-hO8`dzms&Q!Yu-`XhLb;nYC);*InPD5^$h2p6$H7nB)xp@Sjldl$~; z&w~OicLnVsdyAb^0d$|mo?!-n>q2^BDSeJ)B(`^n8VYeq30?JRVm@>e5#|sJ1h))Y zw(RN`FPdtLRt#(USrMn|3D88r3LnUti5c4EJ9#}C_mv*ZLmWj;4h6rk!jT#2Cmcp(UmfnYMbJp~~X9w-fUJ{a~C*{8j{Z9bT zi7Ef>us^v!nzUwy;Zy3Ylz&(|bR<*$`6yD1cj3pTF^PwSTJ`HArKE2mG;}o4b@+lJ zrQaGr0Np?pvoqya*d3501jm_nS^yQIAnruCG@|~r1y2=r>@nO`zN6INYv8F#fbejA zSUcNij7JHO zGdeNWJ*Esbh^mcn>9~L&cY(rQo2dl`bz?x#kVTtKp>FX(HH#`4^-|niHEqO@FR7*M zS5@>5(`YWO3!iZ?-LIT3hq86RgJ}9@#z5_5e#Fxq!ulkG(h2XIwakpc(BUB9KkG+d zBcICu^nc7R|8el8F!^F>rt~jL=c}J1dC3<*_y2hL^HJkxYbER!6|aLUc(1n~(qKGb zhFRn;K-WOFg?O4Gk@_3)N=elI=wiZwV_gX>zgXbS+4?*{@W5NZ>lAo1_^?2b>~=qs zVAcY|xsg!7*6X$$!!JSZG88a~xUu5+dq8~$=u0T=L3MJMerlsr`WcI; 
z2qhBzCc3H#{#&)5ZIqY-e7gq!<*m*8rg08wE13|Tss?2Q_j`h!r@yMwW32ew+U_3_ z&?O>1=5GkMA~(1G1|aB2rpci{(&-Itvk_o~5BA|br5C`$Vi-7|06|8=aRy#lbD95B zVUyp^t_pF%5x_GhL~+r}@#_OrV^q@xsr0A;$&Z@xh(^-jAba~H1RFO5%txNhWa7>? z;dNG0mz8bvEXE5HMku~*uJ+VStO}@Z{m?+wO()jYG2dsd05c}{c`KPri;3E|DyN2K zZkh&N?UNC@a7>$V)J~YYfdUhht*JYjh>;u2LN2KgAD~`EcbiK~_9h0Wrjjc2b7xXj z23(^{@dHr36eg#@FQT%MHQ5)8mBZ=dTff~L5ODE~c8=B7E@X*)%=#TyB_`~4W{PNere;Xu!m(bX60BZg=pyr<< zH1=o8e>-mca=qF7&3cnq(>>rTr@cc!>wpoUau7enug54wy+aZJM+)=Vr5zFBIhfHR zmrpD5=3J(_S@#2^LdPI}*3V4I&YW^TIFz(=AJ7DTL|DmBr6jBcxFOPI0jf0Q1+7Kt z>&P&}4^i*9l#mxhtmU^yQF;9FkSF!Kw%i>n;miKEmIp>z260zc2jvM>g=Ac+O3<3n zeS-OM)gi35AjsT>3e{~-w#V9|?eX?Vd!oIYINGVPJ!74Ar9M5zXs%>e4E-48_zc;K zvxuhKIw*&=JL*PotcCfeLSSXnOZT`kFeM8I&DmztL_+eEUIkgA^hd(1zk(#Y>%;#!5FgZeM?JqkhvKVb!wpIp7m>N-;-C zDND6{=dPt%#^-xia7FF`9P%@jyASW*ume?Fvm@z@Fo=__-~U082Hj(JyLl5w@8->d zKiBzaw|4C<+m8&}TaIrV7~=)eXc8`v|4m;Hg`OpjR)j`YrfhkbmW8cC-W=r$>g>yG zE{yECKQ)$r%z+nel`=Sg1f`dB9g2sAiUKbuK{7%HvnRxjlX_AHFt<0v0K$8Kgm*SN zUeg7h_K;9ad!W&fLwP)YhPeTFxl9yoW)rT!Deh)ysnaTfz*eD+cp4e3((~|m51mK3 z#q(q$P?)N{72N2V+SvZbuqDB8*o<|xZR4FVy8^2U5zjHqM3IAOEKl|?>m>5h*P1}Wi zTO*f47T`K)aHWxEJZJV}jsC?Dt4)DR={Q<|HelH0WHA9KX)5|jw7z-m+Gxs2B{a)D ztkCPk@I|-Q9^43Tt$jAcluRW34@UPtO-5fWRv#`M??+mM50_7F>A71sKeDG&GxIU? 
z#=s^HHqgxn8(1j^8+GeDa$R>EM!;5qr4LT21l}L!SYFxzjj$We{4i{>&G;++yXDUX zzwal1NDk<~7gekOFiPsb8hzVu`~m?;V-QKyiDM(~h+)J{=8D_RCiD^27ye@)mW;uvkx;iMenRA)Y1zH9^*_KMy zZ0oY1)z*34jtUH`P{4*Gq)V+4gVbntn$3|`6Ck0cTGNu4?rcIeU!oC8IIp6cMxCYw zNh2OX+E8fI>F{)zDpqb9RPfKM#s_yE1?W{{Fne?ge9Y`OlF>&Dd_UVxh}(LgGOg$l zw^MQUOJ}=+R#SB_US_WA=?qt#vc>wpSg6l zqxoo%MoTK4t*Ckts)34Q@={B+KcKaky{x4x*2TDL-E^c)qxT)8v$ai&11uH+^hK64 z2a3-NNz>V8Wzy1AJ6TAuglJ4xFp7#vE8@-Cz&=Xms!^pk93ImuF(3<#X(KlH=68_+ zgU9}_c~^7N^i;U;S-M(1{<8n+@=pZ6!}k=QuM2BdtTjo zUcjG+l>FWl}(!nYe0QFW-_iVjEeuAPj$3J+bC`qRFIx zn!fnmcf7~Nlz8Y5R|1$iKfs`Lc4m3YuoCx7{ zHb&{Gnq7$`VcCeZL;_$*nn;!2V<64_1|Z|i1|LVCl5R1fi=bajOwuB3!Vi3T`Q$;TJ6VvzZecu~YY9=0PTxgB4@Q)Sz6ySnhaSh46+fd05|s zP~E29exYCmX;$GYSQ+s6EI=HCEgEJW71N-3_Q61LXZpn72s4`xod~iQAV^cuTo5nJ zm7jez`J4Vs>6iSE)&I!=`Bt2TV0Qk(8{zv`?@LOl+}+r?x%%+l2hGW+5=jGGsRRcG}zbE^cji zx)`W|WEdLw-tA6sAksN-5LH&18`@f0* zL4}@X=t-m%tsv488}~&>;v(yu?6@pTao2U*Mjw? zP3RC8aUN^r9JbL+S7?OO;1XM%DqiUXC8d;+QW^98_ksJS)^r~8~4lUZ%T?b zj&8P!UFfbNO2O(sBn16422!dB?r%(IWm$JPuL|Tyce$qA6Spm6eg+-hRN_ZSkrbenaiEK1n}kz9I5hUt51u zbyWRA^Bf0QUf>j^kxA{NB}xTPrGw^PnF%Op_|gh~0c*apJmf|8Ma7A{jq!n&mdorD zgA*90K#i*lwSlDQEaO)bN}ihxu3+laQ3B~rkCkevt0W?;r$T- z$SdG|<)=;*-ZZ_sSM|@A@S~cK*w#p4D>q3sc#ihYfE|z^h$2*pkxy<@G=T=dBqp3FBRM)?UUbw($`i?I*1YIQQ!CXEU?e(H=(a@@+wVBT66 z!p78=;f;~*nCI}-GKgxZ)TxYS8Y+v{cBd1W?>05S})th(7!EL&KbFeC(X2FP1BY$^B z<&h4DlPN&bSq+rVJmrBZ2zKRkSH}8LA)sH+u(4jx*S-UT!;nD%x`r+vIG9X?l%e&I z%R1;_j|N{CHbN``bUcY(Cc#%yy=9;(05*kZ z(_IfzKn7wA>Y>nic0l7%IyORSOsC4jfmNuX$Eb<3VyOF$Lo9&-A=DJlc&E|R@6@ah z@EGe^^XF7dXmJcCsxz7_%+xOemEZ(+J7x(+mFUK;wW|#+R>6;+SVelW;b7+l(f9O; zpB~HZLMJ2T&t!Py*Zt^QSpNRE|4qOA_3+Da@>eS7O21jTR{ctKrv5wCzg}tl9wEku zu&`?Q-{T#G#_u=|12Oo~)l_VZ0kDimkMJdodHN*t>UFsCM9s(M0s4Xm9WW{u@}&rF zVehNsf9zIRj8sU^w5viQyom~2d}EakK_{Rky>^8(LMPxz@J7d3Om#VBa)MLFCpl#r z!YyxcN=n(W!&1b`J=TNSCq&zKY){pT_5$0ApYZp3rQSRizH(!_MSdQhq5LEvze#Oo z){l~f{fAlZKX8!c>HRHlcHf@4`F(Tyc8yGQCe5&+-m8DkT8JKIU7Nw#!1itw0FH?T z5nc#>@C<^~KmsN@n6L!V(C8ARY6YB1xMqoL!?M}bvZ2@r6c=45NeCkp;|U`M;Rty4 
z24jtdCrDg!Cnz5>|JXsLYy?7RQv88LaP}G%N!nq%Xby@Y=PFdu4;7!$auC zpw2q%9-rA*wZstb3nStP32O3^t-4Fbu2IyD6bY>~87jGP-PLDf_5C|{ZZ-#7qWTyd zl)=55AJi6F!YXl<*Eh%&VNV2E;nFgUfP;;ze_+=ik}5`NC^wkx#B7HQhXl!TCEg4myo#eRfIUZv0jY+xu4hu3*?jxn?%r*O_8g29w;`{XBj{qR>nmIEqIV zaO(3$&ee+XzqZZ3*OmtwDHcwv-6jSLCfU(0aWsqELsIhMQW-E&74ry%R|5)Oh{h0) zks>-I|EvX+3*Z)Cs(;cj-sKhYGd81@5t2X5aZ*X^(rTX6HLdcck&7cP#uz*bPkc-m zV_oE*oR64gJEa%wO+02%kCu=`P|^`w09~m}9z0q~7Hh%FT@X)@7mc*;DRPz3Y!^~vmnJ)Hqfhq1y4Czswcr%*qJ7dKu?PFW7`^1c*n@r5PWwfN#~$pfblN8j zl70Ls0A(xlrr~Ap18h1ySQ=L~E~lwPY+GUC@r(5m?TNNqa*c>DiplAe3{9Z=l$a7r z%gI`F^11n1sP@Y@olVP=B~P*mZXkp>hKCQ6Ogfa&Cn$}ClYhyNJ_O$Ti~fgx`FDa} z@>9mOF&x9y+CRlx_0OZf6*hhk@9ckqxM>XecASR7f0)>5{NQvo?4SUlZM7u_@nU63 z#j3+HDoGb>nl1*ZHR}yBAR{@B2JB&rWTRt02M7B@9L*4zcAXB ze#k=G5%CNDX;bD};}dL+w7f>#=(G^DT1xCX+G@4ht!kr89!jJ(-YvqC)H!IGZ3AHn zUENkqtV%8Nm|W5|#=hMCB_o#X1N@kRhXE{ucDWT1wAdal!3D<2*{1o!vJC}l3dw86 zFY{_8wuHK)Ab+iGr|BhfD`VB#l8nHRR4xfYE>RTx!`NFhpi_oyN>#hceIhzCnzX;S&QA_cUv^Eaxju8WW17glQ^w8%c@7*DAd(HD1XA?!Z13BLuJu^J&@H zw^F<(3~+Yg4hFKVwy-ovsRAUjSVuF%YpQ-K1B2?J*^osJ1?RZkMAgr0;r0Xz1r5{2 zwLLr?GK0aREIf!cC@Bo_WcD{^7iW4SEwYW&5$Ii>{n65M`w}yeELYg9fZz%hsxR}U z`NGHmMb&!vBR>~Ke?MN2KVSO!Qu&|Ne=SM=&)B)rKN)ti*2fR;#?UZT@8!bzhf7L0wm}%I9AfT~B z(`W$Qdq~z*jV_EabaG}IO*npR8C&6(^_bPYs>{A=f>OO>lvOWisPo>nCbN$1+w;Ds zq~#jp51J_BkLO)9BkLy#5%9d57S|(_;7=eMYg6a%vz)9mmXmdMM3C#bQ7vJg z6M$)aT=1gEd!q~Df!{YLciHc2Q@fF%BCkKaN3ZkD^d8%s*sGEc+p@dYN_gG z%yQR3UHyFrkFb0;e1aFZ`{Hqy2TpSz|KMrJ2>fH0wOqN#-TVu-d?`H5`X_oUgBzbb z;DyWK3f_o?mnl9vM7CBh`q=j^vb^|l?fK->THidS^_wB<@rg5Jc#S{)l-9rh#g|xa zevG4~(bt6LE$x2eS^Z4E`IdfqZ|w2QrNuY)XyeM8ukcfTqK9iB+5cqdB~L-gkR+EI zPkNPi*qc<=mswWde&&!@o4>Rtpurpsm-MrI=~?}pe)lc?oP9?wmD-heKfzCZ{`o!d zb{!4Ri@obr3a>wX{yCOczL)in_pTg!A$b1eOTpRR%k`6oPAt}XCk~xle7d*Nd#3kP z?_<4w@9mYP#aEB@7GFE|?#f#$#}|(+)_doAi;Jgw%dfn%a`ct+SGuoXxN`Bzb5}0C z{p^)Dj=gzoqWAi-Ppq8jJ-K*t?!+83f7C=b!S5VnP>-m^{-kCAizGOwNcnLx-V%7~0Qi zs-WG5(>SWy=bM1Dvh`T&dLnlPGcu3JEN=Ul{2T?@-~{G8Y$Am#I3HfF>;igu;TZf| 
z`OPtMD2uDrLl2l1;`XQz2y5J>;wtNi(lMRdjmk_9jCDoNss*?cC8=eEWa;V(2?VFu zi-P#el~4nPVBl?zh^)Z^b4uXQoscJ0Kzn-#`=R1F^A)m9uBNFCxsW1q?}Rt!5S zP7BG~1YgfMDayt&K^;Xl;fgg`MVIRtcw3G>4XOzw8ZC&BHs%RFx%ej}%-Css8CMaV zM2UH=>|F+MYTDy)B&}O|S0;FO<7&`Yx%vR3aY}KP@mOR87Af{8BUu~HR~x^1=TjTE zYqoTZYt6zsM)Z;5rsG=Mo_+HXJ~cSLQ(Cpsn$<5{ODVpwhTXe^SzPHej1x>bUaxIl zAI-OAEq52i#uZR<3;R%j%mo zWNkcX*fQf097--*NF-avY^V>`On~#&O&~^jbM2-yu~;5_MSXPUL@&@U6LSv` zi;10c7KQ7>)781$nJ1zlhH*iP1z~j+)779P&~ZHU+!BgDq75&Trh^cL&H`sv#S^b{ zC4K9DN(ZLOQieyS6w0x0m1qB@t&4wpt!;3_|WYGsx!$D%Pv zJ=%t*0DZ(^m3bp@pj49me!ezJs{r+fX!V>cVBz^Ga^~W-P z1oT<|DTPKffEq$oL9q>TdO9+{?ihpwVKXGlTy4)+(#vKQ`A5%YW*8ZZj7PAC>S#`AMkR%o9LLb|bx{pnQ(j?z`^LD;ma=qkh| zuJTls{sFTN8q!sp2hA{;ZyqvI1|FtRLTdfD5_rJ=NdK@AHH2oa$nGWSCj@+C>&4Q6-@`$FI&!1VvUpkAGShNa z1h2(jJrFh_3s-V95$cb&*&PdJbXfKyt`BCj4Q14r(?(dXbWR2HZZ%<5`m(TVtnPQK zHCClvTPb8$I+(4KElHZT?XQdl(zk8BIu=OZwsjaJ(Yn1O;GwL;U|`{Qkx}s&fEZ=n z=r7?r@PngR31EFY1I*bcdo0d$z+d=wVP-(F!N|Y>9@y|5_e#BTFX>f!)t>j;QD&vV zkOYLL9ZnvPUwFFwSN(tOC%+qt4c~|azksdX#kegPwJ@3(i}GhfujRlMPS0gtw*y}J3>9b z102f^meFi?!9|I#mN4qV?P%QDU=GYoX3=1Z8rPhx&@KYpH5J>c1JK!OXp^h0MmBdU zRkC8``6Io9W=e%*<8@3|$Wb7(o2Wma;PC!S-jcwHAdZ3K+SLF64akTVR*zGgtEg5R701`52l_Fm*F@VC>vz5O^ff1OYSxyE!vG zInVSY%}AP2m>FfQR$--8SXY4wL5iF40A`J8*=@Av)vj$u6VX9b^dzu)(#flH`R8J_7?tiB!gIor8#DD!dk zZ%pssSSQ_|u($aC)Xs(f(q7E{W5+K1zT+At<*U3Z7*=Q)wrMCMV<^)$EM*!-kyoQ) zVq`7bFg(LCl$*1ar!szEm_goEp%r>r!w%h0xgNd_4`n(Tk5DE74W1N7oNeVO?QY?u z+^k_{4aYoxCVTCj(+l$}^>*vpGzCM4yzBGGlwmx%XYHoIBYGq;RxyAa@)XMC_ z{OPI1>V~d;Q>!a?7M5l|R9~9BpGKRUnweRuFE5+3GuC|l?J|Fpx0V*}O|CYwI3RA# z*Os^oX`YkQQ;U;R^D~nxOH=d9x9Uso!p%49(<_C=)ths((@p2)`ut3NsbhI{c5?Z_ z@=E>QJGGIye7N2;Mqo1at)glr{!(AzOpH8ugMJ) zd1Sva4C^uerZe!E5Qbrzu4g%pGL_>huVjQ)-l`d)9gt{E1-5BPvX10XDbY(6-7{xIM%nnU zMp}kI#jKpPuKkH=|CY7Q`R}cNYGr;Z^XIRak;%~gd7Av`LBarWGh}KRA-&sH2DQ!@PR=%5 z&onYYXl8KDh76zsGJwuX;Vsxk#MqW6aL1;+p~@$BQ_aB>dCaY6 zo>Z3Fd9n-pm^KgQN3oxO64Tu8RSc_mG;d~Xt8(C=JUWkbsf=~N*kRav_Ewk?E{t!- zd}Wu|U(aKAdbj2cXLNUm@|*+4RwGk9E(v7HXV1x_^K6$q4j3amyz8g)%FAxrQ!%nn 
z?>S|h7~WkuyzS7oBkJI`ef!6^?XT_Ke{6VrWvsHZva9l>wWG4F^2G4*VWyrO^NvmH z2cGf*OTU{un4VnxQdh&Xc(pxcSr&aQ4xj^Dgd?zN>7Ztsk|8!pfrlh7@#LW=L@}8M zvpg)NNK6vU;csc1k%KTE~$+6`;g$SUReDsWZSpgvN5 zv1CY!Oyf5uS;nGNR~xZaibS3|gf={8WzQ#6)=EN{p5xgRD?^HztV#k~E>+`~VgA=K zvT1^zLlFs#7bX$Bv^kTKila?!q-k`CNtpkxwp(PLwUndRgV>41LFzBjBv_V+68Y5B z?aZmExvBZ-dUS4e{`Tz6x%$+@&dnZ7eMf4hE#dE2^r{d~5Ocp$df`P&lIxi&?n63h8P~$Sxv;df@U|a6Chx7T?4F*Qn_iuhlx`(8)JfK7 zCS_%Ldv@i{%+l1`lT-H=R_9l`rtj3J?@lJEvDDn3TAH1jUs?8KjhULAU*3d;1!84# zWnrTPJ7?+}mb}z(ZV+9bIs==T=zvtF!a7E3;E`lWj%no?V`-zqN{U`myfSSEkr%Oa=JN&g&MbXRyyN z%-2Jni%W4c)vK*9lT$14XO*DK3-{{wn+vn^U1Dt1)&=b$S;imJx;Hy78xvb{Np=

a(#J4HiGdW>PTJE=&^H{^kqG0-FPEs%LjFq7w2YIy0y7c4#n7~`As{t zb#m>vkD4Kpj}VGW`h_jNfiK@7?%IJ^lSXz{WU>j_h6&Fq_ukJV6-~__7a=b zW?ma^AlWiHdO&qH8bR-h8{ewfa8H`WW?Q|G?8_eZV6XAW9~+@Z{#e(HfBv0g5(ZoH zW72Qf*iPvm3unW|p(Jr*uawx*9i-*4aZB9N1TD@gsT$2vvl)I(0zO;{34YC}%4dc+>yy08X zaF`8DKO5}{ognKwozzo}2hMnxZ;kgHq%nB&#=8RjmdW#7iDJ`0nU267v3EMVGu6?+ z4Kk)fqaS{Ox|)W*hp@a?v!W5I zY7})-cmwtc$}V=k=TV<(mh4*AxtuuCR<>``LX^t4rCDs6#glJ9a4B7h9W~`U)=Q`G zDs*4If#-x2Kt6z>y9#;JpXel3-sB)i2?F+zG?l}+rB+I*s1~r}B%yqPvSlmMccqE5 zRZ5Zx6giq%yer)#bka?yl#bA3toZx|1onO2h>Q_%uB<`ekdHr#D+%|ferBapr>uiv zSo%GvHU@?Mw2e3YZOwqpH*ZdpkmO~U#+ezFxTyKqB^n`tmgGplsLIjPc!C7R7-V$D zyhF)``4SmlZM91Qb?gjnr_#ulJ}twKAwm~&bt|~FjG2=*qAFBYMO$lBeJrZjzLS?0 zFO147qrKvE$$P{XD1@|LK)VyKn0UJmO`|d)2lN`Bs*tOi1)4N(b0W`H%wlTyDrS`0 z^A)p{+6y(yET=DuV|r>2YdN!;zT|3ovm<>eR?JSZ_xsXF#mb1CadH$wMyjINIQdGx zOR=iEmQl=Cn1d{nGpaGst4F#GrAodqK9-F1qEv!jW=bC?fxwo5Sge}e#HozqMnOZ1 z9IcUb0wg<|d5*j?##Xz-=}XE`9`%*D3_7%#E>&3)W;)c}nhB4j3$$0JWSTw7HuBkk zG0Ujw4j6b7wSw7;t3yv1%d4o*PTDv}8`H&2-NhvZQ@U~=1?=LKFKNrT8MGsso;+k` zg_hzli88MW1tKs_76-%I&Frb7A)6t?hnHr^`KnfAIP9T{Mlq~K6eAVrPh^%UQbX4MlW<;Yf;F=b=~v~R%DIB1uhYQ`Uh zP-L1=RI`R}o6wMKsH~RZ3We3+{LUJM1Ej5&jF*kynrNYG4Vr`3tkr*O+kfQjaz5|= zq@DQ%{|~F)pKX4_{qF{TSNY%G@~bNR7eoJZU+&jW9xwd+lfLl{r8-rQdS1O{cA4Gg zR`XeN$^4M{Ve_lXSTMe+jEI$|$e&D8GSoG(&N5V(F)T+YT{bocYL~02U7npWvQWB$ zoN0uGjFE#XmUDSRCCfoE%a_=yoe;{`aY<>DP`=7yRE7LC32DUW7xLHZLjHOqgWGft z+s!<-+hR;f;9Fu07BF5aVvI-ZuMS6St(GHpRkf%JAxwzo)*iE&Yk^HH~9JT1mqF)oSmT+{&>Z8OC_Fd&oP{y{NMZXwQR)ez>VwfPxsenp#~ z)8@P4`2}tLm{`B4tsmFsm$msxZGK4&!t)J57khJ*MR2lQ@Y6NpE zqW?ZI%Ex_cWVd0yeQAVl*Za&R%x%vf!94g3rL+!d^BD))oHctysCCd-fXc z52}N`a0!NwILh+&T-pQ?>a|mj3WJgHUL&ZFSB&tZ7*noqJ)UOc-5A&PzK|uVv$gg_V@|!M0h0PmRGKCLK@2`dF3H*5cq`r2pP~T zLe3K_{~?lPAdm?!C9&yuNjs)15?3;os>I2ZY#~tM+2N$M*Ey^=JP$X|^G*HGhL>$` zK!q#hIZr5@aE+AbDoU^0AxjxbLM-$mT7hc#nve%!6?ffsQV7D9gii$J&?{I%n3T3G z#ck<&1qBt-N<)`C-K@|W;Tt)UcGD-4ri8-kdS!S}lDGB~3qG_~iH0i)(@D1|?WHuA ziW2BLa0v8DPAh$J3p^>In{h-y4M_D0GSiyF3P5}Mb>95pW`3vNu=?3tQ* za}~;prZs4S%Rc|?DDb@7llap7%HXT 
zphngoMzCBZo$V}O^CL)AK)Zf#b|ofeI(7Nf zZ8Xe`CTLEt_?m2@tJ$*w^(i6dJe;7JZ8TZtc749SJiA=cB+eGX5Jc8nkTJ8f_oi;w zCs&u|gcv!q3Q=mBV&`~C=$O!|I@{j#H_>FC4e6LE#nNk3d4B3%J(!243b9PMrUmS8 zFD%_n-z(`me9yb}2a`7+K-(4aWz^uSrfG%ZlD<^sWp#de>J}7j;lApKe-XxO&pZtu z6I@}Tw)Gzh*YIkzYDbG>OT}7~N$ZrRnr13Ch>yvr*_dAZz6inr?JNv>87}1w+FX)e-_UE> z`5%a^+?SG-`4s7txH6dx>fY27!t2_8TW$4gcHGppL5S6#J{M17Ned-p>VBOn_ymDU z)Yqop+3wrOp(yV67i4v#H@;n8($wyU5U(|o zAdACbYHv=pYuY0%amy5yhKRH5U!O!vi)@g=nkLyWL|dC|P=xkINgJ)DjBS%;+A1xR zh{Z$_v6RLmtg)`GbD3m+)8x3MF%}xVzXi6l4#X34WF7Ss(2@i;pCbhA=?|nA0njOo|tG;>6H^0N- z=oR0rdEOS!+w0|D_VPzO@3QCJ@VswxzB$h!!8f1Q=2>lC(&lMQ&pW5hr?h!do1*|e zO#iS4p}>013wBuxUa8NzV4boiEc{HKZp zI)X}24LXCapeN`J`cgj8yZP+Iu1qvyAItl(rIrt3%XmJFE&KCMY}u1{W6Q35Cbn$P zd)g8fohP!7GlHUh(m4|pK%|WN`Al)dKJPpuuimJi%an%fE6xphEkJ|?4gv>A%&7oryDy)M~Pz;EGf=>t+8@m=VGYYf=n{VQ~n-Jz=L=W6^63%4&yZG)d_lxIp)W z>s2(OI?khIsj$SEoLfGS<(sYl3B9?S94X4c0x*C2yM*8MzUr! z1p^B*&;ew5bUDdLaCh*Q9@xtQrIWDBpa=KHrpH z_^RN);(n;uF%bl4=DBrH8nh_@spy+j&4?k7ploi+>N zn|9>8no;8jX_^kEIW>;Z>s`&Lkzl&snp2}gxkXR2YLb_%X4NDwzGl@VFM;OOBrl<6 z!6Yv^&09%c@-RXuj}*Wy3jd?=Qq&xd#3#}WjO3-H`3=cSSu-4x7dQ^h1ge_nkoa_H zrbF`5X%01mb(zD>mu?eEXj+^eb8GXZ*96yS=*H${IQS;1$ zyp;Y6;pIqog)bEMS~x?(3KEXVAH<%TX~f5*OK(}{8OAX~JQ*)*V&-Dc9GY9kM384G3_)BOby@hVInA6br7T{NGN zz?;^rXKCDmhNQ5+i|e9ehH1d^l z;L{&8n?f4%jKqY|Ujx3uXXs+nkyyLaAZI1W+8qX)DhV%u3T=tf(B~vHy+28%Cv|^H z+<~CA?NgFeN@<{{+avd;?&lv7xts>NAc4M0xyGW+wa6wf=~uP*$+0FbaYb*82M92% z15}d$ximmk2N)#4AswJ236M_%bm#zE2=KHH(3u1%qyaj0fT3F1zO4gv)nxB4ja4&Z z2LkZkow!G-d$*1_Lc|9;Ku;2&lm_V00k#t0%W-XblMrP>=n>glb-p4Kk9jd!{cF|e zl6eU^m}jJUn86Udd6r4&a#tPzP=yV#72N_xCeI~-A~bi}HqrxdJ(~{t&=kE0lmSM6 zMtF_Flq4fkCSE4SD^QS-)3j}voJS)hbjn5r37kAz@ggymvD>(Bd`ofmHU8VM1-XEb zLugel+jY~q{ch$@)X$jS7wwboKex~MUvrA#_nbd>b3Yc^h3_LdQ&k03A#ssH11DjT z8`)A4S0DvBVZ3LgV3xoWih>fg0a1V$!^6i=PJrQwk;O1G5#_Q1a7R`Sh=Enb2x0#D z;22C7(=ri&fPaV;Ml0^r-I9l#&T$l*euG`f84&S}exk?DIm0JwJbIAzJf(V}Ci}wzblKiZS%R3HdM2%-)}!fliC8 z+7iM6Gn1=}9fE&vBlH4qzqQ~Z7yxh6x;OQXfY6I?tu8FB-kV&x1LNn;!raU{^01@n 
zxEs(^TT3Ju}X#t17S@d2n7MezrFQh9|>_ADi%_Y~({|VT(P+I8!0G zL?ns?uF6T2#f)A1SmcT%r4a%Pi$G&GhMmCDCj78TAqX=kQ~0sL$=&qrA^U*CzQZ?9 z`B7fZxZ@cEFsKJ030zkY3ST}OxZnycV6IkV0SU}@0FyOaNDHP$RtCtb1({9;mBD7B zSqSDE3elVlH1y3Jvb%^ywR|O6cd%bcCIq5)gv!Y~Qus~> zge47gDhDDMRHsn^-{o{`fSNtff-EC_2e=|iAhvX^nW(^#N!0bhunyTp5^WZD zn3$!kY^UrCl-4<&(~Uyd&L|5~Qu$ed$YnZcEx|N+`n{7kTib-Hl9e;L1oPF1{e&|? z)iNOZz-@K1gL7J%(vea!TQ5aTf@|j7l#I17CU~V!)VQ zAjNQk022tZ?+x3p$~W3-zv#Rw*#MdZ96V&tLU?gU?7B0XxVCrCLu>lh7@?4*G{iIz zDbp5K7+GV=#_!qfYfI7U+G{`UI?o==d{X_F%KKO5N%vcp?|;{F!=JWha$m5`!k3V6 z253tU45fPkwFB%HB5%j?BFHEQ-1vxsb1yJhOdDdZZMxYIGt170baLR~?2PRS z`HKl&&@aH+_+v2;5DFz_w$B>c57a1&iipw)`ouGvc#Y-~P`r`iGhRS~1JUE`jrx)> zi0d=%G_pr4^=$l@yuU94!)9e^;en$Ci~Hslre<2?mhVkHxCv5V@%GZxOg*1^uGU4i ztVQ-XN)d#6VI%;| zp|xawb=}5*lNq7LMkAIe27o@s499k(77G@;Zr%`wvQg92TJTVy{_RHeLGZK)gN*74 z#f?c`d4PCH~6b$~4llDO2o%vdl0(0E)8H0V+y~8PQs{ zn8kt(S_u9!*O}3aO1_Kj2K2&^eP;&bJ5P|9#e!`xK7rVpMyZ25Z%ZHlEX$Z-d>(mB z!}yF60fEhe!v7r8fq)W4MCOYMjJL5nWMcY+f>;e4QNnE!65>xQDS1tbexL*%^SesS zUs4s!zpKq(*5)s&3i!)!sS?o4pHrPcEZ^z`Q~9QZezC$*{ECb5axcc7Uff=l*Reiq z7y3a?PHY|kdpU^l?3N**n8Ua|HG*+vE5`E~jLTwN-OT#_^frw16^u(_{D#`T!!SOy zlOn#m3uCKL-aayh@l~}4^TT^FM)r|a|9))S4;(~-dYq&C&cii0EJrX7%hC9#9Dcu} zwh*wlv=3ma6#Mn#7(Jy!h8q9y3t)6l-VpPR>yR_mUF*Y`Ur{gcFn7I(llj^gz}_0> zr_^17S+}2j+OQ9OkDaDrPk&#rAGN=xzN@~+jAs8m^)m0ykr&uSy4OBbMV9yWTT9G+ zcNP{g-&tNjjG^xo`%ybPbm4V*3_mw1k6jmf?*Tg@X?>3NMU&V*|HISNj=+TD%vaP`nUsy({qLw*d45XW(a-m2iZTxQU0lt48sqcY2KCH zh08aT#Q&xmCA%M1qg_Vvd)nnU#f)Y@qjnh4PpQwSPXdnqb@d5(miAt}WRwnGnlMVY z-q>W6Kd!8@QU0=`1C_ro_Uk7upLl8F`kCuzZ=Sn$_VU>qXK$RieCD-@iSo(vnexS# zuAR7Y;@X+3XQs~0o|`#WKX-28x$?8+=gY55yfAU~?CJ8U@{{EkC!YD7I$OTm_Rs>fEzlZN8+ zrD_7GePS1hdPSpwfCc~CWjgYQZh=CgP=GTEi8cb8NmuH!2u-6?aT=u|m@ldUq*i<( zT1Y3$$rmaFZO^xKWXF?<9|BS4U@Qn+u*@-9nT}W0y@$4!zFC!mYX~oeMDGBV=+e&8 z8Doo7D@F-dbw3QH4Rw2K!}4_p6i3moz(KuA(c*p-S5dIi@>!)J#4bK~U2o z(1_XjGdkljO0kmfO)$Q8kut{ZlGq3!Holz{mOU+n5<-YnAuo0^mPVu~V%XwD7Scy0 z7av_PX&b9Jbpq+$9ubmG&wrgHZFDYlpZ|pKk8+ 
zAE(o(c@F4ooCD5^x(J~LX{dMiL)>rdFExBFpmsn9E;pPm)u-;)wcExZ@30Z_YJFwh z8N{U$&rB`K1MdbPyeC$V&~oOOiea$z~YIHPpYP28$m$y#O)u+Ip1NI}+rTX*& z>MSO4yDzB3`{{|Sum)>9%=_XhSj(6w2W&jSWzNhBg7N06=oINm1ErWv4VS+Mlzh42 zwLCYqe23_boG4L}^y>Vh68GMxa*yt5(Gk+pa+6w_0A#kxIv*UHN6Z$y{l+-@9yDFa zM**H*U#iOoY(PuX<7t>2Q?+7@#*1hYh#Czg35XJr$3gLX1&`lsrvwA8k@?F`{odlr zgI2J9zD*fLyZ(_ByPv-W0kWB5fBS0;RM(9)Ny9u!U&(q9zXgbYuK@Xzj!PF#m0n<- zZh%P}pIwgTA1ngR-qn&rGbKDm#G{!@a{x6%kslHVE0S-wJk#PeLy*pfvItcP-V8a)MIjb<()kb_kt)qJ-4d$o-``3JoJ$85FfObI8lhUrT~;4p^0*; z$fa4}<%IB^ot=m7ffU2?7KA3&PFjE`V?8tRwALna(a|b@(M(v9X^3USRb89v(E1?j z+TA0J?#`lSW_|J7G|{9I98#j@7iqMcguTBo$M{DYLF=WlrM~W5eg5{!okvoQM5rSx zQX9-xOAj_uo7XDv(2}>zHt$EwpgXPj+BRA)ranvcdkdtzT-G{f+6QzhW)S3Wz>n#ao|Sc?mb9)D*X8w}7so$z`@L_0D#$cxwZ-b_33x z=IABT5Q%_9YY8?Ika(=Z%F%@TCoIUcL(eU|oruIVI`kt^@_o$_khFj_hHmlMnM%{M zG1D!#(5u(s>N}bdS}WY$?ksJYg6>YPA}DbF+w|2I`PQwsU_p{GLq0V|OGuCjVLhAwb& z^{R`$^(~=-a6-PWA70nb%R5w$0Zy)t^&}$8rhnY}S>=3ES&$<~tz`?+;fh!KafmiQ z#u?lUTD*UAgAknCb%<|5(E_*Mu-}Ba&i=$9MBhrm#E~-E6c8Y@j;;aYUq;1=X2Co&E^xLtnrWhCMl`N~fdkA&3KH3i z-h&|u2>m0e+97TRu8^ZGqsSmhaGuliiumKw; z8fdbkIU9f=eBL5({91m1raBfhBTPxZEx|DDBmgsq*3n3P1~+os)qFIR2N%{`na3E#uqKo+W;+^7}24y5>gnT z3zcX*6lA@T&qqf?(K98s!NAU!^Ofje7=}?8gt|{4FH=Bbrh@DYEO7*I4#S}rJz%K) z$-_V9b9th4h_QM=OexDgUt~yh0KO$D9|0whi}nbUpU}H_gLMwA9gV9ItbaiJ`-$rc z&`b|`Bg$zlQMW`$L?#)OLp7$EVS5^(1`U|fGkPb!gz_^?I5|HjgFhs<&nb!tV0Ej# z4ev}otW|7APC;x{r6A3LGudjt(C|E&c#0l2uhMKzAp?3q&zz?uKoB;3YMl)$YN?=f z59P}(JO4P2F$R+GGl)jRbNeXKnI}6b{tAZdKg?1+w7Av@ET9>jYNk{^!w;+ zYkh62c_YM{Y-?yP`vBR(){fT~peihB0Szi$Y7<`r%)Ayv+hU@d+nD2Xtf_fg-597d z!N>E}py)PZ-l=WoV>Ct}`tL-VMAZh&fh4Up)oDws*8l#}8YHccjQMJ!D{QKYL=TRJ zqWaDd*rQOis-5Z*m1C_-VzOEY>t{6QvZ@T8DX)Vs#h=DCx6mKnWnCL%THh?Y2-ehO z?I*USLH7c*n$U{IK8gl3y;^S;?Rf+(<{Vjeb!%D#!kZ0MG!VLZ7w>_bi#Ov>ulQSN zHhV}bcM}1ymaj1^y^;<>A-)8>%#4Q25uCbUamkvGhndA~>l!x?H*gBZyNsch@+Z0v zTg!iH!rVsWPbn6Jqyi%7M$rJ%IxJJ5x%7UnJC~uv@py5F)u<49jtD;Np&)P@d==yr z*u>cTa~{EEQbGM?tzWuT+Qr7SWYfDWEy;L%NaMF|J 
zl(~xr{JdgAS8$@e6xOK?57^yET;OoFEL$o@ip9br5KKNUus;?S<~6;7aMamN0$)@| zeY<@x;OPA<9YOZEvQn}S&9Dz8iwvEkV)x-BHr7PkTo8@S$s%cpic6E-`0I zd8%AtT}IFmHhB`n7)DJg8cZ8;&?-nW%nKsbg{R*7G`%~XD$_gK%dgjQa5=^t5;tQT zPK0_J;4=+Smyc}_o!(&XCxstUn;IpPf{@tyJw=aRL=?Lwxs5{32M-0r9*|OrP{oD` zRis_?)?|63tqE`>(DP@S1@dZpTelNgUY)!l{r#Ae%Ng``QD5rQbey}!yE0afK1lk$ zd$6`I@B;UsY|-Fow}!|7754p%gkW<>h%72q;kcrd*F!oO+`K6QPv2JCGF7ruwfzIA zgv{hr?EuFYx(Qvq3mC4bf&?c^58|#4I^99$J(RUZN7?agwM}wy&vZFj*vrxKQ9r8o zuo`6eLwzsp!zssidcth8zQ{|onZ%gh5WIDC%=kcVf2o7hE1)l?DETgWyroQf-rtL&N1xH zTiEHloq0E3t{t-X>gVAK!u6d&p_3MS!gt1l?k>NBf)#NrN;@Kbk3fA8oGIt+IDszI z%}D~+^^*i5B8XaLd)=V4*Y>lYckE^?J z*J|J8XJgf$jB`GtsM(cmOs3QV@)hxDHl44_$2&biuTj1$6%<%;wpIhmn;rN4v1ldk zL}AVg!h^WyaydQ;lN*6+wb~Jt&(#Wz?=@^+bDoxH>=fxzL5^xT97cnnb`+WXT%yN2uLTscUQ>CR#fL=L|hy6(!lqlq}EV z6*`~mY@)w%x}Ycyj!)Pv_0Krl8?6dFKBEsED6Nbh@b@`|@p&;;sH#U*ijQQe|2vd8j`9@pK~dc_kWU z7>K%p&oieyd_#JSwDlI=FprB>)_YmEUGs_bl?AR=GDTSK=hT2~pV3lSZCy#AlgOk* zLvl>&V-EpsiVja}+BwGWnXWZck}qA!lB=Oy#BdO%5waG-Kxdg6j9+h{umk)}H*ZyT z*(W{c>GPSdt6x>#@0ln4Pg#ZV8`j<2pIdg}6CkjGQ?trx{I%SkLz98_6=W)z0LqoC zra}zG9VB>rSs>sl?~5HgHmWCGXdr?Prs1{bLBX17d(!}tq>*Ms*MmS~NKlC@;Tm>< zD9LMjrHE+X9Q5v~o)?5=pNA ztM#@rq-8p+Ej#Q>w)6aj%y-m3liSMPaQ_LT`j72O_`lo#NacRj^$Wl4+FV!GWx8et ztqGk5eFP1(g5IZVpkJPx=|BJs$yOlFzKDtCF%;4XhFPSWAaudG-|D7I8yV(86hVM) zvaFoGn2dFd*xD)WKH43UcpOfn`?1o61P-Nbdc_2))B-e;juc)M%~MxbZ!XWyJZM6J z+WS-+Y$XQx_fD@afv5nr(SjyP*bjZFpH@!=k6I_5cvjjXXnaR~saM3_KPvm=Kxvz1PkXpAu>uvO`9q&aAh;}WYOw=1m0Mi2XE zaS7Bxq0Sgp=di4v@rZL}kl~l)s0rg*&H)7y3=yPJsD+KVLx=H;=UN0F_L=(@t?#Sv zy7oWfeuDoy^M{$tKP&wFX7AsRz2W}LJ$L=z-upW${D1cSemVEMXU7Wv?yPORtuCm4 zpv-=AleyX4Wu7-LnXAY=e9Zi~GJjK4IPPeL#LK*@N8NMsRkq*ibyy_KGQ+jHp->YKtZ`t z5vhtTGWv&P^#3fdcO(sj2Jy0x-ChuhyO+dxRd$+p#CXNWxGe_kA@oLz9_rWf-19M5 zZGK;yU)1JLX!A?j z{3&g|r_G;K9i;PRZT_@&`HD8br_C>Dv$vD7Z0~|7_N*B1h_TPb*eSMORy~;AT~Nb* zMfE_n`g^Ji64iJQl%&HF;E)(UuQm^GY24;P2ozmg5Vja97;0qhJgxua-7^R&oTy`3 z=XQ@7_SRi&a_o~YqYdAFa}q5k_HU?H<>j~4OY-(R%H`qgxpxBd_zCRp=y`57@h09R 
zV({F&3r%_ZzHTF&**9RI{rL<_LvEY9LmvB|z9o-?m!6Twv5O)YnY(i73XlB2vuBVH z*ydiqJoX$Z=0VY8UU^>Zv*&Ry^uAs&3I|^6Hlp4s$lvD*+l=T#ukiX2jMBONdyUdl zSGn5q)#DiTCopb_?NbV2!qTU;`5A3~R-2zwFW~-FZU34!zpl-1X!8fO`AuzpOPfEa z%^%X{59_$!*7omc^GC!i|0klJy3#ee#i;a*9x^JuZ@5OK?*_);M1=g~Xtz_~&lr_+BU_Ei)8hNg?LnjR{HcObx&CBmR9+Y-?u!R7UOJ5N@ic`|5Jrn%Z2R|J5TRCzx~?4Gutn1e{SIN zz}0~(1J7CsXCU|0p>Xe;u)!M@kLrR&O=Iw6=?@bud9{R zEvbxL)hBpui+wNg!4v}(4+7K6;y_|?@v~Z7ApmY2v7>G&s8rDMIypg9bkTrq1Ksw@ zaY-aeftgdQ7~IUBgUY&{(v%ipZua*{?c%bb7#r6zB~9xhsR1qbP?)xHV+(7PRQc=F zWuU;F$-A_(h%QvbP3&HW1OqxPvJpHxI5Bd0EPwbAs6uJvWc~<=@|TBiis@1}%D_V^Aq}*LYXT8}LW`n>Vko1LI(laTsNBhq~hSGVE1;->9&qK~eS!q<_K>68dGT4JW(ki;o9CzrDJXc{6+0aHw*0;KOr_Y*L`+<{2# zs8%-q3Q5P+qeC*#7cN3i)gMbnjcz6_fx+mZ#AG1?sUo}4rs&*04Q~8{gW}ikarK5* zE@EswLzQ{Is&B)^d!>F}?!v8Z;C5qwB@Wh(doRY0&rdC(fOG{QQ)#Wkjs0t{V|@Q; z`}YBg7w~%%1OEu<`=+&6#x^|H2FUpKlfe6gzPBD2>}fun_dcQEi4<{HB1OC$Ur-Fd zFlpCE0JTV`~i1dcC_DD0NMS>V`i0uG+tp&h$~KcLR0W*6rKw zA{cyFxQ2*Kc9{{uUAhbN8&O&+dZ;ugE%9v{-R+LNF`w$WwGFYnF+v$F>@g*A`p(qS z?Rpz+lH8hv%5>5tw-(2xI!cC^xJ7!YkyPB)Lfz2{+T#V~Awh5yIP3sbBi=JwS|685;@zo>kHwmz^rWbFdS06OHU`vzrB#qqk zY+N`{UFRiN5A0m;`p`=V(*D&uUNY({3Vj+mZ`wXN!7i5=W$aN0U+{at1KnzDXDS-VC2A}h{k)_Is_al7@b^^W!HDC>C3D_``C%hoSy zikwiE?s|nuHLd9;Z)o$THD&FT6s>g;(fsm$BHCssIbOEjMF?b(`xch0IqSpLN4d5^ zxIn`ITY+xKWJX0V!oknLMs>1n7jQ$|kAJX<+~^p0z@nSRfT9RD zI_4e4;BLHN%;L2N-VXP^Mrex*Lx3b<9=JmxtO63wgr%@kahEeRLUi@GVHqWFVERGi zz&Ig2&lQG`{6M3WNWKWyH2w*`09bkl;B-_40`REU5X(F1ENPbXpJ8nEK9i|q1V}U@HOi{_@f+Li|xB8S#xtsp%4g`^wDzGDwr`e*h zvdCq)Tt*-OLJR002??JPfg&Vgz{kfIc#i?-%nYEy56oN5!^Z|&F6oUWF*R3I%MtlW zq%LjgDHwSULj{Z+dZbc8kvjVlHH_p?H zDdLy1Z;^;`x3r}B2r*+0Nz}EKUTiPDfSB7VeM^Iw*G0SA9!)^aZPBL8QJh}6VnZq5xly=mR!a&g-uiJBWu$@$Wqt^j$I z1BezLI9Mx$`-1~P22erJ6Yk6H_xFjLOt*!0XaYLKr9K62SNLP){E0?Q^QC+w)k7U_3b386INZ zGz#VTtGt61jq&@#q1+IN(wxcI7!<{}a$_w#5>Z?N3_Hr!CQNzfA-CHB6(x zWg;?|WF7*^up_m1R5V~7ypv^$9zz`ffgn+5C`+xz=YLt5qSNNI*0je;5_Q86-tnzu2_YdYoZdZ8A0a^p?XS z!;6R$u(g4sD`+uTo!lRU0&iSt6AS#9>~OWR^9;;F7x<59>_v_F&keS~Wd_WFm#q)| 
ziDmyWsOSI1{)Cz3}I|e>ao+w*z+JPX|ongc>nBxpBSE z97Z?jgn7z5ZN6$8GmaZi8ZUCMdls;nE&6RuLlA713Nc|-8xDvWmwjA@nnGd>i~N|*C4H8T>;W3Lzo$jCS% z#(oi(u8FZz=9JxlYK&c?WA|Vdqldn4Y!gh(I7k~~M-I0$VjPlGo)y`-OJY1D#tku^ z6XUEHm&JHWj4NV1Eygu5E{JipfN@@o>+*V0jOWETC&o*HyV+V~zfvwU`FB*wd`lS( zqC;iwZ66flm2#J1_3soAN_7+aKHKg(d`uqO4=oO- zEg1VpFb)i39NbHcL*iBwwuJ8 zvx_!BuPAJKE;R?w_$Y{g2TK{Auz)hyI&fb%PyRg72nnXya{*nzVb1X|&{&NFyEQ># z=olWTNqiCf0~8X7_PIEFUP(;rU=*N*PA$e<==@w@y#ND{Qnxt95E%Fb=n6?(3>+A! z@{`o%5^xymoK`qc2>1j&fa~fhw~jNswT?h123H8ofZ8J5|TH{ngNyK-e?T!UTAL#ip(UadFwQQg+)xh0wHw^X^S*Z?9$9hT8IMTAXzk9 zh*EHllnc?Dt&%o~lvVHr9`#g$B0>M2G*MQiDCWOEFcD8#8>h^CgEG?m^1l3iVt^Sz%$P;dQZsq~h$=T`2TeJ1KnGepX zMoCqd*3hjrd7~+$MRj+iJz5zj)pagX+;;xwMhwpnz~6irFY^O&EgLF)XziHoi$cHo zSeaCVhbVpx`q_Uhg+h(e2*<9$oqGsJb0Fo@Hi&dFb+S>n4OyNE$MLU@cNi|i!-KD? zMFiR*+HIV`zcgp={qxLHGWOpxo-}(i)f+ka7_$7|GDPl(lBm|J*wdcWm>DKpcsG9o z86us{g0^x7mRmRh`lf`0jPbAs$Dl|4*cd5|pEDS{10bCm^ip;9U8^ZcdWn{LBswIVR4S z2Wm*QN;D zZ4Ot^QIz7k5t!>d>;qulths)+ryTc7Gacv86NnhmJXvEje zt@5SPHa)5@bLGMVBibGvG`BG{I{W#4J49}kFPkw3HIIT`4XMi*h20@TLi&*&7IFWM z4w>7Dsgv0`W^&gAr+1RvY3{^&2pM@U5o%EC25qEASUWkj#}65+W%1H zoj*{&VrKr_`a;(GZ0-&Bf8Z9^|B!nv{IB!>jg|YAQkSt=?NU#hB_xG*LZmol9D+D; z(l~9L;}iyEfPXwayQ0s7YBTEh`^sV+^~0(`H0SCI%48>xF*nbwnj2+rfPp zerEwPSB{f*umdsN;9$Qp!hzmCBixe9b2o~YajJHw)tSw>8P^kK^JuC^MwqL7GF)ZC zbcTb^*~|Ar97JP(*v&f{!XXH)KO7?CprdF5fp{+@f&}9@h~jq=17j_KYAy21Axtpz z(8boL6uZblGb{HKgo3u$!iMqLHp|JGTk1$qiy!jpd!CdoaU*Na@SweQ5ZYRyRb6fu z5;VA+pP>seIcZY>>BJPR{~|>ZC&5uQ%e7VdWST{x92OH3LID-0alq`Kiea_-$k_2z zeZIatyL|f2)a-l{aNU(S-$=JL&(v3@W~NrA+~*br0OpAEq)oIjVqsp==CMgHQrdITZ>hZD$S-0)7mJ9I8CFMH62J%@Bl6T66{hro< zeUHr2qh8mvR8S5kYY@r$;I~%U_v3KWLCY?#fsI}Vl#aDiuSZ9>9U0eal3t_MnA2Xi z0hD=&BI0!j-;H3qkAmphg10H8I2n#-?c~0Gg0!D|5{)!8|{w`vjFeA%Qpv+ z5c16v&L*yQ+~=EneRI2SUhvIrzPTF~w{Mnqt@Zvh_w|U%TwT1 z5W%gX43b&gw78|wIxaIcOQvCe1H$H5)a2|9+8GF)Ow#NtZ15d!*a4@m1HGVK@EJSN zrkDJ}zPvTTJ%tVa61$VY=K{SKQAiS;3b<-3><1oF1Q@ajU`QDR2&yc?ZonZSAdy_q z4bGzwYy#<#gI5^>1j%c@RzC@4(9KZ{d-Hwy-l#laUvi$8GO5vAU(kC7fJbBF(!n~? 
z$Ii#i^$|to`XV&5A!m%C#4yFM#IVJ1#BeJF&BJ@^;w2+4o)}qi4&^H6ppWYjO;~Ce z6tV{pii7S#_6kO27rY*G08LvCjH_zdqEZgbAOJO5cWMS6R6mRzJHEj&DzP&|hhP+; zi-_Ar(1YqmK6V&BEY9j2*N??Nm`Sd_@Ny7+fn!{#xJJF7O4J3SCrD(0<6l(RKwS_W ffjt27R%}{C&7Y=Zv$wbFNA>hEN6*nS12k^pW)TEQU;vN=36LNT9(y$#Jw>1wbFsUJ zB-WO+UfHsC*S5U2thFRdme%yjYuP{K2>)FFkR6VY74rUZ*rAQ}4~Ih$-tZ9)9})H{ z9P;piUb~xNXWoBi*JS&g)zV~t%+w!x^*k4zpQ+1k}zQY21KZ&vuxv z0~Qnmmd^=R2pxe8jJ?1({8>hCXN-H^k_b;wB{RsyrZl~ua6PzJeZ(H1zVTzF2`1)G zzK{m$}oHjSBq=<^R@1T)d3p>qcBM0J8yGY9VMUfl1icE8i@T6f#`yt|vd-sXp`_3eRu*NlO@yWQK6 z9rV-gpc#YMS&cj0_4ZmR^+CHf=x(ibvmf2_2T$TaVw#% z_RO4!d#(49iIUW2_u}=<58`5fduwa6H;AcE=tW;AeeSLXgUtbIr}VXl8gpu+|6bhl z2Fax8#-L2+hcoCsY+<;(!AGr5GD}=D7#o--*7n)cV!Jc5ZDvcW-S1;uhkY|$Eo>)q zrj>TZ`5^AlKn5SSy8{eatGBtizUR4tZTu6`vG?xz!}gGoSX!i{mc{{LewN0*o&1Dc z(fQ}GhH3ja0v~~E8e4-rBxbEM)=4ge^`h1<>q4D&nHR)qOA7UZK-@`A6w0L61vY)u z88|+Ra3bV+k(IIW?ug-Kfh~EQ21lEPR^^|~UOLq6@g67ga}li0|#X$KVOAKQ-ksxPk6Q`J*&hcje2an zJCW<_$nyo&=G(q3$;ekH(M+LGr_>$`=b|9W`7E3$F=3lXYGfp$JZda(L5I1d0@FBO zL`6X#@EDp^v@u*EjX_kBu0lEB1Dc)~>qJRaDb{NGGMWe!3Q~C$l>^HYzEuzvpZlU@ zMHQSDjGZlFjJeN>3guP$3mEAkbgt}Q!T z-{<e88=_x6L{p978NnV z%vJd6T7k%NZH~1?Tnq6o;7~j;CulP`1||@OYkMxkQSK+l@XgKbL0q9ttT&*I8JmCS ze!IKT>aMz3ps#5YL}cHm6RAsk{N{!i5AL@*+x@}jx=+D%5NzNaqHsUn+Fa|lT{?1l zR6`zzL{hqW4ULN-41Wa zd=?fBS;@jyL)NpfY{(@O_L-v^SiD>wQb!!SDO@Uj!2#D_!s9Tw;QfuO;0h;kDMmWn zxB{*+VdWrv!O1o{L0rD}eCC5NutJyihP)LqfvF9S_|XKjZP-oUqEm;n-&+`43xNoZ zSkXTe@;{2A`d{KdaOa2eR|5UB;k)*~4O`ytmhJ{0PyCZ0_wVKpu_LU^j^PyftSEw~ z#$JerPBN@7%xPK$nEH6I9WfhQljAT&!q74;mL-Z^r2T3?Es=d#J*7s;SQ z9WIs{^5Q?DEVNr&<_Dis0(BvGadq`Jv5K#6+}iG1E0=D$E7{4DOLocY$&pjs@%C!` z(7%!$Os{WE-)Q&xaZ=Lll(|gvrD93h)RLv$Xg^%rY_Ap*spBHCkw)qWlZd3MYjkWq zdPv&s_08>#f!*HPiZ@mhvE%WZxOKPF&kqk$b7sztUd`^=#-5om|1pS-xPmSG{np0z z`sfgN+`+t%h%%IW=P3>m!yM9+DZ!}9G+`!97(dywOkSGyAbyyUnNz-%HZ3ipk&-=n z_Gbkx?xaYw8&)7A*}+a$4Fp1M`D;V8iK9l z%h$6!I||&LxTEK3;&hht(XP+zN*eET*+^{=lWLyScgMn-Juz z*4@}i!cky+5~L8H5SW*!Fic3_dvPc3en4WDVPGn~`2Fo}5Axs#5I_+NxiaoKJqXcQ 
zuesDu1|#3ca>B@9K9fA3qt{_UPP!yQ@Jd~9N)Sl1;Ys+Fq&_k?D;~4Wsns0oz4of#P@$g<0yT16ry=ZrQ z(%NVPB(^pE?D0>ElLT+F*N^yeOP5oU5tI38C3j1MsWCrGXL28yS2_a09DxZE>Zo{1 ztXP-D8N3VlN5upVWp%+5FNqgD@rDR+0`gNhzC|cLgPaiM&x?!VFphC?O`NnGA&>Lx zp16tg^)(Mjf*KZh8q63fS=dh;Z8^7f85KV4JWqVF&KP077zj$hX2XgbYLs zQu{n4dtX92CYfB6XgB~ommu7-i=knbsdM0pl}~~!h>?a^QEwBTjW`TtBC3z?(DBI=Gw}(|%16$4J(6-Ck zw@&a{V5zb%hdp>kJxh&3>r3E*(2(VMYC8>u$O$w=!aWFiTcYD)n60cPhuz^Q&$mj- z2$9Q(e@ynVtR+=W#>^_&M%>(Yc09||rp9v^01-YLXt>V=_He)g>q66X8xD;|USHHp zvZ=uM=D|#@=yG`CGT8R9?pvxdR{Jq^JgJ?&mtArMQ64D!Y=VKWmna`9i{uyZ9EXs= z!vm4XabI2z;fe?{gAsl7ao`_^JS#i|_}M}I;$nCTG#F~o12Um-US^R1KoF@+q@5I+ zcQ8dRq_s8Bi-V5S439yl0T2Tpgc-SN)}wmDqiM9b5TeEA2Lh6k4nyz?59O_k8UTs# z)Tyy9IHQrX0r03LxnG5O%d;_31iV}fNldG&IY2CE9vZo8>IC7DvGy1_YQpqe(0mxR z1z$~OgB2c6vp_gYB&^WmAZJe?e+uNF1Hi<9-#|c`G#wI=g(YaCR2n-QD3LNgK(9!I zhtoQ2WP-Ye%>o=UXb=~9di61lc^Qtt$1PgrFeeHW4-~B^%K}=RBn;?|2Gqn`Wl_q) z0Qn}eFd)Ek7FK0Z$-*`?T3OgBi&_@eWigqB-LjaeTq=vjEId&bOIf&F7RwYCjw|*h0Veh%aO5@y zx`=h;gvT0^pi0}$C36eA#!W;YZAg5#kOw-ii*DI*F2MOONeoS#HvgIiMjAhz3I zoygLB9w>DKWullP4S8ymB-Lw(d#WMNjp8hI4ROaC^5Q5?s8@U^Jlv2ks+S!#R<`i5 z=lfP5eTgZr0^7Gm6OH0Daf;LzTS3}~6b2k3L_6UY@Pv3ugjNCv(Bc4EIxBN50@f=I z;^kOjUqJXOV3irIu$0r8wrMVuuBypcpn zu%U!bOhMGQnGKm9@CCu4-z;QM0;YOV6!%+aKj!lLd_nz){|DCxQ22gcum$F^lYE(<1%gu*XP5*mKq^f@uTVAuenUGf!ex}u%yCj} z=rkR14Z_=eN%-DrAPp=}WYCP!ynr5TLQFDDc{mkvre^0DKL8=0lcdiNQ``}9j=~Xz z8csQ}M6Ok=Uw?~sga9;o?@CXp0>HuM1+j0vTaX3>Ed4Y-Q?3i?s#9Fth1bIe{S8#IhWVswV^ z@WG8s1p)_15dlc-!iecO&IxiDhbDJv3)Ll(%u{V&ArqyIlpOc++51hX&w8t=KJO;s zkA}kN^HL>Vt-bn1V3(~S!wnu1dHSigZY1_LI``wwgI0TEwUv-SGERhoEUW5Jn}!(a zWM{kAgMO}cA6RCq-`(iMEx@6j`;SrdJxRm&DX`T<2}qj1Ou;uoTMI+nHNm}I)0F#h zdo>9G%U!E=H=v7xJ}m1-LfTK2eVJCzG2D0D8|w1R`y;wMqq!??L3A=zC6Y?NfDs!P z#ZndBC!oJOrLIo{0uQ>7SqAO(t=2lpLZszOT2R>4x#I9W{oIeJ{4&WWful`lGwt0^ z2wqP1_$RsmzOpn+|4I5o+`swnhS~Rs&lKW#vP@4H535qh8LMvX^Q@-`b2}vVi$&Vj zfc6uA2z?T1;;@Mc8G?bru}?c%;XOL%!6S$Ti7mbW0B*R64+G994*D&N$cU>5u&;+W zK|B|lFt86rhFBGNc`veYo*neD?y*Zid*nsV84sGVGm`z93+^IK@O0Jp 
zm*uNTCRg|xE!JnIPvdxX3)U4JgJ8ue=-ZdM$>}>)--QwjHWKK`gr39GaBdie_H-7B-vek*nG}w9l%|GxVm?sVD@PM%4=aQYn|<9(8H2o1!l zqJ?zauy#fSN=6dO5iAE~UrdW#k~J0NtURQO6KdK2(ZoA~7d3nrSFS zpk5U851JSYh&>Xae}SbBjR$iB2YEafTh|J0JCn4%oFv8Qv&1wtsOMEuygW}r7Zw%c z_0s{Eq2thB6px=^;#9uC#97#)S<^@NgDE?pN`1OMQ!nkCtxwkH4&@8E!c4Vv zY_>X8EuASnZP~E4a)6=`d*i|e3U*>(qyw7Qh%o#TAQtFIj0qMgRgeSbR79Y`Mrm3H zNJA{CD!?OAiUor((q6#f#x~68OrV(|b#$uUu&{jaUtq&03pL`suxFuAM0h*6IL8nm zUBO7rqzI)Iv@wo`uNCI7X%X>q)OhZJ3x}%2=u*bY0&54pFDb#$LZ1UI4Gc>VNj0NL z%Lhwo4V!=vl|wcV94Uq?ga@wBxEg{i^#a`lCyw!;Ex~jP9mY^03|q6LMpG1Vm!Kit zHqnE11ec3yNBW!EN~XzqlXTmp>M&ejVZ~@UE@id`JM}z;Vf^)Y{chZw+4Vkdls3C7 z?M`p)jH~I9Cor11Y%7?HL?{* z&D_%Mdk3fcDR=I1i#KAR6P7oOKZ-kG=||=v!~_?KzjyVZ88Bd3OZaz-6p* z?*3q~1%4g^z;PZaT{M4w8Mp9?CY>(GcO!VW<7EY}PCt5LdSsmE7_3T7P@u(I;_MwuP10Vb#CZ)ol z+v#o@ON|!z@(#uZMl(LyPNeh4aMpb@0jFr(Qf1i&G)SXPKgJXe)@@`b37faon7Wnm z$Hs>iGv$**w6*&lj2B_b*&b|Tu!C_0OnS;a$@`>(7{|z-KRO%t`dQ;_c2?NlhH{UC z+i?`8wZIy87$Soz`gvlabnDL5*2>MRmtVTlx_;%>t&1;Rv7nAmerh2NJkg`Q5X=T( zwi6T-*3ooxJpvz_^&t|Ov$zObtvC#Hh1But88zXFOV)l5nStt?SG>^i?7uZ+M}fQs zfg4yb*n@>|2s9Q@xymWxlE=*P0uLH?cS&AXH83>RN|0A4fC&Y5Ugv=*F34qdfIuS{ z;E^a`N0vNh%-sMOr!g{+9t^_E3-Xfh`4W6@HSp9GU-F!Wg`V04JOgXO`Q$tMO3 zZRn$A}IlA3CsLPczL6GSk4qG(eHiW*Ldp_OayVjoB-3 zS`950j(S~r2LM3L;X+tKP%^l z!8gkKM+=Y>e$@D~@P4%PrQp9Uf2)xDrPGrzf;z>XWnH$(dhDB=ALbv41L6qopqvx0 ziZ6-}#Y6E+T>LT@f18W%aO=;5d&gEt&}tQ9TEs)z$tn2Lcxrei@ytNVgB2ERBbk8F zl{JgTLcV|^l1W@Lk+9~H1%xN?)bTX%ED8ogTjrDvD=r1BG%wi1aw4yk_XVs{K`SrNcG#vp_IUzUzD4D+pH`4M*w5JH9-?Z_ zf|-Zm7g%)|eNjXU2s!(L2f6J*F3;E+8H_!xNf+`ODM8*K?Z*vLdt4)Z$7iZQ4nF4r z0sj2S7UTC$9%16S0|%M7Fg?e__17T>|I{%!f1MwN`z`(y+~49CF^;F-Jpy-|FEQ(H z@%#9^7jHkyTxD?;-q~*qhyxbwnWjn~fKdFjUaw;Ok^ z=9{m%*WJ&$*W5Q7t5fmRoiDuZ&Nc6}UvA#%+;H#2U#Pe1@77=6zOilT1witPSPHmY zw3LXAi*)nI(fdfkvh^at_arbr9m=0?~#}k$wPY0=ZS=R*ah`7pF7j*AsNv2@S&*gfh|UL^AYI z(jwP2p+ksi{(Kspvn`GgmbO^W)l^a6p6uOKd z^v&pUT}CBkNU2E|QG=0DO%agSAqfu?G4L863Yaln!qqLbhz>zr z<+_49T&M}P0v8~d*C=I71OWzQTDhE9JkpDU7srKSUceyJv=yHo_YRUNfp*f(Fb{D# 
zj0$E1EC66Q3CbECd@F(`&;Zm!_LsVfSWtq<))k^cp#(7Cn1sq%Pa+yj3VbSN62;au zuow$kI}-nsc>v)b!keuEO}dw^NLN z#9T8-O1NoMJ)fv$l8@X>+l>p1n|~Isj7&j|=Q8T{orJW#@}cVhw7&R zeOEt8FyZo8xCJ}SxQCl^j){y{6Ah!eJm?PLh{xIEJ_^TFhx9uzilMeLMQJ{z$?Xuu z+1T9Jc|GwbWJmkREk~HoCTLH4w~6tiI8ZsG9XF`0W|0Zfkr`IPgf=NR1v8Rse8z|o zCbT<}QsB$z0*BVTk2GhPGQ`(11IGXBcD`dbLv36V{ZhQqATTV`OQ&Y6W%}E zBQxa;rrUM5h^&`0t#zmpPkmA_ z{TRm3lJ3Nk>Ri%-Nw2)_y60*e1M<~EiV9|1guAufzmIFNuq?Ch)9sGaZl)1WV5~j-iy)2xE_kV{L z08hq1$F2eIb?Y}X9T?k6{=YPu-i`le{T?%!P-X#T!0?1Yg1fN5A8iYdqQjsz2d$g> z@oI|eJjwe%4iN)Mfw-4k2mMLgY3~gj0EeV3tW90~b}D2>kPrD51R3K3lR+5w!{EBc zrKV0Tdg564d1opy00(sDH68k+B`{{86d1OJFTuKlhx2X;cQkPe65uQu5ChsnohYKX z025@w zwn7NC@8DMFBCzx%t_I3*CB*G* zy$x)ihRe4bvYUjde$Oo`+QKGY>PsL+b20-~xaAyhMl z=-1ZJ(PO5&X5i8*{b4#=$3`I?V;95kDNGFuX~zjNC$$9z&QHG!ln%2XxCm2WP#l2f z0#_}_{YMEY(Rst2(6 zdZ8ijA#y*I5K#ArB}AfX)r7$lW4EZ$0X`-VSI~+E7Mq3PmQlDMOY{dFEG&{D43MKq zvY5ellk>0(b%gMh`Dpuq3jf zXjM^1Q_w4Lm}C36R!u_*pbIHn(3bj{C+95aX!;2`X$$`Rcat^Z8sjut>>pih@#kpaT~?wg1`%k%FG04lfo5zHYblxoK>U|8RdX-DllVp zMZLZw!KMU-#a7H0!?WQVP{IT>!rgHy-z({8CaU|s0M*k06kliZSfj$B zfutz=Vyz+P)Z!>=sB>DDQBGd*agR!0*~GON`$`S#1C`Pr5y3oH;T4ll&=LNA9c}k< zlWGZLf|kLmXQ&B_mWO3x5fzAPAN7_DB*(N;KK=%XCFuUyo7mmm(?!C{R_x}C%Z%@DPzLs^fzu)--@Atefj(j7^5zAMW?b!R= z{Wb6IH~qh^#&iEn?TtTR|Ds#`B`;^WR%j{9iY(i*a^mvD<%=s2mlnfzupCD@wsMqK zwuPx}+twG>HqLuuEWL81dST=0`u5UVYk6gL{e`7BTALeN-i6K8?enYK!OH3@OPhqR z=*!F7FFKd6t_A!{y~3;OTgw+)OT;O4o&8}%&cZtBZq@~s^*evy_}1U~(K{rTE@iGP zjYGGz{~XIwUSvh8tYTY5Hf^LUh9c{>*oo9_vG!v-bVA+q)ZmdUQD#r*N)X$T9XXLi z7`m0@plig1Wz7u%5>$$y7fkz?y_<=nJ#u2gR1n#y_BZOu^c zRJ&@sb`m4fR4>;p*^O^fvPU`IrO?`;#opn!V&$$Lwy1YE{EymyBRiyMMAqRG2V*O; z51**_rIL=3ggd3$L#pvK)%f+PB;8J_ZXs1EO;y?{m9=B41ycEGDu2gR-`uIJM@gmA zRC>o$e{-i)&yvbcQ`w}7x_y}PiS^b_sa~M-lti~<6)^pJ&Zo5hP47hbRex{vk9{5g6aP2u{Le&w;VV(D`16r%)m2f|)TEkH zdl)B*OjV2{##Xj`+m5&$*Rvd7TRIop9wNZ?sKK~MiO?|fgrv1*Tw$+aPX<>b2r)+@ zaW`Xej|JkMj>QaY>Pv;itQ3lrFgk==?H(nj+Sd^Gejm3zI7pt(gjKSfy~87x&X4)F zr*)}T7puH$T&#iGfLIe&6RS}js1_^Ds#_^n;!3_!sN^bMMOR9dTD4wPp6@GP*}2G8 
zf#tg**Njtp#&UALi4tjooz%~FBZsf-lNh8CpLRGNy`R1-qB2V=!iA{H&pA2cqCi9| zd5Smn@a(_}B!*JCfPW?{)%HfR5Ja8HPh4qW&d({To_aadBFiICcxE5U3zWBT!N}#X zZJ%1c;+h@6Axs>JDj%+3%eSvoP7pU^`>iSl2A#2dCmDYvL*78FQQ%^ZNRa^oRFX zGQPOEaXG88y}Gh%b^Yd<^@sOoreC6lvG`><7WyNUF0V3mW{tbBe8pLBy_(xN|5EG1 zw!3v@ZFSq-maY%CUt5xfLYw&N>dMmg#?tEgdTXT}!eh+C3G;AhA5)3V1Z*F(v9-*vOq*nx3&}FF66d%q zN$`-l+a?up64%U_zArOtk~_-4PCRAHoaqPRS8~eLw!d%Z#5?TqMz%_Hnfgqwl7;yB zwvSgBhSZ)26$TNemy0odW0q7N zPAFn(t7>8%EQq^Nq%4&m9I|Y;nMbPIIV0okU}Q|bie=+j5-Vrdt6Drqs)$`}PgrHK zrbkAxMoXiWk;-tz4jYv)tc8t|A9`WEQVHv&YM2Y%aIB)jW~o^jD;2__(okivGFV~? z(CM^*JeG+9iO*E%w5JUbx#H^}z&oN|#4{q7j%2FHW)o(DGy~GI=!9#Gw6|d)iH@*O zzcYtPPRkv~$xCr0VYYJ8=y+0annLCbS&N7G5iWC*)TSG(2xkI-?WABa+Y=5kqf_a+ z{K?H$D}DfZvbnyz_Q=LcOP_5mU%YM_SYBCai{)Bd7PHE~xODz?q|Zw0waPV~ms+p) zi0@{P`0h*gws#NtmDbkw=Emzwt;<)oU)L*}8&^sfDS3&V#}+|L6c`u}$$#{P)^;bv z6lf|bwIyD}6w%r=+mwq1gsy4%(mGAw@>jRg67o3KS{KRX#nrVHhg3Q8d8M@#c6|%R zd3l4zJw~?lHSYGF6kc^9>_9 zorq2iIciz;heI=C#f}OcFE`0fku9WT?RlZoh+Qc`=C@viOY^}6F`_3~>;cwN1ORh(*M)(Bp1w=TLgg@!Oq(-6vJ6@lcog^_L) z&+myRGkD{hN@jL1cU;9WxzTgX#8_rx6h^EwG3N%tyvM>@zWHnH)=ih4xbiF*fN47) z#Yq%e^D=*@M4Hg2!hEOOC-gy=-Kp}FP&MBmkw=zTY9Nxi-Wlk`gjvv&d ze$;(JQrpvqPegc(-c}wse4+t!bgC4&hgIWXTnG!o*m^j0H*(ika1C zXs7&7cQ+->hm&O8=CDX3iEQOMEhZt0vvxxGPX-Q4RyM#tDZMATiWeOy)eY5zhUD{M zE-5n1p`g&6Ajbs3Li15BDa9;1G(nC~N3tm24iV!>dM&AR%AU{a83m?sh{@}Q>+)!{(+Z$L$nofk37 z4%sYuYexf{m96nGOKA64U~^1hIl>Dp%wt>UTo&&lBUz2xN!>1y!7j4Aoh7S6KE$lH zRMH@L7$bXvnM|fKn-#vTWId~FJI9Vr zCLjf?Q4YtPEQkZfY9wPSvSRC7_h&l|&Sb{z@sB9yOX@RP|FZiN%KhJRUk$vEmVUk9 z|Gly2b6?)|(<=N2x)_T;~D-@d{R-9Kc_S+`oZS|=#(o3m2$EhMV4YACXtknnulpZ%tMa}>>f!KSLV zD@Rq@_h^ti;8{7=$09I&%QuHuAycKtK~*Y&*41La5psZLS?__iSntjk#Clh1JzX?kYbDCCth1G>SeMws zV69f`mUUOdV%k-tIb*IoB<^)%zH-;B z(stvWRSw^p@0`Ru`q&}NW2eM?=rG$*``lw*MMcj`WJUNXvkOdXStPb$Ti_m(C zXm#6!Z`7bYLQ@668$(HNVj5HBI$_3B43ZQj->wJe z!QC8+CJqt0o>ZnlI$KQ3IJ-rfAm)fHgS7CTa_nw7-B_}bPV4FfouqQkO5M@`dcf?` zea;=UYs_@R+-)iq4Cpg2Dzle&9KWJ`4i4HCA(u`^aUtV#b<+(|p|2@`A~H{M(3xBI 
z-$fChI7|sd?1YgM9f;VWP>SQ9IL-duoB#-s(oYT!6m>G4MpYsL{EGRaFX=$>X!nBd zB}Uq#rkkmsU!;!G?`iKtmk~|^OtsRSQ{qTYnqv-A)mRQ3(y~nP3^(abPmZ#4ewBX} zGL=(mx=So4J?Y3Al8a&q%eBALQ;bd%%r67-D*?oEKad6IaDXPCbXN2m-AitpH55ri?L31Pa&7J+dQYYL?j@qLtLt1KnA?fmqgS?9H`d)pH&AUbL$h`~)SMA%AsQq~b zlE%gjw=B`LjxIu{ao^bm(Ms#e#@6aKH#%D_PAJolm#=PY%U92NWwq~%vpN_`w=S=4 zSH(!1+97qh$1kO@OZ@fiQtP!VtDCK*?bXZVy?ps*2cUgdS1$Kh>)UlX6E!6tSvtS5 zacPM>wy$pO+SwbHB*yAW>qg;lfC(>2`pe6km$0sEu3l&v4@0hY32)=(tg^Z;*IY|$ ztDWxXJ*15}E#1hj&2pT~Qh4X2*5iA9W4+bBU^7x;$=sBc)>b*1-MG?P&zPzAg3Mpr z*ro+>+;*7WaW`J&O0aCCKyR;B zQlB=xObSm%$Kp$&AlOMIT9ps(gW?%wxQ>Rjmwu;x3(Dh zh17%bZ>Sq=BhwA#DV>(H?0nl;!);4iHs@y&(n@)oD^*I&5ys4!*D zoivt;SGP8Md2+{l+I;8QR+`nl#5K+RZd~HB$GzzvE7UCf+nw8IhbNA|(+NJ;VCK)^ zFfVgO9hcmF%AeyGQU@+M5|O{I>K5u$99GyIRj@9}>h%_@(93>h)X#7F`8S=XocBBL z;3h!)UUVK+FZq>mXTv$cjXZ*`=G zT-@$Z9W`gqIdjqt`#5i`a;PEaSkevb?>WGjrcY9bTt=u`fgAIT5Y{BeVsga9^`ysX zlANY-(M) zdDG~$#Y7L#BeJYp5APJ+E@wtm9vk(|Wu7a4oKLFEPUTuFr)>lS4d*BW`a#Dd|C}zo)2J(-xm{ zJQj}!@u&$M*Zafq(B2cf?tvh7BUk@Ck;1X067LFkm2^@~YPER4I0we{7i@8b14&&@ z`b#dC6+t|RD_0AhJ^DHKQJ+sJkC#_7F_K1D4+}}t{XgWayue9h`EXng%hX%f%D2Wt zdLtU@-VVe=VL2MA$E9#2EKTV*qf#73We)xGap3+1D(V zCf5!@>U|5TxD-i?8PUxsDGd8 z=iG154;`+UfSGdolXf@|*V7FWY+vTI!bcNL|*@BR@kGCk@?QgAXJ){+So?kei2 zmP{)5M`X7xpAcquT;xKhSab)%QJq9W!U6piA$&BROsRN0^rEq3Pno06`EWeh8;>a> zj`#8iqyOv=boiJ!8W zhhVDVa5!VCs&T?p-%{eD&Am(o&$OB98TVm7mRqBKrlNOYs$WoHBg{|f3-CwIZ9zBs z6QYJG3mk5;qY5JhQ(j1j`98I|CIy8b#`;JJUJp9|2O(+rSmTxs{kn2ZtOg5ToAM#f;({fD^v zxS%3jFbo$|gbV(?oSw)hJ=wzrjV`b0qmlIffc~%$EE*AhfMU^*;Rk;{9DyH(=+T}e zW5OA2D$4}=@Mn8C171j`4Oax2v_aptEp6Zmq|H9V6(Wen^iKkq&yTplj2BVpyohWvxW;xsRGnxNb`sB+oF)801%tnD{jn#$7rXG4Zc=WRTzPFvvgdFo+a^ znCNGaZ*mFQWspJ?M+L-$bWgV&eQTRVzR!q^k>r4|h}UKjL`KRY*Cdjw=yZx1;Ofh= z9y!OAj>zP}o}2@>EYWf@nKJ$LOpTMe?ixPJ!Xle`r1PN5JgAum=b*lqcOs8V%zQF0 zlWbCel^%~26RFyxZ+B02vzO%u7cG+5Vfk?onMJbLL6^dpgvlYJ)t85%7q`(^upz0V z$GC=4%uaG~NG+o(J=k%d&rW%6O6<8pf|(@>N~j`|}-evVGMR zhq|Jv6dkGva 
zoE#h1t5gFgjZ2c_?j`AFnJ;3v0L8cD_9(s7d(z)9d;LzgLCOXL(YSx?_}wL>Nj0L@~8l3utX??4DvXt6iUfm4MeAmvnm9X`)nppM6c)mR66`T zNOiZ6w4Fl$dx%y6+NDX%*e(>4yE-{Uog7da2wkrz`ULIjvZjioa`zV3CcHGE|5i6# zNHw8(2`tK>1QJhi)a`b%MzC3KRMd zl>4oo1d;T8JcWdp@dKm84dHZIm-eO2O1P#@MFxaI`N*s6woBr!MsyR}>ljrrSkx&* zF6yj)4K>jdc3EPvMBXWu5J@824HI7~a@vUxpeS945zfU{?2Dh-Ltc z946y3b<6NLiLT2S+ZuWW^_A4`lngUfE~4O&({e)|aJ_v9+d-Ge;aY!h?CYcMo!w+P zeUglscieli9hrZk2w{LWLAlD>I;Wp0cvKd{OeSawz&N5QFymgpjTVRr=Tbiv*#(cg zec3cVb1RuRG8Lo2U?-)92WW>)z!mTh`QdM%>Y3N8xCD(kg~UW>zcJ~=b4m&CqL zRt_F6h7^51ESp+Jzb^2eFG` z8VyTy67pKVrw~aeMLir8mJA1dAQe3&6M-t_^dt%&O?q!>kTX^oLkBkD1#;BA2fm7%i7nGe(Jq z)@UnLcI*8xj||s2VIm{!wN#tR*KLx6JI!%+#CemeQO?QbaskSSg&c~CV4&n0(E`{w zm!5M4bKzVx7tiJA3UkG|(p-7Y`g%T7IYI>tJ+1>z^0SIbdd&TR{eLO%|91YV_Wu#! zhR>p?^pgmNUkBXqcY+^t^1sI#_=mA=HIx9`_NaYo=BB_lkII0p^w58C(T1{i ziw;m-j@X7p3z)eIv|3Kis-^@>Yt6x-VA)_m!LJUFMBMWZZhK^mJe{GMV>we+&2sh+xB%TI1k9jIgTrE#CwGfA zFgPUElqJxH;pSko(imy_jasAFC^gEBfGw8Ss5Y9-p(YC#&j8&7&*q`G2$aC(4q0#u zH$c6ynCd~jrJy(NsU?Nr3uM%pi|w{Y?`IF>jhzJ}6m06xJHWTLTdvuˈp`w;Dcw~u(su6h5*!rk&2tjk=>$W8Ef3H+7; zuluhn0TCS>9ikgH=Xzt*clUDXAT}b~ZV!@%)ez7^6;0Q2J!XkjZZLH-XA7tf zx#DmpSF*|+mX%a(g7teb;m{A|)DRo?z=3q`k;Ifz>(JAoBzk;-tttz!NE8&o`hthZeJ8F}{Mq1D7X|I+JG? zmnW*(92&|lvLBZxYS^8CUR<7N8A~QTxV%EYm*Dbb3nnVxMu{7|BFiS$*C=~mgUdsL zLkA)&yWuo+17zM;wt6P7efGYeP|mNbk9+#J{NGpZ*TUZ}dEY<$F9!T?9C|+Y8;3t) zhre~p|6)f!a=qOBmajlpDcWB^FZ-mZ8~Vop0rL`o9g}QD@y&cLV@=-(2$R! 
zH2XgLrL{L{bglUxO*$Lbz(CeTD$#?_N(Z~TE(_4y0OX{GG<$U79DFQJoIbJ z%$gEfjhQSuI<=nBsf}d#IigeRYu^ftwk->Iqf;9Po)r&jD~}#+VIZAPOAY4J zUnIKds(y`mPV8S$23KhPN;=(I_j%mvJrrOZhve4Xu~D(TD{rjZ3&uJT7bWRE`I1<7 z7L0XA!Fb+ZHc=j^80)XAswnbWzpAP-nOk2}19h=~S`A2LtUsfg=KIg8rup7rKE-=I zwJr~vukTYJ2l4&50&9rwDBBo%HZNQ;|nz zjCFXwv3^9&9&o%l#d`UUJH^`N+$Glj)cQ-xG1|eGQtL0YKR?m7-@N-C%X&*Cla}=tjrk#Ce%hGJ z33dN4hY1|UNsQ076CF(suvU3vljYHzF%Pb9;C}oKQ5b&WjjNbTAEx12oxdb_#H~x` zFt?5QYWPmv?|DOfKB}9B(aA-CS7Kad`z9g8hz%3WuN|#x*zww+hRWZ zEGsVig%7cevFxj`hy7~!E}VZ=ZHv#>)iN7%=aqAlmh*E;%)e&LpI7IuT6*^7O;J3) zxQKb-WrU@EufK-*6KWC5eTV-f>c{8bLx8vP3f1s_Qr$ z@ZI1AVt>Bl`yNMp2T@+a5_?4GgmqoK%r^T9o{WYtoW~} zdrw$}v5%Tqh1pY5Okw_2DW;H|k{k=KhEHP^e?UFztGM`MihB8_FQ`XOSRYU)-%=;< zS-5!Sr8C=S!-Wqi@58DTEL?ibTX^BI3yT-dY&`W~@IiI*sl}(xytw${nRsD+@zUax ziyP0apSg7A$uk>E8_z$zxN-jJGlL7m3rnr~!V8NpE#4O_w;l*i1y|2LH~Grhm(RX) z@rlLOnbzXu!oh{t&OSf+J?i8e=iWHGdG?{;;o#n2B_p2doiQ4n|kp*$kZx+ zs48;elzMyqkcw*D*>mWRaAT8$hhNBvR%3*YaFcJ8Dv4s8sek*!Zs);Px$RCaR@qO82BDjY&(w zPPS5$v_Ujf+C}~tqJDi{CTUy~2aHrZXX9x)s<&cxPi4B!EJv;_(rWFHSh~5VSv~dW zq>>7QyC$)86E_c8BT?lry5H3_2wnAqV*JJ`}Le3h<>|^EhTnJ zA#;?)^RR?y>q-zi8Z;Mr+LXCPW4udP`X6m@*=}`O&yh+X%GQpkw~O|+sE@ZvM%rqo zJ}#vZs}n}Hut1PQy*{lTHwA{`Fmvh%sJe2EGA@3So&@}pD*FMOKH-lx+f$=K^PQ?_ z6R`mc&>~?6c6egh#*_{xFwMkdgi~@N%@7Wkm}Web{f6t;0g*NQGKESG$``e6B7#id zuqH2F?Q%g6%2&>?3Nd7Kh>x2l*&{q)TP}&Qc1ShDYc{+A&7ZTpwscDA@aTeWX!b3v;J!<*_%d*0ZluUPL=Aj z6eLbF&xKN=HYM(r{r#V`;eGAixz(bTZ#vk6z4v{mg^31#+Q~bYW{T=7vbS-*sc&i+ zM(Szz&Q7Vh$&+5qSWos5;szx#1Ry}h9aK+wL|9gM+pr5!BvyxF!rCbw0MCp@fht|o z{YE;chKP%-p*uT~Os0Na13{BZN0yI#NST*yk|B0peB_T~8xe<`pvuS&Qt6Sl1t38{ zSSt9y5zx3=J0#7FOCFOdXb1mm@#_882V6)~w1p<3Su)Sk_uv-F3u|kSiXQydsZ2fpI-gS; zm#?6eyuO`1we)R44)N(!fqX$;>ft4(Uc^J~VbMWut(a0DZf!5~PQ&usghBKIAFhKs z!~%PGRFs8J@#4_i$35hqYCkMwYWwydh`RnH?0;e+UX>TFZumsXx4}d-F7O!0`qEX= zl@@qFTYG$~cwuc7?(681XD5Xzd?ErwvEE7p`_B%qfl}17qeOX8>oVH%D@&p;KDe_t zr;3v|2r+8%o2~7uo9oxZGg>CorA9lg(Maz}kez2<>%X)kxZ}Hi$T`qHJ11SjPnqlZh$Yj{sp-|Ip9PKq~?!{I*AO10|-TH^Yx-}Z*@^k8@xlb 
z$`ex^gvE_8Dg)P{R%rD0uIqh`gvk192V_hAda#S+G`@p(Usf3+CNKkDAVND~V~SU< zo?lzNAh?(Ht&6SAKPgsb>_#A(CG&#cpBPHh+-j{|1ewLULEhq(x1*R3fWzdrTD;h` z8E?JFt5Qqp1B2#0D803Dx%GAsIMX-8=A>w-j3zfm>h$K&kKyUhAw~4$QvgpkFx&W^ z{`u|Op?*Sx`Z4&o8vua(jFN8$`jbAq*sjqvmq!`=hsTUF=i5O74P1i^VnPSMHhitG zfSuaqrr4p}D?sNqHoGVxgU#xP5Ou!{z*E@WybAfJ2`G{~68Y=ifc@z}i$u9p{(3=1 zqEjb-ppsJuu1tBw@#WsdC-yILz7>f7Tgth`Symrb?uQhwnSDw*ivXDX>wH0vQ;-qh zU*O`3*L>R-dqSq3Ph~DZee=SZx&55vd2j9(Lv>7*-|SqQf(YU%FEx)k6^c7N{fXML z+$$Q~l;mBFRrx)(JX`k=SBTXxMP_lzCxwxF`5oMaSymAp>lEe0Q%){#6?v%dfgGAI ze?%o{F35wTlEPeudPCl@=xE7fqT-oCnk2nWMDr2#QL=penSxGYZ)C!}dW42WaT--> z(Y{7!I<(8rbo8U9^ng1k+TG=86nWxFJ?2izXF+rz!BS1Tv+{{Xo?Ct|+UHUEeWJ%H zA08iXf?YySwL-q4-i=}%sZjlLDppeDu`V|$$v-Xvo7kO(;;vBHDwVD!1IJO+Q3P%em4=}1RqogSaJs(ZS<4;kl@gDF@# zkAj_K7pmZJl_B-GUH)UH*U1Fw(XcYdZkS?WDb@}&A9d-eWhVq(?RH@vPuG`MVXqN% ziqTGlQ9U3p0R7O7+h@Q88r6Eyb6vgtyI7j2;rar3FvW=`t`sL4of;^dG*CqROJ6_} z*U&oV^{#j>5g1@DM&a|Q8xNegoharn-W?9iCo|fBEO_yZL%@18lk5j}HlNJK2jjy* zJb&VHCwkLFAJ>P&!=c_2`|d3PB=hwL9bgX!lLPUg@KDK1=92kZyw^Anj_cpI#TD*N z4#i{P7&@i#Xb|tm71Tm^kA9kK&p7WskJ|QQc!sqJRN3?TbKHCFH{(Y9Ogc~(VC5hy?Qqn@ zDZeY82q)+*pkU9&v-)RLG~1(oI~z_!vvtsMbK&@uW|A0>hoTAKYNPR>`@7U}KAiJJ zEi?!0@ZD*SXg>2Yo4^jeS`&2GVCwyhfZVcz4rrbvJJ3_mfp9oFPzU8GQBbCSCZ3E& z!@)*88qI}6jTmKiWQggD!Q@ChlAi$~<;44pR1oj{zHl-eu7heB#m|oRCAX-29Wd9e zqAx1~N7U}6kEsyFG{Q;XBcA@Uo!^~BFu~DaG!zXtqRHgw>D#0I`sZvI&izF?tRq<_ zVDv!$6Fb}+?`aeFSUeR@CCRw{U+rkh(BJ(HT$GK9ywoo_9_~qQSK*(4sT?&M?~xAIW^BA)*tpSV<1E77XgH0~&(}lY3@L@F-?H;ErVZDpOg$9M0CW|Oo(2Tt z!w(?YdN~>psQy_yoI)n1v^W+|GDP6$FW8I)B-)hw>J;LRN?Fxg!<-co3d!j+Z z<$_Gz3#WVf8+J4Smq+fuw*`0z@({yr!sTy%s52Mr0ozzl1`LM>ee=MC5$xLF^|E=u za5(e8srbN-lfYa!&m0NT?I6<;Rrq%d*YE%wj`nny!^H`Q|E4XThQmk0QQ^Ol08SVc z4%d71Irnjy1fDS*J_-+{lfdWUaESyCVH9SC<8~P?kr>gg)~K>CIN-qI~tffLK#Q50fhR&YeoZ@^Jss2H0;x#GxGgM?Xc&O zP$S=eB^?bZ-8!S;XFH?e%bn36)j+8sHB3xEzN7PC~H2`;rHc&H=yh!1W)m7q5j5tJB6F zecFAbFUQ<8DI8Kfh(It+@CHq;F^D2Rc_=P`hm435rMdF1ZAtP?&PD~^29U*e(H^0F zA5^c0e)%z;n|49I%9i6@f~;hv&ocvwz@Zpk?7^_Q>`ZtY`5GGJs}u2}WAX^G7@!h) 
zC*-Ru$g6ZKXC8UYqtcS&$E#?#Am;9gi{O|*W!d}}B#&0YWRaTk>T)777Ahx?k+Tu< zyhaQ*Eia%Gwt3!hFA)?Em}>cHk9BG%Z(#??6UmeA6(N$WO7gHQK+NP^6x*Z17uS)V z76c*i1yYs>LqTqqQfqNQkr3G*NGlVpQD1a^B%}hh_epI8}$P-@pr0sqV zfSaKwtdNHly%joqZ4B^|06JB9Tu@_u7+`)}juwW< zm7oU$OtO9qumBVV0}PyyK{919z|R`^F9wT{XJgl_0O%z*Ih}$&`Smiv2PeSgj67m@ z{=7X!@BM&EQNSz{4GMUfMUa!6&rrY=E-2sw0;Xf$bO5vq3YgVR9}3tJ6tFBCQxq`! zpbQ1PhyMCO^%)tbW+TI|5YTfeOrS!~wQ<1ho~zJv13jCSSeAVk#`X6qajeK9w}cA} zEuB~fzdFv?%Dap(DSF+!-z!S=haMaVF{OF3729CmtqN*VhNXG3729SF74-0o|VeOQfyWRWgp0o z9LBOj{vkp3$Ff4U_tHO~Ry?ZG?jN3BH}Xw5tKR9K&H(Q&P2y(9GadwKJQo<|yDF$0qxc8^7<^MZka;Kq;st%d{iZz-&XgOmE% zsLCp{#B`U!7$3zp{L0$yp18sS`Q7atLQbH0$f?BBHYslXBDUdK7Mo38l&C`msG;91 z)PxGPlnR22g$i|EemL!ZSUwQUCPs1?3o^J^kRgW!89Xe=;A26C01GmNSdf9`6@r2z z+4jJ7<2=CS{OQY4vAZH*tumQbnK=QX{HkJ}C>WwlLzEv!A?ogC^$Gy)a2PygUScc26A1L7WqRw9@K;;^+H{CJGI^Vb@zSJv=VcU{v?ZAAyd7VOg#A_WcsWjQ`9suAX7ah zQ#~b9Jtb2;B~v{mljH@NB)7LsCeD&oN~6EaB}kh_`=HTh)I}psyb~Zr(m$pYUWiIs zhb;}PD zm`I#|r08=_Cp3-4D2h=K!+_EA;^I6|)0NBh8FnV$vRKGhq?K;ZWq2)seQAL>EAU+y5^FU#~VK=x(H3#U_FEzePP-5E6e;D&*< zPqH7-rza&IcM}{&^uyYpOAB|GnYoqATQOAhz_)E0{c~;8q^D>a&r9vr=HNeTwQ13w zHmKjFw`q4@)8xk@(x%A|N%S?1^~~F9nq={|nl=ZiKChU=N$^668?sJ+!S?dhki4w` zcW01ySPq6Yyx4`j`~8r2;vDOPyz|oo){n3%ka8VpPGV?(xt*L~VS|AdWGx ztYeHb>lh=>I#v>+ECvrjSjTw&!8%rxpOG4<@td!t;j-+QYahv+fqjcOS%|Vhv2V@X ze$BoubLVRIQE>~0yU98+gJiwVk1D3Q0T&d!mt;YW?c-25WO9hsY~@66mF z{O-!!Va>ifa|bp1p0=CcY<${20j!cMGARH9`C^HDu|&RDB3~@J2?P0JiF~n)eBnVI zhCYv=n~NldR-gkf;Ac@$Pj&+*u+5^MUZ-c-BL_ae?Ex6d=ao(#I?wt1P^!g+uv`qg z3%P~TLV2OGP+h1k3@p?anhS#qLm-z%7DgAw7IrO+FMx@%ey;P{05_J5hkeeGuPWYw z8+Sio^Aoz?az5qwZ+Xw>{v~gg{9CUY|99`d>imC>eO?NPtb&^0;Ex|lL?PW$tkwiU z33`&>BM}58*YtFW=D7{|-4UyvLLmoIt06x$!o3}@1BkE~)7%}8iZSW)a_Te&GjiSr zT*`5sSg~KU*hk<$&hH{BiKZkPKW*3qO*c5e&wAKH#vEygdykk-vCM6yQ?0UGaB9Zf zRp9>7*;~TY>F+7({0Kh^#xnz>Rk4Og#0sOqpvj)R5ex*CTDexL4G&jqwc5z=(D3MR zJ;?ExfYAWwLK&&v{RfW9B2X(hx5r!D3Lzv25)dHb;1Wzc8 zyH9hZI~437F$1fbs)CEAzsWqc<);_;%|mh}69HJHSQT-}`vyQ$P>nY)9SD|!coJfg 
zNHW4Y?+Ij1;;G)}>3j9F`&6+5qouvL{6fp9TL({OZ^*Y*ox5+R@9?XMnVxR1hHmGF zZL{~w27to!+V%Khcwt z7mm~4Cydv69@O@)`$0o31$3XO&YSX5Ex^^4qWO~i+Ap4qPT|d-u zD0&R)v3zuIkxe*z)v#Pl6lQ^WhU+O&266YY<4xr0t+oeJ&8+Y%sp5*sxvebl-OCvf zVpaYc8z%EZX8brAHyCveo;Ytr1%rnUdBw=$_qlLUpHQ);jpF7B!NQwAj-tBzv`G|6 zb`=`Z+U_=FH-}9#ft(TPud=fjEP`3JY zPKS9TSWYH9Zs)iitCyqGMRtgF#j{(k`K3J51W4)rC0fl#By^P>n`wir>yY(uOo55K zWEpj$f=s=other vector into the lhspublic fun append<Element>(lhs: &mut vector<Element>, mut other: vector<Element>) { other.reverse(); - while (!other.is_empty()) lhs.push_back(other.pop_back()); + while (other.length() != 0) lhs.push_back(other.pop_back()); other.destroy_empty(); } @@ -504,7 +504,7 @@ Aborts if i is out of bounds.

public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element {
-    assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS);
+    assert!(v.length() != 0, EINDEX_OUT_OF_BOUNDS);
     let last_idx = v.length() - 1;
     v.swap(i, last_idx);
     v.pop_back()
diff --git a/crates/sui-framework/docs/sui-framework/bls12381.md b/crates/sui-framework/docs/sui-framework/bls12381.md
index 5950e23b99f8f..e1bb511793f89 100644
--- a/crates/sui-framework/docs/sui-framework/bls12381.md
+++ b/crates/sui-framework/docs/sui-framework/bls12381.md
@@ -9,6 +9,7 @@ Group operations of BLS12-381.
 -  [Struct `G1`](#0x2_bls12381_G1)
 -  [Struct `G2`](#0x2_bls12381_G2)
 -  [Struct `GT`](#0x2_bls12381_GT)
+-  [Struct `UncompressedG1`](#0x2_bls12381_UncompressedG1)
 -  [Constants](#@Constants_0)
 -  [Function `bls12381_min_sig_verify`](#0x2_bls12381_bls12381_min_sig_verify)
 -  [Function `bls12381_min_pk_verify`](#0x2_bls12381_bls12381_min_pk_verify)
@@ -32,6 +33,7 @@ Group operations of BLS12-381.
 -  [Function `g1_neg`](#0x2_bls12381_g1_neg)
 -  [Function `hash_to_g1`](#0x2_bls12381_hash_to_g1)
 -  [Function `g1_multi_scalar_multiplication`](#0x2_bls12381_g1_multi_scalar_multiplication)
+-  [Function `g1_to_uncompressed_g1`](#0x2_bls12381_g1_to_uncompressed_g1)
 -  [Function `g2_from_bytes`](#0x2_bls12381_g2_from_bytes)
 -  [Function `g2_identity`](#0x2_bls12381_g2_identity)
 -  [Function `g2_generator`](#0x2_bls12381_g2_generator)
@@ -50,6 +52,8 @@ Group operations of BLS12-381.
 -  [Function `gt_div`](#0x2_bls12381_gt_div)
 -  [Function `gt_neg`](#0x2_bls12381_gt_neg)
 -  [Function `pairing`](#0x2_bls12381_pairing)
+-  [Function `uncompressed_g1_to_g1`](#0x2_bls12381_uncompressed_g1_to_g1)
+-  [Function `uncompressed_g1_sum`](#0x2_bls12381_uncompressed_g1_sum)
 
 
 
use 0x2::group_ops;
@@ -149,6 +153,33 @@ Group operations of BLS12-381.
 
 
 
+
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `UncompressedG1` + + + +
struct UncompressedG1
+
+ + +
Fields @@ -278,6 +309,15 @@ Group operations of BLS12-381. + + + + +
const UNCOMPRESSED_G1_TYPE: u8 = 4;
+
+ + + ## Function `bls12381_min_sig_verify` @@ -835,6 +875,31 @@ Aborts with EInputTooLong if the vectors are larger than 32 (may in +
+ + + +## Function `g1_to_uncompressed_g1` + +Convert an Element<G1> to uncompressed form. + + +
public fun g1_to_uncompressed_g1(e: &group_ops::Element<bls12381::G1>): group_ops::Element<bls12381::UncompressedG1>
+
+ + + +
+Implementation + + +
public fun g1_to_uncompressed_g1(e: &Element<G1>): Element<UncompressedG1> {
+    group_ops::convert(G1_TYPE, UNCOMPRESSED_G1_TYPE, e)
+}
+
+ + +
@@ -1280,4 +1345,56 @@ Returns e2 / e1, fails if scalar is zero. + + + + +## Function `uncompressed_g1_to_g1` + +UncompressedG1 group operations /// +Create a Element<G1> from its uncompressed form. + + +
public fun uncompressed_g1_to_g1(e: &group_ops::Element<bls12381::UncompressedG1>): group_ops::Element<bls12381::G1>
+
+ + + +
+Implementation + + +
public fun uncompressed_g1_to_g1(e: &Element<UncompressedG1>): Element<G1> {
+    group_ops::convert(UNCOMPRESSED_G1_TYPE, G1_TYPE, e)
+}
+
+ + + +
+ + + +## Function `uncompressed_g1_sum` + +Compute the sum of a list of uncompressed elements. +This is significantly faster and cheaper than summing the elements. + + +
public fun uncompressed_g1_sum(terms: &vector<group_ops::Element<bls12381::UncompressedG1>>): group_ops::Element<bls12381::UncompressedG1>
+
+ + + +
+Implementation + + +
public fun uncompressed_g1_sum(terms: &vector<Element<UncompressedG1>>): Element<UncompressedG1> {
+    group_ops::sum(UNCOMPRESSED_G1_TYPE, terms)
+}
+
+ + +
diff --git a/crates/sui-framework/docs/sui-framework/group_ops.md b/crates/sui-framework/docs/sui-framework/group_ops.md index de2f6a81a5828..84cbd99078404 100644 --- a/crates/sui-framework/docs/sui-framework/group_ops.md +++ b/crates/sui-framework/docs/sui-framework/group_ops.md @@ -17,6 +17,8 @@ Generic Move and native functions for group operations. - [Function `hash_to`](#0x2_group_ops_hash_to) - [Function `multi_scalar_multiplication`](#0x2_group_ops_multi_scalar_multiplication) - [Function `pairing`](#0x2_group_ops_pairing) +- [Function `convert`](#0x2_group_ops_convert) +- [Function `sum`](#0x2_group_ops_sum) - [Function `internal_validate`](#0x2_group_ops_internal_validate) - [Function `internal_add`](#0x2_group_ops_internal_add) - [Function `internal_sub`](#0x2_group_ops_internal_sub) @@ -25,6 +27,8 @@ Generic Move and native functions for group operations. - [Function `internal_hash_to`](#0x2_group_ops_internal_hash_to) - [Function `internal_multi_scalar_mul`](#0x2_group_ops_internal_multi_scalar_mul) - [Function `internal_pairing`](#0x2_group_ops_internal_pairing) +- [Function `internal_convert`](#0x2_group_ops_internal_convert) +- [Function `internal_sum`](#0x2_group_ops_internal_sum) - [Function `set_as_prefix`](#0x2_group_ops_set_as_prefix) @@ -364,6 +368,54 @@ Aborts with EInputTooLo + + + + +## Function `convert` + + + +
public(friend) fun convert<From, To>(from_type_: u8, to_type_: u8, e: &group_ops::Element<From>): group_ops::Element<To>
+
+ + + +
+Implementation + + +
public(package) fun convert<From, To>(from_type_: u8, to_type_: u8, e: &Element<From>): Element<To> {
+    Element<To> { bytes: internal_convert(from_type_, to_type_, &e.bytes) }
+}
+
+ + + +
+ + + +## Function `sum` + + + +
public(friend) fun sum<G>(type_: u8, terms: &vector<group_ops::Element<G>>): group_ops::Element<G>
+
+ + + +
+Implementation + + +
public(package) fun sum<G>(type_: u8, terms: &vector<Element<G>>): Element<G> {
+    Element<G> { bytes: internal_sum(type_, &(*terms).map!(|x| x.bytes)) }
+}
+
+ + +
@@ -544,6 +596,50 @@ Aborts with EInputTooLo + + + + +## Function `internal_convert` + + + +
fun internal_convert(from_type_: u8, to_type_: u8, e: &vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun internal_convert(from_type_: u8, to_type_: u8, e: &vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `internal_sum` + + + +
fun internal_sum(type_: u8, e: &vector<vector<u8>>): vector<u8>
+
+ + + +
+Implementation + + +
native fun internal_sum(type_: u8, e: &vector<vector<u8>>): vector<u8>;
+
+ + +
diff --git a/crates/sui-framework/docs/sui-framework/vec_map.md b/crates/sui-framework/docs/sui-framework/vec_map.md index 96b7c340f3f7f..ec5749359e1c4 100644 --- a/crates/sui-framework/docs/sui-framework/vec_map.md +++ b/crates/sui-framework/docs/sui-framework/vec_map.md @@ -262,7 +262,7 @@ Pop the most recently inserted entry from the map. Aborts if the map is empty.
public fun pop<K: copy, V>(self: &mut VecMap<K, V>): (K, V) {
-    assert!(!self.contents.is_empty(), EMapEmpty);
+    assert!(self.contents.length() != 0, EMapEmpty);
     let Entry { key, value } = self.contents.pop_back();
     (key, value)
 }
@@ -526,7 +526,7 @@ and are *not* sorted.
     keys.reverse();
     values.reverse();
     let mut map = empty();
-    while (!keys.is_empty()) map.insert(keys.pop_back(), values.pop_back());
+    while (keys.length() != 0) map.insert(keys.pop_back(), values.pop_back());
     keys.destroy_empty();
     values.destroy_empty();
     map
diff --git a/crates/sui-framework/docs/sui-framework/vec_set.md b/crates/sui-framework/docs/sui-framework/vec_set.md
index 25007f9b3a490..c6ee170d15863 100644
--- a/crates/sui-framework/docs/sui-framework/vec_set.md
+++ b/crates/sui-framework/docs/sui-framework/vec_set.md
@@ -309,7 +309,7 @@ and are *not* sorted.
 
public fun from_keys<K: copy + drop>(mut keys: vector<K>): VecSet<K> {
     keys.reverse();
     let mut set = empty();
-    while (!keys.is_empty()) set.insert(keys.pop_back());
+    while (keys.length() != 0) set.insert(keys.pop_back());
     set
 }
 
diff --git a/crates/sui-framework/docs/sui-system/stake_subsidy.md b/crates/sui-framework/docs/sui-system/stake_subsidy.md index f66978db04aec..8147ace90ae4f 100644 --- a/crates/sui-framework/docs/sui-system/stake_subsidy.md +++ b/crates/sui-framework/docs/sui-system/stake_subsidy.md @@ -9,6 +9,7 @@ title: Module `0x3::stake_subsidy` - [Function `create`](#0x3_stake_subsidy_create) - [Function `advance_epoch`](#0x3_stake_subsidy_advance_epoch) - [Function `current_epoch_subsidy_amount`](#0x3_stake_subsidy_current_epoch_subsidy_amount) +- [Function `get_distribution_counter`](#0x3_stake_subsidy_get_distribution_counter)
use 0x1::u64;
@@ -169,7 +170,6 @@ Advance the epoch counter and draw down the subsidy for the epoch.
 
     // Drawn down the subsidy for this epoch.
     let stake_subsidy = self.balance.split(to_withdraw);
-
     self.distribution_counter = self.distribution_counter + 1;
 
     // Decrease the subsidy amount only when the current period ends.
@@ -210,4 +210,29 @@ Returns the amount of stake subsidy to be added at the end of the current epoch.
 
 
 
+
+
+
+
+## Function `get_distribution_counter`
+
+Returns the number of distributions that have occurred.
+
+
+
public(friend) fun get_distribution_counter(self: &stake_subsidy::StakeSubsidy): u64
+
+ + + +
+Implementation + + +
public(package) fun get_distribution_counter(self: &StakeSubsidy): u64 {
+    self.distribution_counter
+}
+
+ + +
diff --git a/crates/sui-framework/docs/sui-system/sui_system_state_inner.md b/crates/sui-framework/docs/sui-system/sui_system_state_inner.md index f4357743de41d..ae65fd0a93402 100644 --- a/crates/sui-framework/docs/sui-system/sui_system_state_inner.md +++ b/crates/sui-framework/docs/sui-system/sui_system_state_inner.md @@ -2163,18 +2163,31 @@ gas coins. let storage_charge = storage_reward.value(); let computation_charge = computation_reward.value(); + let mut stake_subsidy = balance::zero(); + // during the transition from epoch N to epoch N + 1, ctx.epoch() will return N + let old_epoch = ctx.epoch(); // Include stake subsidy in the rewards given out to validators and stakers. // Delay distributing any stake subsidies until after `stake_subsidy_start_epoch`. // And if this epoch is shorter than the regular epoch duration, don't distribute any stake subsidy. - let stake_subsidy = - if (ctx.epoch() >= self.parameters.stake_subsidy_start_epoch && - epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms) - { - self.stake_subsidy.advance_epoch() - } else { - balance::zero() + if (old_epoch >= self.parameters.stake_subsidy_start_epoch && + epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms) + { + // special case for epoch 560 -> 561 change bug. add extra subsidies for "safe mode" + // where reward distribution was skipped. use distribution counter and epoch check to + // avoiding affecting devnet and testnet + if (self.stake_subsidy.get_distribution_counter() == 540 && old_epoch > 560) { + // safe mode was entered on the change from 560 to 561. so 560 was the first epoch without proper subsidy distribution + let first_safe_mode_epoch = 560; + let safe_mode_epoch_count = old_epoch - first_safe_mode_epoch; + safe_mode_epoch_count.do!(|_| { + stake_subsidy.join(self.stake_subsidy.advance_epoch()); + }); + // done with catchup for safe mode epochs. 
distribution counter is now >540, we won't hit this again + // fall through to the normal logic, which will add subsidies for the current epoch }; + stake_subsidy.join(self.stake_subsidy.advance_epoch()); + }; let stake_subsidy_amount = stake_subsidy.value(); computation_reward.join(stake_subsidy); diff --git a/crates/sui-framework/docs/sui-system/validator.md b/crates/sui-framework/docs/sui-system/validator.md index db50fa3400a9d..3c0250ae74815 100644 --- a/crates/sui-framework/docs/sui-system/validator.md +++ b/crates/sui-framework/docs/sui-system/validator.md @@ -1076,6 +1076,8 @@ Request to add stake to the validator's staking pool, processed at the end of th let sui = self.staking_pool.redeem_fungible_staked_sui(fungible_staked_sui, ctx); + self.next_epoch_stake = self.next_epoch_stake - sui.value(); + event::emit( RedeemingFungibleStakedSuiEvent { pool_id: self.staking_pool_id(), @@ -1346,7 +1348,8 @@ Process pending stakes and withdraws, called at the end of the epoch.
public(package) fun process_pending_stakes_and_withdraws(self: &mut Validator, ctx: &TxContext) {
     self.staking_pool.process_pending_stakes_and_withdraws(ctx);
-    assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
+    // TODO: bring this assertion back when we are ready.
+    // assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
 }
 
diff --git a/crates/sui-framework/docs/sui-system/voting_power.md b/crates/sui-framework/docs/sui-system/voting_power.md index 5b9be689ab2ab..d01fff559824d 100644 --- a/crates/sui-framework/docs/sui-system/voting_power.md +++ b/crates/sui-framework/docs/sui-system/voting_power.md @@ -378,7 +378,7 @@ Update validators with the decided voting power.
fun update_voting_power(validators: &mut vector<Validator>, mut info_list: vector<VotingPowerInfoV2>) {
-    while (!info_list.is_empty()) {
+    while (info_list.length() != 0) {
         let VotingPowerInfoV2 {
             validator_index,
             voting_power,
diff --git a/crates/sui-framework/packages/move-stdlib/sources/vector.move b/crates/sui-framework/packages/move-stdlib/sources/vector.move
index 96a2567855501..e9557b4d3f6ac 100644
--- a/crates/sui-framework/packages/move-stdlib/sources/vector.move
+++ b/crates/sui-framework/packages/move-stdlib/sources/vector.move
@@ -86,7 +86,7 @@ public fun reverse(v: &mut vector) {
 /// Pushes all of the elements of the `other` vector into the `lhs` vector.
 public fun append(lhs: &mut vector, mut other: vector) {
     other.reverse();
-    while (!other.is_empty()) lhs.push_back(other.pop_back());
+    while (other.length() != 0) lhs.push_back(other.pop_back());
     other.destroy_empty();
 }
 
@@ -156,7 +156,7 @@ public fun insert(v: &mut vector, e: Element, mut i: u64) {
 /// This is O(1), but does not preserve ordering of elements in the vector.
 /// Aborts if `i` is out of bounds.
 public fun swap_remove(v: &mut vector, i: u64): Element {
-    assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS);
+    assert!(v.length() != 0, EINDEX_OUT_OF_BOUNDS);
     let last_idx = v.length() - 1;
     v.swap(i, last_idx);
     v.pop_back()
@@ -176,7 +176,7 @@ public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> {
 /// Does not preserve the order of elements in the vector (starts from the end of the vector).
 public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) {
     let mut v = $v;
-    while (!v.is_empty()) $f(v.pop_back());
+    while (v.length() != 0) $f(v.pop_back());
     v.destroy_empty();
 }
 
@@ -185,7 +185,7 @@ public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) {
 public macro fun do<$T>($v: vector<$T>, $f: |$T|) {
     let mut v = $v;
     v.reverse();
-    while (!v.is_empty()) $f(v.pop_back());
+    while (v.length() != 0) $f(v.pop_back());
     v.destroy_empty();
 }
 
diff --git a/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move b/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move
index a87eb451e93be..6a349b36acd8b 100644
--- a/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move
+++ b/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move
@@ -37,6 +37,7 @@ public struct Scalar {}
 public struct G1 {}
 public struct G2 {}
 public struct GT {}
+public struct UncompressedG1 {}
 
 // Scalars are encoded using big-endian byte order.
 // G1 and G2 are encoded using big-endian byte order and points are compressed. See
@@ -44,6 +45,9 @@ public struct GT {}
 // https://docs.rs/bls12_381/latest/bls12_381/notes/serialization/index.html for details.
 // GT is encoded using big-endian byte order and points are uncompressed and not intended
 // to be deserialized.
+// UncompressedG1 elements are G1 elements in uncompressed form. They are larger but faster to 
+// use since they do not have to be uncompressed before use. They can not be constructed 
+// on their own but have to be created from G1 elements.
 
 // Const elements.
 const SCALAR_ZERO_BYTES: vector =
@@ -68,6 +72,7 @@ const SCALAR_TYPE: u8 = 0;
 const G1_TYPE: u8 = 1;
 const G2_TYPE: u8 = 2;
 const GT_TYPE: u8 = 3;
+const UNCOMPRESSED_G1_TYPE: u8 = 4;
 
 ///////////////////////////////
 ////// Scalar operations //////
@@ -171,6 +176,11 @@ public fun g1_multi_scalar_multiplication(
     group_ops::multi_scalar_multiplication(G1_TYPE, scalars, elements)
 }
 
+/// Convert an `Element` to uncompressed form.
+public fun g1_to_uncompressed_g1(e: &Element): Element {
+    group_ops::convert(G1_TYPE, UNCOMPRESSED_G1_TYPE, e)
+}
+
 /////////////////////////////////
 ////// G2 group operations //////
 
@@ -264,3 +274,17 @@ public fun gt_neg(e: &Element): Element {
 public fun pairing(e1: &Element, e2: &Element): Element {
     group_ops::pairing(G1_TYPE, e1, e2)
 }
+
+///////////////////////////////////////
+/// UncompressedG1 group operations ///
+
+/// Create a `Element` from its uncompressed form.
+public fun uncompressed_g1_to_g1(e: &Element): Element {
+    group_ops::convert(UNCOMPRESSED_G1_TYPE, G1_TYPE, e)
+}
+
+/// Compute the sum of a list of uncompressed elements.
+/// This is significantly faster and cheaper than summing the elements.
+public fun uncompressed_g1_sum(terms: &vector>): Element {
+    group_ops::sum(UNCOMPRESSED_G1_TYPE, terms)
+}
diff --git a/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move b/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move
index 49d19ad7147ce..4892c9b8f94fc 100644
--- a/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move
+++ b/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move
@@ -88,6 +88,14 @@ public(package) fun pairing(
     Element { bytes: internal_pairing(type_, &e1.bytes, &e2.bytes) }
 }
 
+public(package) fun convert(from_type_: u8, to_type_: u8, e: &Element): Element {
+    Element { bytes: internal_convert(from_type_, to_type_, &e.bytes) }
+}
+
+public(package) fun sum(type_: u8, terms: &vector>): Element {
+    Element { bytes: internal_sum(type_, &(*terms).map!(|x| x.bytes)) }
+}
+
 //////////////////////////////
 ////// Native functions //////
 
@@ -114,6 +122,9 @@ native fun internal_multi_scalar_mul(
 // 'type' represents the type of e1, and the rest are determined automatically from e1.
 native fun internal_pairing(type_: u8, e1: &vector, e2: &vector): vector;
 
+native fun internal_convert(from_type_: u8, to_type_: u8, e: &vector): vector;
+native fun internal_sum(type_: u8, e: &vector>): vector;
+
 // Helper function for encoding a given u64 number as bytes in a given buffer.
 public(package) fun set_as_prefix(x: u64, big_endian: bool, buffer: &mut vector) {
     let buffer_len = buffer.length();
diff --git a/crates/sui-framework/packages/sui-framework/sources/vec_map.move b/crates/sui-framework/packages/sui-framework/sources/vec_map.move
index d1fb7646b7e57..6b38d57d289a8 100644
--- a/crates/sui-framework/packages/sui-framework/sources/vec_map.move
+++ b/crates/sui-framework/packages/sui-framework/sources/vec_map.move
@@ -58,7 +58,7 @@ public fun remove(self: &mut VecMap, key: &K): (K, V) {
 
 /// Pop the most recently inserted entry from the map. Aborts if the map is empty.
 public fun pop(self: &mut VecMap): (K, V) {
-    assert!(!self.contents.is_empty(), EMapEmpty);
+    assert!(self.contents.length() != 0, EMapEmpty);
     let Entry { key, value } = self.contents.pop_back();
     (key, value)
 }
@@ -144,7 +144,7 @@ public fun from_keys_values(mut keys: vector, mut values: vector<
     keys.reverse();
     values.reverse();
     let mut map = empty();
-    while (!keys.is_empty()) map.insert(keys.pop_back(), values.pop_back());
+    while (keys.length() != 0) map.insert(keys.pop_back(), values.pop_back());
     keys.destroy_empty();
     values.destroy_empty();
     map
diff --git a/crates/sui-framework/packages/sui-framework/sources/vec_set.move b/crates/sui-framework/packages/sui-framework/sources/vec_set.move
index e4d9301def975..c1b67c276191f 100644
--- a/crates/sui-framework/packages/sui-framework/sources/vec_set.move
+++ b/crates/sui-framework/packages/sui-framework/sources/vec_set.move
@@ -69,7 +69,7 @@ public fun into_keys(self: VecSet): vector {
 public fun from_keys(mut keys: vector): VecSet {
     keys.reverse();
     let mut set = empty();
-    while (!keys.is_empty()) set.insert(keys.pop_back());
+    while (keys.length() != 0) set.insert(keys.pop_back());
     set
 }
 
diff --git a/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move b/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move
index 9c80f51aac67a..504ec3e5a16ad 100644
--- a/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move
+++ b/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move
@@ -382,6 +382,70 @@ module sui::bls12381_tests {
         let _ = bls12381::hash_to_g1(&vector[]);
     }
 
+    #[random_test]
+    fun test_to_from_uncompressed_g1(scalar: u64) {
+        // Generator
+        let a = bls12381::g1_generator();
+        let a_uncompressed = bls12381::g1_to_uncompressed_g1(&a);
+        assert!(a_uncompressed.bytes() == x"17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb08b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1");
+        let reconstructed = bls12381::uncompressed_g1_to_g1(&a_uncompressed);
+        assert!(group_ops::equal(&a, &reconstructed));
+
+        // Identity element
+        let b = bls12381::g1_identity();
+        let b_uncompressed = bls12381::g1_to_uncompressed_g1(&b);
+        assert!(b_uncompressed.bytes() == x"400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000");
+        let reconstructed = bls12381::uncompressed_g1_to_g1(&b_uncompressed);
+        assert!(group_ops::equal(&b, &reconstructed));
+
+        // Random element
+        let scalar = bls12381::scalar_from_u64(scalar);
+        let c = bls12381::g1_mul(&scalar, &bls12381::g1_generator());
+        let c_uncompressed = bls12381::g1_to_uncompressed_g1(&c);
+        let reconstructed = bls12381::uncompressed_g1_to_g1(&c_uncompressed);
+        assert!(group_ops::equal(&c, &reconstructed));
+    }
+
+    #[test]
+    fun test_uncompressed_g1_sum() {
+        // Empty sum
+        let sum = bls12381::uncompressed_g1_sum(&vector[]);
+        assert!(group_ops::equal(&bls12381::g1_to_uncompressed_g1(&bls12381::g1_identity()), &sum));
+
+        // Sum with random terms
+        let mut gen = random::new_generator_for_testing();
+        let mut elements = vector[];
+        let mut i = 100;
+        let mut expected_result = bls12381::g1_identity();
+        while (i > 0) {
+            let scalar = bls12381::scalar_from_u64(gen.generate_u64());
+            let element = bls12381::g1_mul(&scalar, &bls12381::g1_generator());
+            expected_result = bls12381::g1_add(&expected_result, &element);
+            let uncompressed_element = bls12381::g1_to_uncompressed_g1(&element);
+            elements.push_back(uncompressed_element);
+            let actual_result = bls12381::uncompressed_g1_sum(&elements);
+            assert!(group_ops::equal(&bls12381::g1_to_uncompressed_g1(&expected_result), &actual_result));
+            i = i - 1;
+        };
+    }
+
+    #[test]
+    #[expected_failure(abort_code = group_ops::EInputTooLong)]
+    fun test_uncompressed_g1_sum_too_long() {
+        // Sum with random terms
+        let mut gen = random::new_generator_for_testing();
+        let mut elements = vector[];
+        let mut i = 2001;
+        while (i > 0) {
+            let scalar = bls12381::scalar_from_u64(gen.generate_u64());
+            let element = bls12381::g1_mul(&scalar, &bls12381::g1_generator());
+            let uncompressed_element = bls12381::g1_to_uncompressed_g1(&element);
+            elements.push_back(uncompressed_element);
+            i = i - 1;
+        };
+        let _ = bls12381::uncompressed_g1_sum(&elements);
+    }
+
     #[test]
     fun test_g2_ops() {
         let id = bls12381::g2_identity();
diff --git a/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move b/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move
index d0760d97a88c0..d355a69c8489a 100644
--- a/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move
+++ b/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move
@@ -65,7 +65,6 @@ module sui_system::stake_subsidy {
 
         // Drawn down the subsidy for this epoch.
         let stake_subsidy = self.balance.split(to_withdraw);
-
         self.distribution_counter = self.distribution_counter + 1;
 
         // Decrease the subsidy amount only when the current period ends.
@@ -83,9 +82,13 @@ module sui_system::stake_subsidy {
         self.current_distribution_amount.min(self.balance.value())
     }
 
-    #[test_only]
     /// Returns the number of distributions that have occurred.
     public(package) fun get_distribution_counter(self: &StakeSubsidy): u64 {
         self.distribution_counter
     }
+
+    #[test_only]
+    public(package) fun set_distribution_counter(self: &mut StakeSubsidy, distribution_counter: u64) {
+        self.distribution_counter = distribution_counter;
+    }
 }
diff --git a/crates/sui-framework/packages/sui-system/sources/sui_system.move b/crates/sui-framework/packages/sui-system/sources/sui_system.move
index 916c2cd55b33b..641cc3f9dd86e 100644
--- a/crates/sui-framework/packages/sui-system/sources/sui_system.move
+++ b/crates/sui-framework/packages/sui-system/sources/sui_system.move
@@ -726,6 +726,17 @@ module sui_system::sui_system {
         self.get_stake_subsidy_distribution_counter()
     }
 
+    #[test_only]
+    public fun set_stake_subsidy_distribution_counter(wrapper: &mut SuiSystemState, counter: u64) {
+        let self = load_system_state_mut(wrapper);
+        self.set_stake_subsidy_distribution_counter(counter)
+    }
+
+    #[test_only]
+    public fun inner_mut_for_testing(wrapper: &mut SuiSystemState): &mut SuiSystemStateInnerV2 {
+        wrapper.load_system_state_mut()
+    }
+
     // CAUTION: THIS CODE IS ONLY FOR TESTING AND THIS MACRO MUST NEVER EVER BE REMOVED.  Creates a
     // candidate validator - bypassing the proof of possession check and other metadata validation
     // in the process.
diff --git a/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move b/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move
index 121a12fc75b94..4384f3fcd2599 100644
--- a/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move
+++ b/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move
@@ -865,18 +865,31 @@ module sui_system::sui_system_state_inner {
 
         let storage_charge = storage_reward.value();
         let computation_charge = computation_reward.value();
+        let mut stake_subsidy = balance::zero();
 
+        // during the transition from epoch N to epoch N + 1, ctx.epoch() will return N
+        let old_epoch = ctx.epoch();
         // Include stake subsidy in the rewards given out to validators and stakers.
         // Delay distributing any stake subsidies until after `stake_subsidy_start_epoch`.
         // And if this epoch is shorter than the regular epoch duration, don't distribute any stake subsidy.
-        let stake_subsidy =
-            if (ctx.epoch() >= self.parameters.stake_subsidy_start_epoch  &&
-                epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms)
-            {
-                self.stake_subsidy.advance_epoch()
-            } else {
-                balance::zero()
+        if (old_epoch >= self.parameters.stake_subsidy_start_epoch  &&
+            epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms)
+        {
+            // special case for epoch 560 -> 561 change bug. add extra subsidies for "safe mode"
+            // where reward distribution was skipped. use distribution counter and epoch check to
+            // avoiding affecting devnet and testnet
+            if (self.stake_subsidy.get_distribution_counter() == 540 && old_epoch > 560) {
+                // safe mode was entered on the change from 560 to 561. so 560 was the first epoch without proper subsidy distribution
+                let first_safe_mode_epoch = 560;
+                let safe_mode_epoch_count = old_epoch - first_safe_mode_epoch;
+                safe_mode_epoch_count.do!(|_| {
+                    stake_subsidy.join(self.stake_subsidy.advance_epoch());
+                });
+                // done with catchup for safe mode epochs. distribution counter is now >540, we won't hit this again
+                // fall through to the normal logic, which will add subsidies for the current epoch
             };
+            stake_subsidy.join(self.stake_subsidy.advance_epoch());
+        };
 
         let stake_subsidy_amount = stake_subsidy.value();
         computation_reward.join(stake_subsidy);
@@ -1127,6 +1140,16 @@ module sui_system::sui_system_state_inner {
         self.validators.request_add_validator(min_joining_stake_for_testing, ctx);
     }
 
+    #[test_only]
+    public(package) fun set_stake_subsidy_distribution_counter(self: &mut SuiSystemStateInnerV2, counter: u64) {
+        self.stake_subsidy.set_distribution_counter(counter)
+    }
+
+    #[test_only]
+    public(package) fun epoch_duration_ms(self: &SuiSystemStateInnerV2): u64 {
+        self.parameters.epoch_duration_ms
+    }
+
     // CAUTION: THIS CODE IS ONLY FOR TESTING AND THIS MACRO MUST NEVER EVER BE REMOVED.  Creates a
     // candidate validator - bypassing the proof of possession check and other metadata validation
     // in the process.
diff --git a/crates/sui-framework/packages/sui-system/sources/validator.move b/crates/sui-framework/packages/sui-system/sources/validator.move
index da6157014541e..0019ea7dd42dd 100644
--- a/crates/sui-framework/packages/sui-system/sources/validator.move
+++ b/crates/sui-framework/packages/sui-system/sources/validator.move
@@ -352,6 +352,8 @@ module sui_system::validator {
 
         let sui = self.staking_pool.redeem_fungible_staked_sui(fungible_staked_sui, ctx);
 
+        self.next_epoch_stake = self.next_epoch_stake - sui.value();
+
         event::emit(
             RedeemingFungibleStakedSuiEvent {
                 pool_id: self.staking_pool_id(),
@@ -462,7 +464,8 @@ module sui_system::validator {
     /// Process pending stakes and withdraws, called at the end of the epoch.
     public(package) fun process_pending_stakes_and_withdraws(self: &mut Validator, ctx: &TxContext) {
         self.staking_pool.process_pending_stakes_and_withdraws(ctx);
-        assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
+        // TODO: bring this assertion back when we are ready.
+        // assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
     }
 
     /// Returns true if the validator is preactive.
diff --git a/crates/sui-framework/packages/sui-system/sources/voting_power.move b/crates/sui-framework/packages/sui-system/sources/voting_power.move
index 5a9672316602b..dd0fc336e83f7 100644
--- a/crates/sui-framework/packages/sui-system/sources/voting_power.move
+++ b/crates/sui-framework/packages/sui-system/sources/voting_power.move
@@ -127,7 +127,7 @@ module sui_system::voting_power {
 
     /// Update validators with the decided voting power.
     fun update_voting_power(validators: &mut vector, mut info_list: vector) {
-        while (!info_list.is_empty()) {
+        while (info_list.length() != 0) {
             let VotingPowerInfoV2 {
                 validator_index,
                 voting_power,
diff --git a/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move b/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move
index 7c04d28e61aca..ec94cbf81a1bf 100644
--- a/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move
+++ b/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move
@@ -3,6 +3,7 @@
 
 #[test_only]
 module sui_system::rewards_distribution_tests {
+    use sui::balance;
     use sui::test_scenario::{Self, Scenario};
     use sui_system::sui_system::SuiSystemState;
     use sui_system::validator_cap::UnverifiedValidatorOperationCap;
@@ -491,4 +492,140 @@ module sui_system::rewards_distribution_tests {
         scenario.return_to_sender(cap);
         test_scenario::return_shared(system_state);
     }
+
+    fun check_distribution_counter_invariant(system: &mut SuiSystemState, ctx: &TxContext) {
+        assert!(ctx.epoch() == system.epoch());
+        // first subsidy distribution was at epoch 20, so counter should always be ahead by 20
+        assert_eq(system.get_stake_subsidy_distribution_counter() + 20, ctx.epoch());
+    }
+
+    #[test]
+    fun test_stake_subsidy_with_safe_mode_epoch_562_to_563() {
+        set_up_sui_system_state_with_big_amounts();
+
+        let mut test = test_scenario::begin(VALIDATOR_ADDR_1);
+        let mut sui_system = test.take_shared();
+        let ctx = test.ctx();
+        // mimic state during epoch 562, if we're in safe mode since the 560 -> 561 epoch change
+        let start_epoch: u64 = 562;
+        let start_distribution_counter = 540;
+        let epoch_start_time = 100000000000;
+        let epoch_duration = sui_system.inner_mut_for_testing().epoch_duration_ms();
+
+        // increment epoch number (safe mode emulation)
+        start_epoch.do!(|_| ctx.increment_epoch_number());
+        sui_system.set_epoch_for_testing(start_epoch);
+        sui_system.set_stake_subsidy_distribution_counter(start_distribution_counter);
+
+        assert!(ctx.epoch() == start_epoch);
+        assert!(ctx.epoch() == sui_system.epoch());
+        assert!(sui_system.get_stake_subsidy_distribution_counter() == start_distribution_counter);
+
+        // perform advance epoch
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 1, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 3 epochs worth of subsidies: 560, 561, 562
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 3);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        // ensure that next epoch change only distributes one epoch's worth
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 2, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time + epoch_duration, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 1 epoch's worth of subsidies: 563 only
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 4);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        test_scenario::return_shared(sui_system);
+        test.end();
+    }
+
+    #[test]
+    fun test_stake_subsidy_with_safe_mode_epoch_563_to_564() {
+        set_up_sui_system_state_with_big_amounts();
+
+        let mut test = test_scenario::begin(VALIDATOR_ADDR_1);
+        let mut sui_system = test.take_shared();
+        let ctx = test.ctx();
+        // mimic state during epoch 563, if we're in safe mode since the 560 -> 561 epoch change
+        let start_epoch: u64 = 563;
+        let start_distribution_counter = 540;
+        let epoch_start_time = 100000000000;
+        let epoch_duration = sui_system.inner_mut_for_testing().epoch_duration_ms();
+
+        // increment epoch number (safe mode emulation)
+        start_epoch.do!(|_| ctx.increment_epoch_number());
+        sui_system.set_epoch_for_testing(start_epoch);
+        sui_system.set_stake_subsidy_distribution_counter(start_distribution_counter);
+
+        assert!(ctx.epoch() == start_epoch);
+        assert!(ctx.epoch() == sui_system.epoch());
+        assert!(sui_system.get_stake_subsidy_distribution_counter() == start_distribution_counter);
+
+        // perform advance epoch
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 1, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 4 epochs worth of subsidies: 560, 561, 562, 563
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 4);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        // ensure that next epoch change only distributes one epoch's worth
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 2, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time + epoch_duration, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 1 epoch's worth of subsidies
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 5);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        test_scenario::return_shared(sui_system);
+        test.end();
+    }
+
+    #[test]
+    // Test that the fix for the subsidy distribution doesn't affect testnet,
+    // where the distribution has no epoch delay, and the condition could result
+    // in arithmetic error.
+    fun test_stake_subsidy_with_safe_mode_testnet() {
+        use std::unit_test::assert_eq;
+
+        set_up_sui_system_state_with_big_amounts();
+
+        let mut test = test_scenario::begin(VALIDATOR_ADDR_1);
+        let mut sui_system = test.take_shared();
+
+        let ctx = test.ctx();
+
+        // increment epoch number (safe mode emulation)
+        540u64.do!(|_| ctx.increment_epoch_number());
+        sui_system.set_epoch_for_testing(540);
+        sui_system.set_stake_subsidy_distribution_counter(540);
+
+        assert!(ctx.epoch() == 540);
+        assert!(sui_system.get_stake_subsidy_distribution_counter() == 540);
+
+        // perform advance epoch
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(541, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, 100000000000, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+
+        assert_eq!(sui_system.get_stake_subsidy_distribution_counter(), 541);
+
+        test_scenario::return_shared(sui_system);
+        test.end();
+    }
 }
diff --git a/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move b/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move
index d333f4e64a642..c7fd42a3d1b20 100644
--- a/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move
+++ b/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move
@@ -1095,8 +1095,8 @@ module sui_system::sui_system_tests {
         let mut system_state = scenario.take_shared();
 
         let staked_sui = system_state.request_add_stake_non_entry(
-            coin::mint_for_testing(100_000_000_000, scenario.ctx()), 
-            @0x1, 
+            coin::mint_for_testing(100_000_000_000, scenario.ctx()),
+            @0x1,
             scenario.ctx()
         );
 
@@ -1107,20 +1107,23 @@ module sui_system::sui_system_tests {
 
         let mut system_state = scenario.take_shared();
         let fungible_staked_sui = system_state.convert_to_fungible_staked_sui(
-            staked_sui, 
+            staked_sui,
             scenario.ctx()
         );
 
         assert!(fungible_staked_sui.value() == 100_000_000_000, 0);
 
         let sui = system_state.redeem_fungible_staked_sui(
-            fungible_staked_sui, 
+            fungible_staked_sui,
             scenario.ctx()
         );
 
         assert!(sui.value() == 100_000_000_000, 0);
 
         test_scenario::return_shared(system_state);
+
+        advance_epoch(scenario);
+
         sui::test_utils::destroy(sui);
         scenario_val.end();
     }
diff --git a/crates/sui-framework/packages_compiled/move-stdlib b/crates/sui-framework/packages_compiled/move-stdlib
index 191c7bd74e552887d4ce54cbdffbabd73852fb75..48a182f7e21f30daac43b23a2976615fd5bc6a8e 100644
GIT binary patch
delta 299
zcmY*TyGjE=6uo!u&g|?w@>sJwn;0Yp1T7RqkS$##-(nMlfCvFgF&{7>cA^BV1mpfd
zzCaL5YYVZERCZz_2=R1>^L!r8h7VVA^CG%we`o;!5`hux2gib+48k|OMLD95xFcmI
zbnmBffKj<0y`zgQ=1{>xEn>gu2A0cmTlcY%DcJd^gk1Zv+1hgl>d$UagCR>fR;a+G
zmKAO+_0TYjn&qtgGUowa`m_3OFp~DwPwLmsEsoZj0wJY@GIG>}7OeWC__RTxiXZ~w
zs2CBUnA36=KQUbQ)1;M1&

delta 288
zcmW-by-EW?6ov1dJAauwv&lx?O^nDk@vjg~HDy5ZHbF=urm?aU8>g`oZEYlry``0)
zK1Hw)UqIY=igVyA4&TrGeg1T7_rC4@))#94un2<0RGvC@J~K;vKzl`%ekA9g=_b9{
znPS&Mu1BpUW_iE}8Ahz6SEe5E6uES8xB^?!K|Ly|4+Gdt7u~aU)UE|&nrcR&6GT+o
zwKs88Iczj?J(NZ<0~*|Ak+YYXjH;4@ASaE9gElZZj5@W80{dyFSdbE(G%io-?ecT@
E2ers79RL6T

diff --git a/crates/sui-framework/packages_compiled/sui-framework b/crates/sui-framework/packages_compiled/sui-framework
index 343cb0eaea7cef905a76e8d4903ad910f624e0e8..02d56854d580acee4ae00f6bbcb0a828e2ff9bae 100644
GIT binary patch
delta 3737
zcmZWseQae#6`whG=6=n+_r3eRKl<_7zJ9;=`d(j`-fr97w!3YC?xMK67&TItrLM?s
zG21R53D`(b;*#tlH(*SRpc_LpQG|-|4`X78iN*+Oj3z`AK!X}d2oW$5QO~)z+b#Ig
z`jm-pMe)5G_HRAze7)oEN@t9DoavzJn)78YE^}Rei@(N=N3~yo
z`I4N|pU_+OuXSDhUe}#J>i-n(ZxwjIQzG%A0%r3rrZHx4#+hWCGiHK@XbFjkz7We1
z<_r^XEF&(IwMDAD9SIADLMM#rPSoshNtd-y=6DIl(gaBYk04~xJiVJk5Edb
z8HYj|gDI8~p|dN&Sw3Ho#ZIm(#x0D<28gO*ZTfhBTxBS-BejMV`r|feBm{!+!Rgwt
zWrJbcfPWD6qEG`=2w_ffkaqhmNa
zt`Kl(1Iw^1E8_CAw5Hf33aJ{NobBdL__^7_t!Yy+#V479N#?s#3*2CpT3WVCN^{7yR(ATjVbadBnG^IPXXHT+>gB}zdPDrJ!B^h;1A}YfX
zN_xW2Vr1cW4irlaSQ@_!ep&o-IBv&;4UPGH_^ShD4&iGD=C|@08P
zN6M4}PKTD3Sg@aVUP&06GXbKcb$t!Qk06k5!WT5-U-DULKBX%93o3N%SMo*Yo%~~x
z8LS6wF&FS7Ul;&p3O1vh!4t{YmPyzGRE^W-1hg5~5RB0dOcG2GGzs?62DAy1U}6qP
z1fRgO5OM@*8zDiERR|;04_6uUO}V)4;Nc=jgfc-Pv4vPkAao`XiUi$0!YFm@N+I;i
zG-FkQ-VDMJ!9W(Fk69Ul=ECkj;pLO%y_EBh@?P`tw@Wy91pS$S!r8wvDZwC&31b1_=pRE^C2^V
zkw<(L1%d|&IZHzdLgpfWRusF13Cf~=S4DAu%)uxg#5%5w$*CicNRPb6-J=3EI2!R`
zBn;I;aR&VZn8M_!*_oTgjKwo+XV=y@VYvm%d1yZ5IIROx81cCrB3B0NR^_Ys%Kz_6W7F}DN%3uyx%yhZmHcXG3s1Vq#(?A77
z2_p$zPo1H&ax?D>Z(r$6qZUwWkyZj;r>K2m<-SSFa1;w1$j1ZWg<%CQ6k>k@Eo`!I
zp%_c#jwC#OWa1+j81`2@@SzLUp6(YM?qO_x8M?`zAbpU63N@~DPD4-ZOGjUdyl0>{
zmKZyd@Z}>DOK6}X?*$k{bz1j(cDz?%D0XN?`XUU+O~*h%P=XO^ifCfx?dH+v<X4J4%Pa+fVaV1{p>SmIVVdsCqsX&YHXxlyQ^10DoI
zn9?@*pn^bcZ{Boo^2Ag9S9lw@z5s36K&E;d=_`rhlyv8dG4o;v#OI(lFve44Y2dT)8j8f6|t_@qCDs8nV16=Fr&Mq(v
zG1HsT@C_yoWTR3J9ze>Fqx;d`l(hqBZ_4^C+M9Z{hjYysE|s}yw7nH(EbtX(CJxb1
zX1Y#8nb`&nWtQt26>aY*6|;>oEUbC7H|@Et>zYiJ4{Kag{k(-ZvWR|Fqcx5=yM*$S1kzUE}J*{-*zn&sg}yHP6*w~w^its3Sw+g`ctWxn0I{W-Y&?pfK@GLhtX
zFH!ZkIP+0)9FC_xV@6`R1PE_to%EcaHE!Lhr7FTl)(1kVye%
zM2b7Y6-o>8j`U<89DG_PG}}-zC@4eeDH#+2H=E5^J2v=6OK50NicaocagxMkH{6=V2~`G$V9Rcv{dIuHRiSJi4LTLiomA=}&hB-8M^bWQ>ZV*;)g0p+}VPVX8EG
zZ`ly^+W2I^vaceBuq#$~XBbp5F3iUu;|N=1uD*hl$UWk@+b|;Vk&z)$rm@JMF=+anJB|Lo^@2HwL@5
zV6%olVTc}K;QNnpgQ2P4>2MdTNRXLe*sm#`V!;Sx$g_#+QJV#$$Y8bYC+_)pl0O~(
z=ZpD)s(hWl#f?8}-v;xdKBqsg&)64?ta{uK&Sm2{-F?;7z1PC&3*S6i$YJiKium;7
zS%wtOJ93ENmr(+RENY?DiBw>SgeYXB$mO$nPvEOd~2dh%`jZNcw$h?;OT;ReK`*#K7ax63CR-C
z!#dC-@-xDpT+DZvyY@YR(XkTVLi_Pv^LSu|Z(dyB`Ec^m5aK5;4Ol%$v^kICd-&3&
zTx~wLYuhWzE+FIPT)vnOJeg2zDTk{W{S;ryhd;Y?CpW``5BKo|v2iU4`0o}=#B971
f&{a0}A&J^b#^$&aX87d8BWU~^!tHk-zJ2q5(r%yy

delta 3115
zcmZWrYiwLc6`nJ9=DzRUz5Beo>&LFW_IlTAdu?yxYsb&{k)}=DmS704L_C5NszTHx
zkfb0>ZG~t^Aj$kdq$napL83nhl@U@~)b>viP!uYDAW=Z7Kt+gv3P>QeKPczyb!bvn
zJKvdmUUO#V%$a%fYvRvm#ShkJHXpaIcHMGx#+VOG=en-BXT+C5$g^-Aj2rx$-2ACn
z)W0H|_7A1*JSTPcCHa=-J!^6Qc}pZ-v^cY%z%<4TV8A2;z%JJiO(79MC(L&+ZI~Q~
z(qm#hkO-l|46;@{S)larooM4Gs~+zyudLt@s!h-{KSUCjS*T
zzAax9=G#_TzieM{?Wc1Y_l4Xo!F1Mxwz#j@Xfts%i1*$ko7`tRJjX03NQ96e>mzg%6cY$J
zg03XO5b2cygg%*KtRhpJjP<6mJ3!E%K`0Ycvk1ev6a(2+z+|?1dU1l_g!D1kPgsvx
zjcP|S+|$6w8SE@!+E0;9BR`aH<(u90?nY&((yBE3>V3nNk-o;j&_HXTIanVY9vB(i
zY}AHoty*)aJ~Ud_;(Vo)1t4?;;7V$Y>z!VVuuX`CnbaXM-l>KTwiWEywnL!flE;dK
zn_zB#v5r2E&5(DeSsaNLn`s@z?T^?>i>amH4O~JjvK3EX)R-(
zvyNg%iCkN;0q{8P4$@3f4=65jK*6Dh4gqy+xghYHt1HW)dU<(PSu0O$U0Zn!)H9;I
zb$l(Ur}6aK00f*sLr_=tm6}DzU@kb!^dJ~BOdre$&oSn0(Le!14kHfLt)hBk-ytZg
zmHmBblya0>B$XWNB}z~4|I(E0Fe`F-K6W5b#U>S(hcQRK%fS?3QW|LCfErnEf5_8=
zybC@C6BKz5Nd-xxfcM^^8l`wS_NAjQhrFkGCFU6Ka{6Kur{n#(^>!(x2jy+={k#@S
zEs{pzR{Q>&vi^v80*dkqe8hMQegx(XZBc(y%h-<#!#OLsdrrK>y*
z$7-8&Ibz`BK^NTN0k^qlN}l2<+In`QhZphGMS5i5$Sg9;!k3l@9$V#+rQZJF8g9bU
z!4Mqv;e*}aZk;;##pK412X2=l7g)%{HSN&rT)d!tR9+V6r14$r4b%K;YEgeB^^UNA
zo?mdE4FBeO|E`*e|5Q2ag`MyLmSP3g&j#5Jw!&7~8rx8NkM7yrPxa{i1nX3k?x&*k
zAnoh}1n;9Fbb#v5Bz*=1dDFu3Vc3jKkzkL5yuBC}+ef|osCN%}xz|JJqBNMHtEgPY
zs{$%z+!ko{;{C@(B72I1++Ki$UNGcdbI+e8TDcbOr!1ZW5zPByh(@ICuM?Rc9YbR6Mtjp<+Pt^LoYJZWnlk|7h>bP$>x_<1AkOc>
zekJG)%E3S|T*SdZ>l!an61wY
z%?!?kt*O>%v(?<$idy57qmwhOuKL*IShG}b@1K~QXi8&i^`SrFVb!+=9{vfp%%jH-
zA9?uLiPP6^eNOgFm{}=v}vjauuBT!NY2N%tdp;E_>TI^7yclld|5oDUOUx*
zi%One*xVU9;9zpPhLr+Wc*4=5(3QT7gsVGv%iD${qk>~NdP+t`yj>Zav$k#U34qT4
zEh=&6J?td0yRaPGt=(aFbIOdR3|0%F`@+|v9`&=+JqMQCHcMh1ih{CmY?h#IZ&aop
zp2&);+1jYjqdrk|@Yd0=7HT$+`q2oFSX3>GAymaKz1BLvXh6-J>3tv`DITaSOzEPz
z0X2-BPI}ToKS7cw(PVk#{|9qq5D!Nt8je4xzISG}w3MRoW2XWu5>{FVpBU({cInYf
zfYHRv?dXQoz@zoObqb%SaiqPW%|?SAhNuWbVnG6f)od2@xZq(It6e5)1S1ZlSTxKt
zXn+~Wf~L))CRV?Zt#3T~vE=+!|8`Zt_Z>b?b6VF85U;?X{0ICCj6Z2lbMu0}sK2Vu
z+UJa{bJY;;b>kV``?aV0zf%+Et``b9JdUX%K5!t*=mzh~A@U%v{(deS<}*^{!ffb^
zjBAQQrjRRyOyqN_bp9INH|qKGrLEtdKfv9Djc>t7D+^uPYWc!OO=KeJ@_6Y0BHhJG
z$ngct5=N)K8ZO7RFx86}@?F;dHmt%i5LUtsn!yXmhe%WZys#TWwd3Lt;-QO!yURXX
z%K5Mq(!yDWtYIy6Jc!)LLzgV_%cAJvi)JjHJYM}9N2)BfK=)`{k|EN+i

diff --git a/crates/sui-framework/packages_compiled/sui-system b/crates/sui-framework/packages_compiled/sui-system
index 2f5cc8064c3d9535e1ec7fc97f87d22e2eacf020..4f3423fe5d3c279bf8b3c386c5c4a69d6f374f02 100644
GIT binary patch
delta 3435
zcmaKudvI079mjva-93+ccF#HY+0kBFzU)lLfBLX9_7B@mr_=uSK*kF?J%4<5
z_q)H}?r(R`K0f$0JM~+3^>-P3Gx~j|$NqEPePdvu|H)LgqCKq7dl-x;?tl*xB@6Z`
z;Xu*|JyB$pq#Bv2O)Wpegdt;#VDD6K+o)6ejk=Woi!=d1A=u1TJX|YAnK5?K`Uo#G
z`@9%1e<_BYuNX=7g7F{IeNGAgl46iSA43QL831FzK>$cV0F!Y5R~P`6NkwiEk~TpI
zf`?S$5YUi>T3a(gU{-(#T&4-GX8_L-R5Z}PJ9@4EnW?qKHdHB|;mEj7RCI3MN%SIX
z{v>)+r{_20;z|#fxOybEe)^MpRre2P9kSNFKO`j#J{WJ8jCeQyBeWK|A`9
zyKpk?Da(<8r)=p2zA~f?{OH+*yuL-O6vmnr*0cCNv;W9<
znm-qL=R*;yzl(R#ecg2Yw@d*k%rcW%EW;`zbE0tucr;B&JpfBuHc)IHjXoz01j{j_
z!HG|e?AYkh9Z|49oSMJiMIQ2WwBkTVrI7-LG|!27w8#(=un2*OzI@ys)!)38|v95#s5l-$taLW0OJCyu~7Hvyy+dUtkE*Su8c!kx2$h}$S3gUu0s7Y_pTCJ>Gqx0j-N%yPo_QCJjo`azJ9v^~X
ze-;FjTdDXEAvN4RrwdZc?ld8_Z6RUw7Q%fx_XyraJQMS5%tvEBh8v?7U^6G6=KH^$=Dy&xNphrwL(x1EFq70O8E$9E8RG1rW~Dm_MIz%hEvz
zhZ~z9yiM1x8mohF&8lZ5sH};Y5rXIrPe!`YsvlGItTHHQBHV2w(AUvu4uHaY!
z!mGG658>;$l$0OD{ApwyZOq)eVq|_{)yVC$*A#|Y=jDpITU&?6rxqW5m(_N1i9=BDjWjUPM9%Xa?XCQEKEeY*{7`Ku^hW=J}J_uBpr+!c4bGv>Yg9^R^3J$Hhp28MP}%7PM>(e!_mn3hPu+xd|&Xv9Fu#U1{v$UIKZ>GEX{!UQprumzphsC9Y3qYH
zc22PiN5rqF{Ze%3wWH__ioRZZ+Sis+2Twy7VGY{#=3yVaG5nd@vAoCo##rMQ`R%>Y
zujyvLdss6`n;xZyUeXLzvy-5h&5R{`Ro=0je4_{*Tjm>;GAa^ENfk|A>Y!(%r)r9*S{)rrT@X)@1U)K
zkssEH57GDt^=@;}a+>4GDR|w~VISGiYnNJ~ttRbwR+901EE!x&QX#z}@23g~Xto<;
z-`d!>SC$I0wo=u}KG|ET=F(9CemWDI9;a-Wk=?4nZIFUSjv>up
zqnZ^5sPw6j4i!E0lVPZ`vQgz^hpLiYy{cLo$|YT|u_xPUGWAnN`y^2=skHH5
z5^2dl%gD4i&5bkMN;Jz^5r1k~%M-{t=%3zI?LaVBVKGp`!(yr+Dh-Do@CE&LkB_`3
zqoYq1{0qqD=v@(?DZ%5>i%<0qZDu{wk_xu4UR{{JH_jvLD=(MW+-X|~j<9|j)b4f{
ShQ_1()93Jksmo8dR{ak&r=VH@

delta 3187
zcmaKuX>b(B702J}o@=LjW_ET*tKF4^RzlM1TqGo+`l{B{9xJ!sGz-7`hL+|#+;F8xEkUi*nK06-#G%#u7(
z4Q>Jo+pkc=V!&e7x#H`(iHg4Z{
z;PA0>f3Xu~lBYN_t_mmIH~AEL;U|-iqgUQBC69K>#Ukc3U)RH%Q-d@`6o3NfIyCL*
zVQK+V;r*#y7!Ut5^?1^grY(F=TEh0@QWwIHhv%m4#-RMqX&+1&`n6H9YLdwmZ*11)
zY5Jmt=2z9C{fcig-qdRC54Ax4UV9In^M>tRH#G3Eni^Pzhr$XW0aiMM4Xy{!5cdI=CaCsAl6p_0;_`rS1{I%F
z5xxu|h7;)RH>N4yyE+Yv8Ed;?Py=u*TtBjrlOZ$&&Gv8I}m
zX-j7SmMDxU^caK=ReKk+#7w#d;1*_U0dAKtT?f#Y9S1N;S@bKsjrBQz#q|vU>2xDN
zVY~;}yk`Ld&Uku=cKh^gL_XYA0Ij8yPK!2kU52>(sZQb@+@KowaII?n91SY;343P{
zr_P|dkzZgSw+45UqbNPL&p;{LdNLrl_T<3H%^zY2POH(biiz{LtD<-D5>-rDWT@it
z#ZOS-<{leD?zS2WiBpzRNO$_xh02Y$^a!m!s0{;~BL@J|pD!xOt9
zF?YEUKw|wg0}|V&6Sk|`3%DX|I8<$}ruxfNpLwXir0QSC6~tE}-cU^b4Cl^+Af0c7
zAe)~ALCr=3g4$JthFLxYP5BH20|QebC@M^zLYOvZ2!i3}76|64-o-l_AXqxvgJ8u>
z3Bjs<@?G6aSTjIa+ecVOT3fJTkZhjG*FkVn`Mr%hauD3YIkgb{2d--fW#D9SN$H>2ryvs6>#oGB^KIaP^jK$md5c>>Z{i&)BL9LTjyW#+|DGC?YRF~$B!8Cg=ZE+~zK0*@r}!*R
zw+b}X2(S(Rq_kYi9|;d$2yk`y(uGeu*2%a~pPjhy{1=dEWNYY=g8o@#x@u?Cjpd&T
zU%NQIbOxEBwE9RpG^<-(WN|HM(Xyjf586#ypl73N8cm*m5b#B0COH?q(a)NR{uPv5
zb-Fy}9~YdyeH)lnX~HT@EdTgmWb+h@sT_>au;lb5WVTYwNMZglf&Pa~$`(fxE!V$XES*Q$Pl2SrMt0a8
zqCX{jU0!r$yQ>jvZM)#li;k2@N9?eOiKeLK1gRF8)Q`kuQY5ug6xH=t%0Mp_Q3fKA
zt!^v1x6vrAG^pt9mwk3^zpN7-{h}@{`m%C@quCSs(nU`e!~|jXr%jQST~5L7>LaZ)
zoOQLX@ebvr^v{LMulEhFVZCFL
t^w+XJ)fl^pFCgo$Y(LK?joE_#EE}NocXl~2yeqta{Q^E$UKMs^{|m|Uk;ec4

diff --git a/crates/sui-framework/published_api.txt b/crates/sui-framework/published_api.txt
index 4d9f1f6f33262..8a91b732b83ee 100644
--- a/crates/sui-framework/published_api.txt
+++ b/crates/sui-framework/published_api.txt
@@ -691,6 +691,9 @@ advance_epoch
 current_epoch_subsidy_amount
 	public fun
 	0x3::stake_subsidy
+get_distribution_counter
+	public(package) fun
+	0x3::stake_subsidy
 SystemParameters
 	public struct
 	0x3::sui_system_state_inner
@@ -1561,6 +1564,12 @@ multi_scalar_multiplication
 pairing
 	public(package) fun
 	0x2::group_ops
+convert
+	public(package) fun
+	0x2::group_ops
+sum
+	public(package) fun
+	0x2::group_ops
 internal_validate
 	fun
 	0x2::group_ops
@@ -1585,6 +1594,12 @@ internal_multi_scalar_mul
 internal_pairing
 	fun
 	0x2::group_ops
+internal_convert
+	fun
+	0x2::group_ops
+internal_sum
+	fun
+	0x2::group_ops
 set_as_prefix
 	public(package) fun
 	0x2::group_ops
@@ -1600,6 +1615,9 @@ G2
 GT
 	public struct
 	0x2::bls12381
+UncompressedG1
+	public struct
+	0x2::bls12381
 bls12381_min_sig_verify
 	public fun
 	0x2::bls12381
@@ -1666,6 +1684,9 @@ hash_to_g1
 g1_multi_scalar_multiplication
 	public fun
 	0x2::bls12381
+g1_to_uncompressed_g1
+	public fun
+	0x2::bls12381
 g2_from_bytes
 	public fun
 	0x2::bls12381
@@ -1720,6 +1741,12 @@ gt_neg
 pairing
 	public fun
 	0x2::bls12381
+uncompressed_g1_to_g1
+	public fun
+	0x2::bls12381
+uncompressed_g1_sum
+	public fun
+	0x2::bls12381
 Referent
 	public struct
 	0x2::borrow
diff --git a/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp b/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp
index 078ee9b351126..452e3b571a172 100644
--- a/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp
+++ b/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp
@@ -141,7 +141,7 @@ Response: {
   "data": null,
   "errors": [
     {
-      "message": "Bad type: unexpected token Name(\"not_a_type\"), expected type tag",
+      "message": "Bad type: unexpected end of tokens",
       "locations": [
         {
           "line": 3,
diff --git a/crates/sui-graphql-rpc/src/test_infra/cluster.rs b/crates/sui-graphql-rpc/src/test_infra/cluster.rs
index 27ee4f5a23b31..4317b44de285a 100644
--- a/crates/sui-graphql-rpc/src/test_infra/cluster.rs
+++ b/crates/sui-graphql-rpc/src/test_infra/cluster.rs
@@ -132,6 +132,8 @@ pub async fn start_network_cluster() -> NetworkCluster {
         None,
         Some(data_ingestion_path.path().to_path_buf()),
         Some(cancellation_token.clone()),
+        None, /* start_checkpoint */
+        None, /* end_checkpoint */
     )
     .await;
 
@@ -187,6 +189,8 @@ pub async fn serve_executor(
         retention_config,
         Some(data_ingestion_path),
         Some(cancellation_token.clone()),
+        None,
+        None,
     )
     .await;
 
diff --git a/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs b/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs
index 781db11ba552d..93a8ebf0bfdc6 100644
--- a/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs
+++ b/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs
@@ -24,7 +24,7 @@ pub(crate) struct NamedMovePackage;
 
 impl NamedMovePackage {
     /// Queries a package by name (and version, encoded in the name but optional).
-    /// Name's format should be `{organization}/{application}:v{version}`.
+    /// Name's format should be `{organization}/{application}/{version}`.
     pub(crate) async fn query(
         ctx: &Context<'_>,
         name: &str,
diff --git a/crates/sui-indexer-alt/Cargo.toml b/crates/sui-indexer-alt/Cargo.toml
new file mode 100644
index 0000000000000..31a95d3a292c2
--- /dev/null
+++ b/crates/sui-indexer-alt/Cargo.toml
@@ -0,0 +1,47 @@
+[package]
+name = "sui-indexer-alt"
+version.workspace = true
+authors = ["Mysten Labs "]
+license = "Apache-2.0"
+publish = false
+edition = "2021"
+
+[[bin]]
+name = "sui-indexer-alt"
+path = "src/main.rs"
+
+[dependencies]
+anyhow.workspace = true
+async-trait.workspace = true
+axum.workspace = true
+backoff.workspace = true
+bb8 = "0.8.5"
+bcs.workspace = true
+chrono.workspace = true
+clap.workspace = true
+diesel = { workspace = true, features = ["chrono"] }
+diesel-async = { workspace = true, features = ["bb8", "postgres", "async-connection-wrapper"] }
+diesel_migrations.workspace = true
+futures.workspace = true
+prometheus.workspace = true
+reqwest.workspace = true
+serde.workspace = true
+telemetry-subscribers.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
+tokio-stream.workspace = true
+tokio-util.workspace = true
+tracing.workspace = true
+url.workspace = true
+
+mysten-metrics.workspace = true
+sui-field-count.workspace = true
+sui-storage.workspace = true
+sui-types.workspace = true
+
+[dev-dependencies]
+rand.workspace = true
+wiremock.workspace = true
+tempfile.workspace = true
+
+sui-types = { workspace = true, features = ["test-utils"] }
diff --git a/crates/sui-indexer-alt/diesel.toml b/crates/sui-indexer-alt/diesel.toml
new file mode 100644
index 0000000000000..054029ff39a8a
--- /dev/null
+++ b/crates/sui-indexer-alt/diesel.toml
@@ -0,0 +1,6 @@
+[print_schema]
+file = "src/schema.rs"
+patch_file = "schema.patch"
+
+[migrations_directory]
+dir = "migrations"
diff --git a/crates/sui-indexer-alt/generate_schema.sh b/crates/sui-indexer-alt/generate_schema.sh
new file mode 100755
index 0000000000000..c65b178011d33
--- /dev/null
+++ b/crates/sui-indexer-alt/generate_schema.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# Copyright (c) Mysten Labs, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Update sui-indexer's generated src/schema.rs based on the schema after
+# running all its migrations on a clean database. Expects the first argument to
+# be a port to run the temporary database on (defaults to 5433).
+
+set -x
+set -e
+
+if ! command -v git &> /dev/null; then
+    echo "Please install git: e.g. brew install git" >&2
+    exit 1
+fi
+
+for PG in psql initdb postgres pg_isready pg_ctl; do
+    if ! command -v $PG &> /dev/null; then
+        echo "Could not find $PG. Please install postgres: e.g. brew install postgresql@15" >&2
+        exit 1
+    fi
+done
+
+if ! command -v diesel &> /dev/null; then
+    echo "Please install diesel: e.g. cargo install diesel_cli --features postgres" >&2
+    exit 1
+fi
+
+REPO=$(git rev-parse --show-toplevel)
+
+# Create a temporary directory to store the ephemeral DB.
+TMP=$(mktemp -d)
+
+# Set-up a trap to clean everything up on EXIT (stop DB, delete temp directory)
+function cleanup {
+  pg_ctl stop -D "$TMP" -mfast
+  set +x
+  echo "Postgres STDOUT:"
+  cat "$TMP/db.stdout"
+  echo "Postgres STDERR:"
+  cat "$TMP/db.stderr"
+  set -x
+  rm -rf "$TMP"
+}
+trap cleanup EXIT
+
+# Create a new database in the temporary directory
+initdb -D "$TMP" --user postgres
+
+# Run the DB in the background, on the port provided and capture its output
+PORT=${1:-5433}
+postgres -D "$TMP" -p "$PORT" -c unix_socket_directories=                      \
+   > "$TMP/db.stdout"                                                          \
+  2> "$TMP/db.stderr"                                                          &
+
+# Wait for postgres to report as ready
+RETRIES=0
+while ! pg_isready -p "$PORT" --host "localhost" --username "postgres"; do
+  if [ $RETRIES -gt 5 ]; then
+    echo "Postgres failed to start" >&2
+    exit 1
+  fi
+  sleep 1
+  RETRIES=$((RETRIES + 1))
+done
+
+# Run all migrations on the new database
+diesel migration run                                                          \
+  --database-url "postgres://postgres:postgrespw@localhost:$PORT"             \
+  --migration-dir "$REPO/crates/sui-indexer-alt/migrations"
+
+# Generate the schema.rs file, excluding partition tables and including the
+# copyright notice.
+diesel print-schema                                                           \
+  --database-url "postgres://postgres:postgrespw@localhost:$PORT"             \
+  --patch-file "$REPO/crates/sui-indexer-alt/schema.patch"                    \
+  > "$REPO/crates/sui-indexer-alt/src/schema.rs"
diff --git a/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/down.sql b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/down.sql
new file mode 100644
index 0000000000000..a9f526091194b
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/down.sql
@@ -0,0 +1,6 @@
+-- This file was automatically created by Diesel to setup helper functions
+-- and other internal bookkeeping. This file is safe to edit, any future
+-- changes will be added to existing projects as new migrations.
+
+DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
+DROP FUNCTION IF EXISTS diesel_set_updated_at();
diff --git a/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/up.sql b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/up.sql
new file mode 100644
index 0000000000000..d68895b1a7b7d
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/up.sql
@@ -0,0 +1,36 @@
+-- This file was automatically created by Diesel to setup helper functions
+-- and other internal bookkeeping. This file is safe to edit, any future
+-- changes will be added to existing projects as new migrations.
+
+
+
+
+-- Sets up a trigger for the given table to automatically set a column called
+-- `updated_at` whenever the row is modified (unless `updated_at` was included
+-- in the modified columns)
+--
+-- # Example
+--
+-- ```sql
+-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
+--
+-- SELECT diesel_manage_updated_at('users');
+-- ```
+CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
+BEGIN
+    EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
+                    FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
+BEGIN
+    IF (
+        NEW IS DISTINCT FROM OLD AND
+        NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
+    ) THEN
+        NEW.updated_at := current_timestamp;
+    END IF;
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/down.sql b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/down.sql
new file mode 100644
index 0000000000000..837ea1e8355cc
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kv_checkpoints;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/up.sql b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/up.sql
new file mode 100644
index 0000000000000..f177da0844341
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/up.sql
@@ -0,0 +1,6 @@
+CREATE TABLE IF NOT EXISTS kv_checkpoints
+(
+    sequence_number                     BIGINT       PRIMARY KEY,
+    certified_checkpoint                BYTEA        NOT NULL,
+    checkpoint_contents                 BYTEA        NOT NULL
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/down.sql b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/down.sql
new file mode 100644
index 0000000000000..5d09c2f77e34c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kv_objects;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/up.sql b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/up.sql
new file mode 100644
index 0000000000000..471144af9840e
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/up.sql
@@ -0,0 +1,7 @@
+CREATE TABLE IF NOT EXISTS kv_objects
+(
+    object_id                   bytea         NOT NULL,
+    object_version              bigint        NOT NULL,
+    serialized_object           bytea,
+    PRIMARY KEY (object_id, object_version)
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/down.sql b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/down.sql
new file mode 100644
index 0000000000000..fa46db0f19143
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kv_transactions;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/up.sql b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/up.sql
new file mode 100644
index 0000000000000..cb0cdd5d68b01
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/up.sql
@@ -0,0 +1,12 @@
+CREATE TABLE IF NOT EXISTS kv_transactions
+(
+    tx_digest                   BYTEA         PRIMARY KEY,
+    cp_sequence_number          BIGINT        NOT NULL,
+    timestamp_ms                BIGINT        NOT NULL,
+    -- BCS serialized TransactionData
+    raw_transaction             BYTEA         NOT NULL,
+    -- BCS serialized TransactionEffects
+    raw_effects                 BYTEA         NOT NULL,
+    -- BCS serialized array of Events
+    events                      BYTEA         NOT NULL
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/down.sql b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/down.sql
new file mode 100644
index 0000000000000..b0868da73b0f2
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS tx_affected_objects;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/up.sql b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/up.sql
new file mode 100644
index 0000000000000..5eeb8f40a3893
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/up.sql
@@ -0,0 +1,13 @@
+CREATE TABLE IF NOT EXISTS tx_affected_objects (
+    tx_sequence_number          BIGINT       NOT NULL,
+    -- Object ID of the object touched by this transaction.
+    affected                    BYTEA        NOT NULL,
+    sender                      BYTEA        NOT NULL,
+    PRIMARY KEY(affected, tx_sequence_number)
+);
+
+CREATE INDEX IF NOT EXISTS tx_affected_objects_tx_sequence_number
+ON tx_affected_objects (tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS tx_affected_objects_sender
+ON tx_affected_objects (sender, affected, tx_sequence_number);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/down.sql b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/down.sql
new file mode 100644
index 0000000000000..e36b0a7736cc2
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS tx_balance_changes;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/up.sql b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/up.sql
new file mode 100644
index 0000000000000..790c5aa14d543
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/up.sql
@@ -0,0 +1,6 @@
+CREATE TABLE IF NOT EXISTS tx_balance_changes
+(
+    tx_sequence_number          BIGINT        PRIMARY KEY,
+    -- BCS serialized array of BalanceChanges
+    balance_changes             BYTEA         NOT NULL
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/down.sql b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/down.sql
new file mode 100644
index 0000000000000..e9de336153f62
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS watermarks;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/up.sql b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/up.sql
new file mode 100644
index 0000000000000..1fd0d890d29b1
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/up.sql
@@ -0,0 +1,38 @@
+CREATE TABLE IF NOT EXISTS watermarks
+(
+    -- The pipeline governed by this watermark, i.e `epochs`, `checkpoints`,
+    -- `transactions`.
+    pipeline                    TEXT          PRIMARY KEY,
+    -- Inclusive upper epoch bound for this entity's data. Committer updates
+    -- this field. Pruner uses this to determine if pruning is necessary based
+    -- on the retention policy.
+    epoch_hi_inclusive          BIGINT        NOT NULL,
+    -- Inclusive upper checkpoint bound for this entity's data. Committer
+    -- updates this field. All data of this entity in the checkpoint must be
+    -- persisted before advancing this watermark. The committer refers to this
+    -- on disaster recovery to resume writing.
+    checkpoint_hi_inclusive     BIGINT        NOT NULL,
+    -- Exclusive upper transaction sequence number bound for this entity's
+    -- data. Committer updates this field.
+    tx_hi                       BIGINT        NOT NULL,
+    -- Inclusive upper timestamp bound (in milliseconds). Committer updates
+    -- this field once it can guarantee that all checkpoints at or before this
+    -- timestamp have been written to the database.
+    timestamp_ms_hi_inclusive   BIGINT        NOT NULL,
+    -- Inclusive lower epoch bound for this entity's data. Pruner updates this
+    -- field when the epoch range exceeds the retention policy.
+    epoch_lo                    BIGINT        NOT NULL,
+    -- Inclusive low watermark that the pruner advances. Corresponds to the
+    -- epoch id, checkpoint sequence number, or tx sequence number depending on
+    -- the entity. Data before this watermark is considered pruned by a reader.
+    -- The underlying data may still exist in the db instance.
+    reader_lo                   BIGINT        NOT NULL,
+    -- Updated using the database's current timestamp when the pruner sees that
+    -- some data needs to be dropped. The pruner uses this column to determine
+    -- whether to prune or wait long enough that all in-flight reads complete
+    -- or timeout before it acts on an updated watermark.
+    pruner_timestamp_ms         BIGINT        NOT NULL,
+    -- Column used by the pruner to track its true progress. Data below this
+    -- watermark can be immediately pruned.
+    pruner_hi                   BIGINT        NOT NULL
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/down.sql b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/down.sql
new file mode 100644
index 0000000000000..b1948d3bcfee0
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/down.sql
@@ -0,0 +1,2 @@
+DROP TABLE IF EXISTS ev_emit_mod;
+DROP TABLE IF EXISTS ev_struct_inst;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/up.sql b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/up.sql
new file mode 100644
index 0000000000000..8e553a30bfa0c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/up.sql
@@ -0,0 +1,56 @@
+CREATE TABLE IF NOT EXISTS ev_emit_mod
+(
+    package                     BYTEA,
+    module                      TEXT,
+    tx_sequence_number          BIGINT,
+    sender                      BYTEA         NOT NULL,
+    PRIMARY KEY(package, module, tx_sequence_number)
+);
+
+CREATE INDEX IF NOT EXISTS ev_emit_mod_tx_sequence_number
+ON ev_emit_mod (tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_emit_mod_sender
+ON ev_emit_mod (sender, package, module, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_emit_pkg
+ON ev_emit_mod (package, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_emit_pkg_sender
+ON ev_emit_mod (sender, package, tx_sequence_number);
+
+CREATE TABLE IF NOT EXISTS ev_struct_inst
+(
+    package                     BYTEA,
+    module                      TEXT,
+    name                        TEXT,
+    -- BCS encoded array of TypeTags for type parameters.
+    instantiation               BYTEA,
+    tx_sequence_number          BIGINT,
+    sender                      BYTEA         NOT NULL,
+    PRIMARY KEY(package, module, name, instantiation, tx_sequence_number)
+);
+
+CREATE INDEX IF NOT EXISTS ev_struct_inst_tx_sequence_number
+ON ev_struct_inst (tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_inst_sender
+ON ev_struct_inst (sender, package, module, name, instantiation, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_name
+ON ev_struct_inst (package, module, name, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_name_sender
+ON ev_struct_inst (sender, package, module, name, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_mod
+ON ev_struct_inst (package, module, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_mod_sender
+ON ev_struct_inst (sender, package, module, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_pkg
+ON ev_struct_inst (package, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_pkg_sender
+ON ev_struct_inst (sender, package, tx_sequence_number);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/down.sql b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/down.sql
new file mode 100644
index 0000000000000..4056a62d85e54
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS sum_obj_types;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/up.sql b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/up.sql
new file mode 100644
index 0000000000000..4658689f7823a
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/up.sql
@@ -0,0 +1,62 @@
+-- A summary table of live objects, with owner and type information
+--
+-- This can be used to paginate the live object set at an instant in time,
+-- filtering by a combination of owner and/or type.
+CREATE TABLE IF NOT EXISTS sum_obj_types
+(
+    object_id                   BYTEA         PRIMARY KEY,
+    object_version              BIGINT        NOT NULL,
+    -- An enum describing the object's ownership model:
+    --
+    --   Immutable = 0,
+    --   Address-owned = 1,
+    --   Object-owned (dynamic field) = 2,
+    --   Shared = 3.
+    --
+    -- Note that there is a distinction between an object that is owned by
+    -- another object (kind 2), which relates to dynamic fields, and an object
+    -- that is owned by another object's address (kind 1), which relates to
+    -- transfer-to-object.
+    owner_kind                  SMALLINT      NOT NULL,
+    -- The address for address-owned objects, and the parent object for
+    -- object-owned objects.
+    owner_id                    BYTEA,
+    -- The following fields relate to the object's type. These only apply to
+    -- Move Objects. For Move Packages they will all be NULL.
+    --
+    -- The type's package ID.
+    package                     BYTEA,
+    -- The type's module name.
+    module                      TEXT,
+    -- The type's name.
+    name                        TEXT,
+    -- The type's type parameters, as a BCS-encoded array of TypeTags.
+    instantiation               BYTEA
+);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner
+ON sum_obj_types (owner_kind, owner_id, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_pkg
+ON sum_obj_types (package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_mod
+ON sum_obj_types (package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_name
+ON sum_obj_types (package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_inst
+ON sum_obj_types (package, module, name, instantiation, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_pkg
+ON sum_obj_types (owner_kind, owner_id, package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_mod
+ON sum_obj_types (owner_kind, owner_id, package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_name
+ON sum_obj_types (owner_kind, owner_id, package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_inst
+ON sum_obj_types (owner_kind, owner_id, package, module, name, instantiation, object_id, object_version);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/down.sql b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/down.sql
new file mode 100644
index 0000000000000..68b45da3c6d9a
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS sum_coin_balances;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/up.sql b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/up.sql
new file mode 100644
index 0000000000000..dbd93cc74539c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/up.sql
@@ -0,0 +1,20 @@
+-- A summary table for coins owned by addresses
+--
+-- This can be used to paginate the coin balances of a given address at an
+-- instant in time, returning coins in descending balance order.
+CREATE TABLE IF NOT EXISTS sum_coin_balances
+(
+    object_id                   BYTEA         PRIMARY KEY,
+    object_version              BIGINT        NOT NULL,
+    -- The address that owns this version of the coin (it is guaranteed to be
+    -- address-owned).
+    owner_id                    BYTEA         NOT NULL,
+    -- The type of the coin, as a BCS-serialized `TypeTag`. This is only the
+    -- marker type, and not the full object type (e.g. `0x0...02::sui::SUI`).
+    coin_type                   BYTEA         NOT NULL,
+    -- The balance of the coin at this version.
+    coin_balance                BIGINT        NOT NULL
+);
+
+CREATE INDEX IF NOT EXISTS sum_coin_balances_owner_type
+ON sum_coin_balances (owner_id, coin_type, coin_balance, object_id, object_version);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/down.sql b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/down.sql
new file mode 100644
index 0000000000000..f32188e89f020
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS obj_versions;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/up.sql b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/up.sql
new file mode 100644
index 0000000000000..31939132ae0b5
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/up.sql
@@ -0,0 +1,14 @@
+-- This table is used to answer queries of the form: Give me the latest version
+-- of an object O with version less than or equal to V at checkpoint C. These
+-- are useful for looking up dynamic fields on objects (live or historical).
+CREATE TABLE IF NOT EXISTS obj_versions
+(
+    object_id                   BYTEA         NOT NULL,
+    object_version              BIGINT        NOT NULL,
+    object_digest               BYTEA         NOT NULL,
+    cp_sequence_number          BIGINT        NOT NULL,
+    PRIMARY KEY (object_id, object_version)
+);
+
+CREATE INDEX IF NOT EXISTS obj_versions_cp_sequence_number
+ON obj_versions (cp_sequence_number);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/down.sql b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/down.sql
new file mode 100644
index 0000000000000..e9a511867961e
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS wal_obj_types;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/up.sql b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/up.sql
new file mode 100644
index 0000000000000..b7149fc50b29c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/up.sql
@@ -0,0 +1,76 @@
+-- Write-ahead log for `sum_obj_types`.
+--
+-- It contains the same columns and indices as `sum_obj_types`, but with the
+-- following changes:
+--
+-- - A `cp_sequence_number` column (and an index on it), to support pruning by
+--   checkpoint.
+--
+-- - The primary key includes the version, as the table may contain multiple
+--   versions per object ID.
+--
+-- - The `owner_kind` column is nullable, because this table also tracks
+--   deleted and wrapped objects (where all the fields except the ID, version,
+--   and checkpoint are NULL).
+--
+-- - There is an additional index on ID and version for querying the latest
+--   version of every object.
+--
+-- This table is used in conjunction with `sum_obj_types` to support consistent
+-- live object set queries: `sum_obj_types` holds the state of the live object
+-- set at some checkpoint `C < T` where `T` is the tip of the chain, and
+-- `wal_obj_types` stores all the updates and deletes between `C` and `T`.
+--
+-- To reconstruct the the live object set at some snapshot checkpoint `S`
+-- between `C` and `T`, a query can be constructed that starts with the set
+-- from `sum_obj_types` and adds updates in `wal_obj_types` from
+-- `cp_sequence_number <= S`.
+--
+-- See `up.sql` for the original `sum_obj_types` table for documentation on
+-- columns.
+CREATE TABLE IF NOT EXISTS wal_obj_types
+(
+    object_id                   BYTEA         NOT NULL,
+    object_version              BIGINT        NOT NULL,
+    owner_kind                  SMALLINT,
+    owner_id                    BYTEA,
+    package                     BYTEA,
+    module                      TEXT,
+    name                        TEXT,
+    instantiation               BYTEA,
+    cp_sequence_number          BIGINT        NOT NULL,
+    PRIMARY KEY (object_id, object_version)
+);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_cp_sequence_number
+ON wal_obj_types (cp_sequence_number);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_version
+ON wal_obj_types (object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner
+ON wal_obj_types (owner_kind, owner_id, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_pkg
+ON wal_obj_types (package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_mod
+ON wal_obj_types (package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_name
+ON wal_obj_types (package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_inst
+ON wal_obj_types (package, module, name, instantiation, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_pkg
+ON wal_obj_types (owner_kind, owner_id, package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_mod
+ON wal_obj_types (owner_kind, owner_id, package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_name
+ON wal_obj_types (owner_kind, owner_id, package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_inst
+ON wal_obj_types (owner_kind, owner_id, package, module, name, instantiation, object_id, object_version);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/down.sql b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/down.sql
new file mode 100644
index 0000000000000..a60919b661e84
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS wal_coin_balances;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/up.sql b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/up.sql
new file mode 100644
index 0000000000000..9a78eeea9303b
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/up.sql
@@ -0,0 +1,49 @@
+-- Write-ahead log for `sum_coin_balances`.
+--
+-- It contains the same columns and indices as `sum_coin_balances`, but with
+-- the following changes:
+--
+-- - A `cp_sequence_number` column (and an index on it), to support pruning by
+--   checkpoint.
+--
+-- - The primary key includes the version, as the table may contain multiple
+--   versions per object ID.
+--
+-- - The other fields are nullable, because this table also tracks deleted and
+--   wrapped objects.
+--
+-- - There is an additional index on ID and version for querying the latest
+--   version of every object.
+--
+-- This table is used in conjunction with `sum_coin_balances` to support
+-- consistent live object set queries: `sum_coin_balances` holds the state of
+-- the live object set at some checkpoint `C < T` where `T` is the tip of the
+-- chain, and `wal_coin_balances` stores all the updates and deletes between
+-- `C` and `T`.
+--
+-- To reconstruct the live object set at some snapshot checkpoint `S`
+-- between `C` and `T`, a query can be constructed that starts with the set
+-- from `sum_coin_balances` and adds updates in `wal_coin_balances` from
+-- `cp_sequence_number <= S`.
+--
+-- See `up.sql` for the original `sum_coin_balances` table for documentation on
+-- columns.
+CREATE TABLE IF NOT EXISTS wal_coin_balances
+(
+    object_id                   BYTEA         NOT NULL,
+    object_version              BIGINT        NOT NULL,
+    owner_id                    BYTEA,
+    coin_type                   BYTEA,
+    coin_balance                BIGINT,
+    cp_sequence_number          BIGINT        NOT NULL,
+    PRIMARY KEY (object_id, object_version)
+);
+
+CREATE INDEX IF NOT EXISTS wal_coin_balances_cp_sequence_number
+ON wal_coin_balances (cp_sequence_number);
+
+CREATE INDEX IF NOT EXISTS wal_coin_balances_version
+ON wal_coin_balances (object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_coin_balances_owner_type
+ON wal_coin_balances (owner_id, coin_type, coin_balance, object_id, object_version);
diff --git a/crates/sui-indexer-alt/schema.patch b/crates/sui-indexer-alt/schema.patch
new file mode 100644
index 0000000000000..ee683461f7a7d
--- /dev/null
+++ b/crates/sui-indexer-alt/schema.patch
@@ -0,0 +1,7 @@
+diff --git a/crates/sui-indexer-alt/src/schema.rs b/crates/sui-indexer-alt/src/schema.rs
+--- a/crates/sui-indexer-alt/src/schema.rs
++++ b/crates/sui-indexer-alt/src/schema.rs
+@@ -1 +1,3 @@
++// Copyright (c) Mysten Labs, Inc.
++// SPDX-License-Identifier: Apache-2.0
+ // @generated automatically by Diesel CLI.
diff --git a/crates/sui-indexer-alt/src/args.rs b/crates/sui-indexer-alt/src/args.rs
new file mode 100644
index 0000000000000..fd6fa3e20bd84
--- /dev/null
+++ b/crates/sui-indexer-alt/src/args.rs
@@ -0,0 +1,37 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::db::DbConfig;
+use crate::IndexerConfig;
+use clap::Subcommand;
+
+#[derive(clap::Parser, Debug, Clone)]
+pub struct Args {
+    #[command(flatten)]
+    pub db_config: DbConfig,
+
+    #[command(subcommand)]
+    pub command: Command,
+}
+
+#[allow(clippy::large_enum_variant)]
+#[derive(Subcommand, Clone, Debug)]
+pub enum Command {
+    /// Run the indexer.
+    Indexer {
+        #[command(flatten)]
+        indexer: IndexerConfig,
+
+        /// Number of checkpoints to delay indexing summary tables for.
+        #[clap(long)]
+        consistent_range: Option,
+    },
+
+    /// Wipe the database of its contents
+    ResetDatabase {
+        /// If true, only drop all tables but do not run the migrations.
+        /// That is, no tables will exist in the DB after the reset.
+        #[clap(long, default_value_t = false)]
+        skip_migrations: bool,
+    },
+}
diff --git a/crates/sui-indexer-alt/src/db.rs b/crates/sui-indexer-alt/src/db.rs
new file mode 100644
index 0000000000000..cc2658099c742
--- /dev/null
+++ b/crates/sui-indexer-alt/src/db.rs
@@ -0,0 +1,158 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::anyhow;
+use diesel::migration::MigrationVersion;
+use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
+use diesel_async::{
+    pooled_connection::{
+        bb8::{Pool, PooledConnection, RunError},
+        AsyncDieselConnectionManager, PoolError,
+    },
+    AsyncPgConnection, RunQueryDsl,
+};
+use diesel_migrations::{embed_migrations, EmbeddedMigrations};
+use std::time::Duration;
+use tracing::info;
+use url::Url;
+
+const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
+
+#[derive(Clone)]
+pub struct Db {
+    pool: Pool,
+}
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct DbConfig {
+    /// The URL of the database to connect to.
+    #[arg(long)]
+    database_url: Url,
+
+    /// Number of connections to keep in the pool.
+    #[arg(long, default_value_t = 100)]
+    connection_pool_size: u32,
+
+    /// Time spent waiting for a connection from the pool to become available.
+    #[arg(
+        long,
+        default_value = "60",
+        value_name = "SECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_secs)
+    )]
+    connection_timeout: Duration,
+}
+
+pub type Connection<'p> = PooledConnection<'p, AsyncPgConnection>;
+
+impl Db {
+    /// Construct a new DB connection pool. Instances of [Db] can be cloned to share access to the
+    /// same pool.
+    pub async fn new(config: DbConfig) -> Result {
+        let manager = AsyncDieselConnectionManager::new(config.database_url.as_str());
+
+        let pool = Pool::builder()
+            .max_size(config.connection_pool_size)
+            .connection_timeout(config.connection_timeout)
+            .build(manager)
+            .await?;
+
+        Ok(Self { pool })
+    }
+
+    /// Retrieves a connection from the pool. Can fail with a timeout if a connection cannot be
+    /// established before the [DbConfig::connection_timeout] has elapsed.
+    pub(crate) async fn connect(&self) -> Result, RunError> {
+        self.pool.get().await
+    }
+
+    /// Statistics about the connection pool
+    pub(crate) fn state(&self) -> bb8::State {
+        self.pool.state()
+    }
+
+    async fn clear_database(&self) -> Result<(), anyhow::Error> {
+        info!("Clearing the database...");
+        let mut conn = self.connect().await?;
+        let drop_all_tables = "
+        DO $$ DECLARE
+            r RECORD;
+        BEGIN
+        FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public')
+            LOOP
+                EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE';
+            END LOOP;
+        END $$;";
+        diesel::sql_query(drop_all_tables)
+            .execute(&mut conn)
+            .await?;
+        info!("Dropped all tables.");
+
+        let drop_all_procedures = "
+        DO $$ DECLARE
+            r RECORD;
+        BEGIN
+            FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes
+                      FROM pg_proc INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid)
+                      WHERE ns.nspname = 'public' AND prokind = 'p')
+            LOOP
+                EXECUTE 'DROP PROCEDURE IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE';
+            END LOOP;
+        END $$;";
+        diesel::sql_query(drop_all_procedures)
+            .execute(&mut conn)
+            .await?;
+        info!("Dropped all procedures.");
+
+        let drop_all_functions = "
+        DO $$ DECLARE
+            r RECORD;
+        BEGIN
+            FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes
+                      FROM pg_proc INNER JOIN pg_namespace ON (pg_proc.pronamespace = pg_namespace.oid)
+                      WHERE pg_namespace.nspname = 'public' AND prokind = 'f')
+            LOOP
+                EXECUTE 'DROP FUNCTION IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE';
+            END LOOP;
+        END $$;";
+        diesel::sql_query(drop_all_functions)
+            .execute(&mut conn)
+            .await?;
+        info!("Database cleared.");
+        Ok(())
+    }
+
+    pub(crate) async fn run_migrations(
+        &self,
+    ) -> Result>, anyhow::Error> {
+        use diesel_migrations::MigrationHarness;
+
+        info!("Running migrations ...");
+        let conn = self.pool.dedicated_connection().await?;
+        let mut wrapper: AsyncConnectionWrapper =
+            diesel_async::async_connection_wrapper::AsyncConnectionWrapper::from(conn);
+
+        let finished_migrations = tokio::task::spawn_blocking(move || {
+            wrapper
+                .run_pending_migrations(MIGRATIONS)
+                .map(|versions| versions.iter().map(MigrationVersion::as_owned).collect())
+        })
+        .await?
+        .map_err(|e| anyhow!("Failed to run migrations: {:?}", e))?;
+        info!("Migrations complete.");
+        Ok(finished_migrations)
+    }
+}
+
+/// Drop all tables and rerun migrations.
+pub async fn reset_database(
+    db_config: DbConfig,
+    skip_migrations: bool,
+) -> Result<(), anyhow::Error> {
+    let db = Db::new(db_config).await?;
+    db.clear_database().await?;
+    if !skip_migrations {
+        db.run_migrations().await?;
+    }
+    Ok(())
+}
diff --git a/crates/sui-indexer-alt/src/handlers/ev_emit_mod.rs b/crates/sui-indexer-alt/src/handlers/ev_emit_mod.rs
new file mode 100644
index 0000000000000..bc3e5f607c2c8
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/ev_emit_mod.rs
@@ -0,0 +1,62 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeSet, sync::Arc};
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::events::StoredEvEmitMod, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::ev_emit_mod,
+};
+pub struct EvEmitMod;
+
+impl Processor for EvEmitMod {
+    const NAME: &'static str = "ev_emit_mod";
+
+    type Value = StoredEvEmitMod;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = BTreeSet::new();
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            values.extend(
+                tx.events
+                    .iter()
+                    .flat_map(|evs| &evs.data)
+                    .map(|ev| StoredEvEmitMod {
+                        package: ev.package_id.to_vec(),
+                        module: ev.transaction_module.to_string(),
+                        tx_sequence_number: (first_tx + i) as i64,
+                        sender: ev.sender.to_vec(),
+                    }),
+            );
+        }
+
+        Ok(values.into_iter().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for EvEmitMod {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(ev_emit_mod::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/ev_struct_inst.rs b/crates/sui-indexer-alt/src/handlers/ev_struct_inst.rs
new file mode 100644
index 0000000000000..0a55d60172752
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/ev_struct_inst.rs
@@ -0,0 +1,66 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeSet, sync::Arc};
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::events::StoredEvStructInst, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::ev_struct_inst,
+};
+
+pub struct EvStructInst;
+
+impl Processor for EvStructInst {
+    const NAME: &'static str = "ev_struct_inst";
+
+    type Value = StoredEvStructInst;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = BTreeSet::new();
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_sequence_number = (first_tx + i) as i64;
+            for (j, ev) in tx.events.iter().flat_map(|evs| evs.data.iter().enumerate()) {
+                values.insert(StoredEvStructInst {
+                    package: ev.type_.address.to_vec(),
+                    module: ev.type_.module.to_string(),
+                    name: ev.type_.name.to_string(),
+                    instantiation: bcs::to_bytes(&ev.type_.type_params)
+                        .with_context(|| format!(
+                            "Failed to serialize type parameters for event ({tx_sequence_number}, {j})"
+                        ))?,
+                    tx_sequence_number: (first_tx + i) as i64,
+                    sender: ev.sender.to_vec(),
+                });
+            }
+        }
+
+        Ok(values.into_iter().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for EvStructInst {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(ev_struct_inst::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/kv_checkpoints.rs b/crates/sui-indexer-alt/src/handlers/kv_checkpoints.rs
new file mode 100644
index 0000000000000..ede9640b1f44b
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/kv_checkpoints.rs
@@ -0,0 +1,43 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::checkpoints::StoredCheckpoint, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::kv_checkpoints,
+};
+
+pub struct KvCheckpoints;
+
+impl Processor for KvCheckpoints {
+    const NAME: &'static str = "kv_checkpoints";
+
+    type Value = StoredCheckpoint;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let sequence_number = checkpoint.checkpoint_summary.sequence_number as i64;
+        Ok(vec![StoredCheckpoint {
+            sequence_number,
+            certified_checkpoint: bcs::to_bytes(&checkpoint.checkpoint_summary)
+                .with_context(|| format!("Serializing checkpoint {sequence_number} summary"))?,
+            checkpoint_contents: bcs::to_bytes(&checkpoint.checkpoint_contents)
+                .with_context(|| format!("Serializing checkpoint {sequence_number} contents"))?,
+        }])
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for KvCheckpoints {
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(kv_checkpoints::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/kv_objects.rs b/crates/sui-indexer-alt/src/handlers/kv_objects.rs
new file mode 100644
index 0000000000000..f645cceab347f
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/kv_objects.rs
@@ -0,0 +1,69 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::objects::StoredObject, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::kv_objects,
+};
+
+pub struct KvObjects;
+
+impl Processor for KvObjects {
+    const NAME: &'static str = "kv_objects";
+    type Value = StoredObject;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let deleted_objects = checkpoint
+            .eventually_removed_object_refs_post_version()
+            .into_iter()
+            .map(|(id, version, _)| {
+                Ok(StoredObject {
+                    object_id: id.to_vec(),
+                    object_version: version.value() as i64,
+                    serialized_object: None,
+                })
+            });
+
+        let created_objects =
+            checkpoint
+                .transactions
+                .iter()
+                .flat_map(|txn| txn.output_objects.iter())
+                .map(|o| {
+                    let id = o.id();
+                    let version = o.version().value();
+                    Ok(StoredObject {
+                        object_id: id.to_vec(),
+                        object_version: version as i64,
+                        serialized_object: Some(bcs::to_bytes(o).with_context(|| {
+                            format!("Serializing object {id} version {version}")
+                        })?),
+                    })
+                });
+
+        deleted_objects
+            .chain(created_objects)
+            .collect::, _>>()
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for KvObjects {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(kv_objects::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/kv_transactions.rs b/crates/sui-indexer-alt/src/handlers/kv_transactions.rs
new file mode 100644
index 0000000000000..d3144032705d6
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/kv_transactions.rs
@@ -0,0 +1,72 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::transactions::StoredTransaction, pipeline::concurrent::Handler,
+    pipeline::Processor, schema::kv_transactions,
+};
+
+pub struct KvTransactions;
+
+impl Processor for KvTransactions {
+    const NAME: &'static str = "kv_transactions";
+
+    type Value = StoredTransaction;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number as i64;
+
+        let mut values = Vec::with_capacity(transactions.len());
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_digest = tx.transaction.digest();
+            let transaction = &tx.transaction.data().intent_message().value;
+
+            let effects = &tx.effects;
+            let events: Vec<_> = tx.events.iter().flat_map(|e| e.data.iter()).collect();
+
+            values.push(StoredTransaction {
+                tx_digest: tx_digest.inner().into(),
+                cp_sequence_number,
+                timestamp_ms: checkpoint_summary.timestamp_ms as i64,
+                raw_transaction: bcs::to_bytes(transaction).with_context(|| {
+                    format!("Serializing transaction {tx_digest} (cp {cp_sequence_number}, tx {i})")
+                })?,
+                raw_effects: bcs::to_bytes(effects).with_context(|| {
+                    format!("Serializing effects for transaction {tx_digest} (cp {cp_sequence_number}, tx {i})")
+                })?,
+                events: bcs::to_bytes(&events).with_context(|| {
+                    format!("Serializing events for transaction {tx_digest} (cp {cp_sequence_number}, tx {i})")
+                })?,
+            });
+        }
+
+        Ok(values)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for KvTransactions {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(kv_transactions::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/mod.rs b/crates/sui-indexer-alt/src/handlers/mod.rs
new file mode 100644
index 0000000000000..055ceb870d8a9
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/mod.rs
@@ -0,0 +1,15 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod ev_emit_mod;
+pub mod ev_struct_inst;
+pub mod kv_checkpoints;
+pub mod kv_objects;
+pub mod kv_transactions;
+pub mod obj_versions;
+pub mod sum_coin_balances;
+pub mod sum_obj_types;
+pub mod tx_affected_objects;
+pub mod tx_balance_changes;
+pub mod wal_coin_balances;
+pub mod wal_obj_types;
diff --git a/crates/sui-indexer-alt/src/handlers/obj_versions.rs b/crates/sui-indexer-alt/src/handlers/obj_versions.rs
new file mode 100644
index 0000000000000..7ca0973cc03f0
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/obj_versions.rs
@@ -0,0 +1,62 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db,
+    models::objects::StoredObjVersion,
+    pipeline::{concurrent::Handler, Processor},
+    schema::obj_versions,
+};
+
+pub struct ObjVersions;
+
+impl Processor for ObjVersions {
+    const NAME: &'static str = "obj_versions";
+    type Value = StoredObjVersion;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number as i64;
+        Ok(transactions
+            .iter()
+            .flat_map(|txn| txn.output_objects.iter())
+            .map(|o| {
+                let id = o.id();
+                let version = o.version().value();
+                let digest = o.digest();
+                StoredObjVersion {
+                    object_id: id.to_vec(),
+                    object_version: version as i64,
+                    object_digest: digest.inner().into(),
+                    cp_sequence_number,
+                }
+            })
+            .collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for ObjVersions {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(obj_versions::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/sum_coin_balances.rs b/crates/sui-indexer-alt/src/handlers/sum_coin_balances.rs
new file mode 100644
index 0000000000000..745430dba9353
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/sum_coin_balances.rs
@@ -0,0 +1,188 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{
+    collections::{btree_map::Entry, BTreeMap},
+    sync::Arc,
+};
+
+use anyhow::{anyhow, bail, ensure};
+use diesel::{upsert::excluded, ExpressionMethods};
+use diesel_async::RunQueryDsl;
+use futures::future::try_join_all;
+use sui_types::{
+    base_types::ObjectID, effects::TransactionEffectsAPI, full_checkpoint_content::CheckpointData,
+    object::Owner,
+};
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredSumCoinBalance},
+    pipeline::{sequential::Handler, Processor},
+    schema::sum_coin_balances,
+};
+
+/// Each insert or update will include at most this many rows -- the size is chosen to maximize the
+/// rows without hitting the limit on bind parameters.
+const UPDATE_CHUNK_ROWS: usize = i16::MAX as usize / 5;
+
+/// Each deletion will include at most this many rows.
+const DELETE_CHUNK_ROWS: usize = i16::MAX as usize;
+
+pub struct SumCoinBalances;
+
+impl Processor for SumCoinBalances {
+    const NAME: &'static str = "sum_coin_balances";
+
+    type Value = StoredObjectUpdate;
+
+    fn process(checkpoint: &Arc) -> anyhow::Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number;
+        let mut values: BTreeMap = BTreeMap::new();
+        let mut coin_types: BTreeMap> = BTreeMap::new();
+
+        // Iterate over transactions in reverse so we see the latest version of each object first.
+        for tx in transactions.iter().rev() {
+            // Find all coins in the transaction's inputs and outputs.
+            for object in tx.input_objects.iter().chain(tx.output_objects.iter()) {
+                if let Some(coin_type) = object.type_().and_then(|t| t.coin_type_maybe()) {
+                    let serialized = bcs::to_bytes(&coin_type)
+                        .map_err(|_| anyhow!("Failed to serialize type for {}", object.id()))?;
+
+                    coin_types.insert(object.id(), serialized);
+                }
+            }
+
+            // Deleted and wrapped coins
+            for change in tx.effects.object_changes() {
+                // The object is not deleted/wrapped, or if it is it was unwrapped in the same
+                // transaction.
+                if change.output_digest.is_some() || change.input_version.is_none() {
+                    continue;
+                }
+
+                // Object is not a coin
+                if !coin_types.contains_key(&change.id) {
+                    continue;
+                }
+
+                let object_id = change.id;
+                let object_version = tx.effects.lamport_version().value();
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: None,
+                        });
+                    }
+                }
+            }
+
+            // Modified and created coins.
+            for object in &tx.output_objects {
+                let object_id = object.id();
+                let object_version = object.version().value();
+
+                let Some(coin_type) = coin_types.get(&object_id) else {
+                    continue;
+                };
+
+                // Coin balance only tracks address-owned objects
+                let Owner::AddressOwner(owner_id) = object.owner() else {
+                    continue;
+                };
+
+                let Some(coin) = object.as_coin_maybe() else {
+                    bail!("Failed to deserialize Coin for {object_id}");
+                };
+
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: Some(StoredSumCoinBalance {
+                                object_id: object_id.to_vec(),
+                                object_version: object_version as i64,
+                                owner_id: owner_id.to_vec(),
+                                coin_type: coin_type.clone(),
+                                coin_balance: coin.balance.value() as i64,
+                            }),
+                        });
+                    }
+                }
+            }
+        }
+
+        Ok(values.into_values().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for SumCoinBalances {
+    type Batch = BTreeMap;
+
+    fn batch(batch: &mut Self::Batch, updates: Vec) {
+        // `updates` are guaranteed to be provided in checkpoint order, so blindly inserting them
+        // will result in the batch containing the most up-to-date update for each object.
+        for update in updates {
+            batch.insert(update.object_id, update);
+        }
+    }
+
+    async fn commit(batch: &Self::Batch, conn: &mut db::Connection<'_>) -> anyhow::Result {
+        let mut updates = vec![];
+        let mut deletes = vec![];
+
+        for update in batch.values() {
+            if let Some(update) = &update.update {
+                updates.push(update.clone());
+            } else {
+                deletes.push(update.object_id.to_vec());
+            }
+        }
+
+        let update_chunks = updates.chunks(UPDATE_CHUNK_ROWS).map(|chunk| {
+            diesel::insert_into(sum_coin_balances::table)
+                .values(chunk)
+                .on_conflict(sum_coin_balances::object_id)
+                .do_update()
+                .set((
+                    sum_coin_balances::object_version
+                        .eq(excluded(sum_coin_balances::object_version)),
+                    sum_coin_balances::owner_id.eq(excluded(sum_coin_balances::owner_id)),
+                    sum_coin_balances::coin_balance.eq(excluded(sum_coin_balances::coin_balance)),
+                ))
+                .execute(conn)
+        });
+
+        let updated: usize = try_join_all(update_chunks).await?.into_iter().sum();
+
+        let delete_chunks = deletes.chunks(DELETE_CHUNK_ROWS).map(|chunk| {
+            diesel::delete(sum_coin_balances::table)
+                .filter(sum_coin_balances::object_id.eq_any(chunk.iter().cloned()))
+                .execute(conn)
+        });
+
+        let deleted: usize = try_join_all(delete_chunks).await?.into_iter().sum();
+
+        Ok(updated + deleted)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/sum_obj_types.rs b/crates/sui-indexer-alt/src/handlers/sum_obj_types.rs
new file mode 100644
index 0000000000000..c118a684f26df
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/sum_obj_types.rs
@@ -0,0 +1,184 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{
+    collections::{btree_map::Entry, BTreeMap},
+    sync::Arc,
+};
+
+use anyhow::{anyhow, ensure};
+use diesel::{upsert::excluded, ExpressionMethods};
+use diesel_async::RunQueryDsl;
+use futures::future::try_join_all;
+use sui_types::{
+    base_types::ObjectID, effects::TransactionEffectsAPI, full_checkpoint_content::CheckpointData,
+    object::Owner,
+};
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredOwnerKind, StoredSumObjType},
+    pipeline::{sequential::Handler, Processor},
+    schema::sum_obj_types,
+};
+
+/// Each insert or update will include at most this many rows -- the size is chosen to maximize the
+/// rows without hitting the limit on bind parameters.
+const UPDATE_CHUNK_ROWS: usize = i16::MAX as usize / 8;
+
+/// Each deletion will include at most this many rows.
+const DELETE_CHUNK_ROWS: usize = i16::MAX as usize;
+
+pub struct SumObjTypes;
+
+impl Processor for SumObjTypes {
+    const NAME: &'static str = "sum_obj_types";
+
+    type Value = StoredObjectUpdate;
+
+    fn process(checkpoint: &Arc) -> anyhow::Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number;
+        let mut values: BTreeMap = BTreeMap::new();
+
+        // Iterate over transactions in reverse so we see the latest version of each object first.
+        for tx in transactions.iter().rev() {
+            // Deleted and wrapped objects -- objects that show up without a digest in
+            // `object_changes` are either deleted or wrapped. Objects without an input version
+            // must have been unwrapped and deleted, meaning they do not need to be deleted from
+            // our records.
+            for change in tx.effects.object_changes() {
+                if change.output_digest.is_some() || change.input_version.is_none() {
+                    continue;
+                }
+
+                let object_id = change.id;
+                let object_version = tx.effects.lamport_version().value();
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: None,
+                        });
+                    }
+                }
+            }
+
+            // Modified and created objects.
+            for object in &tx.output_objects {
+                let object_id = object.id();
+                let object_version = object.version().value();
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        let type_ = object.type_();
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: Some(StoredSumObjType {
+                                object_id: object_id.to_vec(),
+                                object_version: object_version as i64,
+
+                                owner_kind: match object.owner() {
+                                    Owner::AddressOwner(_) => StoredOwnerKind::Address,
+                                    Owner::ObjectOwner(_) => StoredOwnerKind::Object,
+                                    Owner::Shared { .. } => StoredOwnerKind::Shared,
+                                    Owner::Immutable => StoredOwnerKind::Immutable,
+                                },
+
+                                owner_id: match object.owner() {
+                                    Owner::AddressOwner(a) => Some(a.to_vec()),
+                                    Owner::ObjectOwner(o) => Some(o.to_vec()),
+                                    _ => None,
+                                },
+
+                                package: type_.map(|t| t.address().to_vec()),
+                                module: type_.map(|t| t.module().to_string()),
+                                name: type_.map(|t| t.name().to_string()),
+                                instantiation: type_
+                                    .map(|t| bcs::to_bytes(&t.type_params()))
+                                    .transpose()
+                                    .map_err(|e| {
+                                        anyhow!(
+                                            "Failed to serialize type parameters for {}: {e}",
+                                            object
+                                                .id()
+                                                .to_canonical_display(/* with_prefix */ true),
+                                        )
+                                    })?,
+                            }),
+                        });
+                    }
+                }
+            }
+        }
+
+        Ok(values.into_values().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for SumObjTypes {
+    type Batch = BTreeMap;
+
+    fn batch(batch: &mut Self::Batch, updates: Vec) {
+        // `updates` are guaranteed to be provided in checkpoint order, so blindly inserting them
+        // will result in the batch containing the most up-to-date update for each object.
+        for update in updates {
+            batch.insert(update.object_id, update);
+        }
+    }
+
+    async fn commit(values: &Self::Batch, conn: &mut db::Connection<'_>) -> anyhow::Result {
+        let mut updates = vec![];
+        let mut deletes = vec![];
+
+        for update in values.values() {
+            if let Some(update) = &update.update {
+                updates.push(update.clone());
+            } else {
+                deletes.push(update.object_id.to_vec());
+            }
+        }
+
+        let update_chunks = updates.chunks(UPDATE_CHUNK_ROWS).map(|chunk| {
+            diesel::insert_into(sum_obj_types::table)
+                .values(chunk)
+                .on_conflict(sum_obj_types::object_id)
+                .do_update()
+                .set((
+                    sum_obj_types::object_version.eq(excluded(sum_obj_types::object_version)),
+                    sum_obj_types::owner_kind.eq(excluded(sum_obj_types::owner_kind)),
+                    sum_obj_types::owner_id.eq(excluded(sum_obj_types::owner_id)),
+                ))
+                .execute(conn)
+        });
+
+        let updated: usize = try_join_all(update_chunks).await?.into_iter().sum();
+
+        let delete_chunks = deletes.chunks(DELETE_CHUNK_ROWS).map(|chunk| {
+            diesel::delete(sum_obj_types::table)
+                .filter(sum_obj_types::object_id.eq_any(chunk.iter().cloned()))
+                .execute(conn)
+        });
+
+        let deleted: usize = try_join_all(delete_chunks).await?.into_iter().sum();
+
+        Ok(updated + deleted)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/tx_affected_objects.rs b/crates/sui-indexer-alt/src/handlers/tx_affected_objects.rs
new file mode 100644
index 0000000000000..309af2c08a300
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/tx_affected_objects.rs
@@ -0,0 +1,65 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::{effects::TransactionEffectsAPI, full_checkpoint_content::CheckpointData};
+
+use crate::{
+    db, models::transactions::StoredTxAffectedObject, pipeline::concurrent::Handler,
+    pipeline::Processor, schema::tx_affected_objects,
+};
+
+pub struct TxAffectedObjects;
+
+impl Processor for TxAffectedObjects {
+    const NAME: &'static str = "tx_affected_objects";
+
+    type Value = StoredTxAffectedObject;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = Vec::new();
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_sequence_number = (first_tx + i) as i64;
+            let sender = tx.transaction.sender_address();
+
+            values.extend(
+                tx.effects
+                    .object_changes()
+                    .iter()
+                    .map(|o| StoredTxAffectedObject {
+                        tx_sequence_number,
+                        affected: o.id.to_vec(),
+                        sender: sender.to_vec(),
+                    }),
+            );
+        }
+
+        Ok(values)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for TxAffectedObjects {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(tx_affected_objects::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/tx_balance_changes.rs b/crates/sui-indexer-alt/src/handlers/tx_balance_changes.rs
new file mode 100644
index 0000000000000..1f97e806fd1ec
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/tx_balance_changes.rs
@@ -0,0 +1,105 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeMap, sync::Arc};
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::{
+    coin::Coin,
+    effects::TransactionEffectsAPI,
+    full_checkpoint_content::{CheckpointData, CheckpointTransaction},
+    gas_coin::GAS,
+};
+
+use crate::{
+    db,
+    models::transactions::{BalanceChange, StoredTxBalanceChange},
+    pipeline::concurrent::Handler,
+    pipeline::Processor,
+    schema::tx_balance_changes,
+};
+
+pub struct TxBalanceChanges;
+
+impl Processor for TxBalanceChanges {
+    const NAME: &'static str = "tx_balance_changes";
+
+    type Value = StoredTxBalanceChange;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = Vec::new();
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_sequence_number = (first_tx + i) as i64;
+            let balance_changes = balance_changes(tx).with_context(|| {
+                format!("Calculating balance changes for transaction {tx_sequence_number}")
+            })?;
+
+            values.push(StoredTxBalanceChange {
+                tx_sequence_number,
+                balance_changes: bcs::to_bytes(&balance_changes).with_context(|| {
+                    format!("Serializing balance changes for transaction {tx_sequence_number}")
+                })?,
+            });
+        }
+
+        Ok(values)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for TxBalanceChanges {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        Ok(diesel::insert_into(tx_balance_changes::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
+
+/// Calculate balance changes based on the transaction's input and output objects.
+fn balance_changes(transaction: &CheckpointTransaction) -> Result> {
+    // Shortcut if the transaction failed -- we know that only gas was charged.
+    if transaction.effects.status().is_err() {
+        return Ok(vec![BalanceChange::V1 {
+            owner: transaction.effects.gas_object().1,
+            coin_type: GAS::type_tag().to_canonical_string(/* with_prefix */ true),
+            amount: -(transaction.effects.gas_cost_summary().net_gas_usage() as i128),
+        }]);
+    }
+
+    let mut changes = BTreeMap::new();
+    for object in &transaction.input_objects {
+        if let Some((type_, balance)) = Coin::extract_balance_if_coin(object)? {
+            *changes.entry((object.owner(), type_)).or_insert(0i128) -= balance as i128;
+        }
+    }
+
+    for object in &transaction.output_objects {
+        if let Some((type_, balance)) = Coin::extract_balance_if_coin(object)? {
+            *changes.entry((object.owner(), type_)).or_insert(0i128) += balance as i128;
+        }
+    }
+
+    Ok(changes
+        .into_iter()
+        .map(|((owner, coin_type), amount)| BalanceChange::V1 {
+            owner: *owner,
+            coin_type: coin_type.to_canonical_string(/* with_prefix */ true),
+            amount,
+        })
+        .collect())
+}
diff --git a/crates/sui-indexer-alt/src/handlers/wal_coin_balances.rs b/crates/sui-indexer-alt/src/handlers/wal_coin_balances.rs
new file mode 100644
index 0000000000000..6482d6fc94bb6
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/wal_coin_balances.rs
@@ -0,0 +1,59 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredSumCoinBalance, StoredWalCoinBalance},
+    pipeline::{concurrent::Handler, Processor},
+    schema::wal_coin_balances,
+};
+
+use super::sum_coin_balances::SumCoinBalances;
+
+pub struct WalCoinBalances;
+
+impl Processor for WalCoinBalances {
+    const NAME: &'static str = "wal_coin_balances";
+
+    type Value = StoredObjectUpdate;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        SumCoinBalances::process(checkpoint)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for WalCoinBalances {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        let values: Vec<_> = values
+            .iter()
+            .map(|value| StoredWalCoinBalance {
+                object_id: value.object_id.to_vec(),
+                object_version: value.object_version as i64,
+
+                owner_id: value.update.as_ref().map(|o| o.owner_id.clone()),
+
+                coin_type: value.update.as_ref().map(|o| o.coin_type.clone()),
+                coin_balance: value.update.as_ref().map(|o| o.coin_balance),
+
+                cp_sequence_number: value.cp_sequence_number as i64,
+            })
+            .collect();
+
+        Ok(diesel::insert_into(wal_coin_balances::table)
+            .values(&values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/wal_obj_types.rs b/crates/sui-indexer-alt/src/handlers/wal_obj_types.rs
new file mode 100644
index 0000000000000..68cdb2b39945f
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/wal_obj_types.rs
@@ -0,0 +1,62 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredSumObjType, StoredWalObjType},
+    pipeline::{concurrent::Handler, Processor},
+    schema::wal_obj_types,
+};
+
+use super::sum_obj_types::SumObjTypes;
+
+pub struct WalObjTypes;
+
+impl Processor for WalObjTypes {
+    const NAME: &'static str = "wal_obj_types";
+
+    type Value = StoredObjectUpdate;
+
+    fn process(checkpoint: &Arc) -> Result> {
+        SumObjTypes::process(checkpoint)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for WalObjTypes {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result {
+        let values: Vec<_> = values
+            .iter()
+            .map(|value| StoredWalObjType {
+                object_id: value.object_id.to_vec(),
+                object_version: value.object_version as i64,
+
+                owner_kind: value.update.as_ref().map(|o| o.owner_kind),
+                owner_id: value.update.as_ref().and_then(|o| o.owner_id.clone()),
+
+                package: value.update.as_ref().and_then(|o| o.package.clone()),
+                module: value.update.as_ref().and_then(|o| o.module.clone()),
+                name: value.update.as_ref().and_then(|o| o.name.clone()),
+                instantiation: value.update.as_ref().and_then(|o| o.instantiation.clone()),
+
+                cp_sequence_number: value.cp_sequence_number as i64,
+            })
+            .collect();
+
+        Ok(diesel::insert_into(wal_obj_types::table)
+            .values(&values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/broadcaster.rs b/crates/sui-indexer-alt/src/ingestion/broadcaster.rs
new file mode 100644
index 0000000000000..8b20b2693415e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/broadcaster.rs
@@ -0,0 +1,103 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use backoff::backoff::Constant;
+use futures::{future::try_join_all, TryStreamExt};
+use mysten_metrics::spawn_monitored_task;
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_stream::{wrappers::ReceiverStream, StreamExt};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info};
+
+use crate::{ingestion::error::Error, metrics::IndexerMetrics};
+
+use super::{client::IngestionClient, IngestionConfig};
+
+/// The broadcaster task is responsible for taking a stream of checkpoint sequence numbers from
+/// `checkpoint_rx`, fetching them using the `client` and disseminating them to all subscribers in
+/// `subscribers`.
+///
+/// The task will shut down if the `cancel` token is signalled, or if the `checkpoint_rx` channel
+/// closes.
+pub(super) fn broadcaster(
+    config: IngestionConfig,
+    client: IngestionClient,
+    metrics: Arc,
+    checkpoint_rx: mpsc::Receiver,
+    subscribers: Vec>>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        info!("Starting ingestion broadcaster");
+
+        match ReceiverStream::new(checkpoint_rx)
+            .map(Ok)
+            .try_for_each_concurrent(/* limit */ config.ingest_concurrency, |cp| {
+                let client = client.clone();
+                let metrics = metrics.clone();
+                let subscribers = subscribers.clone();
+
+                // One clone is for the supervisor to signal a cancel if it detects a
+                // subscriber that wants to wind down ingestion, and the other is to pass to
+                // each worker to detect cancellation.
+                let supervisor_cancel = cancel.clone();
+                let cancel = cancel.clone();
+
+                // Repeatedly retry if the checkpoint is not found, assuming that we are at the
+                // tip of the network and it will become available soon.
+                let backoff = Constant::new(config.retry_interval);
+                let fetch = move || {
+                    let client = client.clone();
+                    let metrics = metrics.clone();
+                    let cancel = cancel.clone();
+
+                    async move {
+                        use backoff::Error as BE;
+                        if cancel.is_cancelled() {
+                            return Err(BE::permanent(Error::Cancelled));
+                        }
+
+                        client.fetch(cp, &cancel).await.map_err(|e| match e {
+                            Error::NotFound(checkpoint) => {
+                                debug!(checkpoint, "Checkpoint not found, retrying...");
+                                metrics.total_ingested_not_found_retries.inc();
+                                BE::transient(e)
+                            }
+                            e => BE::permanent(e),
+                        })
+                    }
+                };
+
+                async move {
+                    let checkpoint = backoff::future::retry(backoff, fetch).await?;
+                    let futures = subscribers.iter().map(|s| s.send(checkpoint.clone()));
+
+                    if try_join_all(futures).await.is_err() {
+                        info!("Subscription dropped, signalling shutdown");
+                        supervisor_cancel.cancel();
+                        Err(Error::Cancelled)
+                    } else {
+                        Ok(())
+                    }
+                }
+            })
+            .await
+        {
+            Ok(()) => {
+                info!("Checkpoints done, stopping ingestion broadcaster");
+            }
+
+            Err(Error::Cancelled) => {
+                info!("Shutdown received, stopping ingestion broadcaster");
+            }
+
+            Err(e) => {
+                error!("Ingestion broadcaster failed: {}", e);
+                cancel.cancel();
+            }
+        }
+    })
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/client.rs b/crates/sui-indexer-alt/src/ingestion/client.rs
new file mode 100644
index 0000000000000..b16a7c51daef1
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/client.rs
@@ -0,0 +1,155 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::ingestion::local_client::LocalIngestionClient;
+use crate::ingestion::remote_client::RemoteIngestionClient;
+use crate::ingestion::Error as IngestionError;
+use crate::ingestion::Result as IngestionResult;
+use crate::metrics::IndexerMetrics;
+use backoff::Error as BE;
+use backoff::ExponentialBackoff;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::time::Duration;
+use sui_storage::blob::Blob;
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio_util::bytes::Bytes;
+use tokio_util::sync::CancellationToken;
+use tracing::debug;
+use url::Url;
+
+/// Wait at most this long between retries for transient errors.
+const MAX_TRANSIENT_RETRY_INTERVAL: Duration = Duration::from_secs(60);
+
+#[async_trait::async_trait]
+pub(crate) trait IngestionClientTrait: Send + Sync {
+    async fn fetch(&self, checkpoint: u64) -> FetchResult;
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum FetchError {
+    #[error("Checkpoint not found")]
+    NotFound,
+    #[error("Failed to fetch checkpoint due to permanent error: {0}")]
+    Permanent(#[from] anyhow::Error),
+    #[error("Failed to fetch checkpoint due to {reason}: {error}")]
+    Transient {
+        reason: &'static str,
+        #[source]
+        error: anyhow::Error,
+    },
+}
+
+pub type FetchResult = Result;
+
+#[derive(Clone)]
+pub(crate) struct IngestionClient {
+    client: Arc,
+    /// Wrap the metrics in an `Arc` to keep copies of the client cheap.
+    metrics: Arc,
+}
+
+impl IngestionClient {
+    pub(crate) fn new_remote(url: Url, metrics: Arc) -> IngestionResult {
+        let client = Arc::new(RemoteIngestionClient::new(url)?);
+        Ok(IngestionClient { client, metrics })
+    }
+
+    pub(crate) fn new_local(path: PathBuf, metrics: Arc) -> Self {
+        let client = Arc::new(LocalIngestionClient::new(path));
+        IngestionClient { client, metrics }
+    }
+
+    /// Repeatedly retries transient errors with an exponential backoff (up to [MAX_TRANSIENT_RETRY_INTERVAL]).
+    /// Transient errors are either defined by the client implementation that
+    /// returns a `FetchError::Transient` error variant, or within this function
+    /// if we fail to deserialize the result as [CheckpointData].
+    /// The function will immediately return on:
+    /// - non-transient errors determined by the client implementation,
+    ///   This includes both the FetchError::NotFound and FetchError::Permanent variants.
+    /// - cancellation of the supplied `cancel` token.
+    pub(crate) async fn fetch(
+        &self,
+        checkpoint: u64,
+        cancel: &CancellationToken,
+    ) -> IngestionResult> {
+        let client = self.client.clone();
+        let request = move || {
+            let client = client.clone();
+            async move {
+                if cancel.is_cancelled() {
+                    return Err(BE::permanent(IngestionError::Cancelled));
+                }
+
+                let bytes = client.fetch(checkpoint).await.map_err(|err| match err {
+                    FetchError::NotFound => BE::permanent(IngestionError::NotFound(checkpoint)),
+                    FetchError::Permanent(error) => {
+                        BE::permanent(IngestionError::FetchError(checkpoint, error))
+                    }
+                    FetchError::Transient { reason, error } => self.metrics.inc_retry(
+                        checkpoint,
+                        reason,
+                        IngestionError::FetchError(checkpoint, error),
+                    ),
+                })?;
+
+                self.metrics.total_ingested_bytes.inc_by(bytes.len() as u64);
+                let data: CheckpointData = Blob::from_bytes(&bytes).map_err(|e| {
+                    self.metrics.inc_retry(
+                        checkpoint,
+                        "deserialization",
+                        IngestionError::DeserializationError(checkpoint, e),
+                    )
+                })?;
+
+                Ok(data)
+            }
+        };
+
+        // Keep backing off until we are waiting for the max interval, but don't give up.
+        let backoff = ExponentialBackoff {
+            max_interval: MAX_TRANSIENT_RETRY_INTERVAL,
+            max_elapsed_time: None,
+            ..Default::default()
+        };
+
+        let guard = self.metrics.ingested_checkpoint_latency.start_timer();
+        let data = backoff::future::retry(backoff, request).await?;
+        let elapsed = guard.stop_and_record();
+
+        debug!(
+            checkpoint,
+            elapsed_ms = elapsed * 1000.0,
+            "Fetched checkpoint"
+        );
+
+        self.metrics.total_ingested_checkpoints.inc();
+
+        self.metrics
+            .total_ingested_transactions
+            .inc_by(data.transactions.len() as u64);
+
+        self.metrics.total_ingested_events.inc_by(
+            data.transactions
+                .iter()
+                .map(|tx| tx.events.as_ref().map_or(0, |evs| evs.data.len()) as u64)
+                .sum(),
+        );
+
+        self.metrics.total_ingested_inputs.inc_by(
+            data.transactions
+                .iter()
+                .map(|tx| tx.input_objects.len() as u64)
+                .sum(),
+        );
+
+        self.metrics.total_ingested_outputs.inc_by(
+            data.transactions
+                .iter()
+                .map(|tx| tx.output_objects.len() as u64)
+                .sum(),
+        );
+
+        Ok(Arc::new(data))
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/error.rs b/crates/sui-indexer-alt/src/ingestion/error.rs
new file mode 100644
index 0000000000000..17cafe495aa80
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/error.rs
@@ -0,0 +1,25 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+pub type Result = std::result::Result;
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Checkpoint {0} not found")]
+    NotFound(u64),
+
+    #[error("Failed to deserialize checkpoint {0}: {1}")]
+    DeserializationError(u64, #[source] anyhow::Error),
+
+    #[error("Failed to fetch checkpoint {0}: {1}")]
+    FetchError(u64, #[source] anyhow::Error),
+
+    #[error(transparent)]
+    ReqwestError(#[from] reqwest::Error),
+
+    #[error("No subscribers for ingestion service")]
+    NoSubscribers,
+
+    #[error("Shutdown signal received, stopping ingestion service")]
+    Cancelled,
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/local_client.rs b/crates/sui-indexer-alt/src/ingestion/local_client.rs
new file mode 100644
index 0000000000000..2efb6708939ff
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/local_client.rs
@@ -0,0 +1,65 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::ingestion::client::{FetchError, FetchResult, IngestionClientTrait};
+use axum::body::Bytes;
+use std::path::PathBuf;
+
+pub struct LocalIngestionClient {
+    path: PathBuf,
+}
+
+impl LocalIngestionClient {
+    pub fn new(path: PathBuf) -> Self {
+        LocalIngestionClient { path }
+    }
+}
+
+#[async_trait::async_trait]
+impl IngestionClientTrait for LocalIngestionClient {
+    async fn fetch(&self, checkpoint: u64) -> FetchResult {
+        let path = self.path.join(format!("{}.chk", checkpoint));
+        let bytes = tokio::fs::read(path).await.map_err(|e| {
+            if e.kind() == std::io::ErrorKind::NotFound {
+                FetchError::NotFound
+            } else {
+                FetchError::Transient {
+                    reason: "io_error",
+                    error: e.into(),
+                }
+            }
+        })?;
+        Ok(Bytes::from(bytes))
+    }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use crate::ingestion::client::IngestionClient;
+    use crate::ingestion::test_utils::test_checkpoint_data;
+    use crate::metrics::tests::test_metrics;
+    use std::sync::Arc;
+    use sui_storage::blob::{Blob, BlobEncoding};
+    use tokio_util::sync::CancellationToken;
+
+    #[tokio::test]
+    async fn local_test_fetch() {
+        let tempdir = tempfile::tempdir().unwrap().into_path();
+        let path = tempdir.join("1.chk");
+        let test_checkpoint = test_checkpoint_data(1);
+        tokio::fs::write(&path, &test_checkpoint).await.unwrap();
+
+        let metrics = Arc::new(test_metrics());
+        let local_client = IngestionClient::new_local(tempdir, metrics);
+        let checkpoint = local_client
+            .fetch(1, &CancellationToken::new())
+            .await
+            .unwrap();
+        assert_eq!(
+            Blob::encode(&*checkpoint, BlobEncoding::Bcs)
+                .unwrap()
+                .to_bytes(),
+            test_checkpoint
+        );
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/mod.rs b/crates/sui-indexer-alt/src/ingestion/mod.rs
new file mode 100644
index 0000000000000..46e24b2f33d4e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/mod.rs
@@ -0,0 +1,432 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+// Allow use of `unbounded_channel` in `ingestion` -- it is used by the regulator task to receive
+// feedback. Traffic through this task should be minimal, but if a bound is applied to it and that
+// bound is hit, the indexer could deadlock.
+#![allow(clippy::disallowed_methods)]
+
+use std::{path::PathBuf, sync::Arc, time::Duration};
+
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use url::Url;
+
+use crate::ingestion::broadcaster::broadcaster;
+use crate::ingestion::client::IngestionClient;
+use crate::ingestion::error::{Error, Result};
+use crate::ingestion::regulator::regulator;
+use crate::metrics::IndexerMetrics;
+
+mod broadcaster;
+mod client;
+pub mod error;
+mod local_client;
+mod regulator;
+mod remote_client;
+#[cfg(test)]
+mod test_utils;
+
+pub struct IngestionService {
+    config: IngestionConfig,
+    client: IngestionClient,
+    metrics: Arc<IndexerMetrics>,
+    ingest_hi_tx: mpsc::UnboundedSender<(&'static str, u64)>,
+    ingest_hi_rx: mpsc::UnboundedReceiver<(&'static str, u64)>,
+    subscribers: Vec<mpsc::Sender<Arc<CheckpointData>>>,
+    cancel: CancellationToken,
+}
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct IngestionConfig {
+    /// Remote Store to fetch checkpoints from.
+    #[arg(long, required = true, group = "source")]
+    remote_store_url: Option<Url>,
+
+    /// Path to the local ingestion directory.
+    /// If both remote_store_url and local_ingestion_path are provided, remote_store_url will be used.
+    #[arg(long, required = true, group = "source")]
+    local_ingestion_path: Option<PathBuf>,
+
+    /// Maximum size of checkpoint backlog across all workers downstream of the ingestion service.
+    #[arg(long, default_value_t = 5000)]
+    checkpoint_buffer_size: usize,
+
+    /// Maximum number of checkpoints to attempt to fetch concurrently.
+    #[arg(long, default_value_t = 200)]
+    ingest_concurrency: usize,
+
+    /// Polling interval to retry fetching checkpoints that do not exist.
+    #[arg(
+        long,
+        default_value = "200",
+        value_name = "MILLISECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_millis)
+    )]
+    retry_interval: Duration,
+}
+
+impl IngestionService {
+    pub fn new(
+        config: IngestionConfig,
+        metrics: Arc<IndexerMetrics>,
+        cancel: CancellationToken,
+    ) -> Result<Self> {
+        // TODO: Potentially support a hybrid mode where we can fetch from both local and remote.
+        let client = if let Some(url) = config.remote_store_url.as_ref() {
+            IngestionClient::new_remote(url.clone(), metrics.clone())?
+        } else if let Some(path) = config.local_ingestion_path.as_ref() {
+            IngestionClient::new_local(path.clone(), metrics.clone())
+        } else {
+            panic!("Either remote_store_url or local_ingestion_path must be provided");
+        };
+        let subscribers = Vec::new();
+        let (ingest_hi_tx, ingest_hi_rx) = mpsc::unbounded_channel();
+        Ok(Self {
+            config,
+            client,
+            metrics,
+            ingest_hi_tx,
+            ingest_hi_rx,
+            subscribers,
+            cancel,
+        })
+    }
+
+    /// Add a new subscription to the ingestion service. Note that the service is susceptible to
+    /// the "slow receiver" problem: If one receiver is slower to process checkpoints than the
+    /// checkpoint ingestion rate, it will eventually hold up all receivers.
+    ///
+    /// The ingestion service can optionally receive checkpoint high watermarks from its
+    /// subscribers. If a subscriber provides a watermark, the ingestion service will commit to not
+    /// run ahead of the watermark by more than the config's buffer_size.
+    ///
+    /// Returns the channel to receive checkpoints from and the channel to accept watermarks from.
+    pub fn subscribe(
+        &mut self,
+    ) -> (
+        mpsc::Receiver<Arc<CheckpointData>>,
+        mpsc::UnboundedSender<(&'static str, u64)>,
+    ) {
+        let (sender, receiver) = mpsc::channel(self.config.checkpoint_buffer_size);
+        self.subscribers.push(sender);
+        (receiver, self.ingest_hi_tx.clone())
+    }
+
+    /// Start the ingestion service as a background task, consuming it in the process.
+    ///
+    /// Checkpoints are fetched concurrently from the `checkpoints` iterator, and pushed to
+    /// subscribers' channels (potentially out-of-order). Subscribers can communicate with the
+    /// ingestion service via their channels in the following ways:
+    ///
+    /// - If a subscriber is lagging (not receiving checkpoints fast enough), it will eventually
+    ///   provide back-pressure to the ingestion service, which will stop fetching new checkpoints.
+    /// - If a subscriber closes its channel, the ingestion service will interpret that as a signal
+    ///   to shutdown as well.
+    ///
+    /// If ingestion reaches the leading edge of the network, it will encounter checkpoints that do
+    /// not exist yet. These will be retried repeatedly on a fixed `retry_interval` until they
+    /// become available.
+    pub async fn run<I>(self, checkpoints: I) -> Result<(JoinHandle<()>, JoinHandle<()>)>
+    where
+        I: IntoIterator<Item = u64> + Send + Sync + 'static,
+        I::IntoIter: Send + Sync + 'static,
+    {
+        let IngestionService {
+            config,
+            client,
+            metrics,
+            ingest_hi_tx: _,
+            ingest_hi_rx,
+            subscribers,
+            cancel,
+        } = self;
+
+        if subscribers.is_empty() {
+            return Err(Error::NoSubscribers);
+        }
+
+        let (checkpoint_tx, checkpoint_rx) = mpsc::channel(config.ingest_concurrency);
+
+        let regulator = regulator(
+            checkpoints,
+            config.checkpoint_buffer_size,
+            ingest_hi_rx,
+            checkpoint_tx,
+            cancel.clone(),
+        );
+
+        let broadcaster = broadcaster(
+            config,
+            client,
+            metrics,
+            checkpoint_rx,
+            subscribers,
+            cancel.clone(),
+        );
+
+        Ok((regulator, broadcaster))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Mutex;
+
+    use mysten_metrics::spawn_monitored_task;
+    use reqwest::StatusCode;
+    use wiremock::{MockServer, Request};
+
+    use crate::ingestion::remote_client::tests::{respond_with, status};
+    use crate::ingestion::test_utils::test_checkpoint_data;
+    use crate::metrics::tests::test_metrics;
+
+    use super::*;
+
+    async fn test_ingestion(
+        uri: String,
+        checkpoint_buffer_size: usize,
+        ingest_concurrency: usize,
+        cancel: CancellationToken,
+    ) -> IngestionService {
+        IngestionService::new(
+            IngestionConfig {
+                remote_store_url: Some(Url::parse(&uri).unwrap()),
+                local_ingestion_path: None,
+                checkpoint_buffer_size,
+                ingest_concurrency,
+                retry_interval: Duration::from_millis(200),
+            },
+            Arc::new(test_metrics()),
+            cancel,
+        )
+        .unwrap()
+    }
+
+    async fn test_subscriber(
+        stop_after: usize,
+        mut rx: mpsc::Receiver<Arc<CheckpointData>>,
+        cancel: CancellationToken,
+    ) -> JoinHandle<Vec<u64>> {
+        spawn_monitored_task!(async move {
+            let mut seqs = vec![];
+            for _ in 0..stop_after {
+                tokio::select! {
+                    _ = cancel.cancelled() => break,
+                    Some(checkpoint) = rx.recv() => {
+                        seqs.push(checkpoint.checkpoint_summary.sequence_number);
+                    }
+                }
+            }
+
+            rx.close();
+            seqs
+        })
+    }
+
+    /// If the ingestion service has no subscribers, it will fail fast (before fetching any
+    /// checkpoints).
+    #[tokio::test]
+    async fn fail_on_no_subscribers() {
+        telemetry_subscribers::init_for_testing();
+
+        // The mock server will repeatedly return 404, so if the service does try to fetch a
+        // checkpoint, it will be stuck repeatedly retrying.
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::NOT_FOUND)).await;
+
+        let cancel = CancellationToken::new();
+        let ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let err = ingestion_service.run(0..).await.unwrap_err();
+        assert!(matches!(err, Error::NoSubscribers));
+    }
+
+    /// The subscriber has no effective limit, and the mock server will always return checkpoint
+    /// information, but the ingestion service can still be stopped using the cancellation token.
+    #[tokio::test]
+    async fn shutdown_on_cancel() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        respond_with(
+            &server,
+            status(StatusCode::OK).set_body_bytes(test_checkpoint_data(42)),
+        )
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(usize::MAX, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancel();
+        subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+    }
+
+    /// The subscriber will stop after receiving a single checkpoint, and this will trigger the
+    /// ingestion service to stop as well, even if there are more checkpoints to fetch.
+    #[tokio::test]
+    async fn shutdown_on_subscriber_drop() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        respond_with(
+            &server,
+            status(StatusCode::OK).set_body_bytes(test_checkpoint_data(42)),
+        )
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(1, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+    }
+
+    /// If fetching the checkpoint throws an unexpected error, the whole pipeline will be shut
+    /// down.
+    #[tokio::test]
+    async fn shutdown_on_unexpected_error() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::IM_A_TEAPOT)).await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(usize::MAX, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+    }
+
+    /// The service will retry fetching a checkpoint that does not exist, in this test, the 4th
+    /// checkpoint will return 404 a couple of times, before eventually succeeding.
+    #[tokio::test]
+    async fn retry_on_not_found() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            match *times {
+                1..4 => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+                4..6 => status(StatusCode::NOT_FOUND),
+                _ => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+            }
+        })
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(5, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        let seqs = subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+
+        assert_eq!(seqs, vec![1, 2, 3, 6, 7]);
+    }
+
+    /// Similar to the previous test, but now it's a transient error that causes the retry.
+    #[tokio::test]
+    async fn retry_on_transient_error() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            match *times {
+                1..4 => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+                4..6 => status(StatusCode::REQUEST_TIMEOUT),
+                _ => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+            }
+        })
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(5, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        let seqs = subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+
+        assert_eq!(seqs, vec![1, 2, 3, 6, 7]);
+    }
+
+    /// One subscriber is going to stop processing checkpoints, so even though the service can keep
+    /// fetching checkpoints, it will stop short because of the slow receiver. Other subscribers
+    /// can keep processing checkpoints that were buffered for the slow one.
+    #[tokio::test]
+    async fn back_pressure_and_buffering() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times))
+        })
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service =
+            test_ingestion(server.uri(), /* buffer */ 3, 1, cancel.clone()).await;
+
+        // This subscriber will take its sweet time processing checkpoints.
+        let (mut laggard, _) = ingestion_service.subscribe();
+        async fn unblock(laggard: &mut mpsc::Receiver<Arc<CheckpointData>>) -> u64 {
+            let checkpoint = laggard.recv().await.unwrap();
+            checkpoint.checkpoint_summary.sequence_number
+        }
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(5, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        // At this point, the service will have been able to pass 3 checkpoints to the non-lagging
+        // subscriber, while the laggard's buffer fills up. Now the laggard will pull two
+        // checkpoints, which will allow the rest of the pipeline to progress enough for the live
+        // subscriber to receive its quota.
+        assert_eq!(unblock(&mut laggard).await, 1);
+        assert_eq!(unblock(&mut laggard).await, 2);
+
+        cancel.cancelled().await;
+        let seqs = subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+
+        assert_eq!(seqs, vec![1, 2, 3, 4, 5]);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/regulator.rs b/crates/sui-indexer-alt/src/ingestion/regulator.rs
new file mode 100644
index 0000000000000..de2e6e738a506
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/regulator.rs
@@ -0,0 +1,257 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::collections::HashMap;
+
+use mysten_metrics::spawn_monitored_task;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use tracing::info;
+
+/// The regulator task is responsible for writing out checkpoint sequence numbers from the
+/// `checkpoints` iterator to `checkpoint_tx`, bounded by the high watermark dictated by
+/// subscribers.
+///
+/// Subscribers can share their high watermarks on `ingest_hi_rx`. The regulator remembers these,
+/// and stops serving checkpoints if they are over the minimum subscriber watermark plus the
+/// ingestion `buffer_size`.
+///
+/// This offers a form of back-pressure that is sensitive to ordering, which is useful for
+/// subscribers that need to commit information in order: Without it, those subscribers may need to
+/// buffer unboundedly many updates from checkpoints while they wait for the checkpoint that they
+/// need to commit.
+///
+/// Note that back-pressure is optional, and will only be applied if a subscriber provides a
+/// watermark, at which point it must keep updating the watermark to allow the ingestion service to
+/// continue making progress.
+///
+/// The task will shut down if the `cancel` token is signalled, or if the `checkpoints` iterator
+/// runs out.
+pub(super) fn regulator<I>(
+    checkpoints: I,
+    buffer_size: usize,
+    mut ingest_hi_rx: mpsc::UnboundedReceiver<(&'static str, u64)>,
+    checkpoint_tx: mpsc::Sender<u64>,
+    cancel: CancellationToken,
+) -> JoinHandle<()>
+where
+    I: IntoIterator<Item = u64> + Send + Sync + 'static,
+    I::IntoIter: Send + Sync + 'static,
+{
+    spawn_monitored_task!(async move {
+        let mut ingest_hi = None;
+        let mut subscribers_hi = HashMap::new();
+        let mut checkpoints = checkpoints.into_iter().peekable();
+
+        info!("Starting ingestion regulator");
+
+        loop {
+            let Some(cp) = checkpoints.peek() else {
+                info!("Checkpoints done, stopping regulator");
+                break;
+            };
+
+            tokio::select! {
+                _ = cancel.cancelled() => {
+                    info!("Shutdown received, stopping regulator");
+                    break;
+                }
+
+                Some((name, hi)) = ingest_hi_rx.recv() => {
+                    subscribers_hi.insert(name, hi);
+                    ingest_hi = subscribers_hi.values().copied().min().map(|hi| hi + buffer_size as u64);
+                }
+
+                res = checkpoint_tx.send(*cp), if ingest_hi.map_or(true, |hi| *cp <= hi) => if res.is_ok() {
+                    checkpoints.next();
+                } else {
+                    info!("Checkpoint channel closed, stopping regulator");
+                    break;
+                }
+            }
+        }
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use tokio::time::{error::Elapsed, timeout};
+
+    use super::*;
+
+    /// Wait up to a second for a response on the channel, and return it, expecting this operation
+    /// to succeed.
+    async fn expect_recv(rx: &mut mpsc::Receiver<u64>) -> Option<u64> {
+        timeout(Duration::from_secs(1), rx.recv()).await.unwrap()
+    }
+
+    /// Wait up to a second for a response on the channel, but expecting this operation to timeout.
+    async fn expect_timeout(rx: &mut mpsc::Receiver<u64>) -> Elapsed {
+        timeout(Duration::from_secs(1), rx.recv())
+            .await
+            .unwrap_err()
+    }
+
+    #[tokio::test]
+    async fn finite_list_of_checkpoints() {
+        let (_, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        let cps = 0..5;
+        let h_regulator = regulator(cps, 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..5 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn shutdown_on_sender_closed() {
+        let (_, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..5 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        drop(cp_rx);
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn shutdown_on_cancel() {
+        let (_, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..5 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn halted() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("test", 4)).unwrap();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for _ in 0..=4 {
+            expect_recv(&mut cp_rx).await;
+        }
+
+        // Regulator stopped because of watermark.
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn halted_buffered() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("test", 2)).unwrap();
+
+        let h_regulator = regulator(0.., 2, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..=4 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Regulator stopped because of watermark (plus buffering).
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn resumption() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("test", 2)).unwrap();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..=2 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Regulator stopped because of watermark, but resumes when that watermark is updated.
+        expect_timeout(&mut cp_rx).await;
+        hi_tx.send(("test", 4)).unwrap();
+
+        for i in 3..=4 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Halted again.
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn multiple_subscribers() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("a", 2)).unwrap();
+        hi_tx.send(("b", 3)).unwrap();
+
+        let cps = 0..10;
+        let h_regulator = regulator(cps, 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..=2 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Watermark stopped because of a's watermark.
+        expect_timeout(&mut cp_rx).await;
+
+        // Updating b's watermark doesn't make a difference.
+        hi_tx.send(("b", 4)).unwrap();
+        expect_timeout(&mut cp_rx).await;
+
+        // But updating a's watermark does.
+        hi_tx.send(("a", 3)).unwrap();
+        assert_eq!(Some(3), expect_recv(&mut cp_rx).await);
+
+        // ...by one checkpoint.
+        expect_timeout(&mut cp_rx).await;
+
+        // And we can make more progress by updating it again.
+        hi_tx.send(("a", 4)).unwrap();
+        assert_eq!(Some(4), expect_recv(&mut cp_rx).await);
+
+        // But another update to "a" will now not make a difference, because "b" is still behind.
+        hi_tx.send(("a", 5)).unwrap();
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/remote_client.rs b/crates/sui-indexer-alt/src/ingestion/remote_client.rs
new file mode 100644
index 0000000000000..c4f91fee57990
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/remote_client.rs
@@ -0,0 +1,292 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::ingestion::client::{FetchError, FetchResult, IngestionClientTrait};
+use crate::ingestion::Result as IngestionResult;
+use reqwest::{Client, StatusCode};
+use tracing::{debug, error};
+use url::Url;
+
+#[derive(thiserror::Error, Debug, Eq, PartialEq)]
+pub enum HttpError {
+    #[error("HTTP error with status code: {0}")]
+    Http(StatusCode),
+}
+
+fn status_code_to_error(code: StatusCode) -> anyhow::Error {
+    HttpError::Http(code).into()
+}
+
+pub(crate) struct RemoteIngestionClient {
+    url: Url,
+    client: Client,
+}
+
+impl RemoteIngestionClient {
+    pub(crate) fn new(url: Url) -> IngestionResult<Self> {
+        Ok(Self {
+            url,
+            client: Client::builder().build()?,
+        })
+    }
+}
+
+#[async_trait::async_trait]
+impl IngestionClientTrait for RemoteIngestionClient {
+    /// Fetch a checkpoint from the remote store.
+    ///
+    /// Transient errors include:
+    ///
+    /// - failures to issue a request, (network errors, redirect issues, etc)
+    /// - request timeouts,
+    /// - rate limiting,
+    /// - server errors (5xx),
+    /// - issues getting a full response.
+    async fn fetch(&self, checkpoint: u64) -> FetchResult {
+        // SAFETY: The path being joined is statically known to be valid.
+        let url = self
+            .url
+            .join(&format!("/{checkpoint}.chk"))
+            .expect("Unexpected invalid URL");
+
+        let response = self
+            .client
+            .get(url)
+            .send()
+            .await
+            .map_err(|e| FetchError::Transient {
+                reason: "request",
+                error: e.into(),
+            })?;
+
+        match response.status() {
+            code if code.is_success() => {
+                // Failure to extract all the bytes from the payload, or to deserialize the
+                // checkpoint from them is considered a transient error -- the store being
+                // fetched from needs to be corrected, and ingestion will keep retrying it
+                // until it is.
+                response.bytes().await.map_err(|e| FetchError::Transient {
+                    reason: "bytes",
+                    error: e.into(),
+                })
+            }
+
+            // Treat 404s as a special case so we can match on this error type.
+            code @ StatusCode::NOT_FOUND => {
+                debug!(checkpoint, %code, "Checkpoint not found");
+                Err(FetchError::NotFound)
+            }
+
+            // Timeouts are a client error but they are usually transient.
+            code @ StatusCode::REQUEST_TIMEOUT => Err(FetchError::Transient {
+                reason: "timeout",
+                error: status_code_to_error(code),
+            }),
+
+            // Rate limiting is also a client error, but the backoff will eventually widen the
+            // interval appropriately.
+            code @ StatusCode::TOO_MANY_REQUESTS => Err(FetchError::Transient {
+                reason: "too_many_requests",
+                error: status_code_to_error(code),
+            }),
+
+            // Assume that if the server is facing difficulties, it will recover eventually.
+            code if code.is_server_error() => Err(FetchError::Transient {
+                reason: "server_error",
+                error: status_code_to_error(code),
+            }),
+
+            // For everything else, assume it's a permanent error and don't retry.
+            code => {
+                error!(checkpoint, %code, "Permanent error, giving up!");
+                Err(FetchError::Permanent(status_code_to_error(code)))
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use super::*;
+    use crate::ingestion::client::IngestionClient;
+    use crate::ingestion::error::Error;
+    use crate::ingestion::test_utils::test_checkpoint_data;
+    use crate::metrics::tests::test_metrics;
+    use axum::http::StatusCode;
+    use std::sync::{Arc, Mutex};
+    use tokio_util::sync::CancellationToken;
+    use wiremock::{
+        matchers::{method, path_regex},
+        Mock, MockServer, Request, Respond, ResponseTemplate,
+    };
+
+    pub(crate) async fn respond_with(server: &MockServer, response: impl Respond + 'static) {
+        Mock::given(method("GET"))
+            .and(path_regex(r"/\d+.chk"))
+            .respond_with(response)
+            .mount(server)
+            .await;
+    }
+
+    pub(crate) fn status(code: StatusCode) -> ResponseTemplate {
+        ResponseTemplate::new(code.as_u16())
+    }
+
+    fn remote_test_client(uri: String) -> IngestionClient {
+        IngestionClient::new_remote(Url::parse(&uri).unwrap(), Arc::new(test_metrics())).unwrap()
+    }
+
+    fn assert_http_error(error: Error, checkpoint: u64, code: StatusCode) {
+        let Error::FetchError(c, inner) = error else {
+            panic!("Expected FetchError, got: {:?}", error);
+        };
+        assert_eq!(c, checkpoint);
+        let Some(http_error) = inner.downcast_ref::<HttpError>() else {
+            panic!("Expected HttpError, got: {:?}", inner);
+        };
+        assert_eq!(http_error, &HttpError::Http(code));
+    }
+
+    #[tokio::test]
+    async fn fail_on_not_found() {
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::NOT_FOUND)).await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert!(matches!(error, Error::NotFound(42)));
+    }
+
+    #[tokio::test]
+    async fn fail_on_client_error() {
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::IM_A_TEAPOT)).await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert_http_error(error, 42, StatusCode::IM_A_TEAPOT);
+    }
+
+    /// Even if the server is repeatedly returning transient errors, it is possible to cancel the
+    /// fetch request via its cancellation token.
+    #[tokio::test]
+    async fn fail_on_cancel() {
+        let cancel = CancellationToken::new();
+        let server = MockServer::start().await;
+
+        // This mock server repeatedly returns internal server errors, but will also send a
+        // cancellation with the second request (this is a bit of a contrived test set-up).
+        let times: Mutex<u64> = Mutex::new(0);
+        let server_cancel = cancel.clone();
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+
+            if *times > 2 {
+                server_cancel.cancel();
+            }
+
+            status(StatusCode::INTERNAL_SERVER_ERROR)
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let error = client.fetch(42, &cancel.clone()).await.unwrap_err();
+
+        assert!(matches!(error, Error::Cancelled));
+    }
+
+    /// Assume that failures to send the request to the remote store are due to temporary
+    /// connectivity issues, and retry them.
+    #[tokio::test]
+    async fn retry_on_request_error() {
+        let server = MockServer::start().await;
+
+        let times: Mutex = Mutex::new(0);
+        respond_with(&server, move |r: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            match (*times, r.url.path()) {
+                // The first request will trigger a redirect to 0.chk no matter what the original
+                // request was for -- triggering a request error.
+                (1, _) => status(StatusCode::MOVED_PERMANENTLY).append_header("Location", "/0.chk"),
+
+                // Set-up checkpoint 0 as an infinite redirect loop.
+                (_, "/0.chk") => {
+                    status(StatusCode::MOVED_PERMANENTLY).append_header("Location", r.url.as_str())
+                }
+
+                // Subsequently, requests will fail with a permanent error, this is what we expect
+                // to see.
+                _ => status(StatusCode::IM_A_TEAPOT),
+            }
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert_http_error(error, 42, StatusCode::IM_A_TEAPOT);
+    }
+
+    /// Assume that certain errors will recover by themselves, and keep retrying with an
+    /// exponential back-off. These errors include: 5xx (server) errors, 408 (timeout), and 429
+    /// (rate limiting).
+    #[tokio::test]
+    async fn retry_on_transient_server_error() {
+        let server = MockServer::start().await;
+        let times: Mutex = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            status(match *times {
+                1 => StatusCode::INTERNAL_SERVER_ERROR,
+                2 => StatusCode::REQUEST_TIMEOUT,
+                3 => StatusCode::TOO_MANY_REQUESTS,
+                _ => StatusCode::IM_A_TEAPOT,
+            })
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert_http_error(error, 42, StatusCode::IM_A_TEAPOT);
+    }
+
+    /// Treat deserialization failure as another kind of transient error -- all checkpoint data
+    /// that is fetched should be valid (deserializable as a `CheckpointData`).
+    #[tokio::test]
+    async fn retry_on_deserialization_error() {
+        let server = MockServer::start().await;
+        let times: Mutex = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            if *times < 3 {
+                status(StatusCode::OK).set_body_bytes(vec![])
+            } else {
+                status(StatusCode::OK).set_body_bytes(test_checkpoint_data(42))
+            }
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let checkpoint = client.fetch(42, &CancellationToken::new()).await.unwrap();
+        assert_eq!(42, checkpoint.checkpoint_summary.sequence_number)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/test_utils.rs b/crates/sui-indexer-alt/src/ingestion/test_utils.rs
new file mode 100644
index 0000000000000..99f130927d0bf
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/test_utils.rs
@@ -0,0 +1,56 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use rand::prelude::StdRng;
+use rand::SeedableRng;
+use sui_storage::blob::{Blob, BlobEncoding};
+use sui_types::crypto::KeypairTraits;
+use sui_types::full_checkpoint_content::CheckpointData;
+use sui_types::gas::GasCostSummary;
+use sui_types::messages_checkpoint::{
+    CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary, SignedCheckpointSummary,
+};
+use sui_types::supported_protocol_versions::ProtocolConfig;
+use sui_types::utils::make_committee_key;
+
+const RNG_SEED: [u8; 32] = [
+    21, 23, 199, 200, 234, 250, 252, 178, 94, 15, 202, 178, 62, 186, 88, 137, 233, 192, 130, 157,
+    179, 179, 65, 9, 31, 249, 221, 123, 225, 112, 199, 247,
+];
+
+pub(crate) fn test_checkpoint_data(cp: u64) -> Vec {
+    let mut rng = StdRng::from_seed(RNG_SEED);
+    let (keys, committee) = make_committee_key(&mut rng);
+    let contents = CheckpointContents::new_with_digests_only_for_tests(vec![]);
+    let summary = CheckpointSummary::new(
+        &ProtocolConfig::get_for_max_version_UNSAFE(),
+        0,
+        cp,
+        0,
+        &contents,
+        None,
+        GasCostSummary::default(),
+        None,
+        0,
+        Vec::new(),
+    );
+
+    let sign_infos: Vec<_> = keys
+        .iter()
+        .map(|k| {
+            let name = k.public().into();
+            SignedCheckpointSummary::sign(committee.epoch, &summary, k, name)
+        })
+        .collect();
+
+    let checkpoint_data = CheckpointData {
+        checkpoint_summary: CertifiedCheckpointSummary::new(summary, sign_infos, &committee)
+            .unwrap(),
+        checkpoint_contents: contents,
+        transactions: vec![],
+    };
+
+    Blob::encode(&checkpoint_data, BlobEncoding::Bcs)
+        .unwrap()
+        .to_bytes()
+}
diff --git a/crates/sui-indexer-alt/src/lib.rs b/crates/sui-indexer-alt/src/lib.rs
new file mode 100644
index 0000000000000..fdfe8057966dd
--- /dev/null
+++ b/crates/sui-indexer-alt/src/lib.rs
@@ -0,0 +1,274 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeSet, net::SocketAddr, sync::Arc};
+
+use anyhow::{Context, Result};
+use db::{Db, DbConfig};
+use ingestion::{IngestionConfig, IngestionService};
+use metrics::{IndexerMetrics, MetricsService};
+use models::watermarks::CommitterWatermark;
+use pipeline::{concurrent, sequential, PipelineConfig, Processor};
+use task::graceful_shutdown;
+use tokio::task::JoinHandle;
+use tokio_util::sync::CancellationToken;
+use tracing::info;
+
+pub mod args;
+pub mod db;
+pub mod handlers;
+pub mod ingestion;
+pub mod metrics;
+pub mod models;
+pub mod pipeline;
+pub mod schema;
+pub mod task;
+
+pub struct Indexer {
+    /// Connection pool to the database.
+    db: Db,
+
+    /// Prometheus Metrics.
+    metrics: Arc,
+
+    /// Service for serving Prometheus metrics.
+    metrics_service: MetricsService,
+
+    /// Service for downloading and disseminating checkpoint data.
+    ingestion_service: IngestionService,
+
+    /// Parameters for the committers of each pipeline.
+    pipeline_config: PipelineConfig,
+
+    /// Optional override of the checkpoint lowerbound.
+    first_checkpoint: Option,
+
+    /// Optional override of the checkpoint upperbound.
+    last_checkpoint: Option,
+
+    /// Optional override of enabled pipelines.
+    enabled_pipelines: BTreeSet,
+
+    /// Cancellation token shared among all continuous tasks in the service.
+    cancel: CancellationToken,
+
+    /// The checkpoint lowerbound derived from watermarks of pipelines added to the indexer. When
+    /// the indexer runs, it will start from this point, unless this has been overridden by
+    /// [Self::first_checkpoint].
+    first_checkpoint_from_watermark: u64,
+
+    /// The handles for every task spawned by this indexer, used to manage graceful shutdown.
+    handles: Vec>,
+}
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct IndexerConfig {
+    #[command(flatten)]
+    pub ingestion_config: IngestionConfig,
+
+    #[command(flatten)]
+    pub pipeline_config: PipelineConfig,
+
+    /// Override for the checkpoint to start ingestion from -- useful for backfills. By default,
+    /// ingestion will start just after the lowest checkpoint watermark across all active
+    /// pipelines.
+    #[arg(long)]
+    first_checkpoint: Option,
+
+    /// Override for the checkpoint to end ingestion at (inclusive) -- useful for backfills. By
+    /// default, ingestion will not stop, and will continue to poll for new checkpoints.
+    #[arg(long)]
+    last_checkpoint: Option,
+
+    /// Only run the following pipelines -- useful for backfills. If not provided, all pipelines
+    /// will be run.
+    #[arg(long, action = clap::ArgAction::Append)]
+    pipeline: Vec,
+
+    /// Address to serve Prometheus Metrics from.
+    #[arg(long, default_value = "0.0.0.0:9184")]
+    pub metrics_address: SocketAddr,
+}
+
+impl Indexer {
+    pub async fn new(
+        db_config: DbConfig,
+        indexer_config: IndexerConfig,
+        cancel: CancellationToken,
+    ) -> Result {
+        let IndexerConfig {
+            ingestion_config,
+            pipeline_config,
+            first_checkpoint,
+            last_checkpoint,
+            pipeline,
+            metrics_address,
+        } = indexer_config;
+
+        let db = Db::new(db_config)
+            .await
+            .context("Failed to connect to database")?;
+
+        // At indexer initialization, we ensure that the DB schema is up-to-date.
+        db.run_migrations()
+            .await
+            .context("Failed to run pending migrations")?;
+
+        let (metrics, metrics_service) =
+            MetricsService::new(metrics_address, db.clone(), cancel.clone())?;
+        let ingestion_service =
+            IngestionService::new(ingestion_config, metrics.clone(), cancel.clone())?;
+
+        Ok(Self {
+            db,
+            metrics,
+            metrics_service,
+            ingestion_service,
+            pipeline_config,
+            first_checkpoint,
+            last_checkpoint,
+            enabled_pipelines: pipeline.into_iter().collect(),
+            cancel,
+            first_checkpoint_from_watermark: u64::MAX,
+            handles: vec![],
+        })
+    }
+
+    /// Adds a new pipeline to this indexer and starts it up. Although their tasks have started,
+    /// they will be idle until the ingestion service starts and serves its checkpoint data.
+    ///
+    /// Concurrent pipelines commit checkpoint data out-of-order to maximise throughput, and they
+    /// keep the watermark table up-to-date with the highest point they can guarantee all data
+    /// exists for, for their pipeline.
+    pub async fn concurrent_pipeline(&mut self) -> Result<()> {
+        let Some(watermark) = self.add_pipeline::().await? else {
+            return Ok(());
+        };
+
+        let (processor, collector, committer, watermark) = concurrent::pipeline::(
+            watermark,
+            self.pipeline_config.clone(),
+            self.db.clone(),
+            self.ingestion_service.subscribe().0,
+            self.metrics.clone(),
+            self.cancel.clone(),
+        );
+
+        self.handles.push(processor);
+        self.handles.push(collector);
+        self.handles.push(committer);
+        self.handles.push(watermark);
+
+        Ok(())
+    }
+
+    /// Adds a new pipeline to this indexer and starts it up. Although their tasks have started,
+    /// they will be idle until the ingestion service starts and serves its checkpoint data.
+    ///
+    /// Sequential pipelines commit checkpoint data in-order which sacrifices throughput, but may
+    /// be required to handle pipelines that modify data in-place (where each update is not an
+    /// insert, but could be a modification of an existing row, where ordering between updates is
+    /// important).
+    ///
+    /// The pipeline can optionally be configured to lag behind the ingestion service by a fixed
+    /// number of checkpoints (configured by `checkpoint_lag`).
+    pub async fn sequential_pipeline(
+        &mut self,
+        checkpoint_lag: Option,
+    ) -> Result<()> {
+        let Some(watermark) = self.add_pipeline::().await? else {
+            return Ok(());
+        };
+
+        let (checkpoint_rx, watermark_tx) = self.ingestion_service.subscribe();
+
+        let (processor, committer) = sequential::pipeline::(
+            watermark,
+            self.pipeline_config.clone(),
+            checkpoint_lag,
+            self.db.clone(),
+            checkpoint_rx,
+            watermark_tx,
+            self.metrics.clone(),
+            self.cancel.clone(),
+        );
+
+        self.handles.push(processor);
+        self.handles.push(committer);
+
+        Ok(())
+    }
+
+    /// Start ingesting checkpoints. Ingestion either starts from the configured
+    /// `first_checkpoint`, or it is calculated based on the watermarks of all active pipelines.
+    /// Ingestion will stop after consuming the configured `last_checkpoint`, if one is provided,
+    /// or will continue until it tracks the tip of the network.
+    pub async fn run(mut self) -> Result> {
+        let metrics_handle = self
+            .metrics_service
+            .run()
+            .await
+            .context("Failed to start metrics service")?;
+
+        // If an override has been provided, start ingestion from there, otherwise start ingestion
+        // from just after the lowest committer watermark across all enabled pipelines.
+        let first_checkpoint = self
+            .first_checkpoint
+            .unwrap_or(self.first_checkpoint_from_watermark);
+
+        let last_checkpoint = self.last_checkpoint.unwrap_or(u64::MAX);
+
+        info!(first_checkpoint, last_checkpoint = ?self.last_checkpoint, "Ingestion range");
+
+        let (regulator_handle, broadcaster_handle) = self
+            .ingestion_service
+            .run(first_checkpoint..=last_checkpoint)
+            .await
+            .context("Failed to start ingestion service")?;
+
+        self.handles.push(regulator_handle);
+        self.handles.push(broadcaster_handle);
+
+        let cancel = self.cancel.clone();
+        Ok(tokio::spawn(async move {
+            // Wait for the ingestion service and all its related tasks to wind down gracefully:
+            // If ingestion has been configured to only handle a specific range of checkpoints, we
+            // want to make sure that tasks are allowed to run to completion before shutting them
+            // down.
+            graceful_shutdown(self.handles, self.cancel).await;
+
+            info!("Indexing pipeline gracefully shut down");
+
+            // Pick off any stragglers (in this case, just the metrics service).
+            cancel.cancel();
+            metrics_handle.await.unwrap();
+        }))
+    }
+
+    /// Update the indexer's first checkpoint based on the watermark for the pipeline being added
+    /// for handler `H` (as long as it's enabled). Returns `Ok(None)` if the pipeline is disabled,
+    /// `Ok(Some(None))` if the pipeline is enabled but its watermark is not found, and
+    /// `Ok(Some(Some(watermark)))` if the pipeline is enabled and the watermark is found.
+    async fn add_pipeline(
+        &mut self,
+    ) -> Result>>> {
+        if !self.enabled_pipelines.is_empty() && !self.enabled_pipelines.contains(P::NAME) {
+            info!("Skipping pipeline {}", P::NAME);
+            return Ok(None);
+        }
+
+        let mut conn = self.db.connect().await.context("Failed DB connection")?;
+
+        let watermark = CommitterWatermark::get(&mut conn, P::NAME)
+            .await
+            .with_context(|| format!("Failed to get watermark for {}", P::NAME))?;
+
+        // TODO(amnn): Test this (depends on supporting migrations and tempdb).
+        self.first_checkpoint_from_watermark = watermark
+            .as_ref()
+            .map_or(0, |w| w.checkpoint_hi_inclusive as u64 + 1)
+            .min(self.first_checkpoint_from_watermark);
+
+        Ok(Some(watermark))
+    }
+}
diff --git a/crates/sui-indexer-alt/src/main.rs b/crates/sui-indexer-alt/src/main.rs
new file mode 100644
index 0000000000000..fd8e30cc8bee5
--- /dev/null
+++ b/crates/sui-indexer-alt/src/main.rs
@@ -0,0 +1,63 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::{Context, Result};
+use clap::Parser;
+use sui_indexer_alt::args::Command;
+use sui_indexer_alt::db::reset_database;
+use sui_indexer_alt::{
+    args::Args,
+    handlers::{
+        ev_emit_mod::EvEmitMod, ev_struct_inst::EvStructInst, kv_checkpoints::KvCheckpoints,
+        kv_objects::KvObjects, kv_transactions::KvTransactions, obj_versions::ObjVersions,
+        sum_coin_balances::SumCoinBalances, sum_obj_types::SumObjTypes,
+        tx_affected_objects::TxAffectedObjects, tx_balance_changes::TxBalanceChanges,
+        wal_coin_balances::WalCoinBalances, wal_obj_types::WalObjTypes,
+    },
+    Indexer,
+};
+use tokio_util::sync::CancellationToken;
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let args = Args::parse();
+
+    // Enable tracing, configured by environment variables.
+    let _guard = telemetry_subscribers::TelemetryConfig::new()
+        .with_env()
+        .init();
+
+    let cancel = CancellationToken::new();
+
+    match args.command {
+        Command::Indexer {
+            indexer,
+            consistent_range: lag,
+        } => {
+            let mut indexer = Indexer::new(args.db_config, indexer, cancel.clone()).await?;
+
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.concurrent_pipeline::().await?;
+            indexer.sequential_pipeline::(lag).await?;
+            indexer.sequential_pipeline::(lag).await?;
+
+            let h_indexer = indexer.run().await.context("Failed to start indexer")?;
+
+            cancel.cancelled().await;
+            let _ = h_indexer.await;
+        }
+        Command::ResetDatabase { skip_migrations } => {
+            reset_database(args.db_config, skip_migrations).await?;
+        }
+    }
+
+    Ok(())
+}
diff --git a/crates/sui-indexer-alt/src/metrics.rs b/crates/sui-indexer-alt/src/metrics.rs
new file mode 100644
index 0000000000000..819298f4b915e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/metrics.rs
@@ -0,0 +1,592 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{net::SocketAddr, sync::Arc};
+
+use anyhow::Result;
+use axum::{extract::Extension, routing::get, Router};
+use mysten_metrics::RegistryService;
+use prometheus::{
+    core::{Collector, Desc},
+    proto::{Counter, Gauge, LabelPair, Metric, MetricFamily, MetricType, Summary},
+    register_histogram_vec_with_registry, register_histogram_with_registry,
+    register_int_counter_vec_with_registry, register_int_counter_with_registry,
+    register_int_gauge_vec_with_registry, Histogram, HistogramVec, IntCounter, IntCounterVec,
+    IntGaugeVec, Registry,
+};
+use tokio::{net::TcpListener, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use tracing::{info, warn};
+
+use crate::{db::Db, ingestion::error::Error};
+
+/// Histogram buckets for the distribution of checkpoint fetching latencies.
+const INGESTION_LATENCY_SEC_BUCKETS: &[f64] = &[
+    0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0,
+];
+
+/// Histogram buckets for the distribution of latencies for processing a checkpoint in the indexer
+/// (without having to call out to other services).
+const PROCESSING_LATENCY_SEC_BUCKETS: &[f64] = &[
+    0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0,
+];
+
+/// Histogram buckets for the distribution of latencies for writing to the database.
+const DB_UPDATE_LATENCY_SEC_BUCKETS: &[f64] = &[
+    0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0,
+    2000.0, 5000.0, 10000.0,
+];
+
+/// Histogram buckets for the distribution of batch sizes (number of rows) written to the database.
+const BATCH_SIZE_BUCKETS: &[f64] = &[
+    1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0,
+];
+
+/// Service to expose prometheus metrics from the indexer.
+pub struct MetricsService {
+    addr: SocketAddr,
+    service: RegistryService,
+    cancel: CancellationToken,
+}
+
+#[derive(Clone)]
+pub struct IndexerMetrics {
+    // Statistics related to fetching data from the remote store.
+    pub total_ingested_checkpoints: IntCounter,
+    pub total_ingested_transactions: IntCounter,
+    pub total_ingested_events: IntCounter,
+    pub total_ingested_inputs: IntCounter,
+    pub total_ingested_outputs: IntCounter,
+    pub total_ingested_bytes: IntCounter,
+    pub total_ingested_transient_retries: IntCounterVec,
+    pub total_ingested_not_found_retries: IntCounter,
+
+    pub ingested_checkpoint_latency: Histogram,
+
+    // Statistics related to individual ingestion pipelines' handlers.
+    pub total_handler_checkpoints_received: IntCounterVec,
+    pub total_handler_checkpoints_processed: IntCounterVec,
+    pub total_handler_rows_created: IntCounterVec,
+
+    pub handler_checkpoint_latency: HistogramVec,
+
+    // Statistics related to individual ingestion pipelines' committers.
+    pub total_collector_rows_received: IntCounterVec,
+    pub total_collector_batches_created: IntCounterVec,
+    pub total_committer_batches_attempted: IntCounterVec,
+    pub total_committer_batches_succeeded: IntCounterVec,
+    pub total_committer_rows_committed: IntCounterVec,
+    pub total_committer_rows_affected: IntCounterVec,
+    pub total_watermarks_out_of_order: IntCounterVec,
+
+    pub collector_gather_latency: HistogramVec,
+    pub collector_batch_size: HistogramVec,
+    pub committer_commit_latency: HistogramVec,
+    pub watermark_gather_latency: HistogramVec,
+    pub watermark_commit_latency: HistogramVec,
+
+    pub watermark_epoch: IntGaugeVec,
+    pub watermark_checkpoint: IntGaugeVec,
+    pub watermark_transaction: IntGaugeVec,
+    pub watermark_timestamp_ms: IntGaugeVec,
+
+    pub watermark_epoch_in_db: IntGaugeVec,
+    pub watermark_checkpoint_in_db: IntGaugeVec,
+    pub watermark_transaction_in_db: IntGaugeVec,
+    pub watermark_timestamp_in_db_ms: IntGaugeVec,
+}
+
+/// Collects information about the database connection pool.
+struct DbConnectionStatsCollector {
+    db: Db,
+    desc: Vec<(MetricType, Desc)>,
+}
+
+impl MetricsService {
+    /// Create a new metrics service, exposing Mysten-wide metrics, and Indexer-specific metrics.
+    /// Returns the Indexer-specific metrics and the service itself (which must be run with
+    /// [Self::run]).
+    pub fn new(
+        addr: SocketAddr,
+        db: Db,
+        cancel: CancellationToken,
+    ) -> Result<(Arc, MetricsService)> {
+        let registry = Registry::new_custom(Some("indexer_alt".to_string()), None)?;
+
+        let metrics = IndexerMetrics::new(®istry);
+        mysten_metrics::init_metrics(®istry);
+        registry.register(Box::new(DbConnectionStatsCollector::new(db)))?;
+
+        let service = Self {
+            addr,
+            service: RegistryService::new(registry),
+            cancel,
+        };
+
+        Ok((Arc::new(metrics), service))
+    }
+
+    /// Start the service. The service will run until the cancellation token is triggered.
+    pub async fn run(self) -> Result> {
+        let listener = TcpListener::bind(&self.addr).await?;
+        let app = Router::new()
+            .route("/metrics", get(mysten_metrics::metrics))
+            .layer(Extension(self.service));
+
+        Ok(tokio::spawn(async move {
+            info!("Starting metrics service on {}", self.addr);
+            axum::serve(listener, app)
+                .with_graceful_shutdown(async move {
+                    self.cancel.cancelled().await;
+                    info!("Shutdown received, stopping metrics service");
+                })
+                .await
+                .unwrap();
+        }))
+    }
+}
+
+impl IndexerMetrics {
+    pub fn new(registry: &Registry) -> Self {
+        Self {
+            total_ingested_checkpoints: register_int_counter_with_registry!(
+                "indexer_total_ingested_checkpoints",
+                "Total number of checkpoints fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_transactions: register_int_counter_with_registry!(
+                "indexer_total_ingested_transactions",
+                "Total number of transactions fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_events: register_int_counter_with_registry!(
+                "indexer_total_ingested_events",
+                "Total number of events fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_inputs: register_int_counter_with_registry!(
+                "indexer_total_ingested_inputs",
+                "Total number of input objects fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_outputs: register_int_counter_with_registry!(
+                "indexer_total_ingested_outputs",
+                "Total number of output objects fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_bytes: register_int_counter_with_registry!(
+                "indexer_total_ingested_bytes",
+                "Total number of bytes fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_transient_retries: register_int_counter_vec_with_registry!(
+                "indexer_total_ingested_retries",
+                "Total number of retries due to transient errors while fetching data from the \
+                 remote store",
+                &["reason"],
+                registry,
+            )
+            .unwrap(),
+            total_ingested_not_found_retries: register_int_counter_with_registry!(
+                "indexer_total_ingested_not_found_retries",
+                "Total number of retries due to the not found errors while fetching data from the \
+                 remote store",
+                registry,
+            )
+            .unwrap(),
+            ingested_checkpoint_latency: register_histogram_with_registry!(
+                "indexer_ingested_checkpoint_latency",
+                "Time taken to fetch a checkpoint from the remote store, including retries",
+                INGESTION_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            total_handler_checkpoints_received: register_int_counter_vec_with_registry!(
+                "indexer_total_handler_checkpoints_received",
+                "Total number of checkpoints received by this handler",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_handler_checkpoints_processed: register_int_counter_vec_with_registry!(
+                "indexer_total_handler_checkpoints_processed",
+                "Total number of checkpoints processed (converted into rows) by this handler",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_handler_rows_created: register_int_counter_vec_with_registry!(
+                "indexer_total_handler_rows_created",
+                "Total number of rows created by this handler",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            handler_checkpoint_latency: register_histogram_vec_with_registry!(
+                "indexer_handler_checkpoint_latency",
+                "Time taken to process a checkpoint by this handler",
+                &["pipeline"],
+                PROCESSING_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            total_collector_rows_received: register_int_counter_vec_with_registry!(
+                "indexer_total_collector_rows_received",
+                "Total number of rows received by this collector",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_collector_batches_created: register_int_counter_vec_with_registry!(
+                "indexer_total_collector_batches_created",
+                "Total number of batches created by this collector",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_batches_attempted: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_batches_attempted",
+                "Total number of batches writes attempted by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_batches_succeeded: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_batches_succeeded",
+                "Total number of successful batches writes by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_rows_committed: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_rows_committed",
+                "Total number of rows sent to the database by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_rows_affected: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_rows_affected",
+                "Total number of rows actually written to the database by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_watermarks_out_of_order: register_int_counter_vec_with_registry!(
+                "indexer_watermark_out_of_order",
+                "Number of times this committer encountered a batch for a checkpoint before its watermark",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            collector_gather_latency: register_histogram_vec_with_registry!(
+                "indexer_collector_gather_latency",
+                "Time taken to gather rows into a batch by this collector",
+                &["pipeline"],
+                PROCESSING_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            collector_batch_size: register_histogram_vec_with_registry!(
+                "indexer_collector_batch_size",
+                "Number of rows in a batch written to the database by this collector",
+                &["pipeline"],
+                BATCH_SIZE_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            committer_commit_latency: register_histogram_vec_with_registry!(
+                "indexer_committer_commit_latency",
+                "Time taken to write a batch of rows to the database by this committer",
+                &["pipeline"],
+                DB_UPDATE_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            watermark_gather_latency: register_histogram_vec_with_registry!(
+                "indexer_watermark_gather_latency",
+                "Time taken to calculate the new high watermark after a write by this committer",
+                &["pipeline"],
+                PROCESSING_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            watermark_commit_latency: register_histogram_vec_with_registry!(
+                "indexer_watermark_commit_latency",
+                "Time taken to write the new high watermark to the database by this committer",
+                &["pipeline"],
+                DB_UPDATE_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            watermark_epoch: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_epoch",
+                "Current epoch high watermark for this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_checkpoint: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_checkpoint",
+                "Current checkpoint high watermark for this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_transaction: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_transaction",
+                "Current transaction high watermark for this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_timestamp_ms: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_timestamp_ms",
+                "Current timestamp high watermark for this committer, in milliseconds",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_epoch_in_db: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_epoch_in_db",
+                "Last epoch high watermark this committer wrote to the DB",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_checkpoint_in_db: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_checkpoint_in_db",
+                "Last checkpoint high watermark this committer wrote to the DB",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_transaction_in_db: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_transaction_in_db",
+                "Last transaction high watermark this committer wrote to the DB",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_timestamp_in_db_ms: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_timestamp_ms_in_db",
+                "Last timestamp high watermark this committer wrote to the DB, in milliseconds",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+        }
+    }
+
+    /// Register that we're retrying a checkpoint fetch due to a transient error, logging the
+    /// reason and error.
+    ///
+    /// Returns the error wrapped as `backoff::Error::Transient`, ready to be handed back to the
+    /// `backoff` retry loop. (The `<Error>` type argument was missing from the return type,
+    /// which does not compile -- `backoff::Error` is generic over the wrapped error.)
+    pub(crate) fn inc_retry(
+        &self,
+        checkpoint: u64,
+        reason: &str,
+        error: Error,
+    ) -> backoff::Error<Error> {
+        warn!(checkpoint, reason, "Retrying due to error: {error}");
+
+        self.total_ingested_transient_retries
+            .with_label_values(&[reason])
+            .inc();
+
+        backoff::Error::transient(error)
+    }
+}
+
+impl DbConnectionStatsCollector {
+    /// Build a collector that reports connection-pool statistics for `db`.
+    ///
+    /// The descriptor order here must match the order in which `collect` emits metric
+    /// families -- `collect` indexes into this vector positionally.
+    fn new(db: Db) -> Self {
+        // Shorthand for a descriptor without variable labels.
+        let plain = |ty: MetricType, name: &str, help: &str| (ty, desc(name, help));
+
+        let descriptors = vec![
+            plain(
+                MetricType::GAUGE,
+                "db_connections",
+                "Number of connections currently being managed by the pool",
+            ),
+            plain(
+                MetricType::GAUGE,
+                "db_idle_connections",
+                "Number of idle connections in the pool",
+            ),
+            plain(
+                MetricType::COUNTER,
+                "db_connect_direct",
+                "Connections that did not have to wait",
+            ),
+            plain(
+                MetricType::SUMMARY,
+                "db_connect_waited",
+                "Connections that had to wait",
+            ),
+            plain(
+                MetricType::COUNTER,
+                "db_connect_timed_out",
+                "Connections that timed out waiting for a connection",
+            ),
+            plain(
+                MetricType::COUNTER,
+                "db_connections_created",
+                "Connections that have been created in the pool",
+            ),
+            // The only descriptor with a variable label ("reason").
+            (
+                MetricType::COUNTER,
+                desc_with_labels(
+                    "db_connections_closed",
+                    "Total connections that were closed",
+                    &["reason"],
+                ),
+            ),
+        ];
+
+        Self { db, desc: descriptors }
+    }
+}
+
+impl Collector for DbConnectionStatsCollector {
+    fn desc(&self) -> Vec<&Desc> {
+        self.desc.iter().map(|d| &d.1).collect()
+    }
+
+    /// Snapshot the pool's state and statistics, and render one metric family per descriptor,
+    /// in the same order as the descriptors registered in `new`. (The `<MetricFamily>` type
+    /// argument was missing from the return type, which does not compile.)
+    fn collect(&self) -> Vec<MetricFamily> {
+        let state = self.db.state();
+        let stats = state.statistics;
+
+        vec![
+            gauge(&self.desc[0].1, state.connections as f64),
+            gauge(&self.desc[1].1, state.idle_connections as f64),
+            counter(&self.desc[2].1, stats.get_direct as f64),
+            summary(
+                &self.desc[3].1,
+                stats.get_wait_time.as_millis() as f64,
+                // NOTE(review): the sample count includes timed-out acquisitions as well as
+                // successful waits.
+                stats.get_waited + stats.get_timed_out,
+            ),
+            counter(&self.desc[4].1, stats.get_timed_out as f64),
+            counter(&self.desc[5].1, stats.connections_created as f64),
+            counter_with_labels(
+                &self.desc[6].1,
+                &[
+                    ("reason", "broken", stats.connections_closed_broken as f64),
+                    ("reason", "invalid", stats.connections_closed_invalid as f64),
+                    (
+                        "reason",
+                        "max_lifetime",
+                        stats.connections_closed_max_lifetime as f64,
+                    ),
+                    (
+                        "reason",
+                        "idle_timeout",
+                        stats.connections_closed_idle_timeout as f64,
+                    ),
+                ],
+            ),
+        ]
+    }
+}
+
+/// A metric descriptor with no variable labels.
+fn desc(name: &str, help: &str) -> Desc {
+    const NO_LABELS: &[&str] = &[];
+    desc_with_labels(name, help, NO_LABELS)
+}
+
+/// A metric descriptor with the given variable `labels`.
+///
+/// Panics if Prometheus rejects the descriptor (e.g. an invalid metric name); descriptors are
+/// all statically defined, so failure here is a programming error.
+fn desc_with_labels(name: &str, help: &str, labels: &[&str]) -> Desc {
+    let variable_labels: Vec<String> = labels.iter().map(|l| l.to_string()).collect();
+    Desc::new(
+        name.to_owned(),
+        help.to_owned(),
+        variable_labels,
+        Default::default(),
+    )
+    .expect("Bad metric description")
+}
+
+/// Render a single unlabelled gauge sample as a metric family.
+fn gauge(desc: &Desc, value: f64) -> MetricFamily {
+    let mut g = Gauge::default();
+    let mut m = Metric::default();
+    let mut mf = MetricFamily::new();
+
+    g.set_value(value);
+    m.set_gauge(g);
+
+    mf.mut_metric().push(m);
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    // Fix: this was `MetricType::COUNTER`, but the metric's value is set via `set_gauge`.
+    // Encoders pick which field to read based on the family type, so a mismatched type means
+    // the gauge value is never emitted.
+    mf.set_field_type(MetricType::GAUGE);
+    mf
+}
+
+/// Render a single unlabelled counter sample as a metric family.
+fn counter(desc: &Desc, value: f64) -> MetricFamily {
+    let mut c = Counter::default();
+    let mut m = Metric::default();
+    let mut mf = MetricFamily::new();
+
+    c.set_value(value);
+    m.set_counter(c);
+
+    mf.mut_metric().push(m);
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    // Fix: this was `MetricType::GAUGE`, but the metric's value is set via `set_counter`
+    // (the type tags were swapped between `gauge` and `counter`). The family type must match
+    // the field set on the metric for encoders to emit the value.
+    mf.set_field_type(MetricType::COUNTER);
+    mf
+}
+
+/// Render a counter family with one metric per `(label name, label value, sample)` triple.
+fn counter_with_labels(desc: &Desc, values: &[(&str, &str, f64)]) -> MetricFamily {
+    let mut mf = MetricFamily::new();
+
+    for &(label_name, label_value, sample) in values {
+        let mut counter = Counter::default();
+        counter.set_value(sample);
+
+        let mut pair = LabelPair::default();
+        pair.set_name(label_name.to_string());
+        pair.set_value(label_value.to_string());
+
+        let mut metric = Metric::default();
+        metric.set_counter(counter);
+        metric.mut_label().push(pair);
+        mf.mut_metric().push(metric);
+    }
+
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    mf.set_field_type(MetricType::COUNTER);
+    mf
+}
+
+/// Render a summary family from a pre-aggregated `sum` and `count` (no quantiles).
+fn summary(desc: &Desc, sum: f64, count: u64) -> MetricFamily {
+    let mut summary = Summary::default();
+    summary.set_sample_sum(sum);
+    summary.set_sample_count(count);
+
+    let mut metric = Metric::default();
+    metric.set_summary(summary);
+
+    let mut mf = MetricFamily::new();
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    mf.set_field_type(MetricType::SUMMARY);
+    mf.mut_metric().push(metric);
+    mf
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use prometheus::Registry;
+
+    use super::IndexerMetrics;
+
+    /// Construct metrics for test purposes.
+    ///
+    /// Uses a fresh `Registry` per call so repeated construction in one test process does not
+    /// clash over duplicate metric registrations.
+    pub fn test_metrics() -> IndexerMetrics {
+        IndexerMetrics::new(&Registry::new())
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/checkpoints.rs b/crates/sui-indexer-alt/src/models/checkpoints.rs
new file mode 100644
index 0000000000000..0a7c625eb95e6
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/checkpoints.rs
@@ -0,0 +1,26 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::schema::kv_checkpoints;
+use diesel::prelude::*;
+use sui_field_count::FieldCount;
+
+/// A row of the `kv_checkpoints` table.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = kv_checkpoints)]
+pub struct StoredCheckpoint {
+    pub sequence_number: i64,
+    /// BCS serialized CertifiedCheckpointSummary
+    pub certified_checkpoint: Vec<u8>,
+    /// BCS serialized CheckpointContents
+    pub checkpoint_contents: Vec<u8>,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Pins the column count so that schema changes to `StoredCheckpoint` are caught at review
+    // time.
+    #[test]
+    fn test_stored_checkpoint_field_count() {
+        assert_eq!(StoredCheckpoint::field_count(), 3);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/events.rs b/crates/sui-indexer-alt/src/models/events.rs
new file mode 100644
index 0000000000000..ceb25d3e48a1a
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/events.rs
@@ -0,0 +1,41 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::schema::{ev_emit_mod, ev_struct_inst};
+use diesel::prelude::*;
+use sui_field_count::FieldCount;
+
+/// A row of the `ev_emit_mod` table. Byte columns were missing their `<u8>` type argument.
+#[derive(Insertable, Debug, Clone, Eq, PartialEq, Ord, PartialOrd, FieldCount)]
+#[diesel(table_name = ev_emit_mod)]
+pub struct StoredEvEmitMod {
+    pub package: Vec<u8>,
+    pub module: String,
+    pub tx_sequence_number: i64,
+    pub sender: Vec<u8>,
+}
+
+/// A row of the `ev_struct_inst` table.
+#[derive(Insertable, Debug, Clone, Eq, PartialEq, Ord, PartialOrd, FieldCount)]
+#[diesel(table_name = ev_struct_inst)]
+pub struct StoredEvStructInst {
+    pub package: Vec<u8>,
+    pub module: String,
+    pub name: String,
+    /// NOTE(review): presumably a BCS-serialized struct instantiation -- confirm against the
+    /// pipeline that writes this column.
+    pub instantiation: Vec<u8>,
+    pub tx_sequence_number: i64,
+    pub sender: Vec<u8>,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Pin the column counts so schema changes are caught at review time.
+    #[test]
+    fn test_stored_event_field_count() {
+        assert_eq!(StoredEvEmitMod::field_count(), 4);
+    }
+
+    #[test]
+    fn test_stored_struct_inst_field_count() {
+        assert_eq!(StoredEvStructInst::field_count(), 6);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/mod.rs b/crates/sui-indexer-alt/src/models/mod.rs
new file mode 100644
index 0000000000000..b20e260b29176
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/mod.rs
@@ -0,0 +1,8 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+// Database model types for the indexer, one module per group of related tables.
+pub mod checkpoints;
+pub mod events;
+pub mod objects;
+pub mod transactions;
+pub mod watermarks;
diff --git a/crates/sui-indexer-alt/src/models/objects.rs b/crates/sui-indexer-alt/src/models/objects.rs
new file mode 100644
index 0000000000000..46a5ac8d5a03b
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/objects.rs
@@ -0,0 +1,148 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use diesel::{
+    backend::Backend, deserialize, expression::AsExpression, prelude::*, serialize,
+    sql_types::SmallInt, FromSqlRow,
+};
+use sui_field_count::FieldCount;
+use sui_types::base_types::ObjectID;
+
+use crate::schema::{
+    kv_objects, obj_versions, sum_coin_balances, sum_obj_types, wal_coin_balances, wal_obj_types,
+};
+
+/// A row of the `kv_objects` table. Byte columns were missing their `<u8>` type argument.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = kv_objects, primary_key(object_id, object_version))]
+pub struct StoredObject {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    /// NOTE(review): `None` appears to mean there is no object content at this version (cf.
+    /// `StoredObjectUpdate::update`) -- confirm against the writer.
+    pub serialized_object: Option<Vec<u8>>,
+}
+
+/// A row of the `obj_versions` table.
+#[derive(Insertable, Debug, Clone)]
+#[diesel(table_name = obj_versions, primary_key(object_id, object_version))]
+pub struct StoredObjVersion {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub object_digest: Vec<u8>,
+    pub cp_sequence_number: i64,
+}
+
+/// An insert/update or deletion of an object record, keyed on a particular Object ID and version.
+///
+/// This type lost its type parameter: `update` is `Option<T>`, where `T` is the row type being
+/// inserted or updated.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct StoredObjectUpdate<T> {
+    pub object_id: ObjectID,
+    pub object_version: u64,
+    pub cp_sequence_number: u64,
+    /// `None` means the object was deleted or wrapped at this version, `Some(x)` means it was
+    /// changed to `x`.
+    pub update: Option<T>,
+}
+
+/// The kind of owner of an object, stored as a `SMALLINT` column.
+///
+/// The explicit discriminants are part of the database encoding (see the `ToSql`/`FromSql`
+/// impls for this type) and must not be changed.
+#[derive(AsExpression, FromSqlRow, Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[diesel(sql_type = SmallInt)]
+#[repr(i16)]
+pub enum StoredOwnerKind {
+    Immutable = 0,
+    Address = 1,
+    Object = 2,
+    Shared = 3,
+}
+
+/// A row of the `sum_coin_balances` table. Byte columns were missing their `<u8>` type
+/// argument, and optional columns their inner types.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = sum_coin_balances, primary_key(object_id))]
+pub struct StoredSumCoinBalance {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_id: Vec<u8>,
+    pub coin_type: Vec<u8>,
+    pub coin_balance: i64,
+}
+
+/// A row of the `sum_obj_types` table.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = sum_obj_types, primary_key(object_id))]
+pub struct StoredSumObjType {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_kind: StoredOwnerKind,
+    pub owner_id: Option<Vec<u8>>,
+    pub package: Option<Vec<u8>>,
+    pub module: Option<String>,
+    pub name: Option<String>,
+    pub instantiation: Option<Vec<u8>>,
+}
+
+/// A row of the `wal_coin_balances` write-ahead table.
+#[derive(Insertable, Debug, Clone)]
+#[diesel(table_name = wal_coin_balances, primary_key(object_id, object_version))]
+pub struct StoredWalCoinBalance {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_id: Option<Vec<u8>>,
+    pub coin_type: Option<Vec<u8>>,
+    pub coin_balance: Option<i64>,
+    pub cp_sequence_number: i64,
+}
+
+/// A row of the `wal_obj_types` write-ahead table.
+#[derive(Insertable, Debug, Clone)]
+#[diesel(table_name = wal_obj_types, primary_key(object_id, object_version))]
+pub struct StoredWalObjType {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_kind: Option<StoredOwnerKind>,
+    pub owner_id: Option<Vec<u8>>,
+    pub package: Option<Vec<u8>>,
+    pub module: Option<String>,
+    pub name: Option<String>,
+    pub instantiation: Option<Vec<u8>>,
+    pub cp_sequence_number: i64,
+}
+
+/// Serialize `StoredOwnerKind` as an `i16` (`SMALLINT`). The impl header lost its generic
+/// parameters: it must be generic over the diesel backend `DB`.
+impl<DB: Backend> serialize::ToSql<SmallInt, DB> for StoredOwnerKind
+where
+    i16: serialize::ToSql<SmallInt, DB>,
+{
+    fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, DB>) -> serialize::Result {
+        // The values here must stay in sync with the enum's `#[repr(i16)]` discriminants and
+        // the `FromSql` impl below.
+        match self {
+            StoredOwnerKind::Immutable => 0.to_sql(out),
+            StoredOwnerKind::Address => 1.to_sql(out),
+            StoredOwnerKind::Object => 2.to_sql(out),
+            StoredOwnerKind::Shared => 3.to_sql(out),
+        }
+    }
+}
+
+/// Deserialize `StoredOwnerKind` from an `i16`, rejecting values outside `0..=3`.
+impl<DB: Backend> deserialize::FromSql<SmallInt, DB> for StoredOwnerKind
+where
+    i16: deserialize::FromSql<SmallInt, DB>,
+{
+    fn from_sql(raw: DB::RawValue<'_>) -> deserialize::Result<Self> {
+        Ok(match i16::from_sql(raw)? {
+            0 => StoredOwnerKind::Immutable,
+            1 => StoredOwnerKind::Address,
+            2 => StoredOwnerKind::Object,
+            3 => StoredOwnerKind::Shared,
+            o => return Err(format!("Unexpected StoredOwnerKind: {o}").into()),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Renamed from `test_stored_owner_kind_field_count`: the assertion is about
+    // `StoredObject`, not `StoredOwnerKind`.
+    #[test]
+    fn test_stored_object_field_count() {
+        assert_eq!(StoredObject::field_count(), 3);
+    }
+
+    #[test]
+    fn test_stored_sum_coin_balance_field_count() {
+        assert_eq!(StoredSumCoinBalance::field_count(), 5);
+    }
+
+    #[test]
+    fn test_stored_sum_obj_type_field_count() {
+        assert_eq!(StoredSumObjType::field_count(), 8);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/transactions.rs b/crates/sui-indexer-alt/src/models/transactions.rs
new file mode 100644
index 0000000000000..3a7325c484793
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/transactions.rs
@@ -0,0 +1,69 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::schema::{kv_transactions, tx_affected_objects, tx_balance_changes};
+use diesel::prelude::*;
+use serde::{Deserialize, Serialize};
+use sui_field_count::FieldCount;
+use sui_types::object::Owner;
+
+/// A change in coin balance attributed to an owner, as a result of a transaction.
+///
+/// NOTE(review): this type derives `Serialize`/`Deserialize` and appears to be what is encoded
+/// into `StoredTxBalanceChange::balance_changes` -- confirm the encoding (BCS vs JSON) against
+/// the pipeline that writes that column.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum BalanceChange {
+    V1 {
+        /// Owner whose balance changed
+        owner: Owner,
+
+        /// Type of the Coin (just the one-time witness type).
+        coin_type: String,
+
+        /// The amount the balance changed by. A negative amount means the net flow of value is
+        /// from the owner, and a positive amount means the net flow of value is to the owner.
+        amount: i128,
+    },
+}
+
+/// A row of the `kv_transactions` table. Byte columns were missing their `<u8>` type argument.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = kv_transactions)]
+pub struct StoredTransaction {
+    pub tx_digest: Vec<u8>,
+    pub cp_sequence_number: i64,
+    pub timestamp_ms: i64,
+    /// NOTE(review): presumably BCS-serialized transaction data -- confirm against the writer.
+    pub raw_transaction: Vec<u8>,
+    pub raw_effects: Vec<u8>,
+    pub events: Vec<u8>,
+}
+
+/// A row of the `tx_affected_objects` table.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = tx_affected_objects)]
+pub struct StoredTxAffectedObject {
+    pub tx_sequence_number: i64,
+    pub affected: Vec<u8>,
+    pub sender: Vec<u8>,
+}
+
+/// A row of the `tx_balance_changes` table.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = tx_balance_changes)]
+pub struct StoredTxBalanceChange {
+    pub tx_sequence_number: i64,
+    pub balance_changes: Vec<u8>,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Pin the column counts so schema changes are caught at review time.
+    #[test]
+    fn test_stored_transaction_field_count() {
+        assert_eq!(StoredTransaction::field_count(), 6);
+    }
+
+    #[test]
+    fn test_stored_tx_affected_object_field_count() {
+        assert_eq!(StoredTxAffectedObject::field_count(), 3);
+    }
+
+    #[test]
+    fn test_stored_tx_balance_change_field_count() {
+        assert_eq!(StoredTxBalanceChange::field_count(), 2);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/watermarks.rs b/crates/sui-indexer-alt/src/models/watermarks.rs
new file mode 100644
index 0000000000000..d26d968796ba4
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/watermarks.rs
@@ -0,0 +1,117 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::borrow::Cow;
+
+use chrono::{DateTime, Utc};
+use diesel::prelude::*;
+use diesel_async::RunQueryDsl;
+use sui_field_count::FieldCount;
+
+use crate::{db::Connection, schema::watermarks};
+
+/// A full row of the `watermarks` table.
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = watermarks)]
+pub struct StoredWatermark {
+    pub pipeline: String,
+    // Inclusive upper bounds of the data committed for this pipeline.
+    pub epoch_hi_inclusive: i64,
+    pub checkpoint_hi_inclusive: i64,
+    pub tx_hi: i64,
+    pub timestamp_ms_hi_inclusive: i64,
+    // Lower bounds / pruner bookkeeping. These are zero-initialized when converting from a
+    // `CommitterWatermark` (see the `From` impl in this module).
+    pub epoch_lo: i64,
+    pub reader_lo: i64,
+    pub pruner_timestamp_ms: i64,
+    pub pruner_hi: i64,
+}
+
+/// Fields that the committer is responsible for setting.
+///
+/// Borrows the pipeline name where possible (`Cow`), so statically-named pipelines avoid an
+/// allocation.
+#[derive(AsChangeset, Selectable, Queryable, Debug, Clone, FieldCount)]
+#[diesel(table_name = watermarks)]
+pub struct CommitterWatermark<'p> {
+    pub pipeline: Cow<'p, str>,
+    pub epoch_hi_inclusive: i64,
+    pub checkpoint_hi_inclusive: i64,
+    pub tx_hi: i64,
+    pub timestamp_ms_hi_inclusive: i64,
+}
+
+impl CommitterWatermark<'static> {
+    /// Get the current high watermark for the pipeline.
+    ///
+    /// Returns `Ok(None)` if no watermark row exists for `pipeline` yet. (The return type lost
+    /// its `<Option<Self>>` argument, which does not compile.)
+    pub async fn get(
+        conn: &mut Connection<'_>,
+        pipeline: &'static str,
+    ) -> QueryResult<Option<Self>> {
+        watermarks::table
+            .select(CommitterWatermark::as_select())
+            .filter(watermarks::pipeline.eq(pipeline))
+            .first(conn)
+            .await
+            .optional()
+    }
+}
+
+impl<'p> CommitterWatermark<'p> {
+    /// A new watermark with the given pipeline name indicating zero progress.
+    pub fn initial(pipeline: Cow<'p, str>) -> Self {
+        CommitterWatermark {
+            pipeline,
+            epoch_hi_inclusive: 0,
+            checkpoint_hi_inclusive: 0,
+            tx_hi: 0,
+            timestamp_ms_hi_inclusive: 0,
+        }
+    }
+
+    /// The consensus timestamp associated with this checkpoint.
+    ///
+    /// (The return type lost its `<Utc>` argument; out-of-range timestamps fall back to the
+    /// Unix epoch via `unwrap_or_default`.)
+    pub fn timestamp(&self) -> DateTime<Utc> {
+        DateTime::from_timestamp_millis(self.timestamp_ms_hi_inclusive).unwrap_or_default()
+    }
+
+    /// Upsert the high watermark as long as it raises the watermark stored in the database.
+    /// Returns a boolean indicating whether the watermark was actually updated or not.
+    ///
+    /// TODO(amnn): Test this (depends on supporting migrations and tempdb).
+    pub async fn update(&self, conn: &mut Connection<'_>) -> QueryResult<bool> {
+        use diesel::query_dsl::methods::FilterDsl;
+        Ok(diesel::insert_into(watermarks::table)
+            .values(StoredWatermark::from(self.clone()))
+            .on_conflict(watermarks::pipeline)
+            .do_update()
+            .set(self)
+            // Only overwrite when the stored checkpoint watermark is strictly lower, so
+            // concurrent writers can never move the watermark backwards.
+            .filter(watermarks::checkpoint_hi_inclusive.lt(self.checkpoint_hi_inclusive))
+            .execute(conn)
+            .await?
+            > 0)
+    }
+}
+
+/// Promote a committer watermark to a full row, zero-initializing the reader/pruner fields.
+/// (The impl header lost its `<CommitterWatermark<'p>>` type argument.)
+impl<'p> From<CommitterWatermark<'p>> for StoredWatermark {
+    fn from(watermark: CommitterWatermark<'p>) -> Self {
+        StoredWatermark {
+            pipeline: watermark.pipeline.into_owned(),
+            epoch_hi_inclusive: watermark.epoch_hi_inclusive,
+            checkpoint_hi_inclusive: watermark.checkpoint_hi_inclusive,
+            tx_hi: watermark.tx_hi,
+            timestamp_ms_hi_inclusive: watermark.timestamp_ms_hi_inclusive,
+            // The committer does not track lower bounds or pruning progress.
+            epoch_lo: 0,
+            reader_lo: 0,
+            pruner_timestamp_ms: 0,
+            pruner_hi: 0,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Pin the column counts so schema changes are caught at review time.
+    #[test]
+    fn test_stored_watermark_field_count() {
+        assert_eq!(StoredWatermark::field_count(), 9);
+    }
+
+    #[test]
+    fn test_committer_watermark_field_count() {
+        assert_eq!(CommitterWatermark::<'static>::field_count(), 5);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/collector.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/collector.rs
new file mode 100644
index 0000000000000..1bf3459d6817a
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/collector.rs
@@ -0,0 +1,181 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeMap, sync::Arc};
+
+use mysten_metrics::spawn_monitored_task;
+use tokio::{
+    sync::mpsc,
+    task::JoinHandle,
+    time::{interval, MissedTickBehavior},
+};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, info};
+
+use crate::{
+    metrics::IndexerMetrics,
+    pipeline::{Indexed, PipelineConfig, WatermarkPart},
+};
+
+use super::{Batched, Handler};
+
+/// Processed values that are waiting to be written to the database. This is an internal type used
+/// by the concurrent collector to hold data it is waiting to send to the committer.
+///
+/// (The type parameter was stripped: `values` holds the handler's row type, `H::Value` --
+/// consistent with `Indexed<H>.values` being moved in wholesale in the `From` impl below.)
+struct Pending<H: Handler> {
+    /// Values to be inserted into the database from this checkpoint
+    values: Vec<H::Value>,
+    /// The watermark associated with this checkpoint and the part of it that is left to commit
+    watermark: WatermarkPart,
+}
+
+impl<H: Handler> Pending<H> {
+    /// Whether there are values left to commit from this indexed checkpoint.
+    fn is_empty(&self) -> bool {
+        debug_assert!(self.watermark.batch_rows == 0);
+        self.values.is_empty()
+    }
+
+    /// Adds data from this indexed checkpoint to the `batch`, honoring the handler's bounds on
+    /// chunk size.
+    fn batch_into(&mut self, batch: &mut Batched<H>) {
+        if batch.values.len() + self.values.len() > H::MAX_CHUNK_ROWS {
+            // `split_off` keeps the rows that fit in `self.values` and returns the rest; swap
+            // the halves so `for_batch` holds the rows to send and the remainder stays pending.
+            let mut for_batch = self
+                .values
+                .split_off(H::MAX_CHUNK_ROWS - batch.values.len());
+
+            std::mem::swap(&mut self.values, &mut for_batch);
+            batch.watermark.push(self.watermark.take(for_batch.len()));
+            batch.values.extend(for_batch);
+        } else {
+            batch.watermark.push(self.watermark.take(self.values.len()));
+            batch.values.extend(std::mem::take(&mut self.values));
+        }
+    }
+}
+
+/// A freshly indexed checkpoint starts out fully pending: its watermark part covers all of its
+/// rows. (The impl header lost its generic parameters.)
+impl<H: Handler> From<Indexed<H>> for Pending<H> {
+    fn from(indexed: Indexed<H>) -> Self {
+        Self {
+            watermark: WatermarkPart {
+                watermark: indexed.watermark,
+                batch_rows: indexed.values.len(),
+                total_rows: indexed.values.len(),
+            },
+            values: indexed.values,
+        }
+    }
+}
+
+/// The collector task is responsible for gathering rows into batches which it then sends to a
+/// committer task to write to the database. The task publishes batches in the following
+/// circumstances:
+///
+/// - If `H::BATCH_SIZE` rows are pending, it will immediately schedule a batch to be gathered.
+///
+/// - If after sending one batch there is more data to be sent, it will immediately schedule the
+///   next batch to be gathered (Each batch will contain at most `H::MAX_CHUNK_ROWS` rows).
+///
+/// - Otherwise, it will check for any data to write out at a regular interval (controlled by
+///   `config.collect_interval`).
+///
+/// This task will shutdown if canceled via the `cancel` token, or if any of its channels are
+/// closed.
+pub(super) fn collector<H: Handler + 'static>(
+    config: PipelineConfig,
+    mut rx: mpsc::Receiver<Indexed<H>>,
+    tx: mpsc::Sender<Batched<H>>,
+    metrics: Arc<IndexerMetrics>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        // The `poll` interval controls the maximum time to wait between collecting batches,
+        // regardless of number of rows pending.
+        let mut poll = interval(config.collect_interval);
+        poll.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+        // Data for checkpoints that haven't been written yet, keyed by checkpoint sequence
+        // number so batches drain in checkpoint order (`BTreeMap::first_entry` below).
+        let mut pending: BTreeMap<u64, Pending<H>> = BTreeMap::new();
+        let mut pending_rows = 0;
+
+        info!(pipeline = H::NAME, "Starting collector");
+
+        loop {
+            tokio::select! {
+                _ = cancel.cancelled() => {
+                    info!(pipeline = H::NAME, "Shutdown received, stopping collector");
+                    break;
+                }
+
+                // Time to create another batch and push it to the committer.
+                _ = poll.tick() => {
+                    let guard = metrics
+                        .collector_gather_latency
+                        .with_label_values(&[H::NAME])
+                        .start_timer();
+
+                    let mut batch = Batched::new();
+                    while !batch.is_full() {
+                        let Some(mut entry) = pending.first_entry() else {
+                            break;
+                        };
+
+                        let indexed = entry.get_mut();
+                        indexed.batch_into(&mut batch);
+                        if indexed.is_empty() {
+                            entry.remove();
+                        }
+                    }
+
+                    pending_rows -= batch.len();
+                    let elapsed = guard.stop_and_record();
+                    debug!(
+                        pipeline = H::NAME,
+                        elapsed_ms = elapsed * 1000.0,
+                        rows = batch.len(),
+                        pending = pending_rows,
+                        "Gathered batch",
+                    );
+
+                    metrics
+                        .total_collector_batches_created
+                        .with_label_values(&[H::NAME])
+                        .inc();
+
+                    metrics
+                        .collector_batch_size
+                        .with_label_values(&[H::NAME])
+                        .observe(batch.len() as f64);
+
+                    if tx.send(batch).await.is_err() {
+                        info!(pipeline = H::NAME, "Committer closed channel, stopping collector");
+                        break;
+                    }
+
+                    if pending_rows > 0 {
+                        // More work queued: gather the next batch without waiting a full tick.
+                        poll.reset_immediately();
+                    } else if rx.is_closed() && rx.is_empty() {
+                        info!(
+                            pipeline = H::NAME,
+                            "Processor closed channel, pending rows empty, stopping collector",
+                        );
+                        break;
+                    }
+                }
+
+                // Backpressure: stop receiving new checkpoints while too many rows are pending.
+                Some(indexed) = rx.recv(), if pending_rows < H::MAX_PENDING_ROWS => {
+                    metrics
+                        .total_collector_rows_received
+                        .with_label_values(&[H::NAME])
+                        .inc_by(indexed.values.len() as u64);
+
+                    pending_rows += indexed.values.len();
+                    pending.insert(indexed.checkpoint(), indexed.into());
+
+                    if pending_rows >= H::MIN_EAGER_ROWS {
+                        poll.reset_immediately()
+                    }
+                }
+            }
+        }
+    })
+}
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/committer.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/committer.rs
new file mode 100644
index 0000000000000..0a6ba850cc24b
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/committer.rs
@@ -0,0 +1,182 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{sync::Arc, time::Duration};
+
+use backoff::ExponentialBackoff;
+use futures::TryStreamExt;
+use mysten_metrics::spawn_monitored_task;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_stream::{wrappers::ReceiverStream, StreamExt};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info, warn};
+
+use crate::{
+    db::Db,
+    metrics::IndexerMetrics,
+    pipeline::{Break, PipelineConfig, WatermarkPart},
+};
+
+use super::{Batched, Handler};
+
+/// If the committer needs to retry a commit, it will wait this long initially.
+const INITIAL_RETRY_INTERVAL: Duration = Duration::from_millis(100);
+
+/// If the committer needs to retry a commit, it will wait at most this long between retries.
+const MAX_RETRY_INTERVAL: Duration = Duration::from_secs(1);
+
+/// The committer task is responsible for writing batches of rows to the database. It receives
+/// batches on `rx` and writes them out to the `db` concurrently (`config.write_concurrency`
+/// controls the degree of fan-out).
+///
+/// The writing of each batch will be repeatedly retried on an exponential back-off until it
+/// succeeds. Once the write succeeds, the [WatermarkPart]s for that batch are sent on `tx` to the
+/// watermark task.
+///
+/// This task will shutdown via its `cancel`lation token, or if its receiver or sender channels are
+/// closed.
+pub(super) fn committer(
+    config: PipelineConfig,
+    rx: mpsc::Receiver>,
+    tx: mpsc::Sender>,
+    db: Db,
+    metrics: Arc,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        info!(pipeline = H::NAME, "Starting committer");
+
+        match ReceiverStream::new(rx)
+            .map(Ok)
+            .try_for_each_concurrent(config.write_concurrency, |Batched { values, watermark }| {
+                let values = Arc::new(values);
+                let tx = tx.clone();
+                let db = db.clone();
+                let metrics = metrics.clone();
+                let cancel = cancel.clone();
+
+                // Repeatedly try to get a connection to the DB and write the batch. Use an
+                // exponential backoff in case the failure is due to contention over the DB
+                // connection pool.
+                let backoff = ExponentialBackoff {
+                    initial_interval: INITIAL_RETRY_INTERVAL,
+                    current_interval: INITIAL_RETRY_INTERVAL,
+                    max_interval: MAX_RETRY_INTERVAL,
+                    max_elapsed_time: None,
+                    ..Default::default()
+                };
+
+                use backoff::Error as BE;
+                let commit = move || {
+                    let values = values.clone();
+                    let db = db.clone();
+                    let metrics = metrics.clone();
+                    async move {
+                        metrics
+                            .total_committer_batches_attempted
+                            .with_label_values(&[H::NAME])
+                            .inc();
+
+                        let affected = if values.is_empty() {
+                            0
+                        } else {
+                            let guard = metrics
+                                .committer_commit_latency
+                                .with_label_values(&[H::NAME])
+                                .start_timer();
+
+                            let mut conn = db.connect().await.map_err(|e| {
+                                warn!(
+                                    pipeline = H::NAME,
+                                    "Committer failed to get connection for DB"
+                                );
+                                BE::transient(Break::Err(e.into()))
+                            })?;
+
+                            let affected = H::commit(values.as_slice(), &mut conn).await;
+                            let elapsed = guard.stop_and_record();
+
+                            match affected {
+                                Ok(affected) => {
+                                    debug!(
+                                        pipeline = H::NAME,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        affected,
+                                        committed = values.len(),
+                                        "Wrote batch",
+                                    );
+
+                                    affected
+                                }
+
+                                Err(e) => {
+                                    warn!(
+                                        pipeline = H::NAME,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        committed = values.len(),
+                                        "Error writing batch: {e}",
+                                    );
+
+                                    return Err(BE::transient(Break::Err(e)));
+                                }
+                            }
+                        };
+
+                        metrics
+                            .total_committer_batches_succeeded
+                            .with_label_values(&[H::NAME])
+                            .inc();
+
+                        metrics
+                            .total_committer_rows_committed
+                            .with_label_values(&[H::NAME])
+                            .inc_by(values.len() as u64);
+
+                        metrics
+                            .total_committer_rows_affected
+                            .with_label_values(&[H::NAME])
+                            .inc_by(affected as u64);
+
+                        Ok(())
+                    }
+                };
+
+                async move {
+                    tokio::select! {
+                        _ = cancel.cancelled() => {
+                            return Err(Break::Cancel);
+                        }
+
+                        // Double check that the commit actually went through (this backoff should
+                        // not produce any permanent errors, but if it does, we need to shutdown
+                        // the pipeline).
+                        commit = backoff::future::retry(backoff, commit) => {
+                            let () = commit?;
+                        }
+                    };
+
+                    if !config.skip_watermark && tx.send(watermark).await.is_err() {
+                        info!(pipeline = H::NAME, "Watermark closed channel");
+                        return Err(Break::Cancel);
+                    }
+
+                    Ok(())
+                }
+            })
+            .await
+        {
+            Ok(()) => {
+                info!(pipeline = H::NAME, "Batches done, stopping committer");
+            }
+
+            Err(Break::Cancel) => {
+                info!(pipeline = H::NAME, "Shutdown received, stopping committer");
+            }
+
+            Err(Break::Err(e)) => {
+                error!(pipeline = H::NAME, "Error from committer: {e}");
+                cancel.cancel();
+            }
+        }
+    })
+}
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/mod.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/mod.rs
new file mode 100644
index 0000000000000..3c3f91f0f89da
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/mod.rs
@@ -0,0 +1,153 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+
+use crate::{
+    db::{self, Db},
+    metrics::IndexerMetrics,
+    models::watermarks::CommitterWatermark,
+};
+
+use super::{processor::processor, PipelineConfig, Processor, WatermarkPart, PIPELINE_BUFFER};
+
+use self::{collector::collector, committer::committer, watermark::watermark};
+
+mod collector;
+mod committer;
+mod watermark;
+
+/// The maximum number of watermarks that can show up in a single batch. This limit exists to deal
+/// with pipelines that produce no data for a majority of checkpoints -- the size of these
+/// pipeline's batches will be dominated by watermark updates.
+const MAX_WATERMARK_UPDATES: usize = 10_000;
+
+/// Handlers implement the logic for a given indexing pipeline: How to process checkpoint data (by
+/// implementing [Processor]) into rows for their table, and how to write those rows to the database.
+///
+/// The handler is also responsible for tuning the various parameters of the pipeline (provided as
+/// associated values). Reasonable defaults have been chosen to balance concurrency with memory
+/// usage, but each handler may choose to override these defaults, e.g.
+///
+/// - Handlers that produce many small rows may wish to increase their batch/chunk/max-pending
+///   sizes.
+/// - Handlers that do more work during processing may wish to increase their fanout so more of it
+///   can be done concurrently, to preserve throughput.
+///
+/// Concurrent handlers can only be used in concurrent pipelines, where checkpoint data is
+/// processed and committed out-of-order and a watermark table is kept up-to-date with the latest
+/// checkpoint below which all data has been committed.
+///
+/// Back-pressure is handled through the `MAX_PENDING_SIZE` constant -- if more than this many rows
+/// build up, the collector will stop accepting new checkpoints, which will eventually propagate
+/// back to the ingestion service.
+#[async_trait::async_trait]
+pub trait Handler: Processor {
+    /// If at least this many rows are pending, the committer will commit them eagerly.
+    const MIN_EAGER_ROWS: usize = 50;
+
+    /// If there are more than this many rows pending, the committer will only commit this many in
+    /// one operation.
+    const MAX_CHUNK_ROWS: usize = 200;
+
+    /// If there are more than this many rows pending, the committer applies backpressure.
+    const MAX_PENDING_ROWS: usize = 1000;
+
+    /// Take a chunk of values and commit them to the database, returning the number of rows
+    /// affected.
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>)
+        -> anyhow::Result;
+}
+
+/// Values ready to be written to the database. This is an internal type used to communicate
+/// between the collector and the committer parts of the pipeline.
+struct Batched {
+    /// The rows to write
+    values: Vec,
+    /// Proportions of all the watermarks that are represented in this chunk
+    watermark: Vec,
+}
+
+impl Batched {
+    fn new() -> Self {
+        Self {
+            values: vec![],
+            watermark: vec![],
+        }
+    }
+
+    /// Number of rows in this batch.
+    fn len(&self) -> usize {
+        self.values.len()
+    }
+
+    /// The batch is full if it has more than enough values to write to the database, or more than
+    /// enough watermarks to update.
+    fn is_full(&self) -> bool {
+        self.values.len() >= H::MAX_CHUNK_ROWS || self.watermark.len() >= MAX_WATERMARK_UPDATES
+    }
+}
+
+/// Start a new concurrent (out-of-order) indexing pipeline served by the handler, `H`. Starting
+/// strictly after the `watermark` (or from the beginning if no watermark was provided).
+///
+/// Each pipeline consists of a processor task which takes checkpoint data and breaks it down into
+/// rows, ready for insertion, a collector which batches those rows into an appropriate size for
+/// the database, a committer which writes the rows out concurrently, and a watermark task to
+/// update the high watermark.
+///
+/// Committing is performed out-of-order: the pipeline may write out checkpoints out-of-order,
+/// either because it received the checkpoints out-of-order or because of variance in processing
+/// time.
+///
+/// The pipeline also maintains a row in the `watermarks` table for the pipeline which tracks the
+/// watermark below which all data has been committed (modulo pruning).
+///
+/// Checkpoint data is fed into the pipeline through the `checkpoint_rx` channel, and internal
+/// channels are created to communicate between its various components. The pipeline can be
+/// shutdown using its `cancel` token, and will also shutdown if any of its independent tasks
+/// reports an issue.
+pub(crate) fn pipeline(
+    initial_watermark: Option>,
+    config: PipelineConfig,
+    db: Db,
+    checkpoint_rx: mpsc::Receiver>,
+    metrics: Arc,
+    cancel: CancellationToken,
+) -> (
+    JoinHandle<()>,
+    JoinHandle<()>,
+    JoinHandle<()>,
+    JoinHandle<()>,
+) {
+    let (processor_tx, collector_rx) = mpsc::channel(H::FANOUT + PIPELINE_BUFFER);
+    let (collector_tx, committer_rx) = mpsc::channel(config.write_concurrency + PIPELINE_BUFFER);
+    let (committer_tx, watermark_rx) = mpsc::channel(config.write_concurrency + PIPELINE_BUFFER);
+
+    let processor = processor::(checkpoint_rx, processor_tx, metrics.clone(), cancel.clone());
+
+    let collector = collector::(
+        config.clone(),
+        collector_rx,
+        collector_tx,
+        metrics.clone(),
+        cancel.clone(),
+    );
+
+    let committer = committer::(
+        config.clone(),
+        committer_rx,
+        committer_tx,
+        db.clone(),
+        metrics.clone(),
+        cancel.clone(),
+    );
+
+    let watermark = watermark::(initial_watermark, config, watermark_rx, db, metrics, cancel);
+
+    (processor, collector, committer, watermark)
+}
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/watermark.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/watermark.rs
new file mode 100644
index 0000000000000..073bbe298ea61
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/watermark.rs
@@ -0,0 +1,278 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{
+    cmp::Ordering,
+    collections::{btree_map::Entry, BTreeMap},
+    sync::Arc,
+};
+
+use mysten_metrics::spawn_monitored_task;
+use tokio::{
+    sync::mpsc,
+    task::JoinHandle,
+    time::{interval, MissedTickBehavior},
+};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info, warn};
+
+use crate::{
+    db::Db,
+    metrics::IndexerMetrics,
+    models::watermarks::CommitterWatermark,
+    pipeline::{
+        PipelineConfig, WatermarkPart, LOUD_WATERMARK_UPDATE_INTERVAL, WARN_PENDING_WATERMARKS,
+    },
+};
+
+use super::Handler;
+
+/// The watermark task is responsible for keeping track of a pipeline's out-of-order commits and
+/// updating its row in the `watermarks` table when a continuous run of checkpoints have landed
+/// since the last watermark update.
+///
+/// It receives watermark "parts" that detail the proportion of each checkpoint's data that has
+/// been written out by the committer and periodically (on a configurable interval) checks if the
+/// watermark for the pipeline can be pushed forward. The watermark can be pushed forward if there
+/// is one or more complete (all data for that checkpoint written out) watermarks spanning
+/// contiguously from the current high watermark into the future.
+///
+/// If it detects that more than [WARN_PENDING_WATERMARKS] watermarks have built up, it will issue
+/// a warning, as this could be the indication of a memory leak, and the caller probably intended
+/// to run the indexer with watermarking disabled (e.g. if they are running a backfill).
+///
+/// The task regularly traces its progress, outputting at a higher log level every
+/// [LOUD_WATERMARK_UPDATE_INTERVAL]-many checkpoints.
+///
+/// The task will shutdown if the `cancel` token is signalled, or if the `rx` channel closes and
+/// the watermark cannot be progressed. If the `config` specifies `skip_watermark`, the task will
+/// shutdown immediately.
+pub(super) fn watermark(
+    initial_watermark: Option>,
+    config: PipelineConfig,
+    mut rx: mpsc::Receiver>,
+    db: Db,
+    metrics: Arc,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        if config.skip_watermark {
+            info!(pipeline = H::NAME, "Skipping watermark task");
+            return;
+        }
+
+        let mut poll = interval(config.watermark_interval);
+        poll.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+        // To correctly update the watermark, the task tracks the watermark it last tried to write
+        // and the watermark parts for any checkpoints that have been written since then
+        // ("pre-committed"). After each batch is written, the task will try to progress the
+        // watermark as much as possible without going over any holes in the sequence of
+        // checkpoints (entirely missing watermarks, or incomplete watermarks).
+        let mut precommitted: BTreeMap = BTreeMap::new();
+        let (mut watermark, mut next_checkpoint) = if let Some(watermark) = initial_watermark {
+            let next = watermark.checkpoint_hi_inclusive + 1;
+            (watermark, next)
+        } else {
+            (CommitterWatermark::initial(H::NAME.into()), 0)
+        };
+
+        // The watermark task will periodically output a log message at a higher log level to
+        // demonstrate that the pipeline is making progress.
+        let mut next_loud_watermark_update =
+            watermark.checkpoint_hi_inclusive + LOUD_WATERMARK_UPDATE_INTERVAL;
+
+        info!(pipeline = H::NAME, ?watermark, "Starting watermark");
+
+        loop {
+            tokio::select! {
+                _ = cancel.cancelled() => {
+                    info!(pipeline = H::NAME, "Shutdown received");
+                    break;
+                }
+
+                _ = poll.tick() => {
+                    if precommitted.len() > WARN_PENDING_WATERMARKS {
+                        warn!(
+                            pipeline = H::NAME,
+                            pending = precommitted.len(),
+                            "Pipeline has a large number of pending watermarks",
+                        );
+                    }
+
+                    let Ok(mut conn) = db.connect().await else {
+                        warn!(pipeline = H::NAME, "Committer failed to get connection for DB");
+                        continue;
+                    };
+
+                    // Check if the pipeline's watermark needs to be updated
+                    let guard = metrics
+                        .watermark_gather_latency
+                        .with_label_values(&[H::NAME])
+                        .start_timer();
+
+                    let mut watermark_needs_update = false;
+                    while let Some(pending) = precommitted.first_entry() {
+                        let part = pending.get();
+
+                        // Some rows from the next watermark have not landed yet.
+                        if !part.is_complete() {
+                            break;
+                        }
+
+                        match next_checkpoint.cmp(&part.watermark.checkpoint_hi_inclusive) {
+                            // Next pending checkpoint is from the future.
+                            Ordering::Less => break,
+
+                            // This is the next checkpoint -- include it.
+                            Ordering::Equal => {
+                                watermark = pending.remove().watermark;
+                                watermark_needs_update = true;
+                                next_checkpoint += 1;
+                            }
+
+                            // Next pending checkpoint is in the past. Out of order watermarks can
+                            // be encountered when a pipeline is starting up, because ingestion
+                            // must start at the lowest checkpoint across all pipelines, or because
+                            // of a backfill, where the initial checkpoint has been overridden.
+                            Ordering::Greater => {
+                                // Track how many we see to make sure it doesn't grow without
+                                // bound.
+                                metrics
+                                    .total_watermarks_out_of_order
+                                    .with_label_values(&[H::NAME])
+                                    .inc();
+
+                                pending.remove();
+                            }
+                        }
+                    }
+
+                    let elapsed = guard.stop_and_record();
+
+                    metrics
+                        .watermark_epoch
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.epoch_hi_inclusive);
+
+                    metrics
+                        .watermark_checkpoint
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.checkpoint_hi_inclusive);
+
+                    metrics
+                        .watermark_transaction
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.tx_hi);
+
+                    metrics
+                        .watermark_timestamp_ms
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.timestamp_ms_hi_inclusive);
+
+                    debug!(
+                        pipeline = H::NAME,
+                        elapsed_ms = elapsed * 1000.0,
+                        watermark = watermark.checkpoint_hi_inclusive,
+                        timestamp = %watermark.timestamp(),
+                        pending = precommitted.len(),
+                        "Gathered watermarks",
+                    );
+
+                    if watermark_needs_update {
+                        let guard = metrics
+                            .watermark_commit_latency
+                            .with_label_values(&[H::NAME])
+                            .start_timer();
+
+                        match watermark.update(&mut conn).await {
+                            // If there's an issue updating the watermark, log it but keep going,
+                            // it's OK for the watermark to lag from a correctness perspective.
+                            Err(e) => {
+                                let elapsed = guard.stop_and_record();
+                                error!(
+                                    pipeline = H::NAME,
+                                    elapsed_ms = elapsed * 1000.0,
+                                    ?watermark,
+                                    "Error updating watermark: {e}",
+                                );
+                            }
+
+                            Ok(updated) => {
+                                let elapsed = guard.stop_and_record();
+
+                                if updated {
+                                    metrics
+                                        .watermark_epoch_in_db
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.epoch_hi_inclusive);
+
+                                    metrics
+                                        .watermark_checkpoint_in_db
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.checkpoint_hi_inclusive);
+
+                                    metrics
+                                        .watermark_transaction_in_db
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.tx_hi);
+
+                                    metrics
+                                        .watermark_timestamp_in_db_ms
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.timestamp_ms_hi_inclusive);
+                                }
+
+                                if watermark.checkpoint_hi_inclusive > next_loud_watermark_update {
+                                    next_loud_watermark_update += LOUD_WATERMARK_UPDATE_INTERVAL;
+                                    info!(
+                                        pipeline = H::NAME,
+                                        epoch = watermark.epoch_hi_inclusive,
+                                        checkpoint = watermark.checkpoint_hi_inclusive,
+                                        transaction = watermark.tx_hi,
+                                        timestamp = %watermark.timestamp(),
+                                        updated,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        "Watermark",
+                                    );
+                                } else {
+                                    debug!(
+                                        pipeline = H::NAME,
+                                        epoch = watermark.epoch_hi_inclusive,
+                                        checkpoint = watermark.checkpoint_hi_inclusive,
+                                        transaction = watermark.tx_hi,
+                                        timestamp = %watermark.timestamp(),
+                                        updated,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        "Watermark",
+                                    );
+                                }
+                            }
+                        }
+                    }
+
+                    if rx.is_closed() && rx.is_empty() {
+                        info!(pipeline = H::NAME, "Committer closed channel");
+                        break;
+                    }
+                }
+
+                Some(parts) = rx.recv() => {
+                    for part in parts {
+                        match precommitted.entry(part.checkpoint()) {
+                            Entry::Vacant(entry) => {
+                                entry.insert(part);
+                            }
+
+                            Entry::Occupied(mut entry) => {
+                                entry.get_mut().add(part);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        info!(pipeline = H::NAME, ?watermark, "Stopping watermark task");
+    })
+}
diff --git a/crates/sui-indexer-alt/src/pipeline/mod.rs b/crates/sui-indexer-alt/src/pipeline/mod.rs
new file mode 100644
index 0000000000000..56005e6ba138e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/mod.rs
@@ -0,0 +1,152 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::time::Duration;
+
+use crate::models::watermarks::CommitterWatermark;
+
+pub use processor::Processor;
+
+pub(crate) mod concurrent;
+mod processor;
+pub(crate) mod sequential;
+
+/// Tracing message for the watermark update will be logged at info level at least once every
+/// this many checkpoints.
+const LOUD_WATERMARK_UPDATE_INTERVAL: i64 = 5 * 10;
+
+/// Extra buffer added to channels between tasks in a pipeline. There does not need to be a huge
+/// capacity here because tasks already buffer rows to insert internally.
+const PIPELINE_BUFFER: usize = 5;
+
+/// Issue a warning every time the number of pending watermarks exceeds this number. This can
+/// happen if the pipeline was started with its initial checkpoint overridden to be strictly
+/// greater than its current watermark -- in that case, the pipeline will never be able to update
+/// its watermarks.
+///
+/// This may be a legitimate thing to do when backfilling a table, but in that case
+/// `--skip-watermark` should be used.
+const WARN_PENDING_WATERMARKS: usize = 10000;
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct PipelineConfig {
+    /// Number of concurrent writers per pipeline
+    #[arg(long, default_value_t = 5)]
+    write_concurrency: usize,
+
+    /// The collector will check for pending data at least this often
+    #[arg(
+        long,
+        default_value = "500",
+        value_name = "MILLISECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_millis),
+    )]
+    collect_interval: Duration,
+
+    /// Watermark task will check for pending watermarks this often
+    #[arg(
+        long,
+        default_value = "500",
+        value_name = "MILLISECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_millis),
+    )]
+    watermark_interval: Duration,
+
+    /// Avoid writing to the watermark table
+    #[arg(long)]
+    skip_watermark: bool,
+}
+
+/// Processed values associated with a single checkpoint. This is an internal type used to
+/// communicate between the processor and the collector parts of the pipeline.
+struct Indexed {
+    /// Values to be inserted into the database from this checkpoint
+    values: Vec,
+    /// The watermark associated with this checkpoint
+    watermark: CommitterWatermark<'static>,
+}
+
+/// A representation of the proportion of a watermark.
+#[derive(Debug)]
+struct WatermarkPart {
+    /// The watermark itself
+    watermark: CommitterWatermark<'static>,
+    /// The number of rows from this watermark that are in this part
+    batch_rows: usize,
+    /// The total number of rows from this watermark
+    total_rows: usize,
+}
+
+/// Internal type used by workers to propagate errors or shutdown signals up to their
+/// supervisor.
+#[derive(thiserror::Error, Debug)]
+enum Break {
+    #[error("Shutdown received")]
+    Cancel,
+
+    #[error(transparent)]
+    Err(#[from] anyhow::Error),
+}
+
+impl Indexed

{ + fn new( + epoch: u64, + cp_sequence_number: u64, + tx_hi: u64, + timestamp_ms: u64, + values: Vec, + ) -> Self { + Self { + watermark: CommitterWatermark { + pipeline: P::NAME.into(), + epoch_hi_inclusive: epoch as i64, + checkpoint_hi_inclusive: cp_sequence_number as i64, + tx_hi: tx_hi as i64, + timestamp_ms_hi_inclusive: timestamp_ms as i64, + }, + values, + } + } + + /// Number of rows from this checkpoint + fn len(&self) -> usize { + self.values.len() + } + + /// The checkpoint sequence number that this data is from + fn checkpoint(&self) -> u64 { + self.watermark.checkpoint_hi_inclusive as u64 + } +} + +impl WatermarkPart { + fn checkpoint(&self) -> u64 { + self.watermark.checkpoint_hi_inclusive as u64 + } + + /// Check if all the rows from this watermark are represented in this part. + fn is_complete(&self) -> bool { + self.batch_rows == self.total_rows + } + + /// Add the rows from `other` to this part. + fn add(&mut self, other: WatermarkPart) { + debug_assert_eq!(self.checkpoint(), other.checkpoint()); + self.batch_rows += other.batch_rows; + } + + /// Record that `rows` have been taken from this part. + fn take(&mut self, rows: usize) -> WatermarkPart { + debug_assert!( + self.batch_rows >= rows, + "Can't take more rows than are available" + ); + + self.batch_rows -= rows; + WatermarkPart { + watermark: self.watermark.clone(), + batch_rows: rows, + total_rows: self.total_rows, + } + } +} diff --git a/crates/sui-indexer-alt/src/pipeline/processor.rs b/crates/sui-indexer-alt/src/pipeline/processor.rs new file mode 100644 index 0000000000000..ce5f91194a536 --- /dev/null +++ b/crates/sui-indexer-alt/src/pipeline/processor.rs @@ -0,0 +1,128 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use futures::TryStreamExt; +use mysten_metrics::spawn_monitored_task; +use sui_types::full_checkpoint_content::CheckpointData; +use tokio::{sync::mpsc, task::JoinHandle}; +use tokio_stream::{wrappers::ReceiverStream, StreamExt}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error, info}; + +use crate::{metrics::IndexerMetrics, pipeline::Break}; + +use super::Indexed; + +/// Implementors of this trait are responsible for transforming checkpoint into rows for their +/// table. The `FANOUT` associated value controls how many concurrent workers will be used to +/// process checkpoint information. +pub trait Processor { + /// Used to identify the pipeline in logs and metrics. + const NAME: &'static str; + + /// How much concurrency to use when processing checkpoint data. + const FANOUT: usize = 10; + + /// The type of value being inserted by the handler. + type Value: Send + Sync + 'static; + + /// The processing logic for turning a checkpoint into rows of the table. + fn process(checkpoint: &Arc) -> anyhow::Result>; +} + +/// The processor task is responsible for taking checkpoint data and breaking it down into rows +/// ready to commit. It spins up a supervisor that waits on the `rx` channel for checkpoints, and +/// distributes them among `H::FANOUT` workers. +/// +/// Each worker processes a checkpoint into rows and sends them on to the committer using the `tx` +/// channel. +/// +/// The task will shutdown if the `cancel` token is cancelled, or if any of the workers encounters +/// an error -- there is no retry logic at this level. 
+pub(super) fn processor( + rx: mpsc::Receiver>, + tx: mpsc::Sender>, + metrics: Arc, + cancel: CancellationToken, +) -> JoinHandle<()> { + spawn_monitored_task!(async move { + info!(pipeline = P::NAME, "Starting processor"); + + match ReceiverStream::new(rx) + .map(Ok) + .try_for_each_concurrent(P::FANOUT, |checkpoint| { + let tx = tx.clone(); + let metrics = metrics.clone(); + let cancel = cancel.clone(); + async move { + if cancel.is_cancelled() { + return Err(Break::Cancel); + } + + metrics + .total_handler_checkpoints_received + .with_label_values(&[P::NAME]) + .inc(); + + let guard = metrics + .handler_checkpoint_latency + .with_label_values(&[P::NAME]) + .start_timer(); + + let values = P::process(&checkpoint)?; + let elapsed = guard.stop_and_record(); + + let epoch = checkpoint.checkpoint_summary.epoch; + let cp_sequence_number = checkpoint.checkpoint_summary.sequence_number; + let tx_hi = checkpoint.checkpoint_summary.network_total_transactions; + let timestamp_ms = checkpoint.checkpoint_summary.timestamp_ms; + + debug!( + pipeline = P::NAME, + checkpoint = cp_sequence_number, + elapsed_ms = elapsed * 1000.0, + "Processed checkpoint", + ); + + metrics + .total_handler_checkpoints_processed + .with_label_values(&[P::NAME]) + .inc(); + + metrics + .total_handler_rows_created + .with_label_values(&[P::NAME]) + .inc_by(values.len() as u64); + + tx.send(Indexed::new( + epoch, + cp_sequence_number, + tx_hi, + timestamp_ms, + values, + )) + .await + .map_err(|_| Break::Cancel)?; + + Ok(()) + } + }) + .await + { + Ok(()) => { + info!(pipeline = P::NAME, "Checkpoints done, stopping processor"); + } + + Err(Break::Cancel) => { + info!(pipeline = P::NAME, "Shutdown received, stopping processor"); + } + + Err(Break::Err(e)) => { + error!(pipeline = P::NAME, "Error from handler: {e}"); + cancel.cancel(); + } + }; + }) +} diff --git a/crates/sui-indexer-alt/src/pipeline/sequential/committer.rs b/crates/sui-indexer-alt/src/pipeline/sequential/committer.rs new file mode 
100644 index 0000000000000..7b392099dd793 --- /dev/null +++ b/crates/sui-indexer-alt/src/pipeline/sequential/committer.rs @@ -0,0 +1,408 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; + +use diesel_async::{scoped_futures::ScopedFutureExt, AsyncConnection}; +use mysten_metrics::spawn_monitored_task; +use tokio::{ + sync::mpsc, + task::JoinHandle, + time::{interval, MissedTickBehavior}, +}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, info, warn}; + +use crate::{ + db::Db, + metrics::IndexerMetrics, + models::watermarks::CommitterWatermark, + pipeline::{Indexed, PipelineConfig, LOUD_WATERMARK_UPDATE_INTERVAL, WARN_PENDING_WATERMARKS}, +}; + +use super::Handler; + +/// The committer task gathers rows into batches and writes them to the database. +/// +/// Data arrives out of order, grouped by checkpoint, on `rx`. The task orders them and waits to +/// write them until either a configurable polling interval has passed (controlled by +/// `config.collect_interval`), or `H::BATCH_SIZE` rows have been accumulated and we have received +/// the next expected checkpoint. +/// +/// Writes are performed on checkpoint boundaries (more than one checkpoint can be present in a +/// single write), in a single transaction that includes all row updates and an update to the +/// watermark table. +/// +/// The committer can optionally be configured to lag behind the ingestion service by a fixed +/// number of checkpoints (configured by `checkpoint_lag`). +/// +/// Upon successful write, the task sends its new watermark back to the ingestion service, to +/// unblock its regulator. +/// +/// The task can be shut down using its `cancel` token or if either of its channels are closed. 
+pub(super) fn committer( + config: PipelineConfig, + checkpoint_lag: Option, + watermark: Option>, + mut rx: mpsc::Receiver>, + tx: mpsc::UnboundedSender<(&'static str, u64)>, + db: Db, + metrics: Arc, + cancel: CancellationToken, +) -> JoinHandle<()> { + spawn_monitored_task!(async move { + // The `poll` interval controls the maximum time to wait between commits, regardless of the + // amount of data available. + let mut poll = interval(config.collect_interval); + poll.set_missed_tick_behavior(MissedTickBehavior::Delay); + + // Buffer to gather the next batch to write. A checkpoint's data is only added to the batch + // when it is known to come from the next checkpoint after `watermark` (the current tip of + // the batch), and data from previous checkpoints will be discarded to avoid double writes. + // + // The batch may be non-empty at top of a tick of the committer's loop if the previous + // attempt at a write failed. Attempt is incremented every time a batch write fails, and is + // reset when it succeeds. + let mut attempt = 0; + let mut batch = H::Batch::default(); + let mut batch_rows = 0; + let mut batch_checkpoints = 0; + + // The task keeps track of the highest (inclusive) checkpoint it has added to the batch, + // and whether that batch needs to be written out. By extension it also knows the next + // checkpoint to expect and add to the batch. + let mut watermark_needs_update = false; + let (mut watermark, mut next_checkpoint) = if let Some(watermark) = watermark { + let next = watermark.checkpoint_hi_inclusive as u64 + 1; + (watermark, next) + } else { + (CommitterWatermark::initial(H::NAME.into()), 0) + }; + + // The committer task will periodically output a log message at a higher log level to + // demonstrate that the pipeline is making progress. + let mut next_loud_watermark_update = + watermark.checkpoint_hi_inclusive + LOUD_WATERMARK_UPDATE_INTERVAL; + + // Data for checkpoint that haven't been written yet. 
Note that `pending_rows` includes + // rows in `batch`. + let mut pending: BTreeMap> = BTreeMap::new(); + let mut pending_rows = 0; + + info!(pipeline = H::NAME, ?watermark, "Starting committer"); + + loop { + tokio::select! { + _ = cancel.cancelled() => { + info!(pipeline = H::NAME, "Shutdown received"); + break; + } + + _ = poll.tick() => { + if pending.len() > WARN_PENDING_WATERMARKS { + warn!( + pipeline = H::NAME, + pending = pending.len(), + "Pipeline has a large number of pending watermarks", + ); + } + + let Ok(mut conn) = db.connect().await else { + warn!(pipeline = H::NAME, "Failed to get connection for DB"); + continue; + }; + + // Determine whether we need to hold back checkpoints from being committed + // because of checkpoint lag. + // + // TODO(amnn): Test this (depends on migrations and tempdb) + let commit_hi_inclusive = match (checkpoint_lag, pending.last_key_value()) { + (Some(lag), None) => { + debug!(pipeline = H::NAME, lag, "No pending checkpoints"); + if rx.is_closed() && rx.is_empty() { + info!(pipeline = H::NAME, "Processor closed channel before priming"); + break; + } else { + continue; + } + } + + (Some(lag), Some((pending_hi, _))) if *pending_hi < lag => { + debug!(pipeline = H::NAME, lag, pending_hi, "Priming pipeline"); + if rx.is_closed() && rx.is_empty() { + info!(pipeline = H::NAME, "Processor closed channel while priming"); + break; + } else { + continue; + } + } + + (Some(lag), Some((pending_hi, _))) => Some(*pending_hi - lag), + (None, _) => None, + }; + + let guard = metrics + .collector_gather_latency + .with_label_values(&[H::NAME]) + .start_timer(); + + // Push data into the next batch as long as it's from contiguous checkpoints, + // outside of the checkpoint lag and we haven't gathered information from too + // many checkpoints already. 
+ // + // We don't worry about overall size because the handler may have optimized + // writes by combining rows, but we will limit the number of checkpoints we try + // and batch together as a way to impose some limit on the size of the batch + // (and therefore the length of the write transaction). + while batch_checkpoints < H::MAX_BATCH_CHECKPOINTS { + let Some(entry) = pending.first_entry() else { + break; + }; + + if matches!(commit_hi_inclusive, Some(hi) if hi < *entry.key()) { + break; + } + + match next_checkpoint.cmp(entry.key()) { + // Next pending checkpoint is from the future. + Ordering::Less => break, + + // This is the next checkpoint -- include it. + Ordering::Equal => { + let indexed = entry.remove(); + batch_rows += indexed.len(); + batch_checkpoints += 1; + H::batch(&mut batch, indexed.values); + watermark = indexed.watermark; + watermark_needs_update = true; + next_checkpoint += 1; + } + + // Next pending checkpoint is in the past, ignore it to avoid double + // writes. 
+ Ordering::Greater => { + metrics + .total_watermarks_out_of_order + .with_label_values(&[H::NAME]) + .inc(); + let indexed = entry.remove(); + pending_rows -= indexed.len(); + continue; + } + } + } + + let elapsed = guard.stop_and_record(); + debug!( + pipeline = H::NAME, + elapsed_ms = elapsed * 1000.0, + rows = batch_rows, + pending = pending_rows, + "Gathered batch", + ); + + metrics + .collector_batch_size + .with_label_values(&[H::NAME]) + .observe(batch_rows as f64); + + metrics + .total_committer_batches_attempted + .with_label_values(&[H::NAME]) + .inc(); + + metrics + .watermark_epoch + .with_label_values(&[H::NAME]) + .set(watermark.epoch_hi_inclusive); + + metrics + .watermark_checkpoint + .with_label_values(&[H::NAME]) + .set(watermark.checkpoint_hi_inclusive); + + metrics + .watermark_transaction + .with_label_values(&[H::NAME]) + .set(watermark.tx_hi); + + metrics + .watermark_timestamp_ms + .with_label_values(&[H::NAME]) + .set(watermark.timestamp_ms_hi_inclusive); + + let guard = metrics + .committer_commit_latency + .with_label_values(&[H::NAME]) + .start_timer(); + + // Write all the object updates out along with the watermark update, in a + // single transaction. The handler's `commit` implementation is responsible for + // chunking up the writes into a manageable size. + let affected = conn.transaction::<_, anyhow::Error, _>(|conn| async { + watermark.update(conn).await?; + H::commit(&batch, conn).await + }.scope_boxed()).await; + + // Drop the connection eagerly to avoid it holding on to references borrowed by + // the transaction closure. 
+ drop(conn); + + let elapsed = guard.stop_and_record(); + + let affected = match affected { + Ok(affected) => affected, + + Err(e) => { + warn!( + pipeline = H::NAME, + elapsed_ms = elapsed * 1000.0, + attempt, + committed = batch_rows, + pending = pending_rows, + "Error writing batch: {e}", + ); + + attempt += 1; + continue; + } + }; + + debug!( + pipeline = H::NAME, + elapsed_ms = elapsed * 1000.0, + attempt, + affected, + committed = batch_rows, + pending = pending_rows, + "Wrote batch", + ); + + metrics + .total_committer_batches_succeeded + .with_label_values(&[H::NAME]) + .inc(); + + metrics + .total_committer_rows_committed + .with_label_values(&[H::NAME]) + .inc_by(batch_rows as u64); + + metrics + .total_committer_rows_affected + .with_label_values(&[H::NAME]) + .inc_by(affected as u64); + + metrics + .watermark_epoch_in_db + .with_label_values(&[H::NAME]) + .set(watermark.epoch_hi_inclusive); + + metrics + .watermark_checkpoint_in_db + .with_label_values(&[H::NAME]) + .set(watermark.checkpoint_hi_inclusive); + + metrics + .watermark_transaction_in_db + .with_label_values(&[H::NAME]) + .set(watermark.tx_hi); + + metrics + .watermark_timestamp_in_db_ms + .with_label_values(&[H::NAME]) + .set(watermark.timestamp_ms_hi_inclusive); + + if watermark.checkpoint_hi_inclusive > next_loud_watermark_update { + next_loud_watermark_update += LOUD_WATERMARK_UPDATE_INTERVAL; + info!( + pipeline = H::NAME, + epoch = watermark.epoch_hi_inclusive, + checkpoint = watermark.checkpoint_hi_inclusive, + transaction = watermark.tx_hi, + timestamp = %watermark.timestamp(), + "Watermark", + ); + } else { + debug!( + pipeline = H::NAME, + epoch = watermark.epoch_hi_inclusive, + checkpoint = watermark.checkpoint_hi_inclusive, + transaction = watermark.tx_hi, + timestamp = %watermark.timestamp(), + "Watermark", + ); + } + + if watermark_needs_update { + // Ignore the result -- the ingestion service will close this channel + // once it is done, but there may still be checkpoints 
buffered that need + // processing. + let _ = tx.send((H::NAME, watermark.checkpoint_hi_inclusive as u64)); + } + + let _ = std::mem::take(&mut batch); + watermark_needs_update = false; + pending_rows -= batch_rows; + batch_checkpoints = 0; + batch_rows = 0; + attempt = 0; + + // If there is a pending checkpoint, no greater than the expected next + // checkpoint, and less than or equal to the inclusive upperbound due to + // checkpoint lag, then the pipeline can do more work immediately (without + // waiting). + // + // Otherwise, if its channels have been closed, we know that it is guaranteed + // not to make any more progress, and we can stop the task. + if pending + .first_key_value() + .is_some_and(|(next, _)| { + *next <= next_checkpoint && commit_hi_inclusive.map_or(true, |hi| *next <= hi) + }) + { + poll.reset_immediately(); + } else if rx.is_closed() && rx.is_empty() { + info!(pipeline = H::NAME, "Processor closed channel, pending rows empty"); + break; + } + } + + Some(indexed) = rx.recv() => { + pending_rows += indexed.len(); + pending.insert(indexed.checkpoint(), indexed); + + // Once data has been inserted, check if we need to schedule a write before the + // next polling interval. This is appropriate if there are a minimum number of + // rows to write, and they are already in the batch, or we can process the next + // checkpoint to extract them. 
+ + if pending_rows < H::MIN_EAGER_ROWS { + continue; + } + + if batch_rows > 0 { + poll.reset_immediately(); + continue; + } + + let Some((next, _)) = pending.first_key_value() else { + continue; + }; + + match (checkpoint_lag, pending.last_key_value()) { + (Some(_), None) => continue, + (Some(lag), Some((last, _))) if last.saturating_sub(lag) <= *next => { + continue; + } + _ => if *next <= next_checkpoint { + poll.reset_immediately(); + } + } + } + } + } + + info!(pipeline = H::NAME, ?watermark, "Stopping committer"); + }) +} diff --git a/crates/sui-indexer-alt/src/pipeline/sequential/mod.rs b/crates/sui-indexer-alt/src/pipeline/sequential/mod.rs new file mode 100644 index 0000000000000..4ad82276a315e --- /dev/null +++ b/crates/sui-indexer-alt/src/pipeline/sequential/mod.rs @@ -0,0 +1,114 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use sui_types::full_checkpoint_content::CheckpointData; +use tokio::{sync::mpsc, task::JoinHandle}; +use tokio_util::sync::CancellationToken; + +use crate::{ + db::{self, Db}, + metrics::IndexerMetrics, + models::watermarks::CommitterWatermark, +}; + +use super::{processor::processor, PipelineConfig, Processor, PIPELINE_BUFFER}; + +use self::committer::committer; + +mod committer; + +/// Handlers implement the logic for a given indexing pipeline: How to process checkpoint data (by +/// implementing [Processor]) into rows for their table, how to combine multiple rows into a single +/// DB operation, and then how to write those rows atomically to the database. +/// +/// The handler is also responsible for tuning the various parameters of the pipeline (provided as +/// associated values). +/// +/// Sequential handlers can only be used in sequential pipelines, where checkpoint data is +/// processed out-of-order, but then gathered and written in order. 
If multiple checkpoints are +/// available, the pipeline will attempt to combine their writes taking advantage of batching to +/// avoid emitting redundant writes. +/// +/// Back-pressure is handled by setting a high watermark on the ingestion service: The pipeline +/// notifies the ingestion service of the checkpoint it last successfully wrote to the database +/// for, and in turn the ingestion service will only run ahead by its buffer size. This guarantees +/// liveness and limits the amount of memory the pipeline can consume, by bounding the number of +/// checkpoints that can be received before the next checkpoint. +#[async_trait::async_trait] +pub trait Handler: Processor { + /// If at least this many rows are pending, the committer will commit them eagerly. + const MIN_EAGER_ROWS: usize = 50; + + /// Maximum number of checkpoints to try and write in a single batch. The larger this number + /// is, the more chances the pipeline has to merge redundant writes, but the longer each write + /// transaction is likely to be. + const MAX_BATCH_CHECKPOINTS: usize = 5 * 60; + + /// A type to combine multiple `Self::Value`-s into. This can be used to avoid redundant writes + /// by combining multiple rows into one (e.g. if one row supersedes another, the latter can be + /// omitted). + type Batch: Default + Send + Sync + 'static; + + /// Add `values` from processing a checkpoint to the current `batch`. Checkpoints are + /// guaranteed to be presented to the batch in checkpoint order. + fn batch(batch: &mut Self::Batch, values: Vec); + + /// Take a batch of values and commit them to the database, returning the number of rows + /// affected. + async fn commit(batch: &Self::Batch, conn: &mut db::Connection<'_>) -> anyhow::Result; +} + +/// Start a new sequential (in-order) indexing pipeline, served by the handler, `H`. Starting +/// strictly after the `watermark` (or from the beginning if no watermark was provided). 
+/// +/// Each pipeline consists of a processor which takes checkpoint data and breaks it down into rows, +/// ready for insertion, and a committer which orders the rows and combines them into batches to +/// write to the database. +/// +/// Commits are performed in checkpoint order, potentially involving multiple checkpoints at a +/// time. The call to [Handler::commit] and the associated watermark update are performed in a +/// transaction to ensure atomicity. Unlike in the case of concurrent pipelines, the data passed to +/// [Handler::commit] is not chunked up, so the handler must perform this step itself, if +/// necessary. +/// +/// The pipeline can optionally be configured to lag behind the ingestion service by a fixed number +/// of checkpoints (configured by `checkpoint_lag`). +/// +/// Watermarks are also shared with the ingestion service, which is guaranteed to bound the +/// checkpoint height it pre-fetches to some constant additive factor above the pipeline's +/// watermark. +/// +/// Checkpoint data is fed into the pipeline through the `checkpoint_rx` channel, watermark updates +/// are communicated to the ingestion service through the `watermark_tx` channel and internal +/// channels are created to communicate between its various components. The pipeline can be +/// shutdown using its `cancel` token, and will also shutdown if any of its input or output +/// channels close, or any of its independent tasks fail. 
+pub(crate) fn pipeline( + initial_watermark: Option>, + config: PipelineConfig, + checkpoint_lag: Option, + db: Db, + checkpoint_rx: mpsc::Receiver>, + watermark_tx: mpsc::UnboundedSender<(&'static str, u64)>, + metrics: Arc, + cancel: CancellationToken, +) -> (JoinHandle<()>, JoinHandle<()>) { + let (processor_tx, committer_rx) = mpsc::channel(H::FANOUT + PIPELINE_BUFFER); + + let processor = processor::(checkpoint_rx, processor_tx, metrics.clone(), cancel.clone()); + + let committer = committer::( + config.clone(), + checkpoint_lag, + initial_watermark, + committer_rx, + watermark_tx, + db.clone(), + metrics.clone(), + cancel.clone(), + ); + + (processor, committer) +} diff --git a/crates/sui-indexer-alt/src/schema.rs b/crates/sui-indexer-alt/src/schema.rs new file mode 100644 index 0000000000000..7b492f46d9753 --- /dev/null +++ b/crates/sui-indexer-alt/src/schema.rs @@ -0,0 +1,152 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +// @generated automatically by Diesel CLI. + +diesel::table! { + ev_emit_mod (package, module, tx_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + ev_struct_inst (package, module, name, instantiation, tx_sequence_number) { + package -> Bytea, + module -> Text, + name -> Text, + instantiation -> Bytea, + tx_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + kv_checkpoints (sequence_number) { + sequence_number -> Int8, + certified_checkpoint -> Bytea, + checkpoint_contents -> Bytea, + } +} + +diesel::table! { + kv_objects (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + serialized_object -> Nullable, + } +} + +diesel::table! { + kv_transactions (tx_digest) { + tx_digest -> Bytea, + cp_sequence_number -> Int8, + timestamp_ms -> Int8, + raw_transaction -> Bytea, + raw_effects -> Bytea, + events -> Bytea, + } +} + +diesel::table! 
{ + obj_versions (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + object_digest -> Bytea, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + sum_coin_balances (object_id) { + object_id -> Bytea, + object_version -> Int8, + owner_id -> Bytea, + coin_type -> Bytea, + coin_balance -> Int8, + } +} + +diesel::table! { + sum_obj_types (object_id) { + object_id -> Bytea, + object_version -> Int8, + owner_kind -> Int2, + owner_id -> Nullable, + package -> Nullable, + module -> Nullable, + name -> Nullable, + instantiation -> Nullable, + } +} + +diesel::table! { + tx_affected_objects (affected, tx_sequence_number) { + tx_sequence_number -> Int8, + affected -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_balance_changes (tx_sequence_number) { + tx_sequence_number -> Int8, + balance_changes -> Bytea, + } +} + +diesel::table! { + wal_coin_balances (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + owner_id -> Nullable, + coin_type -> Nullable, + coin_balance -> Nullable, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + wal_obj_types (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + owner_kind -> Nullable, + owner_id -> Nullable, + package -> Nullable, + module -> Nullable, + name -> Nullable, + instantiation -> Nullable, + cp_sequence_number -> Int8, + } +} + +diesel::table! 
{ + watermarks (pipeline) { + pipeline -> Text, + epoch_hi_inclusive -> Int8, + checkpoint_hi_inclusive -> Int8, + tx_hi -> Int8, + timestamp_ms_hi_inclusive -> Int8, + epoch_lo -> Int8, + reader_lo -> Int8, + pruner_timestamp_ms -> Int8, + pruner_hi -> Int8, + } +} + +diesel::allow_tables_to_appear_in_same_query!( + ev_emit_mod, + ev_struct_inst, + kv_checkpoints, + kv_objects, + kv_transactions, + obj_versions, + sum_coin_balances, + sum_obj_types, + tx_affected_objects, + tx_balance_changes, + wal_coin_balances, + wal_obj_types, + watermarks, +); diff --git a/crates/sui-indexer-alt/src/task.rs b/crates/sui-indexer-alt/src/task.rs new file mode 100644 index 0000000000000..d027541a78310 --- /dev/null +++ b/crates/sui-indexer-alt/src/task.rs @@ -0,0 +1,43 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::iter; + +use futures::future::{self, Either}; +use tokio::{signal, sync::oneshot, task::JoinHandle}; +use tokio_util::sync::CancellationToken; + +/// Manages cleanly exiting the process, either because one of its constituent services has stopped +/// or because an interrupt signal was sent to the process. +pub async fn graceful_shutdown( + services: impl IntoIterator>, + cancel: CancellationToken, +) { + // If the service is naturally winding down, we don't need to wait for an interrupt signal. + // This channel is used to short-circuit the await in that case. + let (cancel_ctrl_c_tx, cancel_ctrl_c_rx) = oneshot::channel(); + + let interrupt = async { + tokio::select! { + _ = cancel_ctrl_c_rx => {} + _ = cancel.cancelled() => {} + _ = signal::ctrl_c() => cancel.cancel(), + } + + Ok(()) + }; + + tokio::pin!(interrupt); + let futures: Vec<_> = services + .into_iter() + .map(Either::Left) + .chain(iter::once(Either::Right(interrupt))) + .collect(); + + // Wait for the first service to finish, or for an interrupt signal. 
+ let (_, _, rest) = future::select_all(futures).await; + let _ = cancel_ctrl_c_tx.send(()); + + // Wait for the remaining services to finish. + let _ = future::join_all(rest).await; +} diff --git a/crates/sui-indexer-builder/src/indexer_builder.rs b/crates/sui-indexer-builder/src/indexer_builder.rs index b89ff975ae562..30f96ed2bb15c 100644 --- a/crates/sui-indexer-builder/src/indexer_builder.rs +++ b/crates/sui-indexer-builder/src/indexer_builder.rs @@ -310,6 +310,11 @@ impl Indexer { { &self.storage } + + #[cfg(any(feature = "test-utils", test))] + pub fn test_only_name(&self) -> String { + self.name.clone() + } } #[async_trait] diff --git a/crates/sui-indexer/Cargo.toml b/crates/sui-indexer/Cargo.toml index fa4490741b163..0b22f81ff2bba 100644 --- a/crates/sui-indexer/Cargo.toml +++ b/crates/sui-indexer/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] anyhow.workspace = true -rand = "0.8.5" +rand = "0.8.5" async-trait.workspace = true axum.workspace = true backoff.workspace = true @@ -64,6 +64,7 @@ sui-protocol-config.workspace = true telemetry-subscribers.workspace = true sui-rest-api.workspace = true sui-transaction-builder.workspace = true +sui-synthetic-ingestion.workspace = true move-core-types.workspace = true move-bytecode-utils.workspace = true @@ -77,6 +78,7 @@ dashmap.workspace = true [dev-dependencies] sui-keys.workspace = true sui-move-build.workspace = true +sui-swarm-config.workspace = true sui-test-transaction-builder.workspace = true test-cluster.workspace = true ntest.workspace = true diff --git a/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql new file mode 100644 index 0000000000000..807c01dca462d --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql @@ -0,0 +1,6 @@ +CREATE INDEX IF NOT EXISTS objects_history_owner ON 
objects_history (checkpoint_sequence_number, owner_type, owner_id) WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_coin_owner ON objects_history (checkpoint_sequence_number, owner_id, coin_type, object_id) WHERE coin_type IS NOT NULL AND owner_type = 1; +CREATE INDEX IF NOT EXISTS objects_history_coin_only ON objects_history (checkpoint_sequence_number, coin_type, object_id) WHERE coin_type IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_type ON objects_history (checkpoint_sequence_number, object_type); +CREATE INDEX IF NOT EXISTS objects_history_package_module_name_full_type ON objects_history (checkpoint_sequence_number, object_type_package, object_type_module, object_type_name, object_type); +CREATE INDEX IF NOT EXISTS objects_history_owner_package_module_name_full_type ON objects_history (checkpoint_sequence_number, owner_id, object_type_package, object_type_module, object_type_name, object_type); diff --git a/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql new file mode 100644 index 0000000000000..754e719819f1e --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql @@ -0,0 +1,6 @@ +DROP INDEX IF EXISTS objects_history_owner; +DROP INDEX IF EXISTS objects_history_coin_owner; +DROP INDEX IF EXISTS objects_history_coin_only; +DROP INDEX IF EXISTS objects_history_type; +DROP INDEX IF EXISTS objects_history_package_module_name_full_type; +DROP INDEX IF EXISTS objects_history_owner_package_module_name_full_type; diff --git a/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql new file mode 100644 index 0000000000000..b9fcef3e1f439 --- /dev/null +++ 
b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql @@ -0,0 +1,18 @@ +CREATE TABLE tx_senders ( + tx_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(sender, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_senders_tx_sequence_number + ON tx_senders (tx_sequence_number); + +CREATE TABLE tx_recipients ( + tx_sequence_number BIGINT NOT NULL, + recipient BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(recipient, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_recipients_sender + ON tx_recipients (sender, recipient, tx_sequence_number); diff --git a/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql new file mode 100644 index 0000000000000..fb259ea615d84 --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS tx_senders; +DROP TABLE IF EXISTS tx_recipients; diff --git a/crates/sui-indexer/src/apis/read_api.rs b/crates/sui-indexer/src/apis/read_api.rs index 78b8715e16ce7..3e3de5343869d 100644 --- a/crates/sui-indexer/src/apis/read_api.rs +++ b/crates/sui-indexer/src/apis/read_api.rs @@ -87,7 +87,11 @@ impl ReadApiServer for ReadApi { object_read_to_object_response(&self.inner, object_read, options.clone()).await }); - futures::future::try_join_all(futures).await + let mut objects = futures::future::try_join_all(futures).await?; + // Resort the objects by the order of the object id. 
+ objects.sort_by_key(|obj| obj.data.as_ref().map(|data| data.object_id)); + + Ok(objects) } async fn get_total_transaction_blocks(&self) -> RpcResult> { diff --git a/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs new file mode 100644 index 0000000000000..8273bcdaa3b7b --- /dev/null +++ b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use sui_types::full_checkpoint_content::CheckpointData; +use tracing::info; + +/// Dummy backfill that only prints the sequence number and checkpoint of the digest. Intended to +/// benchmark backfill performance. +pub struct DigestBackfill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for DigestBackfill { + type ProcessedType = (); + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + let cp = checkpoint.checkpoint_summary.sequence_number; + let digest = checkpoint.checkpoint_summary.content_digest; + info!("{cp}: {digest}"); + + vec![] + } + + async fn commit_chunk(_pool: ConnectionPool, _processed_data: Vec) {} +} diff --git a/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs index 17bbc29d7dc5c..935ba5562bd9c 100644 --- a/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs +++ b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +pub(crate) mod digest_task; pub(crate) mod ingestion_backfill_task; pub(crate) mod raw_checkpoints; pub(crate) mod tx_affected_objects; diff --git a/crates/sui-indexer/src/backfill/backfill_instances/mod.rs b/crates/sui-indexer/src/backfill/backfill_instances/mod.rs index 27c96dd6c9234..304ed4e715e1d 100644 --- a/crates/sui-indexer/src/backfill/backfill_instances/mod.rs +++ b/crates/sui-indexer/src/backfill/backfill_instances/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use crate::backfill::backfill_instances::ingestion_backfills::digest_task::DigestBackfill; use crate::backfill::backfill_instances::ingestion_backfills::ingestion_backfill_task::IngestionBackfillTask; use crate::backfill::backfill_instances::ingestion_backfills::raw_checkpoints::RawCheckpointsBackFill; use crate::backfill::backfill_instances::ingestion_backfills::tx_affected_objects::TxAffectedObjectsBackfill; @@ -28,6 +29,13 @@ pub async fn get_backfill_task( kind, remote_store_url, } => match kind { + IngestionBackfillKind::Digest => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), IngestionBackfillKind::RawCheckpoints => Arc::new( IngestionBackfillTask::::new( remote_store_url, diff --git a/crates/sui-indexer/src/backfill/mod.rs b/crates/sui-indexer/src/backfill/mod.rs index 453d11baeeed2..e17ba40628ef1 100644 --- a/crates/sui-indexer/src/backfill/mod.rs +++ b/crates/sui-indexer/src/backfill/mod.rs @@ -29,6 +29,7 @@ pub enum BackfillTaskKind { #[derive(ValueEnum, Clone, Debug)] pub enum IngestionBackfillKind { + Digest, RawCheckpoints, TxAffectedObjects, } diff --git a/crates/sui-indexer/src/benchmark.rs b/crates/sui-indexer/src/benchmark.rs new file mode 100644 index 0000000000000..96df25cba9fa6 --- /dev/null +++ b/crates/sui-indexer/src/benchmark.rs @@ -0,0 +1,130 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::config::{BenchmarkConfig, IngestionConfig, IngestionSources, UploadOptions}; +use crate::database::ConnectionPool; +use crate::db::{reset_database, run_migrations}; +use crate::errors::IndexerError; +use crate::indexer::Indexer; +use crate::metrics::IndexerMetrics; +use crate::store::PgIndexerStore; +use std::path::PathBuf; +use sui_synthetic_ingestion::benchmark::{run_benchmark, BenchmarkableIndexer}; +use sui_synthetic_ingestion::{IndexerProgress, SyntheticIngestionConfig}; +use tokio::sync::watch; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + +pub async fn run_indexer_benchmark( + config: BenchmarkConfig, + pool: ConnectionPool, + metrics: IndexerMetrics, +) { + if config.reset_db { + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } else { + run_migrations(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } + let store = PgIndexerStore::new(pool, UploadOptions::default(), metrics.clone()); + let ingestion_dir = config + .workload_dir + .clone() + .unwrap_or_else(|| tempfile::tempdir().unwrap().into_path()); + // If we are using a non-temp directory, we should not delete the ingestion directory. 
+ let gc_checkpoint_files = config.workload_dir.is_none(); + let synthetic_ingestion_config = SyntheticIngestionConfig { + ingestion_dir: ingestion_dir.clone(), + checkpoint_size: config.checkpoint_size, + num_checkpoints: config.num_checkpoints, + starting_checkpoint: config.starting_checkpoint, + }; + let indexer = BenchmarkIndexer::new(store, metrics, ingestion_dir, gc_checkpoint_files); + run_benchmark(synthetic_ingestion_config, indexer).await; +} + +pub struct BenchmarkIndexer { + inner: Option, + cancel: CancellationToken, + committed_checkpoints_rx: watch::Receiver>, + handle: Option>>, +} + +struct BenchmarkIndexerInner { + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + store: PgIndexerStore, + metrics: IndexerMetrics, + committed_checkpoints_tx: watch::Sender>, +} + +impl BenchmarkIndexer { + pub fn new( + store: PgIndexerStore, + metrics: IndexerMetrics, + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + ) -> Self { + let cancel = CancellationToken::new(); + let (committed_checkpoints_tx, committed_checkpoints_rx) = watch::channel(None); + Self { + inner: Some(BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + }), + cancel, + committed_checkpoints_rx, + handle: None, + } + } +} + +#[async_trait::async_trait] +impl BenchmarkableIndexer for BenchmarkIndexer { + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver> { + self.committed_checkpoints_rx.clone() + } + + async fn start(&mut self) { + let BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + } = self.inner.take().unwrap(); + let ingestion_config = IngestionConfig { + sources: IngestionSources { + data_ingestion_path: Some(ingestion_dir), + ..Default::default() + }, + gc_checkpoint_files, + ..Default::default() + }; + let cancel = self.cancel.clone(); + let handle = tokio::task::spawn(async move { + Indexer::start_writer( + ingestion_config, + store, + 
metrics, + Default::default(), + None, + cancel, + Some(committed_checkpoints_tx), + ) + .await + }); + self.handle = Some(handle); + } + + async fn stop(mut self) { + self.cancel.cancel(); + self.handle.unwrap().await.unwrap().unwrap(); + } +} diff --git a/crates/sui-indexer/src/config.rs b/crates/sui-indexer/src/config.rs index f51d18ab1ff88..6db349aa64747 100644 --- a/crates/sui-indexer/src/config.rs +++ b/crates/sui-indexer/src/config.rs @@ -114,6 +114,16 @@ pub struct IngestionConfig { )] pub checkpoint_download_queue_size: usize, + /// Start checkpoint to ingest from, this is optional and if not provided, the ingestion will + /// start from the next checkpoint after the latest committed checkpoint. + #[arg(long, env = "START_CHECKPOINT")] + pub start_checkpoint: Option, + + /// End checkpoint to ingest until, this is optional and if not provided, the ingestion will + /// continue until u64::MAX. + #[arg(long, env = "END_CHECKPOINT")] + pub end_checkpoint: Option, + #[arg( long, default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, @@ -129,6 +139,11 @@ pub struct IngestionConfig { env = "CHECKPOINT_PROCESSING_BATCH_DATA_LIMIT", )] pub checkpoint_download_queue_size_bytes: usize, + + /// Whether to delete processed checkpoint files from the local directory, + /// when running Fullnode-colocated indexer. 
+ #[arg(long, default_value_t = true)] + pub gc_checkpoint_files: bool, } impl IngestionConfig { @@ -141,10 +156,13 @@ impl Default for IngestionConfig { fn default() -> Self { Self { sources: Default::default(), + start_checkpoint: None, + end_checkpoint: None, checkpoint_download_queue_size: Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE, checkpoint_download_timeout: Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, checkpoint_download_queue_size_bytes: Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES, + gc_checkpoint_files: true, } } } @@ -186,6 +204,10 @@ pub enum Command { ResetDatabase { #[clap(long)] force: bool, + /// If true, only drop all tables but do not run the migrations. + /// That is, no tables will exist in the DB after the reset. + #[clap(long, default_value_t = false)] + skip_migrations: bool, }, /// Run through the migration scripts. RunMigrations, @@ -210,6 +232,7 @@ pub enum Command { }, /// Restore the database from formal snaphots. Restore(RestoreConfig), + Benchmark(BenchmarkConfig), } #[derive(Args, Default, Debug, Clone)] @@ -378,6 +401,41 @@ impl Default for RestoreConfig { } } +#[derive(Args, Debug, Clone)] +pub struct BenchmarkConfig { + #[arg( + long, + default_value_t = 200, + help = "Number of transactions in a checkpoint." + )] + pub checkpoint_size: u64, + #[arg( + long, + default_value_t = 2000, + help = "Total number of synthetic checkpoints to generate." + )] + pub num_checkpoints: u64, + #[arg( + long, + default_value_t = 1, + help = "Customize the first checkpoint sequence number to be committed, must be non-zero." + )] + pub starting_checkpoint: u64, + #[arg( + long, + default_value_t = false, + help = "Whether to reset the database before running." + )] + pub reset_db: bool, + #[arg( + long, + help = "Path to workload directory. 
If not provided, a temporary directory will be created.\ + If provided, synthetic workload generator will either load data from it if it exists or generate new data.\ + This avoids repeat generation of the same data." + )] + pub workload_dir: Option, +} + #[cfg(test)] mod test { use super::*; diff --git a/crates/sui-indexer/src/db.rs b/crates/sui-indexer/src/db.rs index 9937b61ce2655..4a2893603bb10 100644 --- a/crates/sui-indexer/src/db.rs +++ b/crates/sui-indexer/src/db.rs @@ -196,7 +196,14 @@ pub mod setup_postgres { pub async fn reset_database(mut conn: Connection<'static>) -> Result<(), anyhow::Error> { info!("Resetting PG database ..."); + clear_database(&mut conn).await?; + run_migrations(conn).await?; + info!("Reset database complete."); + Ok(()) + } + pub async fn clear_database(conn: &mut Connection<'static>) -> Result<(), anyhow::Error> { + info!("Clearing the database..."); let drop_all_tables = " DO $$ DECLARE r RECORD; @@ -206,9 +213,7 @@ pub mod setup_postgres { EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE'; END LOOP; END $$;"; - diesel::sql_query(drop_all_tables) - .execute(&mut conn) - .await?; + diesel::sql_query(drop_all_tables).execute(conn).await?; info!("Dropped all tables."); let drop_all_procedures = " @@ -222,9 +227,7 @@ pub mod setup_postgres { EXECUTE 'DROP PROCEDURE IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; END LOOP; END $$;"; - diesel::sql_query(drop_all_procedures) - .execute(&mut conn) - .await?; + diesel::sql_query(drop_all_procedures).execute(conn).await?; info!("Dropped all procedures."); let drop_all_functions = " @@ -238,17 +241,13 @@ pub mod setup_postgres { EXECUTE 'DROP FUNCTION IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; END LOOP; END $$;"; - diesel::sql_query(drop_all_functions) - .execute(&mut conn) - .await?; - info!("Dropped all functions."); - - run_migrations(conn).await?; - info!("Reset database complete."); + 
diesel::sql_query(drop_all_functions).execute(conn).await?; + info!("Database cleared."); Ok(()) } pub async fn run_migrations(conn: Connection<'static>) -> Result<(), anyhow::Error> { + info!("Running migrations ..."); conn.run_pending_migrations(MIGRATIONS) .await .map_err(|e| anyhow!("Failed to run migrations {e}"))?; diff --git a/crates/sui-indexer/src/handlers/checkpoint_handler.rs b/crates/sui-indexer/src/handlers/checkpoint_handler.rs index be4e0d375a923..170bda5ff6108 100644 --- a/crates/sui-indexer/src/handlers/checkpoint_handler.rs +++ b/crates/sui-indexer/src/handlers/checkpoint_handler.rs @@ -8,12 +8,13 @@ use async_trait::async_trait; use itertools::Itertools; use sui_types::dynamic_field::DynamicFieldInfo; use tokio_util::sync::CancellationToken; -use tracing::info; +use tracing::{info, warn}; use move_core_types::language_storage::{StructTag, TypeTag}; use mysten_metrics::{get_metrics, spawn_monitored_task}; use sui_data_ingestion_core::Worker; use sui_rest_api::{CheckpointData, CheckpointTransaction}; +use sui_synthetic_ingestion::IndexerProgress; use sui_types::dynamic_field::DynamicFieldType; use sui_types::effects::{ObjectChange, TransactionEffectsAPI}; use sui_types::event::SystemEpochInfoEvent; @@ -24,12 +25,13 @@ use sui_types::object::Object; use sui_types::object::Owner; use sui_types::sui_system_state::{get_sui_system_state, SuiSystemStateTrait}; use sui_types::transaction::TransactionDataAPI; +use tokio::sync::watch; use crate::errors::IndexerError; use crate::handlers::committer::start_tx_checkpoint_commit_task; use crate::metrics::IndexerMetrics; use crate::models::display::StoredDisplay; -use crate::models::epoch::{EndOfEpochUpdate, StartOfEpochUpdate}; +use crate::models::epoch::{EndOfEpochUpdate, EpochEndInfo, EpochStartInfo, StartOfEpochUpdate}; use crate::models::obj_indices::StoredObjectVersion; use crate::store::{IndexerStore, PgIndexerStore}; use crate::types::{ @@ -48,9 +50,20 @@ const CHECKPOINT_QUEUE_SIZE: usize = 100; pub 
async fn new_handlers( state: PgIndexerStore, metrics: IndexerMetrics, - next_checkpoint_sequence_number: CheckpointSequenceNumber, cancel: CancellationToken, -) -> Result { + committed_checkpoints_tx: Option>>, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, +) -> Result<(CheckpointHandler, u64), IndexerError> { + let start_checkpoint = match start_checkpoint_opt { + Some(start_checkpoint) => start_checkpoint, + None => state + .get_latest_checkpoint_sequence_number() + .await? + .map(|seq| seq.saturating_add(1)) + .unwrap_or_default(), + }; + let checkpoint_queue_size = std::env::var("CHECKPOINT_QUEUE_SIZE") .unwrap_or(CHECKPOINT_QUEUE_SIZE.to_string()) .parse::() @@ -70,13 +83,14 @@ pub async fn new_handlers( state_clone, metrics_clone, indexed_checkpoint_receiver, - next_checkpoint_sequence_number, - cancel.clone() + cancel.clone(), + committed_checkpoints_tx, + start_checkpoint, + end_checkpoint_opt, )); - Ok(CheckpointHandler::new( - state, - metrics, - indexed_checkpoint_sender, + Ok(( + CheckpointHandler::new(state, metrics, indexed_checkpoint_sender), + start_checkpoint, )) } @@ -153,12 +167,7 @@ impl CheckpointHandler { get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); return Ok(Some(EpochToCommit { last_epoch: None, - new_epoch: StartOfEpochUpdate::new( - system_state_summary, - 0, //first_checkpoint_id - 0, // first_tx_sequence_number - None, - ), + new_epoch: StartOfEpochUpdate::new(system_state_summary, EpochStartInfo::default()), })); } @@ -170,24 +179,34 @@ impl CheckpointHandler { let system_state_summary = get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); - let epoch_event = transactions + let epoch_event_opt = transactions .iter() - .flat_map(|t| t.events.as_ref().map(|e| &e.data)) - .flatten() - .find(|ev| ev.is_system_epoch_info_event()) - .unwrap_or_else(|| { - panic!( - "Can't find SystemEpochInfoEvent in epoch end checkpoint {}", - 
checkpoint_summary.sequence_number() - ) - }); - - let event = bcs::from_bytes::(&epoch_event.contents)?; + .find_map(|t| { + t.events.as_ref()?.data.iter().find_map(|ev| { + if ev.is_system_epoch_info_event() { + Some(bcs::from_bytes::(&ev.contents)) + } else { + None + } + }) + }) + .transpose()?; + if epoch_event_opt.is_none() { + warn!( + "No SystemEpochInfoEvent found at end of epoch {}, some epoch data will be set to default.", + checkpoint_summary.epoch, + ); + assert!( + system_state_summary.safe_mode, + "Sui is not in safe mode but no SystemEpochInfoEvent found at end of epoch {}", + checkpoint_summary.epoch + ); + } // At some point while committing data in epoch X - 1, we will encounter a new epoch X. We // want to retrieve X - 2's network total transactions to calculate the number of // transactions that occurred in epoch X - 1. - let network_tx_count_prev_epoch = match system_state_summary.epoch { + let first_tx_sequence_number = match system_state_summary.epoch { // If first epoch change, this number is 0 1 => Ok(0), _ => { @@ -204,18 +223,20 @@ impl CheckpointHandler { } }?; + let epoch_end_info = EpochEndInfo::new(epoch_event_opt.as_ref()); + let epoch_start_info = EpochStartInfo::new( + checkpoint_summary.sequence_number.saturating_add(1), + checkpoint_summary.network_total_transactions, + epoch_event_opt.as_ref(), + ); + Ok(Some(EpochToCommit { last_epoch: Some(EndOfEpochUpdate::new( checkpoint_summary, - &event, - network_tx_count_prev_epoch, + first_tx_sequence_number, + epoch_end_info, )), - new_epoch: StartOfEpochUpdate::new( - system_state_summary, - checkpoint_summary.sequence_number + 1, // first_checkpoint_id - checkpoint_summary.network_total_transactions, - Some(&event), - ), + new_epoch: StartOfEpochUpdate::new(system_state_summary, epoch_start_info), })) } diff --git a/crates/sui-indexer/src/handlers/committer.rs b/crates/sui-indexer/src/handlers/committer.rs index e9b06191047e8..b63e8b42a981e 100644 --- 
a/crates/sui-indexer/src/handlers/committer.rs +++ b/crates/sui-indexer/src/handlers/committer.rs @@ -3,14 +3,16 @@ use std::collections::{BTreeMap, HashMap}; +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; use tap::tap::TapFallible; +use tokio::sync::watch; use tokio_util::sync::CancellationToken; use tracing::instrument; use tracing::{error, info}; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; - use crate::metrics::IndexerMetrics; +use crate::models::raw_checkpoints::StoredRawCheckpoint; use crate::store::IndexerStore; use crate::types::IndexerResult; @@ -22,8 +24,10 @@ pub async fn start_tx_checkpoint_commit_task( state: S, metrics: IndexerMetrics, tx_indexing_receiver: mysten_metrics::metered_channel::Receiver, - mut next_checkpoint_sequence_number: CheckpointSequenceNumber, cancel: CancellationToken, + mut committed_checkpoints_tx: Option>>, + mut next_checkpoint_sequence_number: CheckpointSequenceNumber, + end_checkpoint_opt: Option, ) -> IndexerResult<()> where S: IndexerStore + Clone + Sync + Send + 'static, @@ -60,7 +64,14 @@ where // The batch will consist of contiguous checkpoints and at most one epoch boundary at // the end. 
if batch.len() == checkpoint_commit_batch_size || epoch.is_some() { - commit_checkpoints(&state, batch, epoch, &metrics).await; + commit_checkpoints( + &state, + batch, + epoch, + &metrics, + &mut committed_checkpoints_tx, + ) + .await; batch = vec![]; } if let Some(epoch_number) = epoch_number_option { @@ -72,11 +83,24 @@ where ); })?; } + // stop adding to the commit batch if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } } if !batch.is_empty() { - commit_checkpoints(&state, batch, None, &metrics).await; + commit_checkpoints(&state, batch, None, &metrics, &mut committed_checkpoints_tx).await; batch = vec![]; } + + // stop the commit task if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } } Ok(()) } @@ -95,6 +119,7 @@ async fn commit_checkpoints( indexed_checkpoint_batch: Vec, epoch: Option, metrics: &IndexerMetrics, + committed_checkpoints_tx: &mut Option>>, ) where S: IndexerStore + Clone + Sync + Send + 'static, { @@ -135,8 +160,13 @@ async fn commit_checkpoints( packages_batch.push(packages); } - let first_checkpoint_seq = checkpoint_batch.first().as_ref().unwrap().sequence_number; - let committer_watermark = CommitterWatermark::from(checkpoint_batch.last().unwrap()); + let first_checkpoint_seq = checkpoint_batch.first().unwrap().sequence_number; + let last_checkpoint = checkpoint_batch.last().unwrap(); + let indexer_progress = IndexerProgress { + checkpoint: last_checkpoint.sequence_number, + network_total_transactions: last_checkpoint.network_total_transactions, + }; + let committer_watermark = CommitterWatermark::from(last_checkpoint); let guard = metrics.checkpoint_db_commit_latency.start_timer(); let tx_batch = tx_batch.into_iter().flatten().collect::>(); @@ -156,7 +186,7 
@@ async fn commit_checkpoints( let raw_checkpoints_batch = checkpoint_batch .iter() .map(|c| c.into()) - .collect::>(); + .collect::>(); { let _step_1_guard = metrics.checkpoint_db_commit_latency_step_1.start_timer(); @@ -267,4 +297,13 @@ async fn commit_checkpoints( metrics .thousand_transaction_avg_db_commit_latency .observe(elapsed * 1000.0 / tx_count as f64); + + if let Some(committed_checkpoints_tx) = committed_checkpoints_tx.as_mut() { + if let Err(err) = committed_checkpoints_tx.send(Some(indexer_progress)) { + error!( + "Failed to send committed checkpoints to the watch channel: {}", + err + ); + } + } } diff --git a/crates/sui-indexer/src/handlers/mod.rs b/crates/sui-indexer/src/handlers/mod.rs index a6c6412f3a42c..403ee8e22706c 100644 --- a/crates/sui-indexer/src/handlers/mod.rs +++ b/crates/sui-indexer/src/handlers/mod.rs @@ -92,6 +92,8 @@ impl CommonHandler { &self, cp_receiver: mysten_metrics::metered_channel::Receiver<(CommitterWatermark, T)>, cancel: CancellationToken, + start_checkpoint: u64, + end_checkpoint_opt: Option, ) -> IndexerResult<()> { let checkpoint_commit_batch_size = std::env::var("CHECKPOINT_COMMIT_BATCH_SIZE") .unwrap_or(CHECKPOINT_COMMIT_BATCH_SIZE.to_string()) @@ -104,12 +106,7 @@ impl CommonHandler { // just the checkpoint sequence number, and the tuple is (CommitterWatermark, T). let mut unprocessed: BTreeMap = BTreeMap::new(); let mut tuple_batch = vec![]; - let mut next_cp_to_process = self - .handler - .get_watermark_hi() - .await? 
- .map(|n| n.saturating_add(1)) - .unwrap_or_default(); + let mut next_cp_to_process = start_checkpoint; loop { if cancel.is_cancelled() { @@ -140,7 +137,12 @@ impl CommonHandler { // Process unprocessed checkpoints, even no new checkpoints from stream let checkpoint_lag_limiter = self.handler.get_max_committable_checkpoint().await?; - while next_cp_to_process <= checkpoint_lag_limiter { + let max_commitable_cp = std::cmp::min( + checkpoint_lag_limiter, + end_checkpoint_opt.unwrap_or(u64::MAX), + ); + // Stop pushing to tuple_batch if we've reached the end checkpoint. + while next_cp_to_process <= max_commitable_cp { if let Some(data_tuple) = unprocessed.remove(&next_cp_to_process) { tuple_batch.push(data_tuple); next_cp_to_process += 1; @@ -162,6 +164,16 @@ impl CommonHandler { self.handler.set_watermark_hi(committer_watermark).await?; tuple_batch = vec![]; } + + if let Some(end_checkpoint) = end_checkpoint_opt { + if next_cp_to_process > end_checkpoint { + tracing::info!( + "Reached end checkpoint, stopping handler {}...", + self.handler.name() + ); + return Ok(()); + } + } } Err(IndexerError::ChannelClosed(format!( "Checkpoint channel is closed unexpectedly for handler {}", diff --git a/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs b/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs index 816b416fc3743..d37d532827947 100644 --- a/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs +++ b/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs @@ -90,6 +90,8 @@ pub async fn start_objects_snapshot_handler( metrics: IndexerMetrics, snapshot_config: SnapshotLagConfig, cancel: CancellationToken, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, ) -> IndexerResult<(ObjectsSnapshotHandler, u64)> { info!("Starting object snapshot handler..."); @@ -104,10 +106,20 @@ pub async fn start_objects_snapshot_handler( let objects_snapshot_handler = ObjectsSnapshotHandler::new(store.clone(), sender, metrics.clone(), snapshot_config); 
- let watermark_hi = objects_snapshot_handler.get_watermark_hi().await?; + let next_cp_from_db = objects_snapshot_handler + .get_watermark_hi() + .await? + .map(|cp| cp.saturating_add(1)) + .unwrap_or_default(); + let start_checkpoint = start_checkpoint_opt.unwrap_or(next_cp_from_db); let common_handler = CommonHandler::new(Box::new(objects_snapshot_handler.clone())); - spawn_monitored_task!(common_handler.start_transform_and_load(receiver, cancel)); - Ok((objects_snapshot_handler, watermark_hi.unwrap_or_default())) + spawn_monitored_task!(common_handler.start_transform_and_load( + receiver, + cancel, + start_checkpoint, + end_checkpoint_opt, + )); + Ok((objects_snapshot_handler, start_checkpoint)) } impl ObjectsSnapshotHandler { diff --git a/crates/sui-indexer/src/indexer.rs b/crates/sui-indexer/src/indexer.rs index 240e295179094..d1819a90a7416 100644 --- a/crates/sui-indexer/src/indexer.rs +++ b/crates/sui-indexer/src/indexer.rs @@ -6,7 +6,7 @@ use std::env; use anyhow::Result; use prometheus::Registry; -use tokio::sync::oneshot; +use tokio::sync::{oneshot, watch}; use tokio_util::sync::CancellationToken; use tracing::info; @@ -16,6 +16,7 @@ use mysten_metrics::spawn_monitored_task; use sui_data_ingestion_core::{ DataIngestionMetrics, IndexerExecutor, ProgressStore, ReaderOptions, WorkerPool, }; +use sui_synthetic_ingestion::IndexerProgress; use sui_types::messages_checkpoint::CheckpointSequenceNumber; use crate::build_json_rpc_server; @@ -33,12 +34,13 @@ pub struct Indexer; impl Indexer { pub async fn start_writer( - config: &IngestionConfig, + config: IngestionConfig, store: PgIndexerStore, metrics: IndexerMetrics, snapshot_config: SnapshotLagConfig, retention_config: Option, cancel: CancellationToken, + committed_checkpoints_tx: Option>>, ) -> Result<(), IndexerError> { info!( "Sui Indexer Writer (version {:?}) started...", @@ -46,17 +48,11 @@ impl Indexer { ); info!("Sui Indexer Writer config: {config:?}",); - let primary_watermark = store - 
.get_latest_checkpoint_sequence_number() - .await - .expect("Failed to get latest tx checkpoint sequence number from DB") - .map(|seq| seq + 1) - .unwrap_or_default(); - let extra_reader_options = ReaderOptions { batch_size: config.checkpoint_download_queue_size, timeout_secs: config.checkpoint_download_timeout, data_limit: config.checkpoint_download_queue_size_bytes, + gc_checkpoint_files: config.gc_checkpoint_files, ..Default::default() }; @@ -66,6 +62,8 @@ impl Indexer { metrics.clone(), snapshot_config, cancel.clone(), + config.start_checkpoint, + config.end_checkpoint, ) .await?; @@ -87,6 +85,16 @@ impl Indexer { let mut exit_senders = vec![]; let mut executors = vec![]; + + let (worker, primary_watermark) = new_handlers( + store, + metrics, + cancel.clone(), + committed_checkpoints_tx, + config.start_checkpoint, + config.end_checkpoint, + ) + .await?; // Ingestion task watermarks are snapshotted once on indexer startup based on the // corresponding watermark table before being handed off to the ingestion task. 
let progress_store = ShimIndexerProgressStore::new(vec![ @@ -98,7 +106,7 @@ impl Indexer { 2, DataIngestionMetrics::new(&Registry::new()), ); - let worker = new_handlers(store, metrics, primary_watermark, cancel.clone()).await?; + let worker_pool = WorkerPool::new( worker, "primary".to_string(), diff --git a/crates/sui-indexer/src/lib.rs b/crates/sui-indexer/src/lib.rs index e759370c72798..f40b0fdfcfb8a 100644 --- a/crates/sui-indexer/src/lib.rs +++ b/crates/sui-indexer/src/lib.rs @@ -27,6 +27,7 @@ use errors::IndexerError; pub mod apis; pub mod backfill; +pub mod benchmark; pub mod config; pub mod database; pub mod db; diff --git a/crates/sui-indexer/src/main.rs b/crates/sui-indexer/src/main.rs index 8978d072d8dea..85782cff9689e 100644 --- a/crates/sui-indexer/src/main.rs +++ b/crates/sui-indexer/src/main.rs @@ -3,8 +3,10 @@ use clap::Parser; use sui_indexer::backfill::backfill_runner::BackfillRunner; +use sui_indexer::benchmark::run_indexer_benchmark; use sui_indexer::config::{Command, UploadOptions}; use sui_indexer::database::ConnectionPool; +use sui_indexer::db::setup_postgres::clear_database; use sui_indexer::db::{ check_db_migration_consistency, check_prunable_tables_valid, reset_database, run_migrations, }; @@ -55,12 +57,13 @@ async fn main() -> anyhow::Result<()> { let store = PgIndexerStore::new(pool, upload_options, indexer_metrics.clone()); Indexer::start_writer( - &ingestion_config, + ingestion_config, store, indexer_metrics, snapshot_config, retention_config, CancellationToken::new(), + None, ) .await?; } @@ -70,14 +73,21 @@ async fn main() -> anyhow::Result<()> { Indexer::start_reader(&json_rpc_config, ®istry, pool, CancellationToken::new()) .await?; } - Command::ResetDatabase { force } => { + Command::ResetDatabase { + force, + skip_migrations, + } => { if !force { return Err(anyhow::anyhow!( "Resetting the DB requires use of the `--force` flag", )); } - reset_database(pool.dedicated_connection().await?).await?; + if skip_migrations { + 
clear_database(&mut pool.dedicated_connection().await?).await?; + } else { + reset_database(pool.dedicated_connection().await?).await?; + } } Command::RunMigrations => { run_migrations(pool.dedicated_connection().await?).await?; @@ -98,6 +108,9 @@ async fn main() -> anyhow::Result<()> { IndexerFormalSnapshotRestorer::new(store, restore_config).await?; formal_restorer.restore().await?; } + Command::Benchmark(benchmark_config) => { + run_indexer_benchmark(benchmark_config, pool, indexer_metrics).await; + } } Ok(()) diff --git a/crates/sui-indexer/src/models/epoch.rs b/crates/sui-indexer/src/models/epoch.rs index 0918e50c72c35..d8e943f4c245c 100644 --- a/crates/sui-indexer/src/models/epoch.rs +++ b/crates/sui-indexer/src/models/epoch.rs @@ -117,36 +117,81 @@ pub struct QueryableEpochSystemState { pub system_state: Vec, } -impl StartOfEpochUpdate { +#[derive(Default)] +pub struct EpochStartInfo { + pub first_checkpoint_id: u64, + pub first_tx_sequence_number: u64, + pub total_stake: u64, + pub storage_fund_balance: u64, +} + +impl EpochStartInfo { pub fn new( - new_system_state_summary: SuiSystemStateSummary, first_checkpoint_id: u64, first_tx_sequence_number: u64, - event: Option<&SystemEpochInfoEvent>, + epoch_event_opt: Option<&SystemEpochInfoEvent>, + ) -> Self { + Self { + first_checkpoint_id, + first_tx_sequence_number, + total_stake: epoch_event_opt.map(|e| e.total_stake).unwrap_or_default(), + storage_fund_balance: epoch_event_opt + .map(|e| e.storage_fund_balance) + .unwrap_or_default(), + } + } +} + +impl StartOfEpochUpdate { + pub fn new( + new_system_state_summary: SuiSystemStateSummary, + epoch_start_info: EpochStartInfo, ) -> Self { Self { epoch: new_system_state_summary.epoch as i64, system_state_summary_json: serde_json::to_value(new_system_state_summary.clone()) .unwrap(), - first_checkpoint_id: first_checkpoint_id as i64, - first_tx_sequence_number: first_tx_sequence_number as i64, + first_checkpoint_id: epoch_start_info.first_checkpoint_id as i64, + 
first_tx_sequence_number: epoch_start_info.first_tx_sequence_number as i64, epoch_start_timestamp: new_system_state_summary.epoch_start_timestamp_ms as i64, reference_gas_price: new_system_state_summary.reference_gas_price as i64, protocol_version: new_system_state_summary.protocol_version as i64, - // NOTE: total_stake and storage_fund_balance are about new epoch, - // although the event is generated at the end of the previous epoch, - // the event is optional b/c no such event for the first epoch. - total_stake: event.map(|e| e.total_stake as i64).unwrap_or(0), - storage_fund_balance: event.map(|e| e.storage_fund_balance as i64).unwrap_or(0), + total_stake: epoch_start_info.total_stake as i64, + storage_fund_balance: epoch_start_info.storage_fund_balance as i64, } } } +#[derive(Default)] +pub struct EpochEndInfo { + pub storage_fund_reinvestment: u64, + pub storage_charge: u64, + pub storage_rebate: u64, + pub leftover_storage_fund_inflow: u64, + pub stake_subsidy_amount: u64, + pub total_gas_fees: u64, + pub total_stake_rewards_distributed: u64, +} + +impl EpochEndInfo { + pub fn new(epoch_event_opt: Option<&SystemEpochInfoEvent>) -> Self { + epoch_event_opt.map_or_else(Self::default, |epoch_event| Self { + storage_fund_reinvestment: epoch_event.storage_fund_reinvestment, + storage_charge: epoch_event.storage_charge, + storage_rebate: epoch_event.storage_rebate, + leftover_storage_fund_inflow: epoch_event.leftover_storage_fund_inflow, + stake_subsidy_amount: epoch_event.stake_subsidy_amount, + total_gas_fees: epoch_event.total_gas_fees, + total_stake_rewards_distributed: epoch_event.total_stake_rewards_distributed, + }) + } +} + impl EndOfEpochUpdate { pub fn new( last_checkpoint_summary: &CertifiedCheckpointSummary, - event: &SystemEpochInfoEvent, first_tx_sequence_number: u64, + epoch_end_info: EpochEndInfo, ) -> Self { Self { epoch: last_checkpoint_summary.epoch as i64, @@ -154,13 +199,13 @@ impl EndOfEpochUpdate { - first_tx_sequence_number) as i64, 
last_checkpoint_id: *last_checkpoint_summary.sequence_number() as i64, epoch_end_timestamp: last_checkpoint_summary.timestamp_ms as i64, - storage_fund_reinvestment: event.storage_fund_reinvestment as i64, - storage_charge: event.storage_charge as i64, - storage_rebate: event.storage_rebate as i64, - leftover_storage_fund_inflow: event.leftover_storage_fund_inflow as i64, - stake_subsidy_amount: event.stake_subsidy_amount as i64, - total_gas_fees: event.total_gas_fees as i64, - total_stake_rewards_distributed: event.total_stake_rewards_distributed as i64, + storage_fund_reinvestment: epoch_end_info.storage_fund_reinvestment as i64, + storage_charge: epoch_end_info.storage_charge as i64, + storage_rebate: epoch_end_info.storage_rebate as i64, + leftover_storage_fund_inflow: epoch_end_info.leftover_storage_fund_inflow as i64, + stake_subsidy_amount: epoch_end_info.stake_subsidy_amount as i64, + total_gas_fees: epoch_end_info.total_gas_fees as i64, + total_stake_rewards_distributed: epoch_end_info.total_stake_rewards_distributed as i64, epoch_commitments: bcs::to_bytes( &last_checkpoint_summary .end_of_epoch_data diff --git a/crates/sui-indexer/src/schema.rs b/crates/sui-indexer/src/schema.rs index aceb54597c9c5..447b45557922c 100644 --- a/crates/sui-indexer/src/schema.rs +++ b/crates/sui-indexer/src/schema.rs @@ -354,21 +354,6 @@ diesel::table! { } } -diesel::table! { - tx_recipients (recipient, tx_sequence_number) { - tx_sequence_number -> Int8, - recipient -> Bytea, - sender -> Bytea, - } -} - -diesel::table! { - tx_senders (sender, tx_sequence_number) { - tx_sequence_number -> Int8, - sender -> Bytea, - } -} - diesel::table! 
{ watermarks (pipeline) { pipeline -> Text, @@ -415,7 +400,5 @@ diesel::allow_tables_to_appear_in_same_query!( tx_digests, tx_input_objects, tx_kinds, - tx_recipients, - tx_senders, watermarks, ); diff --git a/crates/sui-indexer/src/store/pg_indexer_store.rs b/crates/sui-indexer/src/store/pg_indexer_store.rs index 2f9aaa5d81cb3..b1d1af7b31ed6 100644 --- a/crates/sui-indexer/src/store/pg_indexer_store.rs +++ b/crates/sui-indexer/src/store/pg_indexer_store.rs @@ -298,13 +298,10 @@ impl PgIndexerStore { let mut connection = self.pool.get().await?; - watermarks::table - .select(watermarks::checkpoint_hi_inclusive) - .filter(watermarks::pipeline.eq("objects_snapshot")) - .first::(&mut connection) + objects_snapshot::table + .select(max(objects_snapshot::checkpoint_sequence_number)) + .first::>(&mut connection) .await - // Handle case where the watermark is not set yet - .optional() .map_err(Into::into) .map(|v| v.map(|v| v as u64)) .context( diff --git a/crates/sui-indexer/src/test_utils.rs b/crates/sui-indexer/src/test_utils.rs index 6a208f8e4c6db..431d0dc5854bc 100644 --- a/crates/sui-indexer/src/test_utils.rs +++ b/crates/sui-indexer/src/test_utils.rs @@ -75,6 +75,8 @@ pub async fn start_indexer_writer_for_testing( retention_config: Option, data_ingestion_path: Option, cancel: Option, + start_checkpoint: Option, + end_checkpoint: Option, ) -> ( PgIndexerStore, JoinHandle>, @@ -117,18 +119,23 @@ pub async fn start_indexer_writer_for_testing( crate::db::reset_database(connection).await.unwrap(); let store_clone = store.clone(); - let mut ingestion_config = IngestionConfig::default(); + let mut ingestion_config = IngestionConfig { + start_checkpoint, + end_checkpoint, + ..Default::default() + }; ingestion_config.sources.data_ingestion_path = data_ingestion_path; let token_clone = token.clone(); tokio::spawn(async move { Indexer::start_writer( - &ingestion_config, + ingestion_config, store_clone, indexer_metrics, snapshot_config, retention_config, token_clone, + None, ) 
.await }) @@ -250,6 +257,42 @@ pub async fn set_up( None, Some(data_ingestion_path), None, /* cancel */ + None, /* start_checkpoint */ + None, /* end_checkpoint */ + ) + .await; + (server_handle, pg_store, pg_handle, database) +} + +pub async fn set_up_with_start_and_end_checkpoints( + sim: Arc, + data_ingestion_path: PathBuf, + start_checkpoint: u64, + end_checkpoint: u64, +) -> ( + JoinHandle<()>, + PgIndexerStore, + JoinHandle>, + TempDb, +) { + let database = TempDb::new().unwrap(); + let server_url: SocketAddr = format!("127.0.0.1:{}", get_available_port()) + .parse() + .unwrap(); + let server_handle = tokio::spawn(async move { + sui_rest_api::RestService::new_without_version(sim) + .start_service(server_url) + .await; + }); + // Starts indexer + let (pg_store, pg_handle, _) = start_indexer_writer_for_testing( + database.database().url().as_str().to_owned(), + None, + None, + Some(data_ingestion_path), + None, /* cancel */ + Some(start_checkpoint), + Some(end_checkpoint), ) .await; (server_handle, pg_store, pg_handle, database) diff --git a/crates/sui-indexer/tests/ingestion_tests.rs b/crates/sui-indexer/tests/ingestion_tests.rs index f2207d5091783..2b6a31286b27a 100644 --- a/crates/sui-indexer/tests/ingestion_tests.rs +++ b/crates/sui-indexer/tests/ingestion_tests.rs @@ -14,7 +14,9 @@ use sui_indexer::models::{ }; use sui_indexer::schema::{checkpoints, objects, objects_snapshot, transactions}; use sui_indexer::store::indexer_store::IndexerStore; -use sui_indexer::test_utils::{set_up, wait_for_checkpoint, wait_for_objects_snapshot}; +use sui_indexer::test_utils::{ + set_up, set_up_with_start_and_end_checkpoints, wait_for_checkpoint, wait_for_objects_snapshot, +}; use sui_indexer::types::EventIndex; use sui_indexer::types::IndexedDeletedObject; use sui_indexer::types::IndexedObject; @@ -71,6 +73,72 @@ pub async fn test_transaction_table() -> Result<(), IndexerError> { Ok(()) } +#[tokio::test] +pub async fn test_checkpoint_range_ingestion() -> Result<(), 
IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + // Create multiple checkpoints + for _ in 0..10 { + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction).unwrap(); + assert!(err.is_none()); + sim.create_checkpoint(); + } + + // Set up indexer with specific start and end checkpoints + let start_checkpoint = 2; + let end_checkpoint = 4; + let (_, pg_store, _, _database) = set_up_with_start_and_end_checkpoints( + Arc::new(sim), + data_ingestion_path, + start_checkpoint, + end_checkpoint, + ) + .await; + + // Wait for the indexer to catch up to the end checkpoint + wait_for_checkpoint(&pg_store, end_checkpoint).await?; + + // Verify that only checkpoints within the specified range were ingested + let mut connection = pg_store.pool().dedicated_connection().await.unwrap(); + let checkpoint_count: i64 = checkpoints::table + .count() + .get_result(&mut connection) + .await + .expect("Failed to count checkpoints"); + assert_eq!(checkpoint_count, 3, "Expected 3 checkpoints to be ingested"); + + // Verify the range of ingested checkpoints + let min_checkpoint = checkpoints::table + .select(diesel::dsl::min(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get min checkpoint") + .expect("Min checkpoint should be Some"); + let max_checkpoint = checkpoints::table + .select(diesel::dsl::max(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get max checkpoint") + .expect("Max checkpoint should be Some"); + assert_eq!( + min_checkpoint, start_checkpoint as i64, + "Minimum ingested checkpoint should be {}", + start_checkpoint + ); + assert_eq!( + max_checkpoint, end_checkpoint as i64, + "Maximum ingested checkpoint should 
be {}", + end_checkpoint + ); + + Ok(()) +} + #[tokio::test] pub async fn test_object_type() -> Result<(), IndexerError> { let tempdir = tempdir().unwrap(); diff --git a/crates/sui-indexer/tests/json_rpc_tests.rs b/crates/sui-indexer/tests/json_rpc_tests.rs new file mode 100644 index 0000000000000..15e501a5f0aa2 --- /dev/null +++ b/crates/sui-indexer/tests/json_rpc_tests.rs @@ -0,0 +1,243 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::path::PathBuf; + +use sui_json_rpc_api::{CoinReadApiClient, IndexerApiClient, ReadApiClient}; +use sui_json_rpc_types::{ + CoinPage, EventFilter, SuiObjectDataOptions, SuiObjectResponse, SuiObjectResponseQuery, +}; +use sui_swarm_config::genesis_config::DEFAULT_GAS_AMOUNT; +use sui_test_transaction_builder::publish_package; +use sui_types::{event::EventID, transaction::CallArg}; +use test_cluster::TestClusterBuilder; + +#[tokio::test] +async fn test_get_owned_objects() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let data_option = SuiObjectDataOptions::new().with_owner(); + let objects = http_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + let fullnode_objects = cluster + .fullnode_handle + .rpc_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + assert_eq!(5, objects.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. 
+ assert_eq!(objects, fullnode_objects); + + for obj in &objects { + let oref = obj.clone().into_object().unwrap(); + let result = http_client + .get_object(oref.object_id, Some(data_option.clone())) + .await?; + assert!( + matches!(result, SuiObjectResponse { data: Some(object), .. } if oref.object_id == object.object_id && object.owner.unwrap().get_owner_address()? == address) + ); + } + + // Multiget objectIDs test + let object_ids: Vec<_> = objects + .iter() + .map(|o| o.object().unwrap().object_id) + .collect(); + + let object_resp = http_client + .multi_get_objects(object_ids.clone(), None) + .await?; + let fullnode_object_resp = cluster + .fullnode_handle + .rpc_client + .multi_get_objects(object_ids, None) + .await?; + assert_eq!(5, object_resp.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. + assert_eq!(object_resp, fullnode_object_resp); + Ok(()) +} + +#[tokio::test] +async fn test_get_coins() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let result: CoinPage = http_client.get_coins(address, None, None, None).await?; + assert_eq!(5, result.data.len()); + assert!(!result.has_next_page); + + // We should get 0 coins for a non-existent coin type. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::TestCoin".into()), None, None) + .await?; + assert_eq!(0, result.data.len()); + + // We should get all the 5 coins for SUI with the right balance. 
+ let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, None) + .await?; + assert_eq!(5, result.data.len()); + assert_eq!(result.data[0].balance, DEFAULT_GAS_AMOUNT); + assert!(!result.has_next_page); + + // When we have more than 3 coins, we should get a next page. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, Some(3)) + .await?; + assert_eq!(3, result.data.len()); + assert!(result.has_next_page); + + // We should get the remaining 2 coins with the next page. + let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + Some(3), + ) + .await?; + assert_eq!(2, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + // No more coins after the last page. + let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + None, + ) + .await?; + assert_eq!(0, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + Ok(()) +} + +#[tokio::test] +async fn test_events() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + // publish package + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests/move_test_code"); + let move_package = publish_package(&cluster.wallet, path).await.0; + + // execute a transaction to generate events + let function = "emit_3"; + let arguments = vec![CallArg::Pure(bcs::to_bytes(&5u64).unwrap())]; + let transaction = cluster + .test_transaction_builder() + .await + .move_call(move_package, "events_queries", function, arguments) + .build(); + let signed_transaction = cluster.wallet.sign_transaction(&transaction); + cluster.execute_transaction(signed_transaction).await; + + // query for events + let http_client = cluster.rpc_client(); + + // start with ascending order + let event_filter = EventFilter::All([]); + let mut cursor: 
Option = None; + let mut limit = None; + let mut descending_order = Some(false); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let forward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + assert_eq!(forward_paginated_events[0], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, result.data.len()); + assert_eq!(forward_paginated_events[1..], result.data[..]); + + // now descending order - make sure to reset parameters + cursor = None; + descending_order = Some(true); + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let backward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + assert_eq!(backward_paginated_events[0], result.data[0]); + assert_eq!(forward_paginated_events[2], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, result.data.len()); + assert_eq!(backward_paginated_events[1..], result.data[..]); + + // check that the forward and backward paginated events are in reverse order + assert_eq!( + forward_paginated_events + .into_iter() + 
.rev() + .collect::>(), + backward_paginated_events + ); + + Ok(()) +} diff --git a/crates/sui-indexer/tests/move_test_code/Move.toml b/crates/sui-indexer/tests/move_test_code/Move.toml new file mode 100644 index 0000000000000..09e9e50f000f0 --- /dev/null +++ b/crates/sui-indexer/tests/move_test_code/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "move_test_code" +version = "0.0.1" +edition = "2024.beta" + +[dependencies] +Sui = { local = "../../../sui-framework/packages/sui-framework" } + +[addresses] +move_test_code = "0x0" diff --git a/crates/sui-indexer/tests/move_test_code/sources/events.move b/crates/sui-indexer/tests/move_test_code/sources/events.move new file mode 100644 index 0000000000000..f32cc7fe109f3 --- /dev/null +++ b/crates/sui-indexer/tests/move_test_code/sources/events.move @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + + +module move_test_code::events_queries { + use sui::event; + + public struct EventA has copy, drop { + new_value: u64 + } + + public entry fun emit_1(value: u64) { + event::emit(EventA { new_value: value }) + } + + public entry fun emit_2(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}) + } + + public entry fun emit_3(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}); + event::emit(EventA { new_value: value + 2}); + } +} diff --git a/crates/sui-json-rpc-api/src/lib.rs b/crates/sui-json-rpc-api/src/lib.rs index 4e7072c73f07e..3f14c38516083 100644 --- a/crates/sui-json-rpc-api/src/lib.rs +++ b/crates/sui-json-rpc-api/src/lib.rs @@ -337,6 +337,7 @@ pub fn read_size_from_env(var_name: &str) -> Option { .ok() } +pub const CLIENT_REQUEST_METHOD_HEADER: &str = "client-request-method"; pub const CLIENT_SDK_TYPE_HEADER: &str = "client-sdk-type"; /// The version number of the SDK itself. This can be different from the API version. 
pub const CLIENT_SDK_VERSION_HEADER: &str = "client-sdk-version"; diff --git a/crates/sui-json-rpc-types/src/sui_transaction.rs b/crates/sui-json-rpc-types/src/sui_transaction.rs index 67859f5591354..d67345aa35db1 100644 --- a/crates/sui-json-rpc-types/src/sui_transaction.rs +++ b/crates/sui-json-rpc-types/src/sui_transaction.rs @@ -2345,7 +2345,7 @@ impl From for SuiTransactionBlockEffects { #[serde_as] #[derive(Clone, Debug, JsonSchema, Serialize, Deserialize)] pub enum TransactionFilter { - /// Query by checkpoint. + /// CURRENTLY NOT SUPPORTED. Query by checkpoint. Checkpoint( #[schemars(with = "BigInt")] #[serde_as(as = "Readable, _>")] @@ -2369,7 +2369,7 @@ pub enum TransactionFilter { ToAddress(SuiAddress), /// Query by sender and recipient address. FromAndToAddress { from: SuiAddress, to: SuiAddress }, - /// Query txs that have a given address as sender or recipient. + /// CURRENTLY NOT SUPPORTED. Query txs that have a given address as sender or recipient. FromOrToAddress { addr: SuiAddress }, /// Query by transaction kind TransactionKind(String), diff --git a/crates/sui-json-rpc/src/balance_changes.rs b/crates/sui-json-rpc/src/balance_changes.rs index eaf4480832d08..60eb9199e9028 100644 --- a/crates/sui-json-rpc/src/balance_changes.rs +++ b/crates/sui-json-rpc/src/balance_changes.rs @@ -148,7 +148,7 @@ async fn fetch_coins, E>( o.owner, coin_type, // we know this is a coin, safe to unwrap - Coin::extract_balance_if_coin(&o).unwrap().unwrap(), + Coin::extract_balance_if_coin(&o).unwrap().unwrap().1, )) } } diff --git a/crates/sui-json-rpc/src/coin_api.rs b/crates/sui-json-rpc/src/coin_api.rs index aeda2cb6dcb38..43a880f525513 100644 --- a/crates/sui-json-rpc/src/coin_api.rs +++ b/crates/sui-json-rpc/src/coin_api.rs @@ -276,9 +276,7 @@ async fn find_package_object_id( spawn_monitored_task!(async move { let publish_txn_digest = state.find_publish_txn_digest(package_id)?; - let (_, effect) = state - .get_executed_transaction_and_effects(publish_txn_digest, 
kv_store) - .await?; + let effect = kv_store.get_fx_by_tx_digest(publish_txn_digest).await?; for ((id, _, _), _) in effect.created() { if let Ok(object_read) = state.get_object_read(&id) { @@ -290,7 +288,7 @@ async fn find_package_object_id( } } Err(SuiRpcInputError::GenericNotFound(format!( - "Cannot find object [{}] from [{}] package event.", + "Cannot find object with type [{}] from [{}] package created objects.", object_struct_tag, package_id, )) .into()) @@ -1401,8 +1399,8 @@ mod tests { .expect_find_publish_txn_digest() .return_once(move |_| Ok(transaction_digest)); mock_state - .expect_get_executed_transaction_and_effects() - .return_once(move |_, _| Ok((create_fake_transaction(), transaction_effects))); + .expect_multi_get() + .return_once(move |_, _, _| Ok((vec![], vec![Some(transaction_effects)], vec![]))); let coin_read_api = CoinReadApi::new_for_tests(Arc::new(mock_state), None); let response = coin_read_api.get_total_supply(coin_name.clone()).await; @@ -1410,9 +1408,9 @@ mod tests { assert!(response.is_err()); let error_result = response.unwrap_err(); let error_object: ErrorObjectOwned = error_result.into(); - let expected = expect!["-32602"]; + let expected = expect!["-32000"]; expected.assert_eq(&error_object.code().to_string()); - let expected = expect!["Cannot find object [0x2::coin::TreasuryCap<0xf::test_coin::TEST_COIN>] from [0x000000000000000000000000000000000000000000000000000000000000000f] package event."]; + let expected = expect!["task 1 panicked"]; expected.assert_eq(error_object.message()); } diff --git a/crates/sui-json-rpc/src/lib.rs b/crates/sui-json-rpc/src/lib.rs index 3075471af9d1b..d9704bbcef767 100644 --- a/crates/sui-json-rpc/src/lib.rs +++ b/crates/sui-json-rpc/src/lib.rs @@ -25,7 +25,8 @@ pub use balance_changes::*; pub use object_changes::*; pub use sui_config::node::ServerType; use sui_json_rpc_api::{ - CLIENT_SDK_TYPE_HEADER, CLIENT_SDK_VERSION_HEADER, CLIENT_TARGET_API_VERSION_HEADER, + CLIENT_REQUEST_METHOD_HEADER, 
CLIENT_SDK_TYPE_HEADER, CLIENT_SDK_VERSION_HEADER, + CLIENT_TARGET_API_VERSION_HEADER, }; use sui_open_rpc::{Module, Project}; @@ -121,6 +122,7 @@ impl JsonRpcServerBuilder { HeaderName::from_static(CLIENT_SDK_VERSION_HEADER), HeaderName::from_static(CLIENT_TARGET_API_VERSION_HEADER), HeaderName::from_static(APP_NAME_HEADER), + HeaderName::from_static(CLIENT_REQUEST_METHOD_HEADER), ]); Ok(cors) } diff --git a/crates/sui-json/Cargo.toml b/crates/sui-json/Cargo.toml index 15ae8d06f647b..c05042ef52717 100644 --- a/crates/sui-json/Cargo.toml +++ b/crates/sui-json/Cargo.toml @@ -18,7 +18,7 @@ sui-types.workspace = true move-binary-format.workspace = true move-bytecode-utils.workspace = true move-core-types.workspace = true -fastcrypto = { workspace = true } +fastcrypto.workspace = true [dev-dependencies] test-fuzz.workspace = true diff --git a/crates/sui-kvstore/Cargo.toml b/crates/sui-kvstore/Cargo.toml new file mode 100644 index 0000000000000..a06bacd9ace66 --- /dev/null +++ b/crates/sui-kvstore/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "sui-kvstore" +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" +version.workspace = true + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +base64.workspace = true +bcs.workspace = true +http.workspace = true +gcp_auth.workspace = true +prometheus.workspace = true +prost.workspace = true +prost-types.workspace = true +serde.workspace = true +sui-data-ingestion-core.workspace = true +sui-types.workspace = true +telemetry-subscribers.workspace = true +tokio = { workspace = true, features = ["full"] } +tonic = {version = "0.12.2",features = ["tls", "transport"] } +tracing.workspace = true diff --git a/crates/sui-kvstore/src/bigtable/README.md b/crates/sui-kvstore/src/bigtable/README.md new file mode 100644 index 0000000000000..bc1abd9166323 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/README.md @@ -0,0 +1,16 @@ +## Setup + +### Local development +- install the `cbt` 
CLI tool +```sh +gcloud components install cbt +``` +- start the emulator +```sh +gcloud beta emulators bigtable start +``` +- set `BIGTABLE_EMULATOR_HOST` environment variable +```sh +$(gcloud beta emulators bigtable env-init) +``` +- Run `./src/bigtable/init.sh` to configure the emulator \ No newline at end of file diff --git a/crates/sui-kvstore/src/bigtable/client.rs b/crates/sui-kvstore/src/bigtable/client.rs new file mode 100644 index 0000000000000..9bbd7b138d023 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/client.rs @@ -0,0 +1,463 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::bigtable::proto::bigtable::v2::bigtable_client::BigtableClient as BigtableInternalClient; +use crate::bigtable::proto::bigtable::v2::mutate_rows_request::Entry; +use crate::bigtable::proto::bigtable::v2::mutation::SetCell; +use crate::bigtable::proto::bigtable::v2::read_rows_response::cell_chunk::RowStatus; +use crate::bigtable::proto::bigtable::v2::{ + mutation, MutateRowsRequest, MutateRowsResponse, Mutation, ReadRowsRequest, RowSet, +}; +use crate::{Checkpoint, KeyValueStoreReader, KeyValueStoreWriter, TransactionData}; +use anyhow::{anyhow, Result}; +use async_trait::async_trait; +use gcp_auth::{Token, TokenProvider}; +use http::{HeaderValue, Request, Response}; +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, RwLock}; +use std::task::{Context, Poll}; +use std::time::Duration; +use sui_types::base_types::TransactionDigest; +use sui_types::digests::CheckpointDigest; +use sui_types::full_checkpoint_content::CheckpointData; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use sui_types::object::Object; +use sui_types::storage::ObjectKey; +use tonic::body::BoxBody; +use tonic::codegen::Service; +use tonic::transport::{Certificate, Channel, ClientTlsConfig}; +use tonic::Streaming; +use tracing::error; + +const OBJECTS_TABLE: &str = "objects"; +const TRANSACTIONS_TABLE: &str = "transactions"; +const 
CHECKPOINTS_TABLE: &str = "checkpoints"; +const CHECKPOINTS_BY_DIGEST_TABLE: &str = "checkpoints_by_digest"; + +const COLUMN_FAMILY_NAME: &str = "sui"; +const DEFAULT_COLUMN_QUALIFIER: &str = ""; +const CHECKPOINT_SUMMARY_COLUMN_QUALIFIER: &str = "s"; +const CHECKPOINT_SIGNATURES_COLUMN_QUALIFIER: &str = "sg"; +const CHECKPOINT_CONTENTS_COLUMN_QUALIFIER: &str = "c"; +const TRANSACTION_COLUMN_QUALIFIER: &str = "tx"; +const EFFECTS_COLUMN_QUALIFIER: &str = "ef"; +const EVENTS_COLUMN_QUALIFIER: &str = "ev"; +const TIMESTAMP_COLUMN_QUALIFIER: &str = "ts"; +const CHECKPOINT_NUMBER_COLUMN_QUALIFIER: &str = "cn"; + +type Bytes = Vec; + +#[derive(Clone)] +struct AuthChannel { + channel: Channel, + policy: String, + token_provider: Option>, + token: Arc>>>, +} + +#[derive(Clone)] +pub struct BigTableClient { + table_prefix: String, + client: BigtableInternalClient, +} + +#[async_trait] +impl KeyValueStoreWriter for BigTableClient { + async fn save_objects(&mut self, objects: &[&Object]) -> Result<()> { + let mut items = Vec::with_capacity(objects.len()); + for object in objects { + let object_key = ObjectKey(object.id(), object.version()); + items.push(( + Self::raw_object_key(&object_key)?, + vec![(DEFAULT_COLUMN_QUALIFIER, bcs::to_bytes(object)?)], + )); + } + self.multi_set(OBJECTS_TABLE, items).await + } + + async fn save_transactions(&mut self, transactions: &[TransactionData]) -> Result<()> { + let mut items = Vec::with_capacity(transactions.len()); + for transaction in transactions { + let cells = vec![ + ( + TRANSACTION_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.transaction)?, + ), + ( + EFFECTS_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.effects)?, + ), + (EVENTS_COLUMN_QUALIFIER, bcs::to_bytes(&transaction.events)?), + ( + TIMESTAMP_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.timestamp)?, + ), + ( + CHECKPOINT_NUMBER_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.checkpoint_number)?, + ), + ]; + 
items.push((transaction.transaction.digest().inner().to_vec(), cells)); + } + self.multi_set(TRANSACTIONS_TABLE, items).await + } + + async fn save_checkpoint(&mut self, checkpoint: &CheckpointData) -> Result<()> { + let summary = &checkpoint.checkpoint_summary.data(); + let contents = &checkpoint.checkpoint_contents; + let signatures = &checkpoint.checkpoint_summary.auth_sig(); + let key = summary.sequence_number.to_be_bytes().to_vec(); + let cells = vec![ + (CHECKPOINT_SUMMARY_COLUMN_QUALIFIER, bcs::to_bytes(summary)?), + ( + CHECKPOINT_SIGNATURES_COLUMN_QUALIFIER, + bcs::to_bytes(signatures)?, + ), + ( + CHECKPOINT_CONTENTS_COLUMN_QUALIFIER, + bcs::to_bytes(contents)?, + ), + ]; + self.multi_set(CHECKPOINTS_TABLE, [(key.clone(), cells)]) + .await?; + self.multi_set( + CHECKPOINTS_BY_DIGEST_TABLE, + [( + checkpoint.checkpoint_summary.digest().inner().to_vec(), + vec![(DEFAULT_COLUMN_QUALIFIER, key)], + )], + ) + .await + } +} + +#[async_trait] +impl KeyValueStoreReader for BigTableClient { + async fn get_objects(&mut self, object_keys: &[ObjectKey]) -> Result> { + let keys: Result<_, _> = object_keys.iter().map(Self::raw_object_key).collect(); + let mut objects = vec![]; + for row in self.multi_get(OBJECTS_TABLE, keys?).await? { + for (_, value) in row { + objects.push(bcs::from_bytes(&value)?); + } + } + Ok(objects) + } + + async fn get_transactions( + &mut self, + transactions: &[TransactionDigest], + ) -> Result> { + let keys = transactions.iter().map(|tx| tx.inner().to_vec()).collect(); + let mut result = vec![]; + for row in self.multi_get(TRANSACTIONS_TABLE, keys).await? { + let mut transaction = None; + let mut effects = None; + let mut events = None; + let mut timestamp = 0; + let mut checkpoint_number = 0; + + for (column, value) in row { + match std::str::from_utf8(&column)? 
{ + TRANSACTION_COLUMN_QUALIFIER => transaction = Some(bcs::from_bytes(&value)?), + EFFECTS_COLUMN_QUALIFIER => effects = Some(bcs::from_bytes(&value)?), + EVENTS_COLUMN_QUALIFIER => events = Some(bcs::from_bytes(&value)?), + TIMESTAMP_COLUMN_QUALIFIER => timestamp = bcs::from_bytes(&value)?, + CHECKPOINT_NUMBER_COLUMN_QUALIFIER => { + checkpoint_number = bcs::from_bytes(&value)? + } + _ => error!("unexpected column {:?} in transactions table", column), + } + } + result.push(TransactionData { + transaction: transaction.ok_or_else(|| anyhow!("transaction field is missing"))?, + effects: effects.ok_or_else(|| anyhow!("effects field is missing"))?, + events: events.ok_or_else(|| anyhow!("events field is missing"))?, + timestamp, + checkpoint_number, + }) + } + Ok(result) + } + + async fn get_checkpoints( + &mut self, + sequence_numbers: &[CheckpointSequenceNumber], + ) -> Result> { + let keys = sequence_numbers + .iter() + .map(|sq| sq.to_be_bytes().to_vec()) + .collect(); + let mut checkpoints = vec![]; + for row in self.multi_get(CHECKPOINTS_TABLE, keys).await? { + let mut summary = None; + let mut contents = None; + let mut signatures = None; + for (column, value) in row { + match std::str::from_utf8(&column)? { + CHECKPOINT_SUMMARY_COLUMN_QUALIFIER => summary = Some(bcs::from_bytes(&value)?), + CHECKPOINT_CONTENTS_COLUMN_QUALIFIER => { + contents = Some(bcs::from_bytes(&value)?) + } + CHECKPOINT_SIGNATURES_COLUMN_QUALIFIER => { + signatures = Some(bcs::from_bytes(&value)?) 
+ } + _ => error!("unexpected column {:?} in checkpoints table", column), + } + } + let checkpoint = Checkpoint { + summary: summary.ok_or_else(|| anyhow!("summary field is missing"))?, + contents: contents.ok_or_else(|| anyhow!("contents field is missing"))?, + signatures: signatures.ok_or_else(|| anyhow!("signatures field is missing"))?, + }; + checkpoints.push(checkpoint); + } + Ok(checkpoints) + } + + async fn get_checkpoint_by_digest( + &mut self, + digest: CheckpointDigest, + ) -> Result> { + let key = digest.inner().to_vec(); + let mut response = self + .multi_get(CHECKPOINTS_BY_DIGEST_TABLE, vec![key]) + .await?; + if let Some(row) = response.pop() { + if let Some((_, value)) = row.into_iter().next() { + let sequence_number = u64::from_be_bytes(value.as_slice().try_into()?); + if let Some(chk) = self.get_checkpoints(&[sequence_number]).await?.pop() { + return Ok(Some(chk)); + } + } + } + Ok(None) + } +} + +impl BigTableClient { + pub async fn new_local(instance_id: String) -> Result { + let emulator_host = std::env::var("BIGTABLE_EMULATOR_HOST")?; + let auth_channel = AuthChannel { + channel: Channel::from_shared(format!("http://{emulator_host}"))?.connect_lazy(), + policy: "https://www.googleapis.com/auth/bigtable.data".to_string(), + token_provider: None, + token: Arc::new(RwLock::new(None)), + }; + Ok(Self { + table_prefix: format!("projects/emulator/instances/{}/tables/", instance_id), + client: BigtableInternalClient::new(auth_channel), + }) + } + + pub async fn new_remote( + instance_id: String, + is_read_only: bool, + timeout: Option, + ) -> Result { + let policy = if is_read_only { + "https://www.googleapis.com/auth/bigtable.data.readonly" + } else { + "https://www.googleapis.com/auth/bigtable.data" + }; + let token_provider = gcp_auth::provider().await?; + let tls_config = ClientTlsConfig::new() + .ca_certificate(Certificate::from_pem(include_bytes!("./proto/google.pem"))) + .domain_name("bigtable.googleapis.com"); + let mut endpoint = 
Channel::from_static("https://bigtable.googleapis.com") + .http2_keep_alive_interval(Duration::from_secs(60)) + .keep_alive_while_idle(true) + .tls_config(tls_config)?; + if let Some(timeout) = timeout { + endpoint = endpoint.timeout(timeout); + } + let table_prefix = format!( + "projects/{}/instances/{}/tables/", + token_provider.project_id().await?, + instance_id + ); + let auth_channel = AuthChannel { + channel: endpoint.connect_lazy(), + policy: policy.to_string(), + token_provider: Some(token_provider), + token: Arc::new(RwLock::new(None)), + }; + Ok(Self { + table_prefix, + client: BigtableInternalClient::new(auth_channel), + }) + } + + pub async fn mutate_rows( + &mut self, + request: MutateRowsRequest, + ) -> Result> { + Ok(self.client.mutate_rows(request).await?.into_inner()) + } + + pub async fn read_rows( + &mut self, + request: ReadRowsRequest, + ) -> Result, Vec<(Vec, Vec)>)>> { + let mut result = vec![]; + let mut response = self.client.read_rows(request).await?.into_inner(); + + let mut row_key = None; + let mut row = vec![]; + let mut cell_value = vec![]; + let mut cell_name = None; + let mut timestamp = 0; + + while let Some(message) = response.message().await? 
{ + for mut chunk in message.chunks.into_iter() { + // new row check + if !chunk.row_key.is_empty() { + row_key = Some(chunk.row_key); + } + match chunk.qualifier { + // new cell started + Some(qualifier) => { + if let Some(cell_name) = cell_name { + row.push((cell_name, cell_value)); + cell_value = vec![]; + } + cell_name = Some(qualifier); + timestamp = chunk.timestamp_micros; + cell_value.append(&mut chunk.value); + } + None => { + if chunk.timestamp_micros == 0 { + cell_value.append(&mut chunk.value); + } else if chunk.timestamp_micros >= timestamp { + // newer version of cell is available + timestamp = chunk.timestamp_micros; + cell_value = chunk.value; + } + } + } + if chunk.row_status.is_some() { + if let Some(RowStatus::CommitRow(_)) = chunk.row_status { + if let Some(cell_name) = cell_name { + row.push((cell_name, cell_value)); + } + if let Some(row_key) = row_key { + result.push((row_key, row)); + } + } + row_key = None; + row = vec![]; + cell_value = vec![]; + cell_name = None; + } + } + } + Ok(result) + } + + async fn multi_set( + &mut self, + table_name: &str, + values: impl IntoIterator)> + std::marker::Send, + ) -> Result<()> { + let mut entries = vec![]; + for (row_key, cells) in values { + let mutations = cells + .into_iter() + .map(|(column_name, value)| Mutation { + mutation: Some(mutation::Mutation::SetCell(SetCell { + family_name: COLUMN_FAMILY_NAME.to_string(), + column_qualifier: column_name.to_owned().into_bytes(), + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. 
+ timestamp_micros: -1, + value, + })), + }) + .collect(); + entries.push(Entry { row_key, mutations }); + } + let request = MutateRowsRequest { + table_name: format!("{}{}", self.table_prefix, table_name), + entries, + ..MutateRowsRequest::default() + }; + self.mutate_rows(request).await?; + Ok(()) + } + + pub async fn multi_get( + &mut self, + table_name: &str, + keys: Vec>, + ) -> Result>> { + let request = ReadRowsRequest { + table_name: format!("{}{}", self.table_prefix, table_name), + rows_limit: keys.len() as i64, + rows: Some(RowSet { + row_keys: keys, + row_ranges: vec![], + }), + ..ReadRowsRequest::default() + }; + let mut result = vec![]; + for (_, cells) in self.read_rows(request).await? { + result.push(cells); + } + Ok(result) + } + + fn raw_object_key(object_key: &ObjectKey) -> Result> { + let mut raw_key = object_key.0.to_vec(); + raw_key.extend(object_key.1.value().to_be_bytes()); + Ok(raw_key) + } +} + +impl Service> for AuthChannel { + type Response = Response; + type Error = Box; + #[allow(clippy::type_complexity)] + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.channel.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, mut request: Request) -> Self::Future { + let cloned_channel = self.channel.clone(); + let cloned_token = self.token.clone(); + let mut inner = std::mem::replace(&mut self.channel, cloned_channel); + let policy = self.policy.clone(); + let token_provider = self.token_provider.clone(); + + let mut auth_token = None; + if token_provider.is_some() { + let guard = self.token.read().expect("failed to acquire a read lock"); + if let Some(token) = &*guard { + if !token.has_expired() { + auth_token = Some(token.clone()); + } + } + } + + Box::pin(async move { + if let Some(ref provider) = token_provider { + let token = match auth_token { + None => { + let new_token = provider.token(&[policy.as_ref()]).await?; + let mut guard = cloned_token.write().unwrap(); + *guard = 
Some(new_token.clone()); + new_token + } + Some(token) => token, + }; + let token_string = token.as_str().parse::()?; + let header = + HeaderValue::from_str(format!("Bearer {}", token_string.as_str()).as_str())?; + request.headers_mut().insert("authorization", header); + } + Ok(inner.call(request).await?) + }) + } +} diff --git a/crates/sui-kvstore/src/bigtable/init.sh b/crates/sui-kvstore/src/bigtable/init.sh new file mode 100755 index 0000000000000..f96ac5c1e9827 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/init.sh @@ -0,0 +1,20 @@ +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 +INSTANCE_ID=${1:-sui} +command=( + cbt + -instance + "$INSTANCE_ID" +) +if [[ -n $BIGTABLE_EMULATOR_HOST ]]; then + command+=(-project emulator) +fi + +for table in objects transactions checkpoints checkpoints_by_digest; do + ( + set -x + "${command[@]}" createtable $table + "${command[@]}" createfamily $table sui + "${command[@]}" setgcpolicy $table sui maxversions=1 + ) +done diff --git a/crates/sui-kvstore/src/bigtable/mod.rs b/crates/sui-kvstore/src/bigtable/mod.rs new file mode 100644 index 0000000000000..9be9541c15ec4 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/mod.rs @@ -0,0 +1,6 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod client; +mod proto; +pub(crate) mod worker; diff --git a/crates/sui-kvstore/src/bigtable/proto.rs b/crates/sui-kvstore/src/bigtable/proto.rs new file mode 100644 index 0000000000000..3d976cad4b54f --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto.rs @@ -0,0 +1,14 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::all)] +#[path = "proto"] +pub mod bigtable { + #[path = "google.bigtable.v2.rs"] + pub mod v2; +} + +#[path = "proto/google.rpc.rs"] +pub mod rpc; + +#[path = "proto/google.api.rs"] +pub mod api; diff --git a/crates/sui-kvstore/src/bigtable/proto/google.api.rs b/crates/sui-kvstore/src/bigtable/proto/google.api.rs new file mode 100644 index 0000000000000..36a4d5390e66e --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.api.rs @@ -0,0 +1,1591 @@ +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Http { + /// A list of HTTP configuration rules that apply to individual API methods. + /// + /// **NOTE:** All service configuration rules follow "last one wins" order. + #[prost(message, repeated, tag = "1")] + pub rules: ::prost::alloc::vec::Vec, + /// When set to true, URL path parameters will be fully URI-decoded except in + /// cases of single segment matches in reserved expansion, where "%2F" will be + /// left encoded. + /// + /// The default behavior is to not decode RFC 6570 reserved characters in multi + /// segment matches. + #[prost(bool, tag = "2")] + pub fully_decode_reserved_expansion: bool, +} +/// ```ignore +/// # gRPC Transcoding +/// +/// gRPC Transcoding is a feature for mapping between a gRPC method and one or +/// more HTTP REST endpoints. It allows developers to build a single API service +/// that supports both gRPC APIs and REST APIs. Many systems, including [Google +/// APIs](), +/// [Cloud Endpoints](), [gRPC +/// Gateway](), +/// and \[Envoy\]() proxy support this feature +/// and use it for large scale production services. +/// +/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +/// how different portions of the gRPC request message are mapped to the URL +/// path, URL query parameters, and HTTP request body. It also controls how the +/// gRPC response message is mapped to the HTTP response body. 
`HttpRule` is +/// typically specified as an `google.api.http` annotation on the gRPC method. +/// +/// Each mapping specifies a URL path template and an HTTP method. The path +/// template may refer to one or more fields in the gRPC request message, as long +/// as each field is a non-repeated field with a primitive (non-message) type. +/// The path template controls how fields of the request message are mapped to +/// the URL path. +/// +/// Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/{name=messages/*}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string name = 1; // Mapped to URL path. +/// } +/// message Message { +/// string text = 1; // The resource content. +/// } +/// +/// This enables an HTTP REST to gRPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +/// +/// Any fields in the request message which are not bound by the path template +/// automatically become HTTP query parameters if there is no HTTP request body. +/// For example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get:"/v1/messages/{message_id}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// message SubMessage { +/// string subfield = 1; +/// } +/// string message_id = 1; // Mapped to URL path. +/// int64 revision = 2; // Mapped to URL query parameter `revision`. +/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
+/// } +/// +/// This enables a HTTP JSON to RPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +/// "foo"))` +/// +/// Note that fields which are mapped to URL query parameters must have a +/// primitive type or a repeated primitive type or a non-repeated message type. +/// In the case of a repeated type, the parameter can be repeated in the URL +/// as `...?param=A¶m=B`. In the case of a message type, each field of the +/// message is mapped to a separate parameter, such as +/// `...?foo.a=A&foo.b=B&foo.c=C`. +/// +/// For HTTP methods that allow a request body, the `body` field +/// specifies the mapping. Consider a REST update method on the +/// message resource collection: +/// +/// service Messaging { +/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "message" +/// }; +/// } +/// } +/// message UpdateMessageRequest { +/// string message_id = 1; // mapped to the URL +/// Message message = 2; // mapped to the body +/// } +/// +/// The following HTTP JSON to RPC mapping is enabled, where the +/// representation of the JSON in the request body is determined by +/// protos JSON encoding: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" message { text: "Hi!" })` +/// +/// The special name `*` can be used in the body mapping to define that +/// every field not bound by the path template should be mapped to the +/// request body. 
This enables the following alternative definition of +/// the update method: +/// +/// service Messaging { +/// rpc UpdateMessage(Message) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "*" +/// }; +/// } +/// } +/// message Message { +/// string message_id = 1; +/// string text = 2; +/// } +/// +/// +/// The following HTTP JSON to RPC mapping is enabled: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" text: "Hi!")` +/// +/// Note that when using `*` in the body mapping, it is not possible to +/// have HTTP parameters, as all fields not bound by the path end in +/// the body. This makes this option more rarely used in practice when +/// defining REST APIs. The common usage of `*` is in custom methods +/// which don't use the URL at all for transferring data. +/// +/// It is possible to define multiple HTTP methods for one RPC by using +/// the `additional_bindings` option. Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/messages/{message_id}" +/// additional_bindings { +/// get: "/v1/users/{user_id}/messages/{message_id}" +/// } +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string message_id = 1; +/// string user_id = 2; +/// } +/// +/// This enables the following two alternative HTTP JSON to RPC mappings: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +/// "123456")` +/// +/// ## Rules for HTTP mapping +/// +/// 1. Leaf request fields (recursive expansion nested messages in the request +/// message) are classified into three categories: +/// - Fields referred by the path template. They are passed via the URL path. 
+/// - Fields referred by the \[HttpRule.body][google.api.HttpRule.body\]. They +/// are passed via the HTTP +/// request body. +/// - All other fields are passed via the URL query parameters, and the +/// parameter name is the field path in the request message. A repeated +/// field can be represented as multiple query parameters under the same +/// name. +/// 2. If \[HttpRule.body][google.api.HttpRule.body\] is "*", there is no URL +/// query parameter, all fields +/// are passed via URL path and HTTP request body. +/// 3. If \[HttpRule.body][google.api.HttpRule.body\] is omitted, there is no HTTP +/// request body, all +/// fields are passed via URL path and URL query parameters. +/// +/// ### Path template syntax +/// +/// Template = "/" Segments [ Verb ] ; +/// Segments = Segment { "/" Segment } ; +/// Segment = "*" | "**" | LITERAL | Variable ; +/// Variable = "{" FieldPath [ "=" Segments ] "}" ; +/// FieldPath = IDENT { "." IDENT } ; +/// Verb = ":" LITERAL ; +/// +/// The syntax `*` matches a single URL path segment. The syntax `**` matches +/// zero or more URL path segments, which must be the last part of the URL path +/// except the `Verb`. +/// +/// The syntax `Variable` matches part of the URL path as specified by its +/// template. A variable template must not contain other variables. If a variable +/// matches a single path segment, its template may be omitted, e.g. `{var}` +/// is equivalent to `{var=*}`. +/// +/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +/// contains any reserved character, such characters should be percent-encoded +/// before the matching. +/// +/// If a variable contains exactly one path segment, such as `"{var}"` or +/// `"{var=*}"`, when such a variable is expanded into a URL path on the client +/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The +/// server side does the reverse decoding. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{var}`. 
+/// +/// If a variable contains multiple path segments, such as `"{var=foo/*}"` +/// or `"{var=**}"`, when such a variable is expanded into a URL path on the +/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. +/// The server side does the reverse decoding, except "%2F" and "%2f" are left +/// unchanged. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{+var}`. +/// +/// ## Using gRPC API Service Configuration +/// +/// gRPC API Service Configuration (service config) is a configuration language +/// for configuring a gRPC service to become a user-facing product. The +/// service config is simply the YAML representation of the `google.api.Service` +/// proto message. +/// +/// As an alternative to annotating your proto file, you can configure gRPC +/// transcoding in your service config YAML files. You do this by specifying a +/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +/// effect as the proto annotation. This can be particularly useful if you +/// have a proto that is reused in multiple services. Note that any transcoding +/// specified in the service config will override any matching transcoding +/// configuration in the proto. +/// +/// Example: +/// +/// http: +/// rules: +/// # Selects a gRPC method and applies HttpRule to it. +/// - selector: example.v1.Messaging.GetMessage +/// get: /v1/messages/{message_id}/{sub.subfield} +/// +/// ## Special notes +/// +/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +/// proto to JSON conversion must follow the [proto3 +/// specification](). +/// +/// While the single segment variable follows the semantics of +/// [RFC 6570]() Section 3.2.2 Simple String +/// Expansion, the multi segment variable **does not** follow RFC 6570 Section +/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +/// does not expand special characters like `?` and `#`, which would lead +/// to invalid URLs. 
As the result, gRPC Transcoding uses a custom encoding +/// for multi segment variables. +/// +/// The path variables **must not** refer to any repeated or mapped field, +/// because client libraries are not capable of handling such variable expansion. +/// +/// The path variables **must not** capture the leading "/" character. The reason +/// is that the most common use case "{var}" does not capture the leading "/" +/// character. For consistency, all path variables must share the same behavior. +/// +/// Repeated message fields must not be mapped to URL query parameters, because +/// no client library can support such complicated mapping. +/// +/// If an API needs to use a JSON array for request or response body, it can map +/// the request or response body to a repeated field. However, some gRPC +/// Transcoding implementations may not support this feature. +/// ``` +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HttpRule { + /// Selects a method to which this rule applies. + /// + /// Refer to \[selector][google.api.DocumentationRule.selector\] for syntax + /// details. + #[prost(string, tag = "1")] + pub selector: ::prost::alloc::string::String, + /// The name of the request field whose value is mapped to the HTTP request + /// body, or `*` for mapping all request fields not captured by the path + /// pattern to the HTTP body, or omitted for not having any HTTP request body. + /// + /// NOTE: the referred field must be present at the top-level of the request + /// message type. + #[prost(string, tag = "7")] + pub body: ::prost::alloc::string::String, + /// Optional. The name of the response field whose value is mapped to the HTTP + /// response body. When omitted, the entire response message will be used + /// as the HTTP response body. + /// + /// NOTE: The referred field must be present at the top-level of the response + /// message type. 
+ #[prost(string, tag = "12")] + pub response_body: ::prost::alloc::string::String, + /// Additional HTTP bindings for the selector. Nested bindings must + /// not contain an `additional_bindings` field themselves (that is, + /// the nesting may only be one level deep). + #[prost(message, repeated, tag = "11")] + pub additional_bindings: ::prost::alloc::vec::Vec, + /// Determines the URL pattern is matched by this rules. This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. + #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")] + pub pattern: ::core::option::Option, +} +/// Nested message and enum types in `HttpRule`. +pub mod http_rule { + /// Determines the URL pattern is matched by this rules. This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Pattern { + /// Maps to HTTP GET. Used for listing and getting information about + /// resources. + #[prost(string, tag = "2")] + Get(::prost::alloc::string::String), + /// Maps to HTTP PUT. Used for replacing a resource. + #[prost(string, tag = "3")] + Put(::prost::alloc::string::String), + /// Maps to HTTP POST. Used for creating a resource or performing an action. + #[prost(string, tag = "4")] + Post(::prost::alloc::string::String), + /// Maps to HTTP DELETE. Used for deleting a resource. + #[prost(string, tag = "5")] + Delete(::prost::alloc::string::String), + /// Maps to HTTP PATCH. Used for updating a resource. + #[prost(string, tag = "6")] + Patch(::prost::alloc::string::String), + /// The custom pattern is used for specifying an HTTP method that is not + /// included in the `pattern` field, such as HEAD, or "*" to leave the + /// HTTP method unspecified for this rule. The wild-card rule is useful + /// for services that provide content to Web (HTML) clients. 
+ #[prost(message, tag = "8")] + Custom(super::CustomHttpPattern), + } +} +/// A custom pattern is used for defining custom HTTP verb. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CustomHttpPattern { + /// The name of this custom HTTP verb. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// The path matched by this custom verb. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, +} +/// The launch stage as defined by [Google Cloud Platform +/// Launch Stages](). +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LaunchStage { + /// Do not use this default value. + Unspecified = 0, + /// The feature is not yet implemented. Users can not use it. + Unimplemented = 6, + /// Prelaunch features are hidden from users and are only visible internally. + Prelaunch = 7, + /// Early Access features are limited to a closed group of testers. To use + /// these features, you must sign up in advance and sign a Trusted Tester + /// agreement (which includes confidentiality provisions). These features may + /// be unstable, changed in backward-incompatible ways, and are not + /// guaranteed to be released. + EarlyAccess = 1, + /// Alpha is a limited availability test for releases before they are cleared + /// for widespread use. By Alpha, all significant design issues are resolved + /// and we are in the process of verifying functionality. Alpha customers + /// need to apply for access, agree to applicable terms, and have their + /// projects allowlisted. Alpha releases don't have to be feature complete, + /// no SLAs are provided, and there are no technical support obligations, but + /// they will be far enough along that customers can actually use them in + /// test environments or for limited-use tests -- just like they would in + /// normal production cases. 
+ Alpha = 2, + /// Beta is the point at which we are ready to open a release for any + /// customer to use. There are no SLA or technical support obligations in a + /// Beta release. Products will be complete from a feature perspective, but + /// may have some open outstanding issues. Beta releases are suitable for + /// limited production use cases. + Beta = 3, + /// GA features are open to all developers and are considered stable and + /// fully qualified for production use. + Ga = 4, + /// Deprecated features are scheduled to be shut down and removed. For more + /// information, see the "Deprecation Policy" section of our [Terms of + /// Service]() + /// and the [Google Cloud Platform Subject to the Deprecation + /// Policy]() documentation. + Deprecated = 5, +} +impl LaunchStage { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + LaunchStage::Unspecified => "LAUNCH_STAGE_UNSPECIFIED", + LaunchStage::Unimplemented => "UNIMPLEMENTED", + LaunchStage::Prelaunch => "PRELAUNCH", + LaunchStage::EarlyAccess => "EARLY_ACCESS", + LaunchStage::Alpha => "ALPHA", + LaunchStage::Beta => "BETA", + LaunchStage::Ga => "GA", + LaunchStage::Deprecated => "DEPRECATED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LAUNCH_STAGE_UNSPECIFIED" => Some(Self::Unspecified), + "UNIMPLEMENTED" => Some(Self::Unimplemented), + "PRELAUNCH" => Some(Self::Prelaunch), + "EARLY_ACCESS" => Some(Self::EarlyAccess), + "ALPHA" => Some(Self::Alpha), + "BETA" => Some(Self::Beta), + "GA" => Some(Self::Ga), + "DEPRECATED" => Some(Self::Deprecated), + _ => None, + } + } +} +/// Required information for every language. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CommonLanguageSettings { + /// Link to automatically generated reference documentation. Example: + /// + #[deprecated] + #[prost(string, tag = "1")] + pub reference_docs_uri: ::prost::alloc::string::String, + /// The destination where API teams want this client library to be published. + #[prost(enumeration = "ClientLibraryDestination", repeated, tag = "2")] + pub destinations: ::prost::alloc::vec::Vec, +} +/// Details about how and where to publish client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientLibrarySettings { + /// Version of the API to apply these settings to. This is the full protobuf + /// package for the API, ending in the version element. + /// Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + /// Launch stage of this version of the API. + #[prost(enumeration = "LaunchStage", tag = "2")] + pub launch_stage: i32, + /// When using transport=rest, the client request will encode enums as + /// numbers rather than strings. + #[prost(bool, tag = "3")] + pub rest_numeric_enums: bool, + /// Settings for legacy Java features, supported in the Service YAML. + #[prost(message, optional, tag = "21")] + pub java_settings: ::core::option::Option, + /// Settings for C++ client libraries. + #[prost(message, optional, tag = "22")] + pub cpp_settings: ::core::option::Option, + /// Settings for PHP client libraries. + #[prost(message, optional, tag = "23")] + pub php_settings: ::core::option::Option, + /// Settings for Python client libraries. + #[prost(message, optional, tag = "24")] + pub python_settings: ::core::option::Option, + /// Settings for Node client libraries. + #[prost(message, optional, tag = "25")] + pub node_settings: ::core::option::Option, + /// Settings for .NET client libraries. 
+ #[prost(message, optional, tag = "26")] + pub dotnet_settings: ::core::option::Option, + /// Settings for Ruby client libraries. + #[prost(message, optional, tag = "27")] + pub ruby_settings: ::core::option::Option, + /// Settings for Go client libraries. + #[prost(message, optional, tag = "28")] + pub go_settings: ::core::option::Option, +} +/// This message configures the settings for publishing [Google Cloud Client +/// libraries]() +/// generated from the service config. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Publishing { + /// A list of API method settings, e.g. the behavior for methods that use the + /// long-running operation pattern. + #[prost(message, repeated, tag = "2")] + pub method_settings: ::prost::alloc::vec::Vec, + /// Link to a *public* URI where users can report issues. Example: + /// + #[prost(string, tag = "101")] + pub new_issue_uri: ::prost::alloc::string::String, + /// Link to product home page. Example: + /// + #[prost(string, tag = "102")] + pub documentation_uri: ::prost::alloc::string::String, + /// Used as a tracking tag when collecting data about the APIs developer + /// relations artifacts like docs, packages delivered to package managers, + /// etc. Example: "speech". + #[prost(string, tag = "103")] + pub api_short_name: ::prost::alloc::string::String, + /// GitHub label to apply to issues and pull requests opened for this API. + #[prost(string, tag = "104")] + pub github_label: ::prost::alloc::string::String, + /// GitHub teams to be added to CODEOWNERS in the directory in GitHub + /// containing source code for the client libraries for this API. + #[prost(string, repeated, tag = "105")] + pub codeowner_github_teams: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// A prefix used in sample code when demarking regions to be included in + /// documentation. + #[prost(string, tag = "106")] + pub doc_tag_prefix: ::prost::alloc::string::String, + /// For whom the client library is being published. 
+ #[prost(enumeration = "ClientLibraryOrganization", tag = "107")] + pub organization: i32, + /// Client library settings. If the same version string appears multiple + /// times in this list, then the last one wins. Settings from earlier + /// settings with the same version string are discarded. + #[prost(message, repeated, tag = "109")] + pub library_settings: ::prost::alloc::vec::Vec, + /// Optional link to proto reference documentation. Example: + /// + #[prost(string, tag = "110")] + pub proto_reference_documentation_uri: ::prost::alloc::string::String, +} +/// Settings for Java client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JavaSettings { + /// The package name to use in Java. Clobbers the java_package option + /// set in the protobuf. This should be used **only** by APIs + /// who have already set the language_settings.java.package_name" field + /// in gapic.yaml. API teams should use the protobuf java_package option + /// where possible. + /// + /// Example of a YAML configuration:: + /// + /// publishing: + /// java_settings: + /// library_package: com.google.cloud.pubsub.v1 + #[prost(string, tag = "1")] + pub library_package: ::prost::alloc::string::String, + /// Configure the Java class name to use instead of the service's for its + /// corresponding generated GAPIC client. Keys are fully-qualified + /// service names as they appear in the protobuf (including the full + /// the language_settings.java.interface_names" field in gapic.yaml. API + /// teams should otherwise use the service name as it appears in the + /// protobuf. 
+ /// + /// Example of a YAML configuration:: + /// + /// publishing: + /// java_settings: + /// service_class_names: + /// - google.pubsub.v1.Publisher: TopicAdmin + /// - google.pubsub.v1.Subscriber: SubscriptionAdmin + #[prost(map = "string, string", tag = "2")] + pub service_class_names: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Some settings. + #[prost(message, optional, tag = "3")] + pub common: ::core::option::Option, +} +/// Settings for C++ client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CppSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Php client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PhpSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Python client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PythonSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Node client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NodeSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Dotnet client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DotnetSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, + /// Map from original service names to renamed versions. + /// This is used when the default generated types + /// would cause a naming conflict. (Neither name is + /// fully-qualified.) + /// Example: Subscriber to SubscriberServiceApi. 
+ #[prost(map = "string, string", tag = "2")] + pub renamed_services: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Map from full resource types to the effective short name + /// for the resource. This is used when otherwise resource + /// named from different services would cause naming collisions. + /// Example entry: + /// "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + #[prost(map = "string, string", tag = "3")] + pub renamed_resources: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// List of full resource types to ignore during generation. + /// This is typically used for API-specific Location resources, + /// which should be handled by the generator as if they were actually + /// the common Location resources. + /// Example entry: "documentai.googleapis.com/Location" + #[prost(string, repeated, tag = "4")] + pub ignored_resources: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Namespaces which must be aliased in snippets due to + /// a known (but non-generator-predictable) naming collision + #[prost(string, repeated, tag = "5")] + pub forced_namespace_aliases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Method signatures (in the form "service.method(signature)") + /// which are provided separately, so shouldn't be generated. + /// Snippets *calling* these methods are still generated, however. + #[prost(string, repeated, tag = "6")] + pub handwritten_signatures: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Settings for Ruby client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RubySettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Go client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GoSettings { + /// Some settings. 
+ #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Describes the generator configuration for a method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MethodSettings { + /// The fully qualified name of the method, for which the options below apply. + /// This is used to find the method to apply the options. + #[prost(string, tag = "1")] + pub selector: ::prost::alloc::string::String, + /// Describes settings to use for long-running operations when generating + /// API methods for RPCs. Complements RPCs that use the annotations in + /// google/longrunning/operations.proto. + /// + /// Example of a YAML configuration:: + /// + /// publishing: + /// method_settings: + /// - selector: google.cloud.speech.v2.Speech.BatchRecognize + /// long_running: + /// initial_poll_delay: + /// seconds: 60 # 1 minute + /// poll_delay_multiplier: 1.5 + /// max_poll_delay: + /// seconds: 360 # 6 minutes + /// total_poll_timeout: + /// seconds: 54000 # 90 minutes + #[prost(message, optional, tag = "2")] + pub long_running: ::core::option::Option, + /// List of top-level fields of the request message, that should be + /// automatically populated by the client libraries based on their + /// (google.api.field_info).format. Currently supported format: UUID4. + /// + /// Example of a YAML configuration: + /// + /// publishing: + /// method_settings: + /// - selector: google.example.v1.ExampleService.CreateExample + /// auto_populated_fields: + /// - request_id + #[prost(string, repeated, tag = "3")] + pub auto_populated_fields: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Nested message and enum types in `MethodSettings`. +pub mod method_settings { + /// Describes settings to use when generating API methods that use the + /// long-running operation pattern. + /// All default values below are from those used in the client library + /// generators (e.g. + /// \[Java\]()). 
+ #[derive(Clone, PartialEq, ::prost::Message)] + pub struct LongRunning { + /// Initial delay after which the first poll request will be made. + /// Default value: 5 seconds. + #[prost(message, optional, tag = "1")] + pub initial_poll_delay: ::core::option::Option<::prost_types::Duration>, + /// Multiplier to gradually increase delay between subsequent polls until it + /// reaches max_poll_delay. + /// Default value: 1.5. + #[prost(float, tag = "2")] + pub poll_delay_multiplier: f32, + /// Maximum time between two subsequent poll requests. + /// Default value: 45 seconds. + #[prost(message, optional, tag = "3")] + pub max_poll_delay: ::core::option::Option<::prost_types::Duration>, + /// Total polling timeout. + /// Default value: 5 minutes. + #[prost(message, optional, tag = "4")] + pub total_poll_timeout: ::core::option::Option<::prost_types::Duration>, + } +} +/// The organization for which the client libraries are being published. +/// Affects the url where generated docs are published, etc. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ClientLibraryOrganization { + /// Not useful. + Unspecified = 0, + /// Google Cloud Platform Org. + Cloud = 1, + /// Ads (Advertising) Org. + Ads = 2, + /// Photos Org. + Photos = 3, + /// Street View Org. + StreetView = 4, + /// Shopping Org. + Shopping = 5, + /// Geo Org. + Geo = 6, + /// Generative AI - + GenerativeAi = 7, +} +impl ClientLibraryOrganization { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ClientLibraryOrganization::Unspecified => "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + ClientLibraryOrganization::Cloud => "CLOUD", + ClientLibraryOrganization::Ads => "ADS", + ClientLibraryOrganization::Photos => "PHOTOS", + ClientLibraryOrganization::StreetView => "STREET_VIEW", + ClientLibraryOrganization::Shopping => "SHOPPING", + ClientLibraryOrganization::Geo => "GEO", + ClientLibraryOrganization::GenerativeAi => "GENERATIVE_AI", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED" => Some(Self::Unspecified), + "CLOUD" => Some(Self::Cloud), + "ADS" => Some(Self::Ads), + "PHOTOS" => Some(Self::Photos), + "STREET_VIEW" => Some(Self::StreetView), + "SHOPPING" => Some(Self::Shopping), + "GEO" => Some(Self::Geo), + "GENERATIVE_AI" => Some(Self::GenerativeAi), + _ => None, + } + } +} +/// To where should client libraries be published? +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ClientLibraryDestination { + /// Client libraries will neither be generated nor published to package + /// managers. + Unspecified = 0, + /// Generate the client library in a repo under github.com/googleapis, + /// but don't publish it to package managers. + Github = 10, + /// Publish the library to package managers like nuget.org and npmjs.com. + PackageManager = 20, +} +impl ClientLibraryDestination { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ClientLibraryDestination::Unspecified => "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + ClientLibraryDestination::Github => "GITHUB", + ClientLibraryDestination::PackageManager => "PACKAGE_MANAGER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED" => Some(Self::Unspecified), + "GITHUB" => Some(Self::Github), + "PACKAGE_MANAGER" => Some(Self::PackageManager), + _ => None, + } + } +} +/// An indicator of the behavior of a given field (for example, that a field +/// is required in requests, or given as output but ignored as input). +/// This **does not** change the behavior in protocol buffers itself; it only +/// denotes the behavior and may affect how API tooling handles the field. +/// +/// Note: This enum **may** receive new values in the future. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FieldBehavior { + /// Conventional default for enums. Do not use this. + Unspecified = 0, + /// Specifically denotes a field as optional. + /// While all fields in protocol buffers are optional, this may be specified + /// for emphasis if appropriate. + Optional = 1, + /// Denotes a field as required. + /// This indicates that the field **must** be provided as part of the request, + /// and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + Required = 2, + /// Denotes a field as output only. + /// This indicates that the field is provided in responses, but including the + /// field in a request does nothing (the server *must* ignore it and + /// *must not* throw an error as a result of the field's presence). + OutputOnly = 3, + /// Denotes a field as input only. + /// This indicates that the field is provided in requests, and the + /// corresponding field is not included in output. 
+ InputOnly = 4, + /// Denotes a field as immutable. + /// This indicates that the field may be set once in a request to create a + /// resource, but may not be changed thereafter. + Immutable = 5, + /// Denotes that a (repeated) field is an unordered list. + /// This indicates that the service may provide the elements of the list + /// in any arbitrary order, rather than the order the user originally + /// provided. Additionally, the list's order may or may not be stable. + UnorderedList = 6, + /// Denotes that this field returns a non-empty default value if not set. + /// This indicates that if the user provides the empty value in a request, + /// a non-empty value will be returned. The user will not be aware of what + /// non-empty value to expect. + NonEmptyDefault = 7, + /// Denotes that the field in a resource (a message annotated with + /// google.api.resource) is used in the resource name to uniquely identify the + /// resource. For AIP-compliant APIs, this should only be applied to the + /// `name` field on the resource. + /// + /// This behavior should not be applied to references to other resources within + /// the message. + /// + /// The identifier field of resources often have different field behavior + /// depending on the request it is embedded in (e.g. for Create methods name + /// is optional and unused, while for Update methods it is required). Instead + /// of method-specific annotations, only `IDENTIFIER` is required. + Identifier = 8, +} +impl FieldBehavior { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + FieldBehavior::Unspecified => "FIELD_BEHAVIOR_UNSPECIFIED", + FieldBehavior::Optional => "OPTIONAL", + FieldBehavior::Required => "REQUIRED", + FieldBehavior::OutputOnly => "OUTPUT_ONLY", + FieldBehavior::InputOnly => "INPUT_ONLY", + FieldBehavior::Immutable => "IMMUTABLE", + FieldBehavior::UnorderedList => "UNORDERED_LIST", + FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT", + FieldBehavior::Identifier => "IDENTIFIER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FIELD_BEHAVIOR_UNSPECIFIED" => Some(Self::Unspecified), + "OPTIONAL" => Some(Self::Optional), + "REQUIRED" => Some(Self::Required), + "OUTPUT_ONLY" => Some(Self::OutputOnly), + "INPUT_ONLY" => Some(Self::InputOnly), + "IMMUTABLE" => Some(Self::Immutable), + "UNORDERED_LIST" => Some(Self::UnorderedList), + "NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault), + "IDENTIFIER" => Some(Self::Identifier), + _ => None, + } + } +} +/// ```ignore +/// A simple descriptor of a resource type. +/// +/// ResourceDescriptor annotates a resource message (either by means of a +/// protobuf annotation or use in the service config), and associates the +/// resource's schema, the resource type, and the pattern of the resource name. +/// +/// Example: +/// +/// message Topic { +/// // Indicates this message defines a resource schema. +/// // Declares the resource type in the format of {service}/{kind}. +/// // For Kubernetes resources, the format is {api group}/{kind}. 
+/// option (google.api.resource) = { +/// type: "pubsub.googleapis.com/Topic" +/// pattern: "projects/{project}/topics/{topic}" +/// }; +/// } +/// +/// The ResourceDescriptor Yaml config will look like: +/// +/// resources: +/// - type: "pubsub.googleapis.com/Topic" +/// pattern: "projects/{project}/topics/{topic}" +/// +/// Sometimes, resources have multiple patterns, typically because they can +/// live under multiple parents. +/// +/// Example: +/// +/// message LogEntry { +/// option (google.api.resource) = { +/// type: "logging.googleapis.com/LogEntry" +/// pattern: "projects/{project}/logs/{log}" +/// pattern: "folders/{folder}/logs/{log}" +/// pattern: "organizations/{organization}/logs/{log}" +/// pattern: "billingAccounts/{billing_account}/logs/{log}" +/// }; +/// } +/// +/// The ResourceDescriptor Yaml config will look like: +/// +/// resources: +/// - type: 'logging.googleapis.com/LogEntry' +/// pattern: "projects/{project}/logs/{log}" +/// pattern: "folders/{folder}/logs/{log}" +/// pattern: "organizations/{organization}/logs/{log}" +/// pattern: "billingAccounts/{billing_account}/logs/{log}" +/// ``` +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceDescriptor { + /// The resource type. It must be in the format of + /// {service_name}/{resource_type_kind}. The `resource_type_kind` must be + /// singular and must not include version numbers. + /// + /// Example: `storage.googleapis.com/Bucket` + /// + /// The value of the resource_type_kind must follow the regular expression + /// /\[A-Za-z][a-zA-Z0-9\]+/. It should start with an upper case character and + /// should use PascalCase (UpperCamelCase). The maximum number of + /// characters allowed for the `resource_type_kind` is 100. + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + /// ```ignore + /// Optional. The relative resource name pattern associated with this resource + /// type. The DNS prefix of the full resource name shouldn't be specified here. 
+ /// + /// The path pattern must follow the syntax, which aligns with HTTP binding + /// syntax: + /// + /// Template = Segment { "/" Segment } ; + /// Segment = LITERAL | Variable ; + /// Variable = "{" LITERAL "}" ; + /// + /// Examples: + /// + /// - "projects/{project}/topics/{topic}" + /// - "projects/{project}/knowledgeBases/{knowledge_base}" + /// + /// The components in braces correspond to the IDs for each resource in the + /// hierarchy. It is expected that, if multiple patterns are provided, + /// the same component name (e.g. "project") refers to IDs of the same + /// type of resource. + /// ``` + #[prost(string, repeated, tag = "2")] + pub pattern: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. The field on the resource that designates the resource name + /// field. If omitted, this is assumed to be "name". + #[prost(string, tag = "3")] + pub name_field: ::prost::alloc::string::String, + /// ```ignore + /// Optional. The historical or future-looking state of the resource pattern. + /// + /// Example: + /// + /// // The InspectTemplate message originally only supported resource + /// // names with organization, and project was added later. + /// message InspectTemplate { + /// option (google.api.resource) = { + /// type: "dlp.googleapis.com/InspectTemplate" + /// pattern: + /// "organizations/{organization}/inspectTemplates/{inspect_template}" + /// pattern: "projects/{project}/inspectTemplates/{inspect_template}" + /// history: ORIGINALLY_SINGLE_PATTERN + /// }; + /// } + /// ``` + #[prost(enumeration = "resource_descriptor::History", tag = "4")] + pub history: i32, + /// The plural name used in the resource name and permission names, such as + /// 'projects' for the resource name of 'projects/{project}' and the permission + /// name of 'cloudresourcemanager.googleapis.com/projects.get'. 
It is the same + /// concept of the `plural` field in k8s CRD spec + /// + /// + /// Note: The plural form is required even for singleton resources. See + /// + #[prost(string, tag = "5")] + pub plural: ::prost::alloc::string::String, + /// The same concept of the `singular` field in k8s CRD spec + /// + /// Such as "project" for the `resourcemanager.googleapis.com/Project` type. + #[prost(string, tag = "6")] + pub singular: ::prost::alloc::string::String, + /// Style flag(s) for this resource. + /// These indicate that a resource is expected to conform to a given + /// style. See the specific style flags for additional information. + #[prost(enumeration = "resource_descriptor::Style", repeated, tag = "10")] + pub style: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `ResourceDescriptor`. +pub mod resource_descriptor { + /// A description of the historical or future-looking state of the + /// resource pattern. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum History { + /// The "unset" value. + Unspecified = 0, + /// The resource originally had one pattern and launched as such, and + /// additional patterns were added later. + OriginallySinglePattern = 1, + /// The resource has one pattern, but the API owner expects to add more + /// later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + /// that from being necessary once there are multiple patterns.) + FutureMultiPattern = 2, + } + impl History { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + History::Unspecified => "HISTORY_UNSPECIFIED", + History::OriginallySinglePattern => "ORIGINALLY_SINGLE_PATTERN", + History::FutureMultiPattern => "FUTURE_MULTI_PATTERN", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "HISTORY_UNSPECIFIED" => Some(Self::Unspecified), + "ORIGINALLY_SINGLE_PATTERN" => Some(Self::OriginallySinglePattern), + "FUTURE_MULTI_PATTERN" => Some(Self::FutureMultiPattern), + _ => None, + } + } + } + /// A flag representing a specific style that a resource claims to conform to. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Style { + /// The unspecified value. Do not use. + Unspecified = 0, + /// This resource is intended to be "declarative-friendly". + /// + /// Declarative-friendly resources must be more strictly consistent, and + /// setting this to true communicates to tools that this resource should + /// adhere to declarative-friendly expectations. + /// + /// Note: This is used by the API linter (linter.aip.dev) to enable + /// additional checks. + DeclarativeFriendly = 1, + } + impl Style { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Style::Unspecified => "STYLE_UNSPECIFIED", + Style::DeclarativeFriendly => "DECLARATIVE_FRIENDLY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STYLE_UNSPECIFIED" => Some(Self::Unspecified), + "DECLARATIVE_FRIENDLY" => Some(Self::DeclarativeFriendly), + _ => None, + } + } + } +} +/// Defines a proto annotation that describes a string field that refers to +/// an API resource. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceReference { + /// ```ignore + /// The resource type that the annotated field references. + /// + /// Example: + /// + /// message Subscription { + /// string topic = 2 [(google.api.resource_reference) = { + /// type: "pubsub.googleapis.com/Topic" + /// }]; + /// } + /// + /// Occasionally, a field may reference an arbitrary resource. In this case, + /// APIs use the special value * in their resource reference. + /// + /// Example: + /// + /// message GetIamPolicyRequest { + /// string resource = 2 [(google.api.resource_reference) = { + /// type: "*" + /// }]; + /// } + /// ``` + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + /// ```ignore + /// The resource type of a child collection that the annotated field + /// references. This is useful for annotating the `parent` field that + /// doesn't have a fixed resource type. + /// + /// Example: + /// + /// message ListLogEntriesRequest { + /// string parent = 1 [(google.api.resource_reference) = { + /// child_type: "logging.googleapis.com/LogEntry" + /// }; + /// } + /// ``` + #[prost(string, tag = "2")] + pub child_type: ::prost::alloc::string::String, +} +/// ```ignore +/// Specifies the routing information that should be sent along with the request +/// in the form of routing header. +/// **NOTE:** All service configuration rules follow the "last one wins" order. 
+/// +/// The examples below will apply to an RPC which has the following request type: +/// +/// Message Definition: +/// +/// message Request { +/// // The name of the Table +/// // Values can be of the following formats: +/// // - `projects//tables/` +/// // - `projects//instances//tables/
` +/// // - `region//zones//tables/
` +/// string table_name = 1; +/// +/// // This value specifies routing for replication. +/// // It can be in the following formats: +/// // - `profiles/` +/// // - a legacy `profile_id` that can be any string +/// string app_profile_id = 2; +/// } +/// +/// Example message: +/// +/// { +/// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +/// app_profile_id: profiles/prof_qux +/// } +/// +/// The routing header consists of one or multiple key-value pairs. Every key +/// and value must be percent-encoded, and joined together in the format of +/// `key1=value1&key2=value2`. +/// In the examples below I am skipping the percent-encoding for readablity. +/// +/// Example 1 +/// +/// Extracting a field from the request to put into the routing header +/// unchanged, with the key equal to the field name. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `app_profile_id`. +/// routing_parameters { +/// field: "app_profile_id" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: app_profile_id=profiles/prof_qux +/// +/// Example 2 +/// +/// Extracting a field from the request to put into the routing header +/// unchanged, with the key different from the field name. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `app_profile_id`, but name it `routing_id` in the header. +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: routing_id=profiles/prof_qux +/// +/// Example 3 +/// +/// Extracting a field from the request to put into the routing +/// header, while matching a path template syntax on the field's value. +/// +/// NB: it is more useful to send nothing than to send garbage for the purpose +/// of dynamic routing, since garbage pollutes cache. Thus the matching. +/// +/// Sub-example 3a +/// +/// The field matches the template. 
+/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `table_name`, if it's well-formed (with project-based +/// // syntax). +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=projects/*/instances/*/**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +/// +/// Sub-example 3b +/// +/// The field does not match the template. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `table_name`, if it's well-formed (with region-based +/// // syntax). +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=regions/*/zones/*/**}" +/// } +/// }; +/// +/// result: +/// +/// +/// +/// Sub-example 3c +/// +/// Multiple alternative conflictingly named path templates are +/// specified. The one that matches is used to construct the header. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `table_name`, if it's well-formed, whether +/// // using the region- or projects-based syntax. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=regions/*/zones/*/**}" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=projects/*/instances/*/**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +/// +/// Example 4 +/// +/// Extracting a single routing header key-value pair by matching a +/// template syntax on (a part of) a single request field. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take just the project id from the `table_name` field. 
+/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: routing_id=projects/proj_foo +/// +/// Example 5 +/// +/// Extracting a single routing header key-value pair by matching +/// several conflictingly named path templates on (parts of) a single request +/// field. The last template to match "wins" the conflict. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // If the `table_name` does not have instances information, +/// // take just the project id for routing. +/// // Otherwise take project + instance. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*/instances/*}/**" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// routing_id=projects/proj_foo/instances/instance_bar +/// +/// Example 6 +/// +/// Extracting multiple routing header key-value pairs by matching +/// several non-conflicting path templates on (parts of) a single request field. +/// +/// Sub-example 6a +/// +/// Make the templates strict, so that if the `table_name` does not +/// have an instance information, nothing is sent. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The routing code needs two keys instead of one composite +/// // but works only for the tables with the "project-instance" name +/// // syntax. 
+/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{project_id=projects/*}/instances/*/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "projects/*/{instance_id=instances/*}/**" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// project_id=projects/proj_foo&instance_id=instances/instance_bar +/// +/// Sub-example 6b +/// +/// Make the templates loose, so that if the `table_name` does not +/// have an instance information, just the project id part is sent. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The routing code wants two keys instead of one composite +/// // but will work with just the `project_id` for tables without +/// // an instance in the `table_name`. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{project_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "projects/*/{instance_id=instances/*}/**" +/// } +/// }; +/// +/// result (is the same as 6a for our example message because it has the instance +/// information): +/// +/// x-goog-request-params: +/// project_id=projects/proj_foo&instance_id=instances/instance_bar +/// +/// Example 7 +/// +/// Extracting multiple routing header key-value pairs by matching +/// several path templates on multiple request fields. +/// +/// NB: note that here there is no way to specify sending nothing if one of the +/// fields does not match its template. E.g. if the `table_name` is in the wrong +/// format, the `project_id` will not be sent, but the `routing_id` will be. +/// The backend routing code has to be aware of that and be prepared to not +/// receive a full complement of keys if it expects multiple. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The routing needs both `project_id` and `routing_id` +/// // (from the `app_profile_id` field) for routing. 
+/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{project_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// project_id=projects/proj_foo&routing_id=profiles/prof_qux +/// +/// Example 8 +/// +/// Extracting a single routing header key-value pair by matching +/// several conflictingly named path templates on several request fields. The +/// last template to match "wins" the conflict. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The `routing_id` can be a project id or a region id depending on +/// // the table name format, but only if the `app_profile_id` is not set. +/// // If `app_profile_id` is set it should be used instead. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=regions/*}/**" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: routing_id=profiles/prof_qux +/// +/// Example 9 +/// +/// Bringing it all together. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // For routing both `table_location` and a `routing_id` are needed. +/// // +/// // table_location can be either an instance id or a region+zone id. +/// // +/// // For `routing_id`, take the value of `app_profile_id` +/// // - If it's in the format `profiles/`, send +/// // just the `` part. +/// // - If it's any other literal, send it as is. +/// // If the `app_profile_id` is empty, and the `table_name` starts with +/// // the project_id, send that instead. 
+/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "projects/*/{table_location=instances/*}/tables/*" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_location=regions/*/zones/*}/tables/*" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "profiles/{routing_id=*}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// table_location=instances/instance_bar&routing_id=prof_qux +/// ``` +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RoutingRule { + /// A collection of Routing Parameter specifications. + /// **NOTE:** If multiple Routing Parameters describe the same key + /// (via the `path_template` field or via the `field` field when + /// `path_template` is not provided), "last one wins" rule + /// determines which Parameter gets used. + /// See the examples for more details. + #[prost(message, repeated, tag = "2")] + pub routing_parameters: ::prost::alloc::vec::Vec, +} +/// A projection from an input message to the GRPC or REST header. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RoutingParameter { + /// A request field to extract the header key-value pair from. + #[prost(string, tag = "1")] + pub field: ::prost::alloc::string::String, + /// ```ignore + /// A pattern matching the key-value field. Optional. + /// If not specified, the whole field specified in the `field` field will be + /// taken as value, and its name used as key. 
If specified, it MUST contain + /// exactly one named segment (along with any number of unnamed segments) The + /// pattern will be matched over the field specified in the `field` field, then + /// if the match is successful: + /// - the name of the single named segment will be used as a header name, + /// - the match value of the segment will be used as a header value; + /// if the match is NOT successful, nothing will be sent. + /// + /// Example: + /// + /// -- This is a field in the request message + /// | that the header value will be extracted from. + /// | + /// | -- This is the key name in the + /// | | routing header. + /// V | + /// field: "table_name" v + /// path_template: "projects/*/{table_location=instances/*}/tables/*" + /// ^ ^ + /// | | + /// In the {} brackets is the pattern that -- | + /// specifies what to extract from the | + /// field as a value to be sent. | + /// | + /// The string in the field must match the whole pattern -- + /// before brackets, inside brackets, after brackets. + /// + /// When looking at this specific example, we can see that: + /// - A key-value pair with the key `table_location` + /// and the value matching `instances/*` should be added + /// to the x-goog-request-params routing header. + /// - The value is extracted from the request message's `table_name` field + /// if it matches the full pattern specified: + /// `projects/*/instances/*/tables/*`. + /// + /// **NB:** If the `path_template` field is not provided, the key name is + /// equal to the field name, and the whole field should be sent as a value. + /// This makes the pattern for the field and the value functionally equivalent + /// to `**`, and the configuration + /// + /// { + /// field: "table_name" + /// } + /// + /// is a functionally equivalent shorthand to: + /// + /// { + /// field: "table_name" + /// path_template: "{table_name=**}" + /// } + /// + /// See Example 1 for more details. 
+ /// ``` + #[prost(string, tag = "2")] + pub path_template: ::prost::alloc::string::String, +} diff --git a/crates/sui-kvstore/src/bigtable/proto/google.bigtable.v2.rs b/crates/sui-kvstore/src/bigtable/proto/google.bigtable.v2.rs new file mode 100644 index 0000000000000..90571abf5c5ea --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.bigtable.v2.rs @@ -0,0 +1,1734 @@ +/// Specifies the complete (requested) contents of a single row of a table. +/// Rows which exceed 256MiB in size cannot be read in full. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Row { + /// The unique key which identifies this row within its table. This is the same + /// key that's used to identify the row in, for example, a MutateRowRequest. + /// May contain any non-empty byte string up to 4KiB in length. + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, + /// May be empty, but only if the entire row is empty. + /// The mutual ordering of column families is not specified. + #[prost(message, repeated, tag = "2")] + pub families: ::prost::alloc::vec::Vec, +} +/// Specifies (some of) the contents of a single row/column family intersection +/// of a table. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Family { + /// The unique key which identifies this family within its row. This is the + /// same key that's used to identify the family in, for example, a RowFilter + /// which sets its "family_name_regex_filter" field. + /// Must match `\[-_.a-zA-Z0-9\]+`, except that AggregatingRowProcessors may + /// produce cells in a sentinel family with an empty name. + /// Must be no greater than 64 characters in length. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Must not be empty. Sorted in order of increasing "qualifier". + #[prost(message, repeated, tag = "2")] + pub columns: ::prost::alloc::vec::Vec, +} +/// Specifies (some of) the contents of a single row/column intersection of a +/// table. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Column { + /// The unique key which identifies this column within its family. This is the + /// same key that's used to identify the column in, for example, a RowFilter + /// which sets its `column_qualifier_regex_filter` field. + /// May contain any byte string, including the empty string, up to 16kiB in + /// length. + #[prost(bytes = "vec", tag = "1")] + pub qualifier: ::prost::alloc::vec::Vec, + /// Must not be empty. Sorted in order of decreasing "timestamp_micros". + #[prost(message, repeated, tag = "2")] + pub cells: ::prost::alloc::vec::Vec, +} +/// Specifies (some of) the contents of a single row/column/timestamp of a table. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Cell { + /// The cell's stored timestamp, which also uniquely identifies it within + /// its column. + /// Values are always expressed in microseconds, but individual tables may set + /// a coarser granularity to further restrict the allowed values. For + /// example, a table which specifies millisecond granularity will only allow + /// values of `timestamp_micros` which are multiples of 1000. + #[prost(int64, tag = "1")] + pub timestamp_micros: i64, + /// The value stored in the cell. + /// May contain any byte string, including the empty string, up to 100MiB in + /// length. + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + /// Labels applied to the cell by a \[RowFilter][google.bigtable.v2.RowFilter\]. + #[prost(string, repeated, tag = "3")] + pub labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Specifies a contiguous range of rows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RowRange { + /// The row key at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[prost(oneof = "row_range::StartKey", tags = "1, 2")] + pub start_key: ::core::option::Option, + /// The row key at which to end the range. 
+ /// If neither field is set, interpreted as the infinite row key, exclusive. + #[prost(oneof = "row_range::EndKey", tags = "3, 4")] + pub end_key: ::core::option::Option, +} +/// Nested message and enum types in `RowRange`. +pub mod row_range { + /// The row key at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartKey { + /// Used when giving an inclusive lower bound for the range. + #[prost(bytes, tag = "1")] + StartKeyClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive lower bound for the range. + #[prost(bytes, tag = "2")] + StartKeyOpen(::prost::alloc::vec::Vec), + } + /// The row key at which to end the range. + /// If neither field is set, interpreted as the infinite row key, exclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum EndKey { + /// Used when giving an exclusive upper bound for the range. + #[prost(bytes, tag = "3")] + EndKeyOpen(::prost::alloc::vec::Vec), + /// Used when giving an inclusive upper bound for the range. + #[prost(bytes, tag = "4")] + EndKeyClosed(::prost::alloc::vec::Vec), + } +} +/// Specifies a non-contiguous set of rows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RowSet { + /// Single rows included in the set. + #[prost(bytes = "vec", repeated, tag = "1")] + pub row_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Contiguous row ranges included in the set. + #[prost(message, repeated, tag = "2")] + pub row_ranges: ::prost::alloc::vec::Vec, +} +/// Specifies a contiguous range of columns within a single column family. +/// The range spans from <column_family>:<start_qualifier> to +/// <column_family>:<end_qualifier>, where both bounds can be either +/// inclusive or exclusive. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ColumnRange { + /// The name of the column family within which this range falls. 
+ #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The column qualifier at which to start the range (within `column_family`). + /// If neither field is set, interpreted as the empty string, inclusive. + #[prost(oneof = "column_range::StartQualifier", tags = "2, 3")] + pub start_qualifier: ::core::option::Option, + /// The column qualifier at which to end the range (within `column_family`). + /// If neither field is set, interpreted as the infinite string, exclusive. + #[prost(oneof = "column_range::EndQualifier", tags = "4, 5")] + pub end_qualifier: ::core::option::Option, +} +/// Nested message and enum types in `ColumnRange`. +pub mod column_range { + /// The column qualifier at which to start the range (within `column_family`). + /// If neither field is set, interpreted as the empty string, inclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartQualifier { + /// Used when giving an inclusive lower bound for the range. + #[prost(bytes, tag = "2")] + StartQualifierClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive lower bound for the range. + #[prost(bytes, tag = "3")] + StartQualifierOpen(::prost::alloc::vec::Vec), + } + /// The column qualifier at which to end the range (within `column_family`). + /// If neither field is set, interpreted as the infinite string, exclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum EndQualifier { + /// Used when giving an inclusive upper bound for the range. + #[prost(bytes, tag = "4")] + EndQualifierClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive upper bound for the range. + #[prost(bytes, tag = "5")] + EndQualifierOpen(::prost::alloc::vec::Vec), + } +} +/// Specified a contiguous range of microsecond timestamps. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TimestampRange { + /// Inclusive lower bound. If left empty, interpreted as 0. 
+ #[prost(int64, tag = "1")] + pub start_timestamp_micros: i64, + /// Exclusive upper bound. If left empty, interpreted as infinity. + #[prost(int64, tag = "2")] + pub end_timestamp_micros: i64, +} +/// Specifies a contiguous range of raw byte values. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValueRange { + /// The value at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[prost(oneof = "value_range::StartValue", tags = "1, 2")] + pub start_value: ::core::option::Option, + /// The value at which to end the range. + /// If neither field is set, interpreted as the infinite string, exclusive. + #[prost(oneof = "value_range::EndValue", tags = "3, 4")] + pub end_value: ::core::option::Option, +} +/// Nested message and enum types in `ValueRange`. +pub mod value_range { + /// The value at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartValue { + /// Used when giving an inclusive lower bound for the range. + #[prost(bytes, tag = "1")] + StartValueClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive lower bound for the range. + #[prost(bytes, tag = "2")] + StartValueOpen(::prost::alloc::vec::Vec), + } + /// The value at which to end the range. + /// If neither field is set, interpreted as the infinite string, exclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum EndValue { + /// Used when giving an inclusive upper bound for the range. + #[prost(bytes, tag = "3")] + EndValueClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive upper bound for the range. + #[prost(bytes, tag = "4")] + EndValueOpen(::prost::alloc::vec::Vec), + } +} +/// Takes a row as input and produces an alternate view of the row based on +/// specified rules. 
For example, a RowFilter might trim down a row to include +/// just the cells from columns matching a given regular expression, or might +/// return all the cells of a row but not their values. More complicated filters +/// can be composed out of these components to express requests such as, "within +/// every column of a particular family, give just the two most recent cells +/// which are older than timestamp X." +/// +/// There are two broad categories of RowFilters (true filters and transformers), +/// as well as two ways to compose simple filters into more complex ones +/// (chains and interleaves). They work as follows: +/// +/// * True filters alter the input row by excluding some of its cells wholesale +/// from the output row. An example of a true filter is the `value_regex_filter`, +/// which excludes cells whose values don't match the specified pattern. All +/// regex true filters use RE2 syntax () +/// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +/// important point to keep in mind is that `RE2(.)` is equivalent by default to +/// `RE2(\[^\n\])`, meaning that it does not match newlines. When attempting to +/// match an arbitrary byte, you should therefore use the escape sequence `\C`, +/// which may need to be further escaped as `\\C` in your client language. +/// +/// * Transformers alter the input row by changing the values of some of its +/// cells in the output, without excluding them completely. Currently, the only +/// supported transformer is the `strip_value_transformer`, which replaces every +/// cell's value with the empty string. +/// +/// * Chains and interleaves are described in more detail in the +/// RowFilter.Chain and RowFilter.Interleave documentation. +/// +/// The total serialized size of a RowFilter message must not +/// exceed 20480 bytes, and RowFilters may not be nested within each other +/// (in Chains or Interleaves) to a depth of more than 20. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RowFilter { + /// Which of the possible RowFilter types to apply. If none are set, this + /// RowFilter returns all cells in the input row. + #[prost( + oneof = "row_filter::Filter", + tags = "1, 2, 3, 16, 17, 18, 4, 14, 5, 6, 7, 8, 9, 15, 10, 11, 12, 13, 19" + )] + pub filter: ::core::option::Option, +} +/// Nested message and enum types in `RowFilter`. +pub mod row_filter { + /// A RowFilter which sends rows through several RowFilters in sequence. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Chain { + /// The elements of "filters" are chained together to process the input row: + /// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + /// The full chain is executed atomically. + #[prost(message, repeated, tag = "1")] + pub filters: ::prost::alloc::vec::Vec, + } + /// A RowFilter which sends each row to each of several component + /// RowFilters and interleaves the results. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Interleave { + /// The elements of "filters" all process a copy of the input row, and the + /// results are pooled, sorted, and combined into a single output row. + /// If multiple cells are produced with the same column and timestamp, + /// they will all appear in the output row in an unspecified mutual order. 
+ /// Consider the following example, with three filters: + ///```ignore + /// input row + /// | + /// ----------------------------------------------------- + /// | | | + /// f(0) f(1) f(2) + /// | | | + /// 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + /// 2: foo,blah,11,z far,blah,5,x far,blah,5,x + /// | | | + /// ----------------------------------------------------- + /// | + /// 1: foo,bar,10,z // could have switched with #2 + /// 2: foo,bar,10,x // could have switched with #1 + /// 3: foo,blah,11,z + /// 4: far,bar,7,a + /// 5: far,blah,5,x // identical to #6 + /// 6: far,blah,5,x // identical to #5 + /// + /// All interleaved filters are executed atomically. + #[prost(message, repeated, tag = "1")] + pub filters: ::prost::alloc::vec::Vec, + } + /// A RowFilter which evaluates one of two possible RowFilters, depending on + /// whether or not a predicate RowFilter outputs any cells from the input row. + /// + /// IMPORTANT NOTE: The predicate filter does not execute atomically with the + /// true and false filters, which may lead to inconsistent or unexpected + /// results. Additionally, Condition filters have poor performance, especially + /// when filters are set for the false condition. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Condition { + /// If `predicate_filter` outputs any cells, then `true_filter` will be + /// evaluated on the input row. Otherwise, `false_filter` will be evaluated. + #[prost(message, optional, boxed, tag = "1")] + pub predicate_filter: ::core::option::Option<::prost::alloc::boxed::Box>, + /// The filter to apply to the input row if `predicate_filter` returns any + /// results. If not provided, no results will be returned in the true case. + #[prost(message, optional, boxed, tag = "2")] + pub true_filter: ::core::option::Option<::prost::alloc::boxed::Box>, + /// The filter to apply to the input row if `predicate_filter` does not + /// return any results. 
If not provided, no results will be returned in the + /// false case. + #[prost(message, optional, boxed, tag = "3")] + pub false_filter: ::core::option::Option<::prost::alloc::boxed::Box>, + } + /// Which of the possible RowFilter types to apply. If none are set, this + /// RowFilter returns all cells in the input row. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Filter { + /// Applies several RowFilters to the data in sequence, progressively + /// narrowing the results. + #[prost(message, tag = "1")] + Chain(Chain), + /// Applies several RowFilters to the data in parallel and combines the + /// results. + #[prost(message, tag = "2")] + Interleave(Interleave), + /// Applies one of two possible RowFilters to the data based on the output of + /// a predicate RowFilter. + #[prost(message, tag = "3")] + Condition(::prost::alloc::boxed::Box), + /// ADVANCED USE ONLY. + /// Hook for introspection into the RowFilter. Outputs all cells directly to + /// the output of the read rather than to any parent filter. 
Consider the + /// following example: + ///```ignore + /// Chain( + /// FamilyRegex("A"), + /// Interleave( + /// All(), + /// Chain(Label("foo"), Sink()) + /// ), + /// QualifierRegex("B") + /// ) + /// + /// A,A,1,w + /// A,B,2,x + /// B,B,4,z + /// | + /// FamilyRegex("A") + /// | + /// A,A,1,w + /// A,B,2,x + /// | + /// +------------+-------------+ + /// | | + /// All() Label(foo) + /// | | + /// A,A,1,w A,A,1,w,labels:\[foo\] + /// A,B,2,x A,B,2,x,labels:\[foo\] + /// | | + /// | Sink() --------------+ + /// | | | + /// +------------+ x------+ A,A,1,w,labels:\[foo\] + /// | A,B,2,x,labels:\[foo\] + /// A,A,1,w | + /// A,B,2,x | + /// | | + /// QualifierRegex("B") | + /// | | + /// A,B,2,x | + /// | | + /// +--------------------------------+ + /// | + /// A,A,1,w,labels:\[foo\] + /// A,B,2,x,labels:\[foo\] // could be switched + /// A,B,2,x // could be switched + /// + /// Despite being excluded by the qualifier filter, a copy of every cell + /// that reaches the sink is present in the final result. + /// + /// As with an \[Interleave][google.bigtable.v2.RowFilter.Interleave\], + /// duplicate cells are possible, and appear in an unspecified mutual order. + /// In this case we have a duplicate with column "A:B" and timestamp 2, + /// because one copy passed through the all filter while the other was + /// passed through the label and sink. Note that one copy has label "foo", + /// while the other does not. + /// + /// Cannot be used within the `predicate_filter`, `true_filter`, or + /// `false_filter` of a \[Condition][google.bigtable.v2.RowFilter.Condition\]. + #[prost(bool, tag = "16")] + Sink(bool), + /// Matches all cells, regardless of input. Functionally equivalent to + /// leaving `filter` unset, but included for completeness. + #[prost(bool, tag = "17")] + PassAllFilter(bool), + /// Does not match any cells, regardless of input. Useful for temporarily + /// disabling just part of a filter. 
+ #[prost(bool, tag = "18")] + BlockAllFilter(bool), + /// Matches only cells from rows whose keys satisfy the given RE2 regex. In + /// other words, passes through the entire row when the key matches, and + /// otherwise produces an empty row. + /// Note that, since row keys can contain arbitrary bytes, the `\C` escape + /// sequence must be used if a true wildcard is desired. The `.` character + /// will not match the new line character `\n`, which may be present in a + /// binary key. + #[prost(bytes, tag = "4")] + RowKeyRegexFilter(::prost::alloc::vec::Vec), + /// Matches all cells from a row with probability p, and matches no cells + /// from the row with probability 1-p. + #[prost(double, tag = "14")] + RowSampleFilter(f64), + /// Matches only cells from columns whose families satisfy the given RE2 + /// regex. For technical reasons, the regex must not contain the `:` + /// character, even if it is not being used as a literal. + /// Note that, since column families cannot contain the new line character + /// `\n`, it is sufficient to use `.` as a full wildcard when matching + /// column family names. + #[prost(string, tag = "5")] + FamilyNameRegexFilter(::prost::alloc::string::String), + /// Matches only cells from columns whose qualifiers satisfy the given RE2 + /// regex. + /// Note that, since column qualifiers can contain arbitrary bytes, the `\C` + /// escape sequence must be used if a true wildcard is desired. The `.` + /// character will not match the new line character `\n`, which may be + /// present in a binary qualifier. + #[prost(bytes, tag = "6")] + ColumnQualifierRegexFilter(::prost::alloc::vec::Vec), + /// Matches only cells from columns within the given range. + #[prost(message, tag = "7")] + ColumnRangeFilter(super::ColumnRange), + /// Matches only cells with timestamps within the given range. 
+ #[prost(message, tag = "8")] + TimestampRangeFilter(super::TimestampRange), + /// Matches only cells with values that satisfy the given regular expression. + /// Note that, since cell values can contain arbitrary bytes, the `\C` escape + /// sequence must be used if a true wildcard is desired. The `.` character + /// will not match the new line character `\n`, which may be present in a + /// binary value. + #[prost(bytes, tag = "9")] + ValueRegexFilter(::prost::alloc::vec::Vec), + /// Matches only cells with values that fall within the given range. + #[prost(message, tag = "15")] + ValueRangeFilter(super::ValueRange), + /// Skips the first N cells of each row, matching all subsequent cells. + /// If duplicate cells are present, as is possible when using an Interleave, + /// each copy of the cell is counted separately. + #[prost(int32, tag = "10")] + CellsPerRowOffsetFilter(i32), + /// Matches only the first N cells of each row. + /// If duplicate cells are present, as is possible when using an Interleave, + /// each copy of the cell is counted separately. + #[prost(int32, tag = "11")] + CellsPerRowLimitFilter(i32), + /// Matches only the most recent N cells within each column. For example, + /// if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, + /// skip all earlier cells in `foo:bar`, and then begin matching again in + /// column `foo:bar2`. + /// If duplicate cells are present, as is possible when using an Interleave, + /// each copy of the cell is counted separately. + #[prost(int32, tag = "12")] + CellsPerColumnLimitFilter(i32), + /// Replaces each cell's value with the empty string. + #[prost(bool, tag = "13")] + StripValueTransformer(bool), + /// Applies the given label to all cells in the output row. This allows + /// the client to determine which results were produced from which part of + /// the filter. 
+ /// + /// Values must be at most 15 characters in length, and match the RE2 + /// pattern `\[a-z0-9\\-\]+` + /// + /// Due to a technical limitation, it is not currently possible to apply + /// multiple labels to a cell. As a result, a Chain may have no more than + /// one sub-filter which contains a `apply_label_transformer`. It is okay for + /// an Interleave to contain multiple `apply_label_transformers`, as they + /// will be applied to separate copies of the input. This may be relaxed in + /// the future. + #[prost(string, tag = "19")] + ApplyLabelTransformer(::prost::alloc::string::String), + } +} +/// Specifies a particular change to be made to the contents of a row. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Mutation { + /// Which of the possible Mutation types to apply. + #[prost(oneof = "mutation::Mutation", tags = "1, 2, 3, 4")] + pub mutation: ::core::option::Option, +} +/// Nested message and enum types in `Mutation`. +pub mod mutation { + /// A Mutation which sets the value of the specified cell. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct SetCell { + /// The name of the family into which new data should be written. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The qualifier of the column into which new data should be written. + /// Can be any byte string, including the empty string. + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, + /// The timestamp of the cell into which new data should be written. + /// Use -1 for current Bigtable server time. + /// Otherwise, the client should set this value itself, noting that the + /// default value is a timestamp of zero if the field is left unspecified. + /// Values must match the granularity of the table (e.g. micros, millis). + #[prost(int64, tag = "3")] + pub timestamp_micros: i64, + /// The value to be written into the specified cell. 
+ #[prost(bytes = "vec", tag = "4")] + pub value: ::prost::alloc::vec::Vec, + } + /// A Mutation which deletes cells from the specified column, optionally + /// restricting the deletions to a given timestamp range. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DeleteFromColumn { + /// The name of the family from which cells should be deleted. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The qualifier of the column from which cells should be deleted. + /// Can be any byte string, including the empty string. + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, + /// The range of timestamps within which cells should be deleted. + #[prost(message, optional, tag = "3")] + pub time_range: ::core::option::Option, + } + /// A Mutation which deletes all cells from the specified column family. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DeleteFromFamily { + /// The name of the family from which cells should be deleted. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + } + /// A Mutation which deletes all cells from the containing row. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DeleteFromRow {} + /// Which of the possible Mutation types to apply. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Mutation { + /// Set a cell's value. + #[prost(message, tag = "1")] + SetCell(SetCell), + /// Deletes cells from a column. + #[prost(message, tag = "2")] + DeleteFromColumn(DeleteFromColumn), + /// Deletes cells from a column family. + #[prost(message, tag = "3")] + DeleteFromFamily(DeleteFromFamily), + /// Deletes cells from the entire row. + #[prost(message, tag = "4")] + DeleteFromRow(DeleteFromRow), + } +} +/// Specifies an atomic read/modify/write operation on the latest value of the +/// specified column. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadModifyWriteRule { + /// The name of the family to which the read/modify/write should be applied. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The qualifier of the column to which the read/modify/write should be + /// applied. + /// Can be any byte string, including the empty string. + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, + /// The rule used to determine the column's new latest value from its current + /// latest value. + #[prost(oneof = "read_modify_write_rule::Rule", tags = "3, 4")] + pub rule: ::core::option::Option, +} +/// Nested message and enum types in `ReadModifyWriteRule`. +pub mod read_modify_write_rule { + /// The rule used to determine the column's new latest value from its current + /// latest value. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Rule { + /// Rule specifying that `append_value` be appended to the existing value. + /// If the targeted cell is unset, it will be treated as containing the + /// empty string. + #[prost(bytes, tag = "3")] + AppendValue(::prost::alloc::vec::Vec), + /// Rule specifying that `increment_amount` be added to the existing value. + /// If the targeted cell is unset, it will be treated as containing a zero. + /// Otherwise, the targeted cell must contain an 8-byte value (interpreted + /// as a 64-bit big-endian signed integer), or the entire request will fail. + #[prost(int64, tag = "4")] + IncrementAmount(i64), + } +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// A partition of a change stream. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StreamPartition { + /// The row range covered by this partition and is specified by + /// [`start_key_closed`, `end_key_open`). 
+ #[prost(message, optional, tag = "1")] + pub row_range: ::core::option::Option, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// The information required to continue reading the data from multiple +/// `StreamPartitions` from where a previous read left off. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StreamContinuationTokens { + /// List of continuation tokens. + #[prost(message, repeated, tag = "1")] + pub tokens: ::prost::alloc::vec::Vec, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// The information required to continue reading the data from a +/// `StreamPartition` from where a previous read left off. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StreamContinuationToken { + /// The partition that this token applies to. + #[prost(message, optional, tag = "1")] + pub partition: ::core::option::Option, + /// An encoded position in the stream to restart reading from. + #[prost(string, tag = "2")] + pub token: ::prost::alloc::string::String, +} +/// ReadIterationStats captures information about the iteration of rows or cells +/// over the course of a read, e.g. how many results were scanned in a read +/// operation versus the results returned. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadIterationStats { + /// The rows seen (scanned) as part of the request. This includes the count of + /// rows returned, as captured below. + #[prost(int64, tag = "1")] + pub rows_seen_count: i64, + /// The rows returned as part of the request. + #[prost(int64, tag = "2")] + pub rows_returned_count: i64, + /// The cells seen (scanned) as part of the request. This includes the count of + /// cells returned, as captured below. + #[prost(int64, tag = "3")] + pub cells_seen_count: i64, + /// The cells returned as part of the request. 
+ #[prost(int64, tag = "4")] + pub cells_returned_count: i64, +} +/// RequestLatencyStats provides a measurement of the latency of the request as +/// it interacts with different systems over its lifetime, e.g. how long the +/// request took to execute within a frontend server. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestLatencyStats { + /// The latency measured by the frontend server handling this request, from + /// when the request was received, to when this value is sent back in the + /// response. For more context on the component that is measuring this latency, + /// see: + /// + /// Note: This value may be slightly shorter than the value reported into + /// aggregate latency metrics in Monitoring for this request + /// () as this value + /// needs to be sent in the response before the latency measurement including + /// that transmission is finalized. + /// + /// Note: This value includes the end-to-end latency of contacting nodes in + /// the targeted cluster, e.g. measuring from when the first byte arrives at + /// the frontend server, to when this value is sent back as the last value in + /// the response, including any latency incurred by contacting nodes, waiting + /// for results from nodes, and finally sending results from nodes back to the + /// caller. + #[prost(message, optional, tag = "1")] + pub frontend_server_latency: ::core::option::Option<::prost_types::Duration>, +} +/// FullReadStatsView captures all known information about a read. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FullReadStatsView { + /// Iteration stats describe how efficient the read is, e.g. comparing + /// rows seen vs. rows returned or cells seen vs cells returned can provide an + /// indication of read efficiency (the higher the ratio of seen to retuned the + /// better). 
+ #[prost(message, optional, tag = "1")] + pub read_iteration_stats: ::core::option::Option, + /// Request latency stats describe the time taken to complete a request, from + /// the server side. + #[prost(message, optional, tag = "2")] + pub request_latency_stats: ::core::option::Option, +} +/// RequestStats is the container for additional information pertaining to a +/// single request, helpful for evaluating the performance of the sent request. +/// Currently, there are the following supported methods: +/// * google.bigtable.v2.ReadRows +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestStats { + /// Information pertaining to each request type received. The type is chosen + /// based on the requested view. + /// + /// See the messages above for additional context. + #[prost(oneof = "request_stats::StatsView", tags = "1")] + pub stats_view: ::core::option::Option, +} +/// Nested message and enum types in `RequestStats`. +pub mod request_stats { + /// Information pertaining to each request type received. The type is chosen + /// based on the requested view. + /// + /// See the messages above for additional context. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StatsView { + /// Available with the ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL + /// view, see package google.bigtable.v2. + #[prost(message, tag = "1")] + FullReadStatsView(super::FullReadStatsView), + } +} +/// Request message for Bigtable.ReadRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadRowsRequest { + /// Required. The unique name of the table from which to read. + /// Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "5")] + pub app_profile_id: ::prost::alloc::string::String, + /// The row keys and/or ranges to read sequentially. If not specified, reads + /// from all rows. + #[prost(message, optional, tag = "2")] + pub rows: ::core::option::Option, + /// The filter to apply to the contents of the specified row(s). If unset, + /// reads the entirety of each row. + #[prost(message, optional, tag = "3")] + pub filter: ::core::option::Option, + /// The read will stop after committing to N rows' worth of results. The + /// default (zero) is to return all results. + #[prost(int64, tag = "4")] + pub rows_limit: i64, + /// The view into RequestStats, as described above. + #[prost(enumeration = "read_rows_request::RequestStatsView", tag = "6")] + pub request_stats_view: i32, + /// Experimental API - Please note that this API is currently experimental + /// and can change in the future. + /// + /// Return rows in lexiographical descending order of the row keys. The row + /// contents will not be affected by this flag. + /// + /// Example result set: + ///```ignore + /// [ + /// {key: "k2", "f:col1": "v1", "f:col2": "v1"}, + /// {key: "k1", "f:col1": "v2", "f:col2": "v2"} + /// ] + #[prost(bool, tag = "7")] + pub reversed: bool, +} +/// Nested message and enum types in `ReadRowsRequest`. +pub mod read_rows_request { + /// The desired view into RequestStats that should be returned in the response. + /// + /// See also: RequestStats message. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum RequestStatsView { + /// The default / unset value. The API will default to the NONE option below. + Unspecified = 0, + /// Do not include any RequestStats in the response. 
This will leave the + /// RequestStats embedded message unset in the response. + RequestStatsNone = 1, + /// Include the full set of available RequestStats in the response, + /// applicable to this read. + RequestStatsFull = 2, + } + impl RequestStatsView { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + RequestStatsView::Unspecified => "REQUEST_STATS_VIEW_UNSPECIFIED", + RequestStatsView::RequestStatsNone => "REQUEST_STATS_NONE", + RequestStatsView::RequestStatsFull => "REQUEST_STATS_FULL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REQUEST_STATS_VIEW_UNSPECIFIED" => Some(Self::Unspecified), + "REQUEST_STATS_NONE" => Some(Self::RequestStatsNone), + "REQUEST_STATS_FULL" => Some(Self::RequestStatsFull), + _ => None, + } + } + } +} +/// Response message for Bigtable.ReadRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadRowsResponse { + /// A collection of a row's contents as part of the read request. + #[prost(message, repeated, tag = "1")] + pub chunks: ::prost::alloc::vec::Vec, + /// Optionally the server might return the row key of the last row it + /// has scanned. The client can use this to construct a more + /// efficient retry request if needed: any row keys or portions of + /// ranges less than this row key can be dropped from the request. + /// This is primarily useful for cases where the server has read a + /// lot of data that was filtered out since the last committed row + /// key, allowing the client to skip that work on a retry. 
+ #[prost(bytes = "vec", tag = "2")] + pub last_scanned_row_key: ::prost::alloc::vec::Vec, + /// + /// If requested, provide enhanced query performance statistics. The semantics + /// dictate: + /// * request_stats is empty on every (streamed) response, except + /// * request_stats has non-empty information after all chunks have been + /// streamed, where the ReadRowsResponse message only contains + /// request_stats. + /// * For example, if a read request would have returned an empty + /// response instead a single ReadRowsResponse is streamed with empty + /// chunks and request_stats filled. + /// + /// Visually, response messages will stream as follows: + /// ... -> {chunks: \[...\]} -> {chunks: [], request_stats: {...}} + /// \______________________/ \________________________________/ + /// Primary response Trailer of RequestStats info + /// + /// Or if the read did not return any values: + /// {chunks: [], request_stats: {...}} + /// \________________________________/ + /// Trailer of RequestStats info + #[prost(message, optional, tag = "3")] + pub request_stats: ::core::option::Option, +} +/// Nested message and enum types in `ReadRowsResponse`. +pub mod read_rows_response { + /// Specifies a piece of a row's contents returned as part of the read + /// response stream. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct CellChunk { + /// The row key for this chunk of data. If the row key is empty, + /// this CellChunk is a continuation of the same row as the previous + /// CellChunk in the response stream, even if that CellChunk was in a + /// previous ReadRowsResponse message. + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, + /// The column family name for this chunk of data. If this message + /// is not present this CellChunk is a continuation of the same column + /// family as the previous CellChunk. 
The empty string can occur as a + /// column family name in a response so clients must check + /// explicitly for the presence of this message, not just for + /// `family_name.value` being non-empty. + #[prost(message, optional, tag = "2")] + pub family_name: ::core::option::Option<::prost::alloc::string::String>, + /// The column qualifier for this chunk of data. If this message + /// is not present, this CellChunk is a continuation of the same column + /// as the previous CellChunk. Column qualifiers may be empty so + /// clients must check for the presence of this message, not just + /// for `qualifier.value` being non-empty. + #[prost(message, optional, tag = "3")] + pub qualifier: ::core::option::Option<::prost::alloc::vec::Vec>, + /// The cell's stored timestamp, which also uniquely identifies it + /// within its column. Values are always expressed in + /// microseconds, but individual tables may set a coarser + /// granularity to further restrict the allowed values. For + /// example, a table which specifies millisecond granularity will + /// only allow values of `timestamp_micros` which are multiples of + /// 1000. Timestamps are only set in the first CellChunk per cell + /// (for cells split into multiple chunks). + #[prost(int64, tag = "4")] + pub timestamp_micros: i64, + /// Labels applied to the cell by a + /// \[RowFilter][google.bigtable.v2.RowFilter\]. Labels are only set + /// on the first CellChunk per cell. + #[prost(string, repeated, tag = "5")] + pub labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// The value stored in the cell. Cell values can be split across + /// multiple CellChunks. In that case only the value field will be + /// set in CellChunks after the first: the timestamp and labels + /// will only be present in the first CellChunk, even if the first + /// CellChunk came in a previous ReadRowsResponse. 
+ #[prost(bytes = "vec", tag = "6")] + pub value: ::prost::alloc::vec::Vec, + /// If this CellChunk is part of a chunked cell value and this is + /// not the final chunk of that cell, value_size will be set to the + /// total length of the cell value. The client can use this size + /// to pre-allocate memory to hold the full cell value. + #[prost(int32, tag = "7")] + pub value_size: i32, + /// Signals to the client concerning previous CellChunks received. + #[prost(oneof = "cell_chunk::RowStatus", tags = "8, 9")] + pub row_status: ::core::option::Option, + } + /// Nested message and enum types in `CellChunk`. + pub mod cell_chunk { + /// Signals to the client concerning previous CellChunks received. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum RowStatus { + /// Indicates that the client should drop all previous chunks for + /// `row_key`, as it will be re-read from the beginning. + #[prost(bool, tag = "8")] + ResetRow(bool), + /// Indicates that the client can safely process all previous chunks for + /// `row_key`, as its data has been fully read. + #[prost(bool, tag = "9")] + CommitRow(bool), + } + } +} +/// Request message for Bigtable.SampleRowKeys. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SampleRowKeysRequest { + /// Required. The unique name of the table from which to sample row keys. + /// Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, +} +/// Response message for Bigtable.SampleRowKeys. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SampleRowKeysResponse { + /// Sorted streamed sequence of sample row keys in the table. The table might + /// have contents before the first row key in the list and after the last one, + /// but a key containing the empty string indicates "end of table" and will be + /// the last response given, if present. + /// Note that row keys in this list may not have ever been written to or read + /// from, and users should therefore not make any assumptions about the row key + /// structure that are specific to their use case. + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, + /// Approximate total storage space used by all rows in the table which precede + /// `row_key`. Buffering the contents of all rows between two subsequent + /// samples would require space roughly equal to the difference in their + /// `offset_bytes` fields. + #[prost(int64, tag = "2")] + pub offset_bytes: i64, +} +/// Request message for Bigtable.MutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowRequest { + /// Required. The unique name of the table to which the mutation should be + /// applied. Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "4")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The key of the row to which the mutation should be applied. + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, + /// Required. Changes to be atomically applied to the specified row. Entries + /// are applied in order, meaning that earlier mutations can be masked by later + /// ones. Must contain at least one entry and at most 100000. + #[prost(message, repeated, tag = "3")] + pub mutations: ::prost::alloc::vec::Vec, +} +/// Response message for Bigtable.MutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowResponse {} +/// Request message for BigtableService.MutateRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowsRequest { + /// Required. The unique name of the table to which the mutations should be + /// applied. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "3")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The row keys and corresponding mutations to be applied in bulk. + /// Each entry is applied as an atomic mutation, but the entries may be + /// applied in arbitrary order (even between entries for the same row). + /// At least one entry must be specified, and in total the entries can + /// contain at most 100000 mutations. + #[prost(message, repeated, tag = "2")] + pub entries: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `MutateRowsRequest`. +pub mod mutate_rows_request { + /// A mutation for a given row. 
+ #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Entry { + /// The key of the row to which the `mutations` should be applied. + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, + /// Required. Changes to be atomically applied to the specified row. + /// Mutations are applied in order, meaning that earlier mutations can be + /// masked by later ones. You must specify at least one mutation. + #[prost(message, repeated, tag = "2")] + pub mutations: ::prost::alloc::vec::Vec, + } +} +/// Response message for BigtableService.MutateRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowsResponse { + /// One or more results for Entries from the batch request. + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec, + /// Information about how client should limit the rate (QPS). Primirily used by + /// supported official Cloud Bigtable clients. If unset, the rate limit info is + /// not provided by the server. + #[prost(message, optional, tag = "3")] + pub rate_limit_info: ::core::option::Option, +} +/// Nested message and enum types in `MutateRowsResponse`. +pub mod mutate_rows_response { + /// The result of applying a passed mutation in the original request. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Entry { + /// The index into the original request's `entries` list of the Entry + /// for which a result is being reported. + #[prost(int64, tag = "1")] + pub index: i64, + /// The result of the request Entry identified by `index`. + /// Depending on how requests are batched during execution, it is possible + /// for one Entry to fail due to an error with another Entry. In the event + /// that this occurs, the same error will be reported for both entries. + #[prost(message, optional, tag = "2")] + pub status: ::core::option::Option, + } +} +/// Information about how client should adjust the load to Bigtable. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RateLimitInfo { + /// Time that clients should wait before adjusting the target rate again. + /// If clients adjust rate too frequently, the impact of the previous + /// adjustment may not have been taken into account and may + /// over-throttle or under-throttle. If clients adjust rate too slowly, they + /// will not be responsive to load changes on server side, and may + /// over-throttle or under-throttle. + #[prost(message, optional, tag = "1")] + pub period: ::core::option::Option<::prost_types::Duration>, + /// If it has been at least one `period` since the last load adjustment, the + /// client should multiply the current load by this value to get the new target + /// load. For example, if the current load is 100 and `factor` is 0.8, the new + /// target load should be 80. After adjusting, the client should ignore + /// `factor` until another `period` has passed. + /// + /// The client can measure its load using any unit that's comparable over time + /// For example, QPS can be used as long as each request involves a similar + /// amount of work. + #[prost(double, tag = "2")] + pub factor: f64, +} +/// Request message for Bigtable.CheckAndMutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckAndMutateRowRequest { + /// Required. The unique name of the table to which the conditional mutation + /// should be applied. Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "7")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The key of the row to which the conditional mutation should be + /// applied. + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, + /// The filter to be applied to the contents of the specified row. Depending + /// on whether or not any results are yielded, either `true_mutations` or + /// `false_mutations` will be executed. If unset, checks that the row contains + /// any values at all. + #[prost(message, optional, tag = "6")] + pub predicate_filter: ::core::option::Option, + /// Changes to be atomically applied to the specified row if `predicate_filter` + /// yields at least one cell when applied to `row_key`. Entries are applied in + /// order, meaning that earlier mutations can be masked by later ones. + /// Must contain at least one entry if `false_mutations` is empty, and at most + /// 100000. + #[prost(message, repeated, tag = "4")] + pub true_mutations: ::prost::alloc::vec::Vec, + /// Changes to be atomically applied to the specified row if `predicate_filter` + /// does not yield any cells when applied to `row_key`. Entries are applied in + /// order, meaning that earlier mutations can be masked by later ones. + /// Must contain at least one entry if `true_mutations` is empty, and at most + /// 100000. + #[prost(message, repeated, tag = "5")] + pub false_mutations: ::prost::alloc::vec::Vec, +} +/// Response message for Bigtable.CheckAndMutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckAndMutateRowResponse { + /// Whether or not the request's `predicate_filter` yielded any results for + /// the specified row. 
+ #[prost(bool, tag = "1")] + pub predicate_matched: bool, +} +/// Request message for client connection keep-alive and warming. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PingAndWarmRequest { + /// Required. The unique name of the instance to check permissions for as well + /// as respond. Values are of the form + /// `projects//instances/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, +} +/// Response message for Bigtable.PingAndWarm connection keepalive and warming. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PingAndWarmResponse {} +/// Request message for Bigtable.ReadModifyWriteRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadModifyWriteRowRequest { + /// Required. The unique name of the table to which the read/modify/write rules + /// should be applied. Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "4")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The key of the row to which the read/modify/write rules should be + /// applied. + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, + /// Required. Rules specifying how the specified row's contents are to be + /// transformed into writes. Entries are applied in order, meaning that earlier + /// rules will affect the results of later ones. + #[prost(message, repeated, tag = "3")] + pub rules: ::prost::alloc::vec::Vec, +} +/// Response message for Bigtable.ReadModifyWriteRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadModifyWriteRowResponse { + /// A Row containing the new contents of all cells modified by the request. + #[prost(message, optional, tag = "1")] + pub row: ::core::option::Option, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Request message for Bigtable.GenerateInitialChangeStreamPartitions. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenerateInitialChangeStreamPartitionsRequest { + /// Required. The unique name of the table from which to get change stream + /// partitions. Values are of the form + /// `projects//instances//tables/
`. + /// Change streaming must be enabled on the table. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + /// Single cluster routing must be configured on the profile. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Response message for Bigtable.GenerateInitialChangeStreamPartitions. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenerateInitialChangeStreamPartitionsResponse { + /// A partition of the change stream. + #[prost(message, optional, tag = "1")] + pub partition: ::core::option::Option, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Request message for Bigtable.ReadChangeStream. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadChangeStreamRequest { + /// Required. The unique name of the table from which to read a change stream. + /// Values are of the form + /// `projects//instances//tables/
`. + /// Change streaming must be enabled on the table. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + /// Single cluster routing must be configured on the profile. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, + /// The partition to read changes from. + #[prost(message, optional, tag = "3")] + pub partition: ::core::option::Option, + /// If specified, OK will be returned when the stream advances beyond + /// this time. Otherwise, changes will be continuously delivered on the stream. + /// This value is inclusive and will be truncated to microsecond granularity. + #[prost(message, optional, tag = "5")] + pub end_time: ::core::option::Option<::prost_types::Timestamp>, + /// If specified, the duration between `Heartbeat` messages on the stream. + /// Otherwise, defaults to 5 seconds. + #[prost(message, optional, tag = "7")] + pub heartbeat_duration: ::core::option::Option<::prost_types::Duration>, + /// Options for describing where we want to start reading from the stream. + #[prost(oneof = "read_change_stream_request::StartFrom", tags = "4, 6")] + pub start_from: ::core::option::Option, +} +/// Nested message and enum types in `ReadChangeStreamRequest`. +pub mod read_change_stream_request { + /// Options for describing where we want to start reading from the stream. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartFrom { + /// Start reading the stream at the specified timestamp. This timestamp must + /// be within the change stream retention period, less than or equal to the + /// current time, and after change stream creation, whichever is greater. + /// This value is inclusive and will be truncated to microsecond granularity. 
+ #[prost(message, tag = "4")] + StartTime(::prost_types::Timestamp), + /// Tokens that describe how to resume reading a stream where reading + /// previously left off. If specified, changes will be read starting at the + /// the position. Tokens are delivered on the stream as part of `Heartbeat` + /// and `CloseStream` messages. + /// + /// If a single token is provided, the token’s partition must exactly match + /// the request’s partition. If multiple tokens are provided, as in the case + /// of a partition merge, the union of the token partitions must exactly + /// cover the request’s partition. Otherwise, INVALID_ARGUMENT will be + /// returned. + #[prost(message, tag = "6")] + ContinuationTokens(super::StreamContinuationTokens), + } +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Response message for Bigtable.ReadChangeStream. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadChangeStreamResponse { + /// The data or control message on the stream. + #[prost(oneof = "read_change_stream_response::StreamRecord", tags = "1, 2, 3")] + pub stream_record: ::core::option::Option, +} +/// Nested message and enum types in `ReadChangeStreamResponse`. +pub mod read_change_stream_response { + /// A partial or complete mutation. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MutationChunk { + /// If set, then the mutation is a `SetCell` with a chunked value across + /// multiple messages. + #[prost(message, optional, tag = "1")] + pub chunk_info: ::core::option::Option, + /// If this is a continuation of a chunked message (`chunked_value_offset` > + /// 0), ignore all fields except the `SetCell`'s value and merge it with + /// the previous message by concatenating the value fields. + #[prost(message, optional, tag = "2")] + pub mutation: ::core::option::Option, + } + /// Nested message and enum types in `MutationChunk`. + pub mod mutation_chunk { + /// Information about the chunking of this mutation. 
+ /// Only `SetCell` mutations can be chunked, and all chunks for a `SetCell` + /// will be delivered contiguously with no other mutation types interleaved. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ChunkInfo { + /// The total value size of all the chunks that make up the `SetCell`. + #[prost(int32, tag = "1")] + pub chunked_value_size: i32, + /// The byte offset of this chunk into the total value size of the + /// mutation. + #[prost(int32, tag = "2")] + pub chunked_value_offset: i32, + /// When true, this is the last chunk of a chunked `SetCell`. + #[prost(bool, tag = "3")] + pub last_chunk: bool, + } + } + /// A message corresponding to one or more mutations to the partition + /// being streamed. A single logical `DataChange` message may also be split + /// across a sequence of multiple individual messages. Messages other than + /// the first in a sequence will only have the `type` and `chunks` fields + /// populated, with the final message in the sequence also containing `done` + /// set to true. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DataChange { + /// The type of the mutation. + #[prost(enumeration = "data_change::Type", tag = "1")] + pub r#type: i32, + /// The cluster where the mutation was applied. + /// Not set when `type` is `GARBAGE_COLLECTION`. + #[prost(string, tag = "2")] + pub source_cluster_id: ::prost::alloc::string::String, + /// The row key for all mutations that are part of this `DataChange`. + /// If the `DataChange` is chunked across multiple messages, then this field + /// will only be set for the first message. + #[prost(bytes = "vec", tag = "3")] + pub row_key: ::prost::alloc::vec::Vec, + /// The timestamp at which the mutation was applied on the Bigtable server. + #[prost(message, optional, tag = "4")] + pub commit_timestamp: ::core::option::Option<::prost_types::Timestamp>, + /// A value that lets stream consumers reconstruct Bigtable's + /// conflict resolution semantics. 
+ /// + /// In the event that the same row key, column family, column qualifier, + /// timestamp are modified on different clusters at the same + /// `commit_timestamp`, the mutation with the larger `tiebreaker` will be the + /// one chosen for the eventually consistent state of the system. + #[prost(int32, tag = "5")] + pub tiebreaker: i32, + /// The mutations associated with this change to the partition. + /// May contain complete mutations or chunks of a multi-message chunked + /// `DataChange` record. + #[prost(message, repeated, tag = "6")] + pub chunks: ::prost::alloc::vec::Vec, + /// When true, indicates that the entire `DataChange` has been read + /// and the client can safely process the message. + #[prost(bool, tag = "8")] + pub done: bool, + /// An encoded position for this stream's partition to restart reading from. + /// This token is for the StreamPartition from the request. + #[prost(string, tag = "9")] + pub token: ::prost::alloc::string::String, + /// An estimate of the commit timestamp that is usually lower than or equal + /// to any timestamp for a record that will be delivered in the future on the + /// stream. It is possible that, under particular circumstances that a future + /// record has a timestamp is is lower than a previously seen timestamp. For + /// an example usage see + /// + #[prost(message, optional, tag = "10")] + pub estimated_low_watermark: ::core::option::Option<::prost_types::Timestamp>, + } + /// Nested message and enum types in `DataChange`. + pub mod data_change { + /// The type of mutation. + #[derive( + Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, + )] + #[repr(i32)] + pub enum Type { + /// The type is unspecified. + Unspecified = 0, + /// A user-initiated mutation. + User = 1, + /// A system-initiated mutation as part of garbage collection. + /// + GarbageCollection = 2, + /// This is a continuation of a multi-message change. 
+ Continuation = 3, + } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::Unspecified => "TYPE_UNSPECIFIED", + Type::User => "USER", + Type::GarbageCollection => "GARBAGE_COLLECTION", + Type::Continuation => "CONTINUATION", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "USER" => Some(Self::User), + "GARBAGE_COLLECTION" => Some(Self::GarbageCollection), + "CONTINUATION" => Some(Self::Continuation), + _ => None, + } + } + } + } + /// A periodic message with information that can be used to checkpoint + /// the state of a stream. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Heartbeat { + /// A token that can be provided to a subsequent `ReadChangeStream` call + /// to pick up reading at the current stream position. + #[prost(message, optional, tag = "1")] + pub continuation_token: ::core::option::Option, + /// An estimate of the commit timestamp that is usually lower than or equal + /// to any timestamp for a record that will be delivered in the future on the + /// stream. It is possible that, under particular circumstances that a future + /// record has a timestamp is is lower than a previously seen timestamp. For + /// an example usage see + /// + #[prost(message, optional, tag = "2")] + pub estimated_low_watermark: ::core::option::Option<::prost_types::Timestamp>, + } + /// A message indicating that the client should stop reading from the stream. + /// If status is OK and `continuation_tokens` & `new_partitions` are empty, the + /// stream has finished (for example if there was an `end_time` specified). 
+ /// If `continuation_tokens` & `new_partitions` are present, then a change in + /// partitioning requires the client to open a new stream for each token to + /// resume reading. Example: + /// [B, D) ends + /// | + /// v + /// new_partitions: [A, C) [C, E) + /// continuation_tokens.partitions: [B,C) [C,D) + /// ^---^ ^---^ + /// ^ ^ + /// | | + /// | StreamContinuationToken 2 + /// | + /// StreamContinuationToken 1 + /// To read the new partition [A,C), supply the continuation tokens whose + /// ranges cover the new partition, for example ContinuationToken[A,B) & + /// ContinuationToken[B,C). + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct CloseStream { + /// The status of the stream. + #[prost(message, optional, tag = "1")] + pub status: ::core::option::Option, + /// If non-empty, contains the information needed to resume reading their + /// associated partitions. + #[prost(message, repeated, tag = "2")] + pub continuation_tokens: ::prost::alloc::vec::Vec, + /// If non-empty, contains the new partitions to start reading from, which + /// are related to but not necessarily identical to the partitions for the + /// above `continuation_tokens`. + #[prost(message, repeated, tag = "3")] + pub new_partitions: ::prost::alloc::vec::Vec, + } + /// The data or control message on the stream. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StreamRecord { + /// A mutation to the partition. + #[prost(message, tag = "1")] + DataChange(DataChange), + /// A periodic heartbeat message. + #[prost(message, tag = "2")] + Heartbeat(Heartbeat), + /// An indication that the stream should be closed. + #[prost(message, tag = "3")] + CloseStream(CloseStream), + } +} +/// Generated client implementations. +pub mod bigtable_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; + use tonic::codegen::*; + /// Service for reading from and writing to existing Bigtable tables. 
+ #[derive(Debug, Clone)] + pub struct BigtableClient { + inner: tonic::client::Grpc, + } + impl BigtableClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl BigtableClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> BigtableClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + BigtableClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Streams back the contents of all requested rows in key order, optionally + /// applying the same Reader filter to each. Depending on their size, + /// rows and cells may be broken up across multiple responses, but + /// atomicity of each row will still be preserved. See the + /// ReadRowsResponse documentation for details. + pub async fn read_rows( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/ReadRows"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.bigtable.v2.Bigtable", "ReadRows")); + self.inner.server_streaming(req, path, codec).await + } + /// Returns a sample of row keys in the table. The returned row keys will + /// delimit contiguous sections of the table of approximately equal size, + /// which can be used to break up the data for distributed tasks like + /// mapreduces. 
+ pub async fn sample_row_keys( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/SampleRowKeys"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "SampleRowKeys", + )); + self.inner.server_streaming(req, path, codec).await + } + /// Mutates a row atomically. Cells already present in the row are left + /// unchanged unless explicitly changed by `mutation`. + pub async fn mutate_row( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/MutateRow"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.bigtable.v2.Bigtable", "MutateRow")); + self.inner.unary(req, path, codec).await + } + /// Mutates multiple rows in a batch. Each individual row is mutated + /// atomically as in MutateRow, but the entire batch is not executed + /// atomically. 
+ pub async fn mutate_rows( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/MutateRows"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.bigtable.v2.Bigtable", "MutateRows")); + self.inner.server_streaming(req, path, codec).await + } + /// Mutates a row atomically based on the output of a predicate Reader filter. + pub async fn check_and_mutate_row( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "CheckAndMutateRow", + )); + self.inner.unary(req, path, codec).await + } + /// Warm up associated instance metadata for this connection. + /// This call is not required but may be useful for connection keep-alive. 
+ pub async fn ping_and_warm( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/PingAndWarm"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "PingAndWarm", + )); + self.inner.unary(req, path, codec).await + } + /// Modifies a row atomically on the server. The method reads the latest + /// existing timestamp and value from the specified columns and writes a new + /// entry based on pre-defined read/modify/write rules. The new value for the + /// timestamp is the greater of the existing timestamp or the current server + /// time. The method returns the new contents of all modified cells. + pub async fn read_modify_write_row( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "ReadModifyWriteRow", + )); + self.inner.unary(req, path, codec).await + } + /// NOTE: This API is intended to be used by Apache Beam BigtableIO. + /// Returns the current list of partitions that make up the table's + /// change stream. The union of partitions will cover the entire keyspace. + /// Partitions can be read with `ReadChangeStream`. 
+ pub async fn generate_initial_change_stream_partitions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "GenerateInitialChangeStreamPartitions", + )); + self.inner.server_streaming(req, path, codec).await + } + /// NOTE: This API is intended to be used by Apache Beam BigtableIO. + /// Reads changes from a table's change stream. Changes will + /// reflect both user-initiated mutations and mutations that are caused by + /// garbage collection. + pub async fn read_change_stream( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/ReadChangeStream", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "ReadChangeStream", + )); + self.inner.server_streaming(req, path, codec).await + } + } +} diff --git a/crates/sui-kvstore/src/bigtable/proto/google.pem b/crates/sui-kvstore/src/bigtable/proto/google.pem new file mode 100644 index 0000000000000..bf10a673253bb --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.pem @@ -0,0 +1,1128 @@ +# Operating CA: DigiCert +# Issuer: CN=Baltimore CyberTrust Root 
O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 
87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: 
a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 
7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB 
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx 
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx 
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD 
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END 
CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. 
OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R 
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub 
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g 
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust 
Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH 
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN 
CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc 
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce 
+MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Note: "GlobalSign Root CA - R7" not added on purpose. It is P-521. + +# Operating CA: GoDaddy +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Operating CA: GoDaddy +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Operating CA: GoDaddy +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Operating CA: GoDaddy +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. 
OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA 
Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# 
Label: "COMODO Certification Authority" +# Serial: 43390818032842818540635488309124489234 +# MD5 Fingerprint: 20:E7:4F:82:C2:7E:94:80:34:82:8A:13:A9:17:1D:97 +# SHA1 Fingerprint EE:86:93:87:FF:FD:83:49:AB:5A:D1:43:22:58:87:89:A4:57:B0:12 +# SHA256 Fingerprint: 1A:0D:20:44:5D:E5:BA:18:62:D1:9E:F8:80:85:8C:BC:E5:01:02:B3:6E:8F:0A:04:0C:3C:69:E7:45:22:FE:6E +-----BEGIN CERTIFICATE----- +MIID0DCCArigAwIBAgIQIKTEf93f4cdTYwcTiHdgEjANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xMTAxMDEwMDAw +MDBaFw0zMDEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo0IwQDAdBgNVHQ4EFgQUC1jli8ZMFTekQKkwqSG+RzZaVv8w +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBAC/JxBwHO89hAgCx2SFRdXIDMLDEFh9sAIsQrK/xR9SuEDwMGvjUk2ysEDd8 +t6aDZK3N3w6HM503sMZ7OHKx8xoOo/lVem0DZgMXlUrxsXrfViEGQo+x06iF3u6X +HWLrp+cxEmbDD6ZLLkGC9/3JG6gbr+48zuOcrigHoSybJMIPIyaDMouGDx8rEkYl +Fo92kANr3ryqImhrjKGsKxE5pttwwn1y6TPn/CbxdFqR5p2ErPioBhlG5qfpqjQi +pKGfeq23sqSaM4hxAjwu1nqyH6LKwN0vEJT9s4yEIHlG1QXUEOTS22RPuFvuG8Ug +R1uUq27UlTMdphVx8fiUylQ5PsE= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 
41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G 
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: 
CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: 
e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ 
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R1 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R1 +# Label: "GTS Root R1" +# Serial: 0203E5936F31B01349886BA217 +# MD5 Fingerprint: 05:FE:D0:BF:71:A8:A3:76:63:DA:01:E0:D8:52:DC:40 +# SHA1 Fingerprint: E5:8C:1C:C4:91:3B:38:63:4B:E9:10:6E:E3:AD:8E:6B:9D:D9:81:4A +# SHA256 Fingerprint: D9:47:43:2A:BD:E7:B7:FA:90:FC:2E:6B:59:10:1B:12:80:E0:E1:C7:E4:E4:0F:A3:C6:88:7F:FF:57:A7:F4:CF +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb 
+C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ +Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R2 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R2 +# Label: "GTS Root R2" +# Serial: 0203E5AEC58D04251AAB1125AA +# MD5 Fingerprint=1E:39:C0:53:E6:1E:29:82:0B:CA:52:55:36:5D:57:DC +# SHA1 Fingerprint=9A:44:49:76:32:DB:DE:FA:D0:BC:FB:5A:7B:17:BD:9E:56:09:24:94 +# SHA256 Fingerprint=8D:25:CD:97:22:9D:BF:70:35:6B:DA:4E:B3:CC:73:40:31:E2:4C:F0:0F:AF:CF:D3:2D:C7:6E:B5:84:1C:7E:A8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu +MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV 
++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R3 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R3 +# Label: "GTS Root R3" +# Serial: 0203E5B882EB20F825276D3D66 +# MD5 Fingerprint: 3E:E7:9D:58:02:94:46:51:94:E5:E0:22:4A:8B:E7:73 +# SHA1 Fingerprint: ED:E5:71:80:2B:C8:92:B9:5B:83:3C:D2:32:68:3F:09:CD:A0:1E:46 +# SHA256 Fingerprint: 34:D8:A7:3E:E2:08:D9:BC:DB:0D:95:65:20:93:4B:4E:40:E6:94:82:59:6E:8B:6F:73:C8:42:6B:01:0A:6F:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz 
+IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R4 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R4 +# Label: "GTS Root R4" +# Serial: 0203E5C068EF631A9C72905052 +# MD5 Fingerprint=43:96:83:77:19:4D:76:B3:9D:65:52:E4:1D:22:A5:E8 +# SHA1 Fingerprint=77:D3:03:67:B5:E0:0C:15:F6:0C:38:61:DF:7C:E1:3B:92:46:4D:47 +# SHA256 Fingerprint=34:9D:FA:40:58:C5:E2:63:12:3B:39:8A:E7:95:57:3C:4E:13:13:C8:3F:E6:8F:93:55:6C:D5:E8:03:1B:3C:7D +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: OU = GlobalSign ECC Root CA - R4, O = GlobalSign, CN = GlobalSign +# Issuer: OU = GlobalSign ECC Root CA - R4, O = GlobalSign, CN = GlobalSign +# Label: "GlobalSign R4" +# 
Serial: 0203E57EF53F93FDA50921B2A6 +# MD5 Fingerprint: 26:29:F8:6D:E1:88:BF:A2:65:7F:AA:C4:CD:0F:7F:FC +# SHA1 Fingerprint: 6B:A0:B0:98:E1:71:EF:5A:AD:FE:48:15:80:77:10:F4:BD:6F:0B:28 +# SHA256 Fingerprint: B0:85:D7:0B:96:4F:19:1A:73:E4:AF:0D:54:AE:7A:0E:07:AA:FD:AF:9B:71:DD:08:62:13:8A:B7:32:5A:24:A2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- diff --git a/crates/sui-kvstore/src/bigtable/proto/google.rpc.rs b/crates/sui-kvstore/src/bigtable/proto/google.rpc.rs new file mode 100644 index 0000000000000..22c01090fc8bb --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.rpc.rs @@ -0,0 +1,24 @@ +/// The `Status` type defines a logical error model that is suitable for +/// different programming environments, including REST APIs and RPC APIs. It is +/// used by \[gRPC\](). Each `Status` message contains +/// three pieces of data: error code, error message, and error details. +/// +/// You can find out more about this error model and how to work with it in the +/// [API Design Guide](). +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Status { + /// The status code, which should be an enum value of + /// \[google.rpc.Code][google.rpc.Code\]. + #[prost(int32, tag = "1")] + pub code: i32, + /// A developer-facing error message, which should be in English. 
Any + /// user-facing error message should be localized and sent in the + /// \[google.rpc.Status.details][google.rpc.Status.details\] field, or localized + /// by the client. + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, + /// A list of messages that carry the error details. There is a common set of + /// message types for APIs to use. + #[prost(message, repeated, tag = "3")] + pub details: ::prost::alloc::vec::Vec<::prost_types::Any>, +} diff --git a/crates/sui-kvstore/src/bigtable/worker.rs b/crates/sui-kvstore/src/bigtable/worker.rs new file mode 100644 index 0000000000000..4698bab17de40 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/worker.rs @@ -0,0 +1,39 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{BigTableClient, KeyValueStoreWriter, TransactionData}; +use async_trait::async_trait; +use sui_data_ingestion_core::Worker; +use sui_types::full_checkpoint_content::CheckpointData; + +pub struct KvWorker { + pub client: BigTableClient, +} + +#[async_trait] +impl Worker for KvWorker { + type Result = (); + + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let mut client = self.client.clone(); + let mut objects = vec![]; + let mut transactions = vec![]; + for transaction in &checkpoint.transactions { + let full_transaction = TransactionData { + transaction: transaction.transaction.clone(), + effects: transaction.effects.clone(), + events: transaction.events.clone(), + checkpoint_number: checkpoint.checkpoint_summary.sequence_number, + timestamp: checkpoint.checkpoint_summary.timestamp_ms, + }; + for object in &transaction.output_objects { + objects.push(object); + } + transactions.push(full_transaction); + } + client.save_objects(&objects).await?; + client.save_transactions(&transactions).await?; + client.save_checkpoint(checkpoint).await?; + Ok(()) + } +} diff --git a/crates/sui-kvstore/src/lib.rs b/crates/sui-kvstore/src/lib.rs new file 
mode 100644 index 0000000000000..caee844739fb9 --- /dev/null +++ b/crates/sui-kvstore/src/lib.rs @@ -0,0 +1,57 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +mod bigtable; +use anyhow::Result; +use async_trait::async_trait; +pub use bigtable::client::BigTableClient; +pub use bigtable::worker::KvWorker; +use sui_types::crypto::AuthorityStrongQuorumSignInfo; +use sui_types::digests::{CheckpointDigest, TransactionDigest}; +use sui_types::effects::{TransactionEffects, TransactionEvents}; +use sui_types::full_checkpoint_content::CheckpointData; +use sui_types::messages_checkpoint::{ + CheckpointContents, CheckpointSequenceNumber, CheckpointSummary, +}; +use sui_types::object::Object; +use sui_types::storage::ObjectKey; +use sui_types::transaction::Transaction; + +#[async_trait] +pub trait KeyValueStoreReader { + async fn get_objects(&mut self, objects: &[ObjectKey]) -> Result>; + async fn get_transactions( + &mut self, + transactions: &[TransactionDigest], + ) -> Result>; + async fn get_checkpoints( + &mut self, + sequence_numbers: &[CheckpointSequenceNumber], + ) -> Result>; + async fn get_checkpoint_by_digest( + &mut self, + digest: CheckpointDigest, + ) -> Result>; +} + +#[async_trait] +pub trait KeyValueStoreWriter { + async fn save_objects(&mut self, objects: &[&Object]) -> Result<()>; + async fn save_transactions(&mut self, transactions: &[TransactionData]) -> Result<()>; + async fn save_checkpoint(&mut self, checkpoint: &CheckpointData) -> Result<()>; +} + +#[derive(Clone, Debug)] +pub struct Checkpoint { + pub summary: CheckpointSummary, + pub contents: CheckpointContents, + pub signatures: AuthorityStrongQuorumSignInfo, +} + +#[derive(Clone, Debug)] +pub struct TransactionData { + pub transaction: Transaction, + pub effects: TransactionEffects, + pub events: Option, + pub checkpoint_number: CheckpointSequenceNumber, + pub timestamp: u64, +} diff --git a/crates/sui-kvstore/src/main.rs b/crates/sui-kvstore/src/main.rs new file 
mode 100644 index 0000000000000..82f9ceac61315 --- /dev/null +++ b/crates/sui-kvstore/src/main.rs @@ -0,0 +1,35 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +use anyhow::Result; +use sui_data_ingestion_core::setup_single_workflow; +use sui_kvstore::BigTableClient; +use sui_kvstore::KvWorker; +use telemetry_subscribers::TelemetryConfig; + +#[tokio::main] +async fn main() -> Result<()> { + let _guard = TelemetryConfig::new().with_env().init(); + let args: Vec = std::env::args().collect(); + if args.len() < 3 { + eprintln!("Please provide BigTable instance id and network name"); + std::process::exit(1); + } + let instance_id = args[1].to_string(); + let network = args[2].to_string(); + assert!( + network == "mainnet" || network == "testnet", + "Invalid network name" + ); + + let client = BigTableClient::new_remote(instance_id, false, None).await?; + let (executor, _term_sender) = setup_single_workflow( + KvWorker { client }, + format!("https://checkpoints.{}.sui.io", network), + 0, + 1, + None, + ) + .await?; + executor.await?; + Ok(()) +} diff --git a/crates/sui-move/src/build.rs b/crates/sui-move/src/build.rs index 1d765e49e8120..42789e2ef9b35 100644 --- a/crates/sui-move/src/build.rs +++ b/crates/sui-move/src/build.rs @@ -88,11 +88,12 @@ impl Build { if generate_struct_layouts { let layout_str = serde_yaml::to_string(&pkg.generate_struct_layouts()).unwrap(); // store under /build//layouts/struct_layouts.yaml - let layout_filename = rerooted_path + let dir_name = rerooted_path .join("build") .join(pkg.package.compiled_package_info.package_name.as_str()) - .join(LAYOUTS_DIR) - .join(STRUCT_LAYOUTS_FILENAME); + .join(LAYOUTS_DIR); + let layout_filename = dir_name.join(STRUCT_LAYOUTS_FILENAME); + fs::create_dir_all(dir_name)?; fs::write(layout_filename, layout_str)? 
} diff --git a/crates/sui-move/src/main.rs b/crates/sui-move/src/main.rs index 49676c20b498a..4d763b8182a33 100644 --- a/crates/sui-move/src/main.rs +++ b/crates/sui-move/src/main.rs @@ -26,10 +26,10 @@ struct Args { #[clap(long = "path", short = 'p', global = true)] pub package_path: Option, /// If true, run the Move bytecode verifier on the bytecode from a successful build - #[clap(long = "path", short = 'p', global = true)] + #[clap(long, global = true)] pub run_bytecode_verifier: bool, /// If true, print build diagnostics to stderr--no printing if false - #[clap(long = "path", short = 'p', global = true)] + #[clap(long, global = true)] pub print_diags_to_stderr: bool, /// Package build options #[clap(flatten)] diff --git a/crates/sui-mvr-indexer/Cargo.toml b/crates/sui-mvr-indexer/Cargo.toml new file mode 100644 index 0000000000000..fd869ae522ddf --- /dev/null +++ b/crates/sui-mvr-indexer/Cargo.toml @@ -0,0 +1,89 @@ +[package] +name = "sui-mvr-indexer" +version.workspace = true +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[dependencies] +anyhow.workspace = true +rand = "0.8.5" +async-trait.workspace = true +axum.workspace = true +backoff.workspace = true +bb8 = "0.8.5" +bcs.workspace = true +bytes.workspace = true +chrono.workspace = true +clap = { workspace = true, features = ["env"] } +csv.workspace = true +diesel = { workspace = true, features = ["chrono", "serde_json"] } +diesel-async = { workspace = true, features = ["bb8", "postgres", "async-connection-wrapper"] } +futures.workspace = true +hex.workspace = true +indicatif.workspace = true +itertools.workspace = true +jsonrpsee.workspace = true +object_store.workspace = true +prometheus.workspace = true +rayon.workspace = true +regex.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true +strum.workspace = true +strum_macros.workspace = true +tap.workspace = true +tempfile.workspace = true +thiserror.workspace = true 
+tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true, features = ["rt"] } +toml.workspace = true +tracing.workspace = true +url.workspace = true + +fastcrypto = { workspace = true, features = ["copy_key"] } +mysten-metrics.workspace = true +simulacrum.workspace = true +sui-config.workspace = true +sui-archival.workspace = true +sui-core.workspace = true +sui-data-ingestion-core.workspace = true +sui-json.workspace = true +sui-json-rpc.workspace = true +sui-json-rpc-api.workspace = true +sui-json-rpc-types.workspace = true +sui-open-rpc.workspace = true +sui-sdk.workspace = true +sui-snapshot.workspace = true +sui-storage.workspace = true +sui-types.workspace = true +sui-package-resolver.workspace = true +sui-protocol-config.workspace = true +telemetry-subscribers.workspace = true +sui-rest-api.workspace = true +sui-transaction-builder.workspace = true +sui-synthetic-ingestion.workspace = true + +move-core-types.workspace = true +move-bytecode-utils.workspace = true +move-binary-format.workspace = true + +diesel_migrations.workspace = true +cached.workspace = true +tokio-stream.workspace = true +dashmap.workspace = true + +[dev-dependencies] +sui-keys.workspace = true +sui-move-build.workspace = true +sui-swarm-config.workspace = true +sui-test-transaction-builder.workspace = true +test-cluster.workspace = true +ntest.workspace = true +criterion.workspace = true + +[[bin]] +name = "sui-mvr-indexer" +path = "src/main.rs" diff --git a/crates/sui-mvr-indexer/README.md b/crates/sui-mvr-indexer/README.md new file mode 100644 index 0000000000000..e579bc76ac3ad --- /dev/null +++ b/crates/sui-mvr-indexer/README.md @@ -0,0 +1,27 @@ +The MVR indexer is a spin-off of the Sui indexer. It has a subset of the full indexer schema, limited to just the tables needed to support MVR. The required tables are `epochs`, `checkpoints`, `packages`, `objects_snapshot`, and `objects_history`. 
This enables the custom indexer to support the `package_by_name` and `type_by_name` queries on GraphQL. + +# Running this indexer +## Start the Postgres Service + +Postgres must run as a service in the background for other tools to communicate with. If it was installed using homebrew, it can be started as a service with: + +``` sh +brew services start postgresql@version +``` + +## DB reset +When making db-related changes, you may find yourself having to run migrations and reset dbs often. The commands below are how you can invoke these actions. +```sh +cargo run --bin sui-mvr-indexer -- --database-url "" reset-database --force +``` + +## Start the indexer +```SH +cargo run --bin sui-mvr-indexer -- --db-url "" indexer --rpc-client-url "https://fullnode.devnet.sui.io:443" --remote-store-url http://lax-suifn-t99eb.devnet.sui.io:9000/rest +``` + +## Migrations + +To add a new table, run `diesel migration generate your_table_name`, and modify the newly created `up.sql` and `down.sql` files. + +You would apply the migration with `diesel migration run`, and run the script in `./scripts/generate_indexer_schema.sh` to update the `schema.rs` file. 
diff --git a/crates/sui-mvr-indexer/diesel.toml b/crates/sui-mvr-indexer/diesel.toml new file mode 100644 index 0000000000000..4430344705b42 --- /dev/null +++ b/crates/sui-mvr-indexer/diesel.toml @@ -0,0 +1,8 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/schema/pg.rs" + +[migrations_directory] +dir = "migrations/pg" diff --git a/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/down.sql b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 0000000000000..a9f526091194b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/up.sql b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 0000000000000..d68895b1a7b7d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/down.sql new file mode 100644 index 0000000000000..57f1de973b1d2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS chain_identifier; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/up.sql new file mode 100644 index 0000000000000..14aa6a098161f --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/up.sql @@ -0,0 +1,26 @@ +-- TODO: modify queries in indexer reader to take advantage of the new indices +CREATE TABLE events +( + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + transaction_digest bytea NOT NULL, + -- array of SuiAddress in bytes. All signers of the transaction. + senders bytea[] NOT NULL, + -- bytes of the entry package ID. 
Notice that the package and module here + -- are the package and module of the function that emitted the event, different + -- from the package and module of the event type. + package bytea NOT NULL, + -- entry module name + module text NOT NULL, + -- StructTag in Display format, fully qualified including type parameters + event_type text NOT NULL, + -- timestamp of the checkpoint when the event was emitted + timestamp_ms BIGINT NOT NULL, + -- bcs of the Event contents (Event.contents) + bcs BYTEA NOT NULL, + PRIMARY KEY(tx_sequence_number, event_sequence_number) +) PARTITION BY RANGE (tx_sequence_number); +CREATE TABLE events_partition_0 PARTITION OF events FOR VALUES FROM (0) TO (MAXVALUE); +CREATE INDEX events_package ON events (package, tx_sequence_number, event_sequence_number); +CREATE INDEX events_package_module ON events (package, module, tx_sequence_number, event_sequence_number); +CREATE INDEX events_event_type ON events (event_type text_pattern_ops, tx_sequence_number, event_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/down.sql new file mode 100644 index 0000000000000..edea7960b79d7 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS objects; +DROP TABLE IF EXISTS objects_history; +DROP TABLE IF EXISTS objects_snapshot; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/up.sql new file mode 100644 index 0000000000000..54854fabf4359 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/up.sql @@ -0,0 +1,95 @@ +CREATE TABLE objects ( + object_id bytea PRIMARY KEY, + object_version bigint NOT NULL, + object_digest bytea NOT NULL, + checkpoint_sequence_number bigint NOT NULL, + -- Immutable/Address/Object/Shared, see types.rs + owner_type
smallint NOT NULL, + -- bytes of SuiAddress/ObjectID of the owner ID. + -- Non-null for objects with an owner: Address or Object + owner_id bytea, + -- Object type + object_type text, + -- Components of the StructTag: package, module, name (name of the struct, without type parameters) + object_type_package bytea, + object_type_module text, + object_type_name text, + -- bcs serialized Object + serialized_object bytea NOT NULL, + -- Non-null when the object is a coin. + -- e.g. `0x2::sui::SUI` + coin_type text, + -- Non-null when the object is a coin. + coin_balance bigint, + -- DynamicField/DynamicObject, see types.rs + -- Non-null when the object is a dynamic field + df_kind smallint, + -- bcs serialized DynamicFieldName + -- Non-null when the object is a dynamic field + df_name bytea, + -- object_type in DynamicFieldInfo. + df_object_type text, + -- object_id in DynamicFieldInfo. + df_object_id bytea +); + +-- OwnerType: 1: Address, 2: Object, see types.rs +CREATE INDEX objects_owner ON objects (owner_type, owner_id) WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL; +CREATE INDEX objects_coin ON objects (owner_id, coin_type) WHERE coin_type IS NOT NULL AND owner_type = 1; +CREATE INDEX objects_checkpoint_sequence_number ON objects (checkpoint_sequence_number); +CREATE INDEX objects_package_module_name_full_type ON objects (object_type_package, object_type_module, object_type_name, object_type); +CREATE INDEX objects_owner_package_module_name_full_type ON objects (owner_id, object_type_package, object_type_module, object_type_name, object_type); + +-- similar to objects table, except that +-- 1. the primary key to store multiple object versions and partitions by checkpoint_sequence_number +-- 2. allow null values in some columns for deleted / wrapped objects +-- 3.
object_status to mark the status of the object, which is either Active or WrappedOrDeleted +CREATE TABLE objects_history ( + object_id bytea NOT NULL, + object_version bigint NOT NULL, + object_status smallint NOT NULL, + object_digest bytea, + checkpoint_sequence_number bigint NOT NULL, + owner_type smallint, + owner_id bytea, + object_type text, + object_type_package bytea, + object_type_module text, + object_type_name text, + serialized_object bytea, + coin_type text, + coin_balance bigint, + df_kind smallint, + df_name bytea, + df_object_type text, + df_object_id bytea, + CONSTRAINT objects_history_pk PRIMARY KEY (checkpoint_sequence_number, object_id, object_version) +) PARTITION BY RANGE (checkpoint_sequence_number); +CREATE INDEX objects_history_id_version ON objects_history (object_id, object_version, checkpoint_sequence_number); +-- init with first partition of the history table +CREATE TABLE objects_history_partition_0 PARTITION OF objects_history FOR VALUES FROM (0) TO (MAXVALUE); + +-- snapshot table by folding objects_history table until certain checkpoint, +-- effectively the snapshot of objects at the same checkpoint, +-- except that it also includes deleted or wrapped objects with the corresponding object_status. 
+CREATE TABLE objects_snapshot ( + object_id bytea PRIMARY KEY, + object_version bigint NOT NULL, + object_status smallint NOT NULL, + object_digest bytea, + checkpoint_sequence_number bigint NOT NULL, + owner_type smallint, + owner_id bytea, + object_type text, + object_type_package bytea, + object_type_module text, + object_type_name text, + serialized_object bytea, + coin_type text, + coin_balance bigint, + df_kind smallint, + df_name bytea, + df_object_type text, + df_object_id bytea +); +CREATE INDEX objects_snapshot_checkpoint_sequence_number ON objects_snapshot (checkpoint_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql new file mode 100644 index 0000000000000..15e9dc9f1cb82 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS transactions; +DROP TABLE IF EXISTS transactions_partition_0; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql new file mode 100644 index 0000000000000..f5404e3610751 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql @@ -0,0 +1,23 @@ +CREATE TABLE transactions ( + tx_sequence_number BIGINT NOT NULL, + transaction_digest bytea NOT NULL, + -- bcs serialized SenderSignedData bytes + raw_transaction bytea NOT NULL, + -- bcs serialized TransactionEffects bytes + raw_effects bytea NOT NULL, + checkpoint_sequence_number BIGINT NOT NULL, + timestamp_ms BIGINT NOT NULL, + -- array of bcs serialized IndexedObjectChange bytes + object_changes bytea[] NOT NULL, + -- array of bcs serialized BalanceChange bytes + balance_changes bytea[] NOT NULL, + -- array of bcs serialized StoredEvent bytes + events bytea[] NOT NULL, + -- 
SystemTransaction/ProgrammableTransaction. See types.rs + transaction_kind smallint NOT NULL, + -- number of successful commands in this transaction, bound by number of commands + -- in a programmable transaction. + success_command_count smallint NOT NULL, + PRIMARY KEY (tx_sequence_number) +) PARTITION BY RANGE (tx_sequence_number); +CREATE TABLE transactions_partition_0 PARTITION OF transactions FOR VALUES FROM (0) TO (MAXVALUE); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/down.sql new file mode 100644 index 0000000000000..fba5a8b5468c6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS checkpoints; +DROP TABLE IF EXISTS pruner_cp_watermark; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql new file mode 100644 index 0000000000000..ddb63b020de70 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql @@ -0,0 +1,36 @@ +CREATE TABLE checkpoints +( + sequence_number BIGINT PRIMARY KEY, + checkpoint_digest BYTEA NOT NULL, + epoch BIGINT NOT NULL, + -- total transactions in the network at the end of this checkpoint (including itself) + network_total_transactions BIGINT NOT NULL, + previous_checkpoint_digest BYTEA, + -- if this checkpoint is the last checkpoint of an epoch + end_of_epoch boolean NOT NULL, + -- array of TransactionDigest in bytes included in this checkpoint + tx_digests BYTEA[] NOT NULL, + timestamp_ms BIGINT NOT NULL, + total_gas_cost BIGINT NOT NULL, + computation_cost BIGINT NOT NULL, + storage_cost BIGINT NOT NULL, + storage_rebate BIGINT NOT NULL, + non_refundable_storage_fee BIGINT NOT NULL, + -- bcs serialized Vec bytes + checkpoint_commitments BYTEA NOT
NULL, + -- bcs serialized AggregateAuthoritySignature bytes + validator_signature BYTEA NOT NULL, + -- bcs serialized EndOfEpochData bytes, if the checkpoint marks end of an epoch + end_of_epoch_data BYTEA, + min_tx_sequence_number BIGINT, + max_tx_sequence_number BIGINT +); + +CREATE INDEX checkpoints_epoch ON checkpoints (epoch, sequence_number); +CREATE INDEX checkpoints_digest ON checkpoints USING HASH (checkpoint_digest); + +CREATE TABLE pruner_cp_watermark ( + checkpoint_sequence_number BIGINT PRIMARY KEY, + min_tx_sequence_number BIGINT NOT NULL, + max_tx_sequence_number BIGINT NOT NULL +) diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/down.sql new file mode 100644 index 0000000000000..ddb05ac2ebe8b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS epochs; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql new file mode 100644 index 0000000000000..5b540121cb849 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql @@ -0,0 +1,47 @@ +CREATE TABLE epochs +( + epoch BIGINT PRIMARY KEY, + first_checkpoint_id BIGINT NOT NULL, + epoch_start_timestamp BIGINT NOT NULL, + reference_gas_price BIGINT NOT NULL, + protocol_version BIGINT NOT NULL, + total_stake BIGINT NOT NULL, + storage_fund_balance BIGINT NOT NULL, + system_state bytea NOT NULL, + -- The following fields are nullable because they are filled in + -- only at the end of an epoch.
+ epoch_total_transactions BIGINT, + last_checkpoint_id BIGINT, + epoch_end_timestamp BIGINT, + -- The following fields are from SystemEpochInfoEvent emitted + -- **after** advancing to the next epoch + storage_fund_reinvestment BIGINT, + storage_charge BIGINT, + storage_rebate BIGINT, + stake_subsidy_amount BIGINT, + total_gas_fees BIGINT, + total_stake_rewards_distributed BIGINT, + leftover_storage_fund_inflow BIGINT, + -- bcs serialized Vec bytes, found in last CheckpointSummary + -- of the epoch + epoch_commitments bytea +); + +-- Table storing the protocol configs for each protocol version. +-- Examples include gas schedule, transaction limits, etc. +CREATE TABLE protocol_configs +( + protocol_version BIGINT NOT NULL, + config_name TEXT NOT NULL, + config_value TEXT, + PRIMARY KEY(protocol_version, config_name) +); + +-- Table storing the feature flags for each protocol version. +CREATE TABLE feature_flags +( + protocol_version BIGINT NOT NULL, + flag_name TEXT NOT NULL, + flag_value BOOLEAN NOT NULL, + PRIMARY KEY(protocol_version, flag_name) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/down.sql new file mode 100644 index 0000000000000..6b473dc06f4a2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS packages; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/up.sql new file mode 100644 index 0000000000000..f08a5549608eb --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/up.sql @@ -0,0 +1,14 @@ +CREATE TABLE packages +( + package_id bytea NOT NULL, + original_id bytea NOT NULL, + package_version bigint NOT NULL, + -- bcs serialized MovePackage + move_package bytea NOT NULL, + 
checkpoint_sequence_number bigint NOT NULL, + CONSTRAINT packages_pkey PRIMARY KEY (package_id, original_id, package_version), + CONSTRAINT packages_unique_package_id UNIQUE (package_id) +); + +CREATE INDEX packages_cp_id_version ON packages (checkpoint_sequence_number, original_id, package_version); +CREATE INDEX packages_id_version_cp ON packages (original_id, package_version, checkpoint_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql new file mode 100644 index 0000000000000..f5604c0db5357 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS tx_senders; +DROP TABLE IF EXISTS tx_recipients; +DROP TABLE IF EXISTS tx_input_objects; +DROP TABLE IF EXISTS tx_changed_objects; +DROP TABLE IF EXISTS tx_calls_pkg; +DROP TABLE IF EXISTS tx_calls_mod; +DROP TABLE IF EXISTS tx_calls_fun; +DROP TABLE IF EXISTS tx_digests; +DROP TABLE IF EXISTS tx_kinds; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql new file mode 100644 index 0000000000000..563df854b97ef --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql @@ -0,0 +1,67 @@ +CREATE TABLE tx_senders ( + tx_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(sender, tx_sequence_number) +); + +CREATE TABLE tx_recipients ( + tx_sequence_number BIGINT NOT NULL, + recipient BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(recipient, tx_sequence_number) +); +CREATE INDEX tx_recipients_sender ON tx_recipients (sender, recipient, tx_sequence_number); + +CREATE TABLE tx_input_objects ( + tx_sequence_number BIGINT NOT NULL, + object_id BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(object_id, tx_sequence_number) +); +CREATE INDEX 
tx_input_objects_sender ON tx_input_objects (sender, object_id, tx_sequence_number); + +CREATE TABLE tx_changed_objects ( + tx_sequence_number BIGINT NOT NULL, + object_id BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(object_id, tx_sequence_number) +); +CREATE INDEX tx_changed_objects_sender ON tx_changed_objects (sender, object_id, tx_sequence_number); + +CREATE TABLE tx_calls_pkg ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number) +); +CREATE INDEX tx_calls_pkg_sender ON tx_calls_pkg (sender, package, tx_sequence_number); + +CREATE TABLE tx_calls_mod ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + module TEXT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number) +); +CREATE INDEX tx_calls_mod_sender ON tx_calls_mod (sender, package, module, tx_sequence_number); + +CREATE TABLE tx_calls_fun ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + module TEXT NOT NULL, + func TEXT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, func, tx_sequence_number) +); +CREATE INDEX tx_calls_fun_sender ON tx_calls_fun (sender, package, module, func, tx_sequence_number); + +CREATE TABLE tx_digests ( + tx_digest BYTEA PRIMARY KEY, + tx_sequence_number BIGINT NOT NULL +); + +CREATE TABLE tx_kinds ( + tx_sequence_number BIGINT NOT NULL, + tx_kind SMALLINT NOT NULL, + PRIMARY KEY(tx_kind, tx_sequence_number) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/down.sql new file mode 100644 index 0000000000000..f73e497c406d3 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS display; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/up.sql 
b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/up.sql new file mode 100644 index 0000000000000..c82918e253c8c --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE display +( + object_type text PRIMARY KEY, + id BYTEA NOT NULL, + version SMALLINT NOT NULL, + bcs BYTEA NOT NULL +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql new file mode 100644 index 0000000000000..bab0311186e1d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql @@ -0,0 +1,2 @@ +DROP PROCEDURE IF EXISTS advance_partition; +DROP PROCEDURE IF EXISTS drop_partition; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql new file mode 100644 index 0000000000000..8ca64b86a7081 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE PROCEDURE advance_partition(table_name TEXT, last_epoch BIGINT, new_epoch BIGINT, last_epoch_start BIGINT, new_epoch_start BIGINT) +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE format('ALTER TABLE %I DETACH PARTITION %I_partition_%s', table_name, table_name, last_epoch); + EXECUTE format('ALTER TABLE %I ATTACH PARTITION %I_partition_%s FOR VALUES FROM (%L) TO (%L)', table_name, table_name, last_epoch, last_epoch_start, new_epoch_start); + EXECUTE format('CREATE TABLE IF NOT EXISTS %I_partition_%s PARTITION OF %I FOR VALUES FROM (%L) TO (MAXVALUE)', table_name, new_epoch, table_name, new_epoch_start); +END; +$$; + +CREATE OR REPLACE PROCEDURE drop_partition(table_name TEXT, epoch BIGINT) +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE format('DROP TABLE IF EXISTS %I_partition_%s', table_name, epoch); +END; +$$; diff --git 
a/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql new file mode 100644 index 0000000000000..7a3a7670f24c2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS objects_version; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql new file mode 100644 index 0000000000000..666e5a2423319 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql @@ -0,0 +1,31 @@ +-- Indexing table mapping an object's ID and version to its checkpoint +-- sequence number, partitioned by the first byte of its Object ID. +CREATE TABLE objects_version ( + object_id bytea NOT NULL, + object_version bigint NOT NULL, + cp_sequence_number bigint NOT NULL, + PRIMARY KEY (object_id, object_version) +) PARTITION BY RANGE (object_id); + +-- Create a partition for each first byte value. +DO $$ +DECLARE + lo text; + hi text; +BEGIN + FOR i IN 0..254 LOOP + lo := LPAD(TO_HEX(i), 2, '0'); + hi := LPAD(TO_HEX(i + 1), 2, '0'); + EXECUTE FORMAT($F$ + CREATE TABLE objects_version_%1$s PARTITION OF objects_version FOR VALUES + FROM (E'\\x%1$s00000000000000000000000000000000000000000000000000000000000000') + TO (E'\\x%2$s00000000000000000000000000000000000000000000000000000000000000'); + $F$, lo, hi); + END LOOP; +END; +$$ LANGUAGE plpgsql; + +-- Special case for the last partition, because of the upper bound. 
+CREATE TABLE objects_version_ff PARTITION OF objects_version FOR VALUES +FROM (E'\\xff00000000000000000000000000000000000000000000000000000000000000') +TO (MAXVALUE); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql new file mode 100644 index 0000000000000..3583887435168 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS event_emit_package; +DROP TABLE IF EXISTS event_emit_module; +DROP TABLE IF EXISTS event_struct_package; +DROP TABLE IF EXISTS event_struct_module; +DROP TABLE IF EXISTS event_struct_name; +DROP TABLE IF EXISTS event_struct_instantiation; +DROP TABLE IF EXISTS event_senders; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql new file mode 100644 index 0000000000000..a89625146a9fd --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql @@ -0,0 +1,74 @@ +CREATE TABLE event_emit_package +( + package BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_emit_package_sender ON event_emit_package (sender, package, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_emit_module +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_emit_module_sender ON event_emit_module (sender, package, module, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_package +( + package BYTEA NOT NULL, + tx_sequence_number BIGINT NOT 
NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_package_sender ON event_struct_package (sender, package, tx_sequence_number, event_sequence_number); + + +CREATE TABLE event_struct_module +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_module_sender ON event_struct_module (sender, package, module, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_name +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + type_name TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, type_name, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_name_sender ON event_struct_name (sender, package, module, type_name, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_instantiation +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + type_instantiation TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, type_instantiation, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_instantiation_sender ON event_struct_instantiation (sender, package, module, type_instantiation, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_senders +( + sender BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + PRIMARY KEY(sender, tx_sequence_number, event_sequence_number) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/down.sql 
b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/down.sql new file mode 100644 index 0000000000000..57f1de973b1d2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS chain_identifier; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/up.sql new file mode 100644 index 0000000000000..205e3a89f63e5 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/up.sql @@ -0,0 +1,6 @@ +-- Your SQL goes here +CREATE TABLE chain_identifier +( + checkpoint_digest BYTEA NOT NULL, + PRIMARY KEY(checkpoint_digest) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/down.sql new file mode 100644 index 0000000000000..619fc41782e68 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS full_objects_history; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/up.sql new file mode 100644 index 0000000000000..1504a21e51658 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/up.sql @@ -0,0 +1,10 @@ +-- This table will store every history version of each object, and never get pruned. +-- Since it can grow indefinitely, we keep minimum amount of information in this table for the purpose +-- of point lookups. 
+CREATE TABLE full_objects_history +( + object_id bytea NOT NULL, + object_version bigint NOT NULL, + serialized_object bytea, + PRIMARY KEY (object_id, object_version) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/down.sql new file mode 100644 index 0000000000000..8490a091b30f4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/down.sql @@ -0,0 +1,15 @@ +ALTER TABLE objects +ADD COLUMN df_name bytea, +ADD COLUMN df_object_type text, +ADD COLUMN df_object_id bytea, +ADD COLUMN checkpoint_sequence_number bigint; + +ALTER TABLE objects_snapshot +ADD COLUMN df_name bytea, +ADD COLUMN df_object_type text, +ADD COLUMN df_object_id bytea; + +ALTER TABLE objects_history +ADD COLUMN df_name bytea, +ADD COLUMN df_object_type text, +ADD COLUMN df_object_id bytea; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/up.sql new file mode 100644 index 0000000000000..4782193c4edc9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/up.sql @@ -0,0 +1,15 @@ +ALTER TABLE objects +DROP COLUMN df_name, +DROP COLUMN df_object_type, +DROP COLUMN df_object_id, +DROP COLUMN checkpoint_sequence_number; + +ALTER TABLE objects_snapshot +DROP COLUMN df_name, +DROP COLUMN df_object_type, +DROP COLUMN df_object_id; + +ALTER TABLE objects_history +DROP COLUMN df_name, +DROP COLUMN df_object_type, +DROP COLUMN df_object_id; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/down.sql new file mode 100644 index 0000000000000..98cc9c0a36ce9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS 
tx_affected_addresses; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/up.sql new file mode 100644 index 0000000000000..4f71554f1394a --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/up.sql @@ -0,0 +1,9 @@ +CREATE TABLE tx_affected_addresses ( + tx_sequence_number BIGINT NOT NULL, + affected BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(affected, tx_sequence_number) +); + +CREATE INDEX tx_affected_addresses_tx_sequence_number_index ON tx_affected_addresses (tx_sequence_number); +CREATE INDEX tx_affected_addresses_sender ON tx_affected_addresses (sender, affected, tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/down.sql new file mode 100644 index 0000000000000..e9de336153f62 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS watermarks; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/up.sql new file mode 100644 index 0000000000000..73bdc70055246 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/up.sql @@ -0,0 +1,34 @@ +CREATE TABLE IF NOT EXISTS watermarks +( + -- The pipeline governed by this watermark, i.e `epochs`, `checkpoints`, + -- `transactions`. + pipeline TEXT PRIMARY KEY, + -- Inclusive upper epoch bound for this entity's data. Committer updates + -- this field. Pruner uses this to determine if pruning is necessary based + -- on the retention policy. + epoch_hi_inclusive BIGINT NOT NULL, + -- Inclusive upper checkpoint bound for this entity's data. Committer + -- updates this field. 
All data of this entity in the checkpoint must be + -- persisted before advancing this watermark. The committer refers to this + -- on disaster recovery to resume writing. + checkpoint_hi_inclusive BIGINT NOT NULL, + -- Exclusive upper transaction sequence number bound for this entity's + -- data. Committer updates this field. + tx_hi BIGINT NOT NULL, + -- Inclusive lower epoch bound for this entity's data. Pruner updates this + -- field when the epoch range exceeds the retention policy. + epoch_lo BIGINT NOT NULL, + -- Inclusive low watermark that the pruner advances. Corresponds to the + -- epoch id, checkpoint sequence number, or tx sequence number depending on + -- the entity. Data before this watermark is considered pruned by a reader. + -- The underlying data may still exist in the db instance. + reader_lo BIGINT NOT NULL, + -- Updated using the database's current timestamp when the pruner sees that + -- some data needs to be dropped. The pruner uses this column to determine + -- whether to prune or wait long enough that all in-flight reads complete + -- or timeout before it acts on an updated watermark. + timestamp_ms BIGINT NOT NULL, + -- Column used by the pruner to track its true progress. Data below this + -- watermark can be immediately pruned. 
+ pruner_hi BIGINT NOT NULL +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/down.sql new file mode 100644 index 0000000000000..9de241cfe20dc --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/down.sql @@ -0,0 +1 @@ +ALTER TABLE epochs DROP COLUMN system_state_summary_json; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/up.sql new file mode 100644 index 0000000000000..4dce2a5a9ecfd --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/up.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ADD COLUMN system_state_summary_json JSONB; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/down.sql new file mode 100644 index 0000000000000..9cfef48c9b5f6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/down.sql @@ -0,0 +1 @@ +DROP TABLE raw_checkpoints; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/up.sql new file mode 100644 index 0000000000000..26791856ff4c9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE raw_checkpoints +( + sequence_number BIGINT PRIMARY KEY, + certified_checkpoint BYTEA NOT NULL, + checkpoint_contents BYTEA NOT NULL +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/down.sql new file mode 100644 index 
0000000000000..b0868da73b0f2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS tx_affected_objects; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/up.sql new file mode 100644 index 0000000000000..146f78b2f5063 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/up.sql @@ -0,0 +1,9 @@ +CREATE TABLE tx_affected_objects ( + tx_sequence_number BIGINT NOT NULL, + affected BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(affected, tx_sequence_number) +); + +CREATE INDEX tx_affected_objects_tx_sequence_number_index ON tx_affected_objects (tx_sequence_number); +CREATE INDEX tx_affected_objects_sender ON tx_affected_objects (sender, affected, tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/down.sql new file mode 100644 index 0000000000000..e6697b4849a4e --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/down.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ALTER COLUMN system_state SET NOT NULL; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/up.sql new file mode 100644 index 0000000000000..a6e7f167c48cc --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/up.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ALTER COLUMN system_state DROP NOT NULL; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/down.sql 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/down.sql new file mode 100644 index 0000000000000..825855f3d700b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_emit_module_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/up.sql new file mode 100644 index 0000000000000..ac69bc8488758 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_emit_module_tx_sequence_number +ON event_emit_module (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/down.sql new file mode 100644 index 0000000000000..30b5fdb6cead6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_emit_package_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/metadata.toml new 
file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/up.sql new file mode 100644 index 0000000000000..231ab598b4766 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_emit_package_tx_sequence_number +ON event_emit_package (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/down.sql new file mode 100644 index 0000000000000..e9b5b0b903ed5 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_senders_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/up.sql new file mode 100644 index 0000000000000..b5883b8a3a4ce --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX 
CONCURRENTLY IF NOT EXISTS + event_senders_tx_sequence_number +ON event_senders (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/down.sql new file mode 100644 index 0000000000000..43b1d27d9ed2e --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_instantiation_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/up.sql new file mode 100644 index 0000000000000..7847620e936f3 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_struct_instantiation_tx_sequence_number +ON event_struct_instantiation (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/down.sql new file mode 100644 index 0000000000000..76606ab0400a6 --- /dev/null +++ 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_module_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/up.sql new file mode 100644 index 0000000000000..748a4095da169 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_struct_module_tx_sequence_number +ON event_struct_module (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/down.sql new file mode 100644 index 0000000000000..944405cf172e3 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_name_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/up.sql new file mode 100644 index 0000000000000..2ca251c139af9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_struct_name_tx_sequence_number +ON event_struct_name (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/down.sql new file mode 100644 index 0000000000000..40fde7e4578b6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_package_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/up.sql new file mode 100644 index 0000000000000..00e88fcfb5273 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX 
CONCURRENTLY IF NOT EXISTS + event_struct_package_tx_sequence_number +ON event_struct_package (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/down.sql new file mode 100644 index 0000000000000..da1519d208f7a --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_calls_fun_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/up.sql new file mode 100644 index 0000000000000..c868f6e55a66f --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_calls_fun_tx_sequence_number +ON tx_calls_fun (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/down.sql new file mode 100644 index 0000000000000..16bf8eb87dbef --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_calls_mod_tx_sequence_number; diff --git 
a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/up.sql new file mode 100644 index 0000000000000..debc152d98f2d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_calls_mod_tx_sequence_number +ON tx_calls_mod (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/down.sql new file mode 100644 index 0000000000000..f6ef795109c61 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_calls_pkg_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/up.sql new file mode 100644 index 
0000000000000..0e6c1f1bf7d30 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_calls_pkg_tx_sequence_number +ON tx_calls_pkg (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/down.sql new file mode 100644 index 0000000000000..1dfcf480b9e86 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_changed_objects_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/up.sql new file mode 100644 index 0000000000000..4ef5b459dbf05 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_changed_objects_tx_sequence_number +ON tx_changed_objects (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/down.sql new file mode 100644 index 0000000000000..d0bd714bc60b2 --- /dev/null +++ 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_digests_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/up.sql new file mode 100644 index 0000000000000..efdff9cbe7a56 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_digests_tx_sequence_number +ON tx_digests (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/down.sql new file mode 100644 index 0000000000000..5061884270f6b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_input_objects_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git 
a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/up.sql new file mode 100644 index 0000000000000..39d01c598a4a5 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_input_objects_tx_sequence_number +ON tx_input_objects (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/down.sql new file mode 100644 index 0000000000000..10f5e96b1ce17 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_kinds_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/up.sql new file mode 100644 index 0000000000000..6227a18f8eb46 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_kinds_tx_sequence_number +ON tx_kinds (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/down.sql 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/down.sql new file mode 100644 index 0000000000000..138ba02741229 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_recipients_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/up.sql new file mode 100644 index 0000000000000..d2294ac2561ed --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_recipients_tx_sequence_number +ON tx_recipients (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/down.sql new file mode 100644 index 0000000000000..c09d44eb4660d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_senders_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/up.sql new file mode 100644 index 0000000000000..f22a06e2bb548 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_senders_tx_sequence_number +ON tx_senders (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/down.sql new file mode 100644 index 0000000000000..89e6710d92f1b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/down.sql @@ -0,0 +1 @@ +ALTER TABLE events DROP COLUMN sender; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/up.sql new file mode 100644 index 0000000000000..7ea312c09453c --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/up.sql @@ -0,0 +1 @@ +ALTER TABLE events ADD COLUMN sender BYTEA; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/down.sql new file mode 100644 index 0000000000000..82659b80658c0 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/down.sql @@ -0,0 +1,7 @@ +-- Drop the new partial indices +DROP INDEX IF EXISTS objects_history_owner_partial; +DROP INDEX IF EXISTS objects_history_coin_owner_partial; +DROP INDEX IF EXISTS objects_history_coin_only_partial; +DROP INDEX IF EXISTS 
objects_history_type_partial; +DROP INDEX IF EXISTS objects_history_package_module_name_full_type_partial; +DROP INDEX IF EXISTS objects_history_owner_package_module_name_full_type_partial; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/up.sql new file mode 100644 index 0000000000000..800f77b3f540b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/up.sql @@ -0,0 +1,18 @@ +-- Create new partial indices with object_status = 0 condition +CREATE INDEX IF NOT EXISTS objects_history_owner_partial ON objects_history (checkpoint_sequence_number, owner_type, owner_id) +WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL AND object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_coin_owner_partial ON objects_history (checkpoint_sequence_number, owner_id, coin_type, object_id) +WHERE coin_type IS NOT NULL AND owner_type = 1 AND object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_coin_only_partial ON objects_history (checkpoint_sequence_number, coin_type, object_id) +WHERE coin_type IS NOT NULL AND object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_type_partial ON objects_history (checkpoint_sequence_number, object_type) +WHERE object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_package_module_name_full_type_partial ON objects_history (checkpoint_sequence_number, object_type_package, object_type_module, object_type_name, object_type) +WHERE object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_owner_package_module_name_full_type_partial ON objects_history (checkpoint_sequence_number, owner_id, object_type_package, object_type_module, object_type_name, object_type) +WHERE object_status = 0; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/down.sql 
b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/down.sql new file mode 100644 index 0000000000000..e088120452e58 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/down.sql @@ -0,0 +1 @@ +ALTER TABLE epochs DROP COLUMN first_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/up.sql new file mode 100644 index 0000000000000..becdb61fe5e83 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/up.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ADD COLUMN first_tx_sequence_number bigint; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql new file mode 100644 index 0000000000000..807c01dca462d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql @@ -0,0 +1,6 @@ +CREATE INDEX IF NOT EXISTS objects_history_owner ON objects_history (checkpoint_sequence_number, owner_type, owner_id) WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_coin_owner ON objects_history (checkpoint_sequence_number, owner_id, coin_type, object_id) WHERE coin_type IS NOT NULL AND owner_type = 1; +CREATE INDEX IF NOT EXISTS objects_history_coin_only ON objects_history (checkpoint_sequence_number, coin_type, object_id) WHERE coin_type IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_type ON objects_history (checkpoint_sequence_number, object_type); +CREATE INDEX IF NOT EXISTS objects_history_package_module_name_full_type ON objects_history (checkpoint_sequence_number, 
object_type_package, object_type_module, object_type_name, object_type); +CREATE INDEX IF NOT EXISTS objects_history_owner_package_module_name_full_type ON objects_history (checkpoint_sequence_number, owner_id, object_type_package, object_type_module, object_type_name, object_type); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql new file mode 100644 index 0000000000000..754e719819f1e --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql @@ -0,0 +1,6 @@ +DROP INDEX IF EXISTS objects_history_owner; +DROP INDEX IF EXISTS objects_history_coin_owner; +DROP INDEX IF EXISTS objects_history_coin_only; +DROP INDEX IF EXISTS objects_history_type; +DROP INDEX IF EXISTS objects_history_package_module_name_full_type; +DROP INDEX IF EXISTS objects_history_owner_package_module_name_full_type; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql new file mode 100644 index 0000000000000..b9fcef3e1f439 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql @@ -0,0 +1,18 @@ +CREATE TABLE tx_senders ( + tx_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(sender, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_senders_tx_sequence_number + ON tx_senders (tx_sequence_number); + +CREATE TABLE tx_recipients ( + tx_sequence_number BIGINT NOT NULL, + recipient BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(recipient, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_recipients_sender + ON tx_recipients (sender, recipient, tx_sequence_number); diff --git 
a/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql new file mode 100644 index 0000000000000..fb259ea615d84 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS tx_senders; +DROP TABLE IF EXISTS tx_recipients; diff --git a/crates/sui-mvr-indexer/src/apis/coin_api.rs b/crates/sui-mvr-indexer/src/apis/coin_api.rs new file mode 100644 index 0000000000000..13a5a2c55a819 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/coin_api.rs @@ -0,0 +1,153 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::indexer_reader::IndexerReader; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::RpcModule; +use sui_json_rpc::coin_api::{parse_to_struct_tag, parse_to_type_tag}; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{cap_page_limit, CoinReadApiServer}; +use sui_json_rpc_types::{Balance, CoinPage, Page, SuiCoinMetadata}; +use sui_open_rpc::Module; +use sui_types::balance::Supply; +use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::gas_coin::{GAS, TOTAL_SUPPLY_MIST}; + +pub(crate) struct CoinReadApi { + inner: IndexerReader, +} + +impl CoinReadApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } +} + +#[async_trait] +impl CoinReadApiServer for CoinReadApi { + async fn get_coins( + &self, + owner: SuiAddress, + coin_type: Option, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(CoinPage::empty()); + } + + // Normalize coin type tag and default to Gas + let coin_type = + parse_to_type_tag(coin_type)?.to_canonical_string(/* with_prefix */ true); + + let cursor = match cursor { + Some(c) => c, + // If cursor is not specified, we need to start from the beginning of the coin 
type, which is the minimal possible ObjectID. + None => ObjectID::ZERO, + }; + let mut results = self + .inner + .get_owned_coins(owner, Some(coin_type), cursor, limit + 1) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.coin_object_id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_all_coins( + &self, + owner: SuiAddress, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(CoinPage::empty()); + } + + let cursor = match cursor { + Some(c) => c, + // If cursor is not specified, we need to start from the beginning of the coin type, which is the minimal possible ObjectID. + None => ObjectID::ZERO, + }; + let mut results = self + .inner + .get_owned_coins(owner, None, cursor, limit + 1) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.coin_object_id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_balance( + &self, + owner: SuiAddress, + coin_type: Option, + ) -> RpcResult { + // Normalize coin type tag and default to Gas + let coin_type = + parse_to_type_tag(coin_type)?.to_canonical_string(/* with_prefix */ true); + + let mut results = self + .inner + .get_coin_balances(owner, Some(coin_type.clone())) + .await?; + if results.is_empty() { + return Ok(Balance::zero(coin_type)); + } + Ok(results.swap_remove(0)) + } + + async fn get_all_balances(&self, owner: SuiAddress) -> RpcResult> { + self.inner + .get_coin_balances(owner, None) + .await + .map_err(Into::into) + } + + async fn get_coin_metadata(&self, coin_type: String) -> RpcResult> { + let coin_struct = parse_to_struct_tag(&coin_type)?; + self.inner + .get_coin_metadata(coin_struct) + .await + .map_err(Into::into) + } + + async fn get_total_supply(&self, coin_type: String) -> RpcResult { + let coin_struct = 
parse_to_struct_tag(&coin_type)?; + if GAS::is_gas(&coin_struct) { + Ok(Supply { + value: TOTAL_SUPPLY_MIST, + }) + } else { + self.inner + .get_total_supply(coin_struct) + .await + .map_err(Into::into) + } + } +} + +impl SuiRpcModule for CoinReadApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::CoinReadApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/extended_api.rs b/crates/sui-mvr-indexer/src/apis/extended_api.rs new file mode 100644 index 0000000000000..9b9827ea2bbe1 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/extended_api.rs @@ -0,0 +1,83 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::indexer_reader::IndexerReader; +use jsonrpsee::{core::RpcResult, RpcModule}; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{validate_limit, ExtendedApiServer, QUERY_MAX_RESULT_LIMIT_CHECKPOINTS}; +use sui_json_rpc_types::{ + CheckpointedObjectID, EpochInfo, EpochPage, Page, QueryObjectsPage, SuiObjectResponseQuery, +}; +use sui_open_rpc::Module; +use sui_types::sui_serde::BigInt; + +pub(crate) struct ExtendedApi { + inner: IndexerReader, +} + +impl ExtendedApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } +} + +#[async_trait::async_trait] +impl ExtendedApiServer for ExtendedApi { + async fn get_epochs( + &self, + cursor: Option>, + limit: Option, + descending_order: Option, + ) -> RpcResult { + let limit = validate_limit(limit, QUERY_MAX_RESULT_LIMIT_CHECKPOINTS)?; + let mut epochs = self + .inner + .get_epochs( + cursor.map(|x| *x), + limit + 1, + descending_order.unwrap_or(false), + ) + .await?; + + let has_next_page = epochs.len() > limit; + epochs.truncate(limit); + let next_cursor = epochs.last().map(|e| e.epoch); + Ok(Page { + data: epochs, + next_cursor: next_cursor.map(|id| id.into()), + has_next_page, + }) + } + + async fn get_current_epoch(&self) -> RpcResult { + let stored_epoch = 
self.inner.get_latest_epoch_info_from_db().await?; + EpochInfo::try_from(stored_epoch).map_err(Into::into) + } + + async fn query_objects( + &self, + _query: SuiObjectResponseQuery, + _cursor: Option, + _limit: Option, + ) -> RpcResult { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn get_total_transactions(&self) -> RpcResult> { + let latest_checkpoint = self.inner.get_latest_checkpoint().await?; + Ok(latest_checkpoint.network_total_transactions.into()) + } +} + +impl SuiRpcModule for ExtendedApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::ExtendedApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/governance_api.rs b/crates/sui-mvr-indexer/src/apis/governance_api.rs new file mode 100644 index 0000000000000..0cb52dc8e3a11 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/governance_api.rs @@ -0,0 +1,295 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use crate::{errors::IndexerError, indexer_reader::IndexerReader}; +use async_trait::async_trait; +use jsonrpsee::{core::RpcResult, RpcModule}; + +use cached::{proc_macro::cached, SizedCache}; +use sui_json_rpc::{governance_api::ValidatorExchangeRates, SuiRpcModule}; +use sui_json_rpc_api::GovernanceReadApiServer; +use sui_json_rpc_types::{ + DelegatedStake, EpochInfo, StakeStatus, SuiCommittee, SuiObjectDataFilter, ValidatorApys, +}; +use sui_open_rpc::Module; +use sui_types::{ + base_types::{MoveObjectType, ObjectID, SuiAddress}, + committee::EpochId, + governance::StakedSui, + sui_serde::BigInt, + sui_system_state::{sui_system_state_summary::SuiSystemStateSummary, PoolTokenExchangeRate}, +}; + +#[derive(Clone)] +pub struct GovernanceReadApi { + inner: IndexerReader, +} + +impl GovernanceReadApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } + + pub async fn get_epoch_info(&self, epoch: Option) -> Result { + match self.inner.get_epoch_info(epoch).await { + Ok(Some(epoch_info)) => Ok(epoch_info), + Ok(None) => Err(IndexerError::InvalidArgumentError(format!( + "Missing epoch {epoch:?}" + ))), + Err(e) => Err(e), + } + } + + async fn get_latest_sui_system_state(&self) -> Result { + self.inner.get_latest_sui_system_state().await + } + + async fn get_stakes_by_ids( + &self, + ids: Vec, + ) -> Result, IndexerError> { + let mut stakes = vec![]; + for stored_object in self.inner.multi_get_objects(ids).await? 
{ + let object = sui_types::object::Object::try_from(stored_object)?; + let stake_object = StakedSui::try_from(&object)?; + stakes.push(stake_object); + } + + self.get_delegated_stakes(stakes).await + } + + async fn get_staked_by_owner( + &self, + owner: SuiAddress, + ) -> Result, IndexerError> { + let mut stakes = vec![]; + for stored_object in self + .inner + .get_owned_objects( + owner, + Some(SuiObjectDataFilter::StructType( + MoveObjectType::staked_sui().into(), + )), + None, + // Allow querying for up to 1000 staked objects + 1000, + ) + .await? + { + let object = sui_types::object::Object::try_from(stored_object)?; + let stake_object = StakedSui::try_from(&object)?; + stakes.push(stake_object); + } + + self.get_delegated_stakes(stakes).await + } + + pub async fn get_delegated_stakes( + &self, + stakes: Vec, + ) -> Result, IndexerError> { + let pools = stakes + .into_iter() + .fold(BTreeMap::<_, Vec<_>>::new(), |mut pools, stake| { + pools.entry(stake.pool_id()).or_default().push(stake); + pools + }); + + let system_state_summary = self.get_latest_sui_system_state().await?; + let epoch = system_state_summary.epoch; + + let rates = exchange_rates(self, &system_state_summary) + .await? 
+ .into_iter() + .map(|rates| (rates.pool_id, rates)) + .collect::>(); + + let mut delegated_stakes = vec![]; + for (pool_id, stakes) in pools { + // Rate table and rate can be null when the pool is not active + let rate_table = rates.get(&pool_id).ok_or_else(|| { + IndexerError::InvalidArgumentError( + "Cannot find rates for staking pool {pool_id}".to_string(), + ) + })?; + let current_rate = rate_table.rates.first().map(|(_, rate)| rate); + + let mut delegations = vec![]; + for stake in stakes { + let status = if epoch >= stake.activation_epoch() { + let estimated_reward = if let Some(current_rate) = current_rate { + let stake_rate = rate_table + .rates + .iter() + .find_map(|(epoch, rate)| { + if *epoch == stake.activation_epoch() { + Some(rate.clone()) + } else { + None + } + }) + .unwrap_or_default(); + let estimated_reward = ((stake_rate.rate() / current_rate.rate()) - 1.0) + * stake.principal() as f64; + std::cmp::max(0, estimated_reward.round() as u64) + } else { + 0 + }; + StakeStatus::Active { estimated_reward } + } else { + StakeStatus::Pending + }; + delegations.push(sui_json_rpc_types::Stake { + staked_sui_id: stake.id(), + // TODO: this might change when we implement warm up period. + stake_request_epoch: stake.activation_epoch().saturating_sub(1), + stake_active_epoch: stake.activation_epoch(), + principal: stake.principal(), + status, + }) + } + delegated_stakes.push(DelegatedStake { + validator_address: rate_table.address, + staking_pool: pool_id, + stakes: delegations, + }) + } + Ok(delegated_stakes) + } +} + +/// Cached exchange rates for validators for the given epoch, the cache size is 1, it will be cleared when the epoch changes. +/// rates are in descending order by epoch. 
+#[cached( + type = "SizedCache>", + create = "{ SizedCache::with_size(1) }", + convert = " { system_state_summary.epoch } ", + result = true +)] +pub async fn exchange_rates( + state: &GovernanceReadApi, + system_state_summary: &SuiSystemStateSummary, +) -> Result, IndexerError> { + // Get validator rate tables + let mut tables = vec![]; + + for validator in &system_state_summary.active_validators { + tables.push(( + validator.sui_address, + validator.staking_pool_id, + validator.exchange_rates_id, + validator.exchange_rates_size, + true, + )); + } + + // Get inactive validator rate tables + for df in state + .inner + .get_dynamic_fields( + system_state_summary.inactive_pools_id, + None, + system_state_summary.inactive_pools_size as usize, + ) + .await? + { + let pool_id: sui_types::id::ID = bcs::from_bytes(&df.bcs_name).map_err(|e| { + sui_types::error::SuiError::ObjectDeserializationError { + error: e.to_string(), + } + })?; + let inactive_pools_id = system_state_summary.inactive_pools_id; + let validator = state + .inner + .get_validator_from_table(inactive_pools_id, pool_id) + .await?; + tables.push(( + validator.sui_address, + validator.staking_pool_id, + validator.exchange_rates_id, + validator.exchange_rates_size, + false, + )); + } + + let mut exchange_rates = vec![]; + // Get exchange rates for each validator + for (address, pool_id, exchange_rates_id, exchange_rates_size, active) in tables { + let mut rates = vec![]; + for df in state + .inner + .get_dynamic_fields_raw(exchange_rates_id, None, exchange_rates_size as usize) + .await? 
+ { + let dynamic_field = df + .to_dynamic_field::() + .ok_or_else(|| sui_types::error::SuiError::ObjectDeserializationError { + error: "dynamic field malformed".to_owned(), + })?; + + rates.push((dynamic_field.name, dynamic_field.value)); + } + + rates.sort_by(|(a, _), (b, _)| a.cmp(b).reverse()); + + exchange_rates.push(ValidatorExchangeRates { + address, + pool_id, + active, + rates, + }); + } + Ok(exchange_rates) +} + +#[async_trait] +impl GovernanceReadApiServer for GovernanceReadApi { + async fn get_stakes_by_ids( + &self, + staked_sui_ids: Vec, + ) -> RpcResult> { + self.get_stakes_by_ids(staked_sui_ids) + .await + .map_err(Into::into) + } + + async fn get_stakes(&self, owner: SuiAddress) -> RpcResult> { + self.get_staked_by_owner(owner).await.map_err(Into::into) + } + + async fn get_committee_info(&self, epoch: Option>) -> RpcResult { + let epoch = self.get_epoch_info(epoch.as_deref().copied()).await?; + Ok(epoch.committee().map_err(IndexerError::from)?.into()) + } + + async fn get_latest_sui_system_state(&self) -> RpcResult { + self.get_latest_sui_system_state().await.map_err(Into::into) + } + + async fn get_reference_gas_price(&self) -> RpcResult> { + let epoch = self.get_epoch_info(None).await?; + Ok(BigInt::from(epoch.reference_gas_price.ok_or_else( + || { + IndexerError::PersistentStorageDataCorruptionError( + "missing latest reference gas price".to_owned(), + ) + }, + )?)) + } + + async fn get_validators_apy(&self) -> RpcResult { + Ok(self.get_validators_apy().await?) + } +} + +impl SuiRpcModule for GovernanceReadApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::GovernanceReadApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/indexer_api.rs b/crates/sui-mvr-indexer/src/apis/indexer_api.rs new file mode 100644 index 0000000000000..7c3dbf0308f16 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/indexer_api.rs @@ -0,0 +1,428 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::types::SubscriptionEmptyError; +use jsonrpsee::types::SubscriptionResult; +use jsonrpsee::{RpcModule, SubscriptionSink}; +use tap::TapFallible; + +use sui_json_rpc::name_service::{Domain, NameRecord, NameServiceConfig, NameServiceError}; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{cap_page_limit, IndexerApiServer}; +use sui_json_rpc_types::{ + DynamicFieldPage, EventFilter, EventPage, ObjectsPage, Page, SuiObjectResponse, + SuiObjectResponseQuery, SuiTransactionBlockResponseQuery, TransactionBlocksPage, + TransactionFilter, +}; +use sui_open_rpc::Module; +use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::digests::TransactionDigest; +use sui_types::dynamic_field::{DynamicFieldName, Field}; +use sui_types::error::SuiObjectResponseError; +use sui_types::event::EventID; +use sui_types::object::ObjectRead; +use sui_types::TypeTag; + +use crate::indexer_reader::IndexerReader; +use crate::IndexerError; + +pub(crate) struct IndexerApi { + inner: IndexerReader, + name_service_config: NameServiceConfig, +} + +impl IndexerApi { + pub fn new(inner: IndexerReader, name_service_config: NameServiceConfig) -> Self { + Self { + inner, + name_service_config, + } + } + + async fn get_owned_objects_internal( + &self, + address: SuiAddress, + query: Option, + cursor: Option, + limit: usize, + ) -> RpcResult { + let SuiObjectResponseQuery { filter, options } = query.unwrap_or_default(); + let options = options.unwrap_or_default(); + let objects = self + .inner + .get_owned_objects(address, filter, cursor, limit + 1) + .await?; + + let mut object_futures = vec![]; + for object in objects { + object_futures.push(tokio::task::spawn( + object.try_into_object_read(self.inner.package_resolver()), + )); + } + let mut objects = futures::future::join_all(object_futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + 
tracing::error!("Error joining object read futures."); + jsonrpsee::core::Error::Custom(format!("Error joining object read futures. {}", e)) + })? + .into_iter() + .collect::, _>>() + .tap_err(|e| tracing::error!("Error converting object to object read: {}", e))?; + let has_next_page = objects.len() > limit; + objects.truncate(limit); + + let next_cursor = objects.last().map(|o_read| o_read.object_id()); + let mut parallel_tasks = vec![]; + for o in objects { + let inner_clone = self.inner.clone(); + let options = options.clone(); + parallel_tasks.push(tokio::task::spawn(async move { + match o { + ObjectRead::NotExists(id) => Ok(SuiObjectResponse::new_with_error( + SuiObjectResponseError::NotExists { object_id: id }, + )), + ObjectRead::Exists(object_ref, o, layout) => { + if options.show_display { + match inner_clone.get_display_fields(&o, &layout).await { + Ok(rendered_fields) => Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, Some(rendered_fields)) + .try_into()?, + )), + Err(e) => Ok(SuiObjectResponse::new( + Some((object_ref, o, layout, options, None).try_into()?), + Some(SuiObjectResponseError::DisplayError { + error: e.to_string(), + }), + )), + } + } else { + Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, None).try_into()?, + )) + } + } + ObjectRead::Deleted((object_id, version, digest)) => Ok( + SuiObjectResponse::new_with_error(SuiObjectResponseError::Deleted { + object_id, + version, + digest, + }), + ), + } + })); + } + let data = futures::future::join_all(parallel_tasks) + .await + .into_iter() + .collect::, _>>() + .map_err(|e: tokio::task::JoinError| anyhow::anyhow!(e))? 
+ .into_iter() + .collect::, anyhow::Error>>()?; + + Ok(Page { + data, + next_cursor, + has_next_page, + }) + } +} + +#[async_trait] +impl IndexerApiServer for IndexerApi { + async fn get_owned_objects( + &self, + address: SuiAddress, + query: Option, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(ObjectsPage::empty()); + } + self.get_owned_objects_internal(address, query, cursor, limit) + .await + } + + async fn query_transaction_blocks( + &self, + query: SuiTransactionBlockResponseQuery, + cursor: Option, + limit: Option, + descending_order: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(TransactionBlocksPage::empty()); + } + let mut results = self + .inner + .query_transaction_blocks( + query.filter, + query.options.unwrap_or_default(), + cursor, + limit + 1, + descending_order.unwrap_or(false), + ) + .await + .map_err(|e: IndexerError| anyhow::anyhow!(e))?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.digest); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn query_events( + &self, + query: EventFilter, + // exclusive cursor if `Some`, otherwise start from the beginning + cursor: Option, + limit: Option, + descending_order: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(EventPage::empty()); + } + let descending_order = descending_order.unwrap_or(false); + let mut results = self + .inner + .query_events(query, cursor, limit + 1, descending_order) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_dynamic_fields( + &self, + parent_object_id: ObjectID, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = 
cap_page_limit(limit); + if limit == 0 { + return Ok(DynamicFieldPage::empty()); + } + let mut results = self + .inner + .get_dynamic_fields(parent_object_id, cursor, limit + 1) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.object_id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_dynamic_field_object( + &self, + parent_object_id: ObjectID, + name: DynamicFieldName, + ) -> RpcResult { + let name_bcs_value = self.inner.bcs_name_from_dynamic_field_name(&name).await?; + // Try as Dynamic Field + let id = sui_types::dynamic_field::derive_dynamic_field_id( + parent_object_id, + &name.type_, + &name_bcs_value, + ) + .expect("deriving dynamic field id can't fail"); + + let options = sui_json_rpc_types::SuiObjectDataOptions::full_content(); + match self.inner.get_object_read(id).await? { + sui_types::object::ObjectRead::NotExists(_) + | sui_types::object::ObjectRead::Deleted(_) => {} + sui_types::object::ObjectRead::Exists(object_ref, o, layout) => { + return Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, None).try_into()?, + )); + } + } + + // Try as Dynamic Field Object + let dynamic_object_field_struct = + sui_types::dynamic_field::DynamicFieldInfo::dynamic_object_field_wrapper(name.type_); + let dynamic_object_field_type = TypeTag::Struct(Box::new(dynamic_object_field_struct)); + let dynamic_object_field_id = sui_types::dynamic_field::derive_dynamic_field_id( + parent_object_id, + &dynamic_object_field_type, + &name_bcs_value, + ) + .expect("deriving dynamic field id can't fail"); + match self.inner.get_object_read(dynamic_object_field_id).await? 
{ + sui_types::object::ObjectRead::NotExists(_) + | sui_types::object::ObjectRead::Deleted(_) => {} + sui_types::object::ObjectRead::Exists(object_ref, o, layout) => { + return Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, None).try_into()?, + )); + } + } + + Ok(SuiObjectResponse::new_with_error( + sui_types::error::SuiObjectResponseError::DynamicFieldNotFound { parent_object_id }, + )) + } + + fn subscribe_event(&self, _sink: SubscriptionSink, _filter: EventFilter) -> SubscriptionResult { + Err(SubscriptionEmptyError) + } + + fn subscribe_transaction( + &self, + _sink: SubscriptionSink, + _filter: TransactionFilter, + ) -> SubscriptionResult { + Err(SubscriptionEmptyError) + } + + async fn resolve_name_service_address(&self, name: String) -> RpcResult> { + let domain: Domain = name.parse().map_err(IndexerError::NameServiceError)?; + let parent_domain = domain.parent(); + + // construct the record ids to lookup. + let record_id = self.name_service_config.record_field_id(&domain); + let parent_record_id = self.name_service_config.record_field_id(&parent_domain); + + // get latest timestamp to check expiration. + let current_timestamp = self.inner.get_latest_checkpoint().await?.timestamp_ms; + + // gather the requests to fetch in the multi_get_objs. + let mut requests = vec![record_id]; + + // we only want to fetch both the child and the parent if the domain is a subdomain. + if domain.is_subdomain() { + requests.push(parent_record_id); + } + + // fetch both parent (if subdomain) and child records in a single get query. + // We do this as we do not know if the subdomain is a node or leaf record. + let domains: Vec<_> = self + .inner + .multi_get_objects(requests) + .await? + .into_iter() + .map(|o| sui_types::object::Object::try_from(o).ok()) + .collect(); + + // Find the requested object in the list of domains. + // We need to loop (in an array of maximum size 2), as we cannot guarantee + // the order of the returned objects. 
+ let Some(requested_object) = domains + .iter() + .find(|o| o.as_ref().is_some_and(|o| o.id() == record_id)) + .and_then(|o| o.clone()) + else { + return Ok(None); + }; + + let name_record: NameRecord = requested_object.try_into().map_err(IndexerError::from)?; + + // Handle NODE record case. + if !name_record.is_leaf_record() { + return if !name_record.is_node_expired(current_timestamp) { + Ok(name_record.target_address) + } else { + Err(IndexerError::NameServiceError(NameServiceError::NameExpired).into()) + }; + } + + // repeat the process for the parent object too. + let Some(requested_object) = domains + .iter() + .find(|o| o.as_ref().is_some_and(|o| o.id() == parent_record_id)) + .and_then(|o| o.clone()) + else { + return Err(IndexerError::NameServiceError(NameServiceError::NameExpired).into()); + }; + + let parent_record: NameRecord = requested_object.try_into().map_err(IndexerError::from)?; + + if parent_record.is_valid_leaf_parent(&name_record) + && !parent_record.is_node_expired(current_timestamp) + { + Ok(name_record.target_address) + } else { + Err(IndexerError::NameServiceError(NameServiceError::NameExpired).into()) + } + } + + async fn resolve_name_service_names( + &self, + address: SuiAddress, + _cursor: Option, + _limit: Option, + ) -> RpcResult> { + let reverse_record_id = self + .name_service_config + .reverse_record_field_id(address.as_ref()); + + let mut result = Page { + data: vec![], + next_cursor: None, + has_next_page: false, + }; + + let Some(field_reverse_record_object) = + self.inner.get_object(&reverse_record_id, None).await? + else { + return Ok(result); + }; + + let domain = field_reverse_record_object + .to_rust::>() + .ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Malformed Object {reverse_record_id}" + )) + })? + .value; + + let domain_name = domain.to_string(); + + // Tries to resolve the name, to verify it is not expired. 
+ let resolved_address = self + .resolve_name_service_address(domain_name.clone()) + .await?; + + // If we do not have a resolved address, we do not include the domain in the result. + if resolved_address.is_none() { + return Ok(result); + } + + // We push the domain name to the result and return it. + result.data.push(domain_name); + + Ok(result) + } +} + +impl SuiRpcModule for IndexerApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::IndexerApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/mod.rs b/crates/sui-mvr-indexer/src/apis/mod.rs new file mode 100644 index 0000000000000..e797c7ecdc239 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/mod.rs @@ -0,0 +1,20 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) use coin_api::CoinReadApi; +pub(crate) use extended_api::ExtendedApi; +pub use governance_api::GovernanceReadApi; +pub(crate) use indexer_api::IndexerApi; +pub(crate) use move_utils::MoveUtilsApi; +pub(crate) use read_api::ReadApi; +pub(crate) use transaction_builder_api::TransactionBuilderApi; +pub(crate) use write_api::WriteApi; + +mod coin_api; +mod extended_api; +pub mod governance_api; +mod indexer_api; +mod move_utils; +pub mod read_api; +mod transaction_builder_api; +mod write_api; diff --git a/crates/sui-mvr-indexer/src/apis/move_utils.rs b/crates/sui-mvr-indexer/src/apis/move_utils.rs new file mode 100644 index 0000000000000..2bb75b9831dbf --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/move_utils.rs @@ -0,0 +1,143 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::RpcModule; +use move_binary_format::normalized::Module as NormalizedModule; + +use sui_json_rpc::error::SuiRpcInputError; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::MoveUtilsServer; +use sui_json_rpc_types::ObjectValueKind; +use sui_json_rpc_types::SuiMoveNormalizedType; +use sui_json_rpc_types::{ + MoveFunctionArgType, SuiMoveNormalizedFunction, SuiMoveNormalizedModule, + SuiMoveNormalizedStruct, +}; +use sui_open_rpc::Module; +use sui_types::base_types::ObjectID; + +use crate::indexer_reader::IndexerReader; + +pub struct MoveUtilsApi { + inner: IndexerReader, +} + +impl MoveUtilsApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } +} + +#[async_trait] +impl MoveUtilsServer for MoveUtilsApi { + async fn get_normalized_move_modules_by_package( + &self, + package_id: ObjectID, + ) -> RpcResult> { + let resolver_modules = self.inner.get_package(package_id).await?.modules().clone(); + let sui_normalized_modules = resolver_modules + .into_iter() + .map(|(k, v)| (k, NormalizedModule::new(v.bytecode()).into())) + .collect::>(); + Ok(sui_normalized_modules) + } + + async fn get_normalized_move_module( + &self, + package: ObjectID, + module_name: String, + ) -> RpcResult { + let mut modules = self.get_normalized_move_modules_by_package(package).await?; + let module = modules.remove(&module_name).ok_or_else(|| { + SuiRpcInputError::GenericNotFound(format!( + "No module was found with name {module_name}", + )) + })?; + Ok(module) + } + + async fn get_normalized_move_struct( + &self, + package: ObjectID, + module_name: String, + struct_name: String, + ) -> RpcResult { + let mut module = self + .get_normalized_move_module(package, module_name) + .await?; + module + .structs + .remove(&struct_name) + .ok_or_else(|| { + SuiRpcInputError::GenericNotFound(format!( + "No struct was found with 
struct name {struct_name}" + )) + }) + .map_err(Into::into) + } + + async fn get_normalized_move_function( + &self, + package: ObjectID, + module_name: String, + function_name: String, + ) -> RpcResult { + let mut module = self + .get_normalized_move_module(package, module_name) + .await?; + module + .exposed_functions + .remove(&function_name) + .ok_or_else(|| { + SuiRpcInputError::GenericNotFound(format!( + "No function was found with function name {function_name}", + )) + }) + .map_err(Into::into) + } + + async fn get_move_function_arg_types( + &self, + package: ObjectID, + module: String, + function: String, + ) -> RpcResult> { + let function = self + .get_normalized_move_function(package, module, function) + .await?; + let args = function + .parameters + .iter() + .map(|p| match p { + SuiMoveNormalizedType::Struct { .. } => { + MoveFunctionArgType::Object(ObjectValueKind::ByValue) + } + SuiMoveNormalizedType::Vector(_) => { + MoveFunctionArgType::Object(ObjectValueKind::ByValue) + } + SuiMoveNormalizedType::Reference(_) => { + MoveFunctionArgType::Object(ObjectValueKind::ByImmutableReference) + } + SuiMoveNormalizedType::MutableReference(_) => { + MoveFunctionArgType::Object(ObjectValueKind::ByMutableReference) + } + _ => MoveFunctionArgType::Pure, + }) + .collect::>(); + Ok(args) + } +} + +impl SuiRpcModule for MoveUtilsApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::MoveUtilsOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/read_api.rs b/crates/sui-mvr-indexer/src/apis/read_api.rs new file mode 100644 index 0000000000000..3e3de5343869d --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/read_api.rs @@ -0,0 +1,305 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::RpcModule; +use sui_json_rpc::error::SuiRpcInputError; +use sui_types::error::SuiObjectResponseError; +use sui_types::object::ObjectRead; + +use crate::errors::IndexerError; +use crate::indexer_reader::IndexerReader; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{ReadApiServer, QUERY_MAX_RESULT_LIMIT}; +use sui_json_rpc_types::{ + Checkpoint, CheckpointId, CheckpointPage, ProtocolConfigResponse, SuiEvent, + SuiGetPastObjectRequest, SuiObjectDataOptions, SuiObjectResponse, SuiPastObjectResponse, + SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, +}; +use sui_open_rpc::Module; +use sui_protocol_config::{ProtocolConfig, ProtocolVersion}; +use sui_types::base_types::{ObjectID, SequenceNumber}; +use sui_types::digests::{ChainIdentifier, TransactionDigest}; +use sui_types::sui_serde::BigInt; + +#[derive(Clone)] +pub struct ReadApi { + inner: IndexerReader, +} + +impl ReadApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } + + async fn get_checkpoint(&self, id: CheckpointId) -> Result { + match self.inner.get_checkpoint(id).await { + Ok(Some(epoch_info)) => Ok(epoch_info), + Ok(None) => Err(IndexerError::InvalidArgumentError(format!( + "Checkpoint {id:?} not found" + ))), + Err(e) => Err(e), + } + } + + async fn get_latest_checkpoint(&self) -> Result { + self.inner.get_latest_checkpoint().await + } + + async fn get_chain_identifier(&self) -> RpcResult { + let genesis_checkpoint = self.get_checkpoint(CheckpointId::SequenceNumber(0)).await?; + Ok(ChainIdentifier::from(genesis_checkpoint.digest)) + } +} + +#[async_trait] +impl ReadApiServer for ReadApi { + async fn get_object( + &self, + object_id: ObjectID, + options: Option, + ) -> RpcResult { + let object_read = self.inner.get_object_read(object_id).await?; + object_read_to_object_response(&self.inner, object_read, options.unwrap_or_default()).await + } + + 
// For ease of implementation we just forward to the single object query, although in the + // future we may want to improve the performance by having a more naitive multi_get + // functionality + async fn multi_get_objects( + &self, + object_ids: Vec, + options: Option, + ) -> RpcResult> { + if object_ids.len() > *QUERY_MAX_RESULT_LIMIT { + return Err( + SuiRpcInputError::SizeLimitExceeded(QUERY_MAX_RESULT_LIMIT.to_string()).into(), + ); + } + let stored_objects = self.inner.multi_get_objects(object_ids).await?; + let options = options.unwrap_or_default(); + + let futures = stored_objects.into_iter().map(|stored_object| async { + let object_read = stored_object + .try_into_object_read(self.inner.package_resolver()) + .await?; + object_read_to_object_response(&self.inner, object_read, options.clone()).await + }); + + let mut objects = futures::future::try_join_all(futures).await?; + // Resort the objects by the order of the object id. + objects.sort_by_key(|obj| obj.data.as_ref().map(|data| data.object_id)); + + Ok(objects) + } + + async fn get_total_transaction_blocks(&self) -> RpcResult> { + let checkpoint = self.get_latest_checkpoint().await?; + Ok(BigInt::from(checkpoint.network_total_transactions)) + } + + async fn get_transaction_block( + &self, + digest: TransactionDigest, + options: Option, + ) -> RpcResult { + let mut txn = self + .multi_get_transaction_blocks(vec![digest], options) + .await?; + + let txn = txn.pop().ok_or_else(|| { + IndexerError::InvalidArgumentError(format!("Transaction {digest} not found")) + })?; + + Ok(txn) + } + + async fn multi_get_transaction_blocks( + &self, + digests: Vec, + options: Option, + ) -> RpcResult> { + let num_digests = digests.len(); + if num_digests > *QUERY_MAX_RESULT_LIMIT { + Err(SuiRpcInputError::SizeLimitExceeded( + QUERY_MAX_RESULT_LIMIT.to_string(), + ))? 
+ } + + let options = options.unwrap_or_default(); + let txns = self + .inner + .multi_get_transaction_block_response_in_blocking_task(digests, options) + .await?; + + Ok(txns) + } + + async fn try_get_past_object( + &self, + _object_id: ObjectID, + _version: SequenceNumber, + _options: Option, + ) -> RpcResult { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn try_get_object_before_version( + &self, + _: ObjectID, + _: SequenceNumber, + ) -> RpcResult { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn try_multi_get_past_objects( + &self, + _past_objects: Vec, + _options: Option, + ) -> RpcResult> { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn get_latest_checkpoint_sequence_number(&self) -> RpcResult> { + let checkpoint = self.get_latest_checkpoint().await?; + Ok(BigInt::from(checkpoint.sequence_number)) + } + + async fn get_checkpoint(&self, id: CheckpointId) -> RpcResult { + self.get_checkpoint(id).await.map_err(Into::into) + } + + async fn get_checkpoints( + &self, + cursor: Option>, + limit: Option, + descending_order: bool, + ) -> RpcResult { + let cursor = cursor.map(BigInt::into_inner); + let limit = sui_json_rpc_api::validate_limit( + limit, + sui_json_rpc_api::QUERY_MAX_RESULT_LIMIT_CHECKPOINTS, + ) + .map_err(SuiRpcInputError::from)?; + + let mut checkpoints = self + .inner + .get_checkpoints(cursor, limit + 1, descending_order) + .await?; + + let has_next_page = checkpoints.len() > limit; + checkpoints.truncate(limit); + + let next_cursor = checkpoints.last().map(|d| d.sequence_number.into()); + + Ok(CheckpointPage { + data: checkpoints, + next_cursor, + has_next_page, + }) + } + + async fn get_checkpoints_deprecated_limit( + &self, + cursor: Option>, + limit: Option>, + 
descending_order: bool, + ) -> RpcResult { + self.get_checkpoints( + cursor, + limit.map(|l| l.into_inner() as usize), + descending_order, + ) + .await + } + + async fn get_events(&self, transaction_digest: TransactionDigest) -> RpcResult> { + self.inner + .get_transaction_events(transaction_digest) + .await + .map_err(Into::into) + } + + async fn get_protocol_config( + &self, + version: Option>, + ) -> RpcResult { + let chain = self.get_chain_identifier().await?.chain(); + let version = if let Some(version) = version { + (*version).into() + } else { + let latest_epoch = self.inner.get_latest_epoch_info_from_db().await?; + (latest_epoch.protocol_version as u64).into() + }; + + ProtocolConfig::get_for_version_if_supported(version, chain) + .ok_or(SuiRpcInputError::ProtocolVersionUnsupported( + ProtocolVersion::MIN.as_u64(), + ProtocolVersion::MAX.as_u64(), + )) + .map_err(Into::into) + .map(ProtocolConfigResponse::from) + } + + async fn get_chain_identifier(&self) -> RpcResult { + self.get_chain_identifier().await.map(|id| id.to_string()) + } +} + +impl SuiRpcModule for ReadApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::ReadApiOpenRpc::module_doc() + } +} + +async fn object_read_to_object_response( + indexer_reader: &IndexerReader, + object_read: ObjectRead, + options: SuiObjectDataOptions, +) -> RpcResult { + match object_read { + ObjectRead::NotExists(id) => Ok(SuiObjectResponse::new_with_error( + SuiObjectResponseError::NotExists { object_id: id }, + )), + ObjectRead::Exists(object_ref, o, layout) => { + let mut display_fields = None; + if options.show_display { + match indexer_reader.get_display_fields(&o, &layout).await { + Ok(rendered_fields) => display_fields = Some(rendered_fields), + Err(e) => { + return Ok(SuiObjectResponse::new( + Some((object_ref, o, layout, options, None).try_into()?), + Some(SuiObjectResponseError::DisplayError { + error: e.to_string(), + }), + )); + } + } + } + 
Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, display_fields).try_into()?, + )) + } + ObjectRead::Deleted((object_id, version, digest)) => Ok(SuiObjectResponse::new_with_error( + SuiObjectResponseError::Deleted { + object_id, + version, + digest, + }, + )), + } +} diff --git a/crates/sui-mvr-indexer/src/apis/transaction_builder_api.rs b/crates/sui-mvr-indexer/src/apis/transaction_builder_api.rs new file mode 100644 index 0000000000000..c98ce9c371c10 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/transaction_builder_api.rs @@ -0,0 +1,70 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use super::governance_api::GovernanceReadApi; +use crate::indexer_reader::IndexerReader; +use async_trait::async_trait; +use move_core_types::language_storage::StructTag; +use sui_json_rpc::transaction_builder_api::TransactionBuilderApi as SuiTransactionBuilderApi; +use sui_json_rpc_types::{SuiObjectDataFilter, SuiObjectDataOptions, SuiObjectResponse}; +use sui_transaction_builder::DataReader; +use sui_types::base_types::{ObjectID, ObjectInfo, SuiAddress}; +use sui_types::object::Object; + +pub(crate) struct TransactionBuilderApi { + inner: IndexerReader, +} + +impl TransactionBuilderApi { + #[allow(clippy::new_ret_no_self)] + pub fn new(inner: IndexerReader) -> SuiTransactionBuilderApi { + SuiTransactionBuilderApi::new_with_data_reader(std::sync::Arc::new(Self { inner })) + } +} + +#[async_trait] +impl DataReader for TransactionBuilderApi { + async fn get_owned_objects( + &self, + address: SuiAddress, + object_type: StructTag, + ) -> Result, anyhow::Error> { + let stored_objects = self + .inner + .get_owned_objects( + address, + Some(SuiObjectDataFilter::StructType(object_type)), + None, + 50, // Limit the number of objects returned to 50 + ) + .await?; + + stored_objects + .into_iter() + .map(|object| { + let object = Object::try_from(object)?; + let object_ref = object.compute_object_reference(); + let info = 
ObjectInfo::new(&object_ref, &object); + Ok(info) + }) + .collect::, _>>() + } + + async fn get_object_with_options( + &self, + object_id: ObjectID, + options: SuiObjectDataOptions, + ) -> Result { + let result = self.inner.get_object_read(object_id).await?; + Ok((result, options).try_into()?) + } + + async fn get_reference_gas_price(&self) -> Result { + let epoch_info = GovernanceReadApi::new(self.inner.clone()) + .get_epoch_info(None) + .await?; + Ok(epoch_info + .reference_gas_price + .ok_or_else(|| anyhow::anyhow!("missing latest reference_gas_price"))?) + } +} diff --git a/crates/sui-mvr-indexer/src/apis/write_api.rs b/crates/sui-mvr-indexer/src/apis/write_api.rs new file mode 100644 index 0000000000000..71a54c356635b --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/write_api.rs @@ -0,0 +1,90 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use fastcrypto::encoding::Base64; +use jsonrpsee::core::RpcResult; +use jsonrpsee::http_client::HttpClient; +use jsonrpsee::RpcModule; + +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{WriteApiClient, WriteApiServer}; +use sui_json_rpc_types::{ + DevInspectArgs, DevInspectResults, DryRunTransactionBlockResponse, SuiTransactionBlockResponse, + SuiTransactionBlockResponseOptions, +}; +use sui_open_rpc::Module; +use sui_types::base_types::SuiAddress; +use sui_types::quorum_driver_types::ExecuteTransactionRequestType; +use sui_types::sui_serde::BigInt; + +use crate::types::SuiTransactionBlockResponseWithOptions; + +pub(crate) struct WriteApi { + fullnode: HttpClient, +} + +impl WriteApi { + pub fn new(fullnode_client: HttpClient) -> Self { + Self { + fullnode: fullnode_client, + } + } +} + +#[async_trait] +impl WriteApiServer for WriteApi { + async fn execute_transaction_block( + &self, + tx_bytes: Base64, + signatures: Vec, + options: Option, + request_type: Option, + ) -> RpcResult { + let sui_transaction_response = self + .fullnode + 
.execute_transaction_block(tx_bytes, signatures, options.clone(), request_type) + .await?; + Ok(SuiTransactionBlockResponseWithOptions { + response: sui_transaction_response, + options: options.unwrap_or_default(), + } + .into()) + } + + async fn dev_inspect_transaction_block( + &self, + sender_address: SuiAddress, + tx_bytes: Base64, + gas_price: Option>, + epoch: Option>, + additional_args: Option, + ) -> RpcResult { + self.fullnode + .dev_inspect_transaction_block( + sender_address, + tx_bytes, + gas_price, + epoch, + additional_args, + ) + .await + } + + async fn dry_run_transaction_block( + &self, + tx_bytes: Base64, + ) -> RpcResult { + self.fullnode.dry_run_transaction_block(tx_bytes).await + } +} + +impl SuiRpcModule for WriteApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::WriteApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs new file mode 100644 index 0000000000000..8273bcdaa3b7b --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use sui_types::full_checkpoint_content::CheckpointData; +use tracing::info; + +/// Dummy backfill that only prints the sequence number and checkpoint of the digest. Intended to +/// benchmark backfill performance. 
+pub struct DigestBackfill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for DigestBackfill { + type ProcessedType = (); + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + let cp = checkpoint.checkpoint_summary.sequence_number; + let digest = checkpoint.checkpoint_summary.content_digest; + info!("{cp}: {digest}"); + + vec![] + } + + async fn commit_chunk(_pool: ConnectionPool, _processed_data: Vec) {} +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/ingestion_backfill_task.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/ingestion_backfill_task.rs new file mode 100644 index 0000000000000..2702f755c0842 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/ingestion_backfill_task.rs @@ -0,0 +1,98 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::backfill::backfill_task::BackfillTask; +use crate::database::ConnectionPool; +use dashmap::DashMap; +use std::ops::RangeInclusive; +use std::sync::Arc; +use sui_data_ingestion_core::{setup_single_workflow, ReaderOptions, Worker}; +use sui_types::full_checkpoint_content::CheckpointData; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use tokio::sync::Notify; + +pub struct IngestionBackfillTask { + ready_checkpoints: Arc>>, + notify: Arc, + _exit_sender: tokio::sync::oneshot::Sender<()>, +} + +impl IngestionBackfillTask { + pub async fn new(remote_store_url: String, start_checkpoint: CheckpointSequenceNumber) -> Self { + let ready_checkpoints = Arc::new(DashMap::new()); + let notify = Arc::new(Notify::new()); + let adapter: Adapter = Adapter { + ready_checkpoints: ready_checkpoints.clone(), + notify: notify.clone(), + }; + let reader_options = ReaderOptions { + batch_size: 200, + ..Default::default() + }; + let (executor, _exit_sender) = 
setup_single_workflow( + adapter, + remote_store_url, + start_checkpoint, + 200, + Some(reader_options), + ) + .await + .unwrap(); + tokio::task::spawn(async move { + executor.await.unwrap(); + }); + Self { + ready_checkpoints, + notify, + _exit_sender, + } + } +} + +pub struct Adapter { + ready_checkpoints: Arc>>, + notify: Arc, +} + +#[async_trait::async_trait] +impl Worker for Adapter { + type Result = (); + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let processed = T::process_checkpoint(checkpoint); + self.ready_checkpoints + .insert(checkpoint.checkpoint_summary.sequence_number, processed); + self.notify.notify_waiters(); + Ok(()) + } +} + +#[async_trait::async_trait] +impl BackfillTask for IngestionBackfillTask { + async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive) { + let mut processed_data = vec![]; + let mut start = *range.start(); + let end = *range.end(); + loop { + while start <= end { + if let Some((_, processed)) = self + .ready_checkpoints + .remove(&(start as CheckpointSequenceNumber)) + { + processed_data.extend(processed); + start += 1; + } else { + break; + } + } + if start <= end { + self.notify.notified().await; + } else { + break; + } + } + // TODO: Limit the size of each chunk. + // postgres has a parameter limit of 65535, meaning that row_count * col_count <= 65536. + T::commit_chunk(pool.clone(), processed_data).await; + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs new file mode 100644 index 0000000000000..935ba5562bd9c --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs @@ -0,0 +1,18 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod digest_task; +pub(crate) mod ingestion_backfill_task; +pub(crate) mod raw_checkpoints; +pub(crate) mod tx_affected_objects; + +use crate::database::ConnectionPool; +use sui_types::full_checkpoint_content::CheckpointData; + +#[async_trait::async_trait] +pub trait IngestionBackfillTrait: Send + Sync { + type ProcessedType: Send + Sync; + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec; + async fn commit_chunk(pool: ConnectionPool, processed_data: Vec); +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/raw_checkpoints.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/raw_checkpoints.rs new file mode 100644 index 0000000000000..aec4f0263ee80 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/raw_checkpoints.rs @@ -0,0 +1,34 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use crate::models::raw_checkpoints::StoredRawCheckpoint; +use crate::schema::raw_checkpoints::dsl::raw_checkpoints; +use diesel_async::RunQueryDsl; +use sui_types::full_checkpoint_content::CheckpointData; + +pub struct RawCheckpointsBackFill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for RawCheckpointsBackFill { + type ProcessedType = StoredRawCheckpoint; + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + vec![StoredRawCheckpoint { + sequence_number: checkpoint.checkpoint_summary.sequence_number as i64, + certified_checkpoint: bcs::to_bytes(&checkpoint.checkpoint_summary).unwrap(), + checkpoint_contents: bcs::to_bytes(&checkpoint.checkpoint_contents).unwrap(), + }] + } + + async fn commit_chunk(pool: ConnectionPool, processed_data: Vec) { + let mut conn = pool.get().await.unwrap(); + diesel::insert_into(raw_checkpoints) + 
.values(processed_data) + .on_conflict_do_nothing() + .execute(&mut conn) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/tx_affected_objects.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/tx_affected_objects.rs new file mode 100644 index 0000000000000..4e6f6efa6a897 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/tx_affected_objects.rs @@ -0,0 +1,48 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use crate::models::tx_indices::StoredTxAffectedObjects; +use crate::schema::tx_affected_objects; +use diesel_async::RunQueryDsl; +use sui_types::effects::TransactionEffectsAPI; +use sui_types::full_checkpoint_content::CheckpointData; + +pub struct TxAffectedObjectsBackfill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for TxAffectedObjectsBackfill { + type ProcessedType = StoredTxAffectedObjects; + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + let first_tx = checkpoint.checkpoint_summary.network_total_transactions as usize + - checkpoint.transactions.len(); + + checkpoint + .transactions + .iter() + .enumerate() + .flat_map(|(i, tx)| { + tx.effects + .object_changes() + .into_iter() + .map(move |change| StoredTxAffectedObjects { + tx_sequence_number: (first_tx + i) as i64, + affected: change.id.to_vec(), + sender: tx.transaction.sender_address().to_vec(), + }) + }) + .collect() + } + + async fn commit_chunk(pool: ConnectionPool, processed_data: Vec) { + let mut conn = pool.get().await.unwrap(); + diesel::insert_into(tx_affected_objects::table) + .values(processed_data) + .on_conflict_do_nothing() + .execute(&mut conn) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/mod.rs 
b/crates/sui-mvr-indexer/src/backfill/backfill_instances/mod.rs new file mode 100644 index 0000000000000..304ed4e715e1d --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/mod.rs @@ -0,0 +1,55 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::digest_task::DigestBackfill; +use crate::backfill::backfill_instances::ingestion_backfills::ingestion_backfill_task::IngestionBackfillTask; +use crate::backfill::backfill_instances::ingestion_backfills::raw_checkpoints::RawCheckpointsBackFill; +use crate::backfill::backfill_instances::ingestion_backfills::tx_affected_objects::TxAffectedObjectsBackfill; +use crate::backfill::backfill_task::BackfillTask; +use crate::backfill::{BackfillTaskKind, IngestionBackfillKind}; +use std::sync::Arc; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; + +mod ingestion_backfills; +mod sql_backfill; +mod system_state_summary_json; + +pub async fn get_backfill_task( + kind: BackfillTaskKind, + range_start: usize, +) -> Arc { + match kind { + BackfillTaskKind::SystemStateSummaryJson => { + Arc::new(system_state_summary_json::SystemStateSummaryJsonBackfill) + } + BackfillTaskKind::Sql { sql, key_column } => { + Arc::new(sql_backfill::SqlBackFill::new(sql, key_column)) + } + BackfillTaskKind::Ingestion { + kind, + remote_store_url, + } => match kind { + IngestionBackfillKind::Digest => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), + IngestionBackfillKind::RawCheckpoints => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), + IngestionBackfillKind::TxAffectedObjects => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), + }, + } +} diff --git 
a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfill.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfill.rs new file mode 100644 index 0000000000000..543f077e2ba3b --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfill.rs @@ -0,0 +1,36 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_task::BackfillTask; +use crate::database::ConnectionPool; +use async_trait::async_trait; +use diesel_async::RunQueryDsl; +use std::ops::RangeInclusive; + +pub struct SqlBackFill { + sql: String, + key_column: String, +} + +impl SqlBackFill { + pub fn new(sql: String, key_column: String) -> Self { + Self { sql, key_column } + } +} + +#[async_trait] +impl BackfillTask for SqlBackFill { + async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive) { + let mut conn = pool.get().await.unwrap(); + + let query = format!( + "{} WHERE {} BETWEEN {} AND {} ON CONFLICT DO NOTHING", + self.sql, + self.key_column, + *range.start(), + *range.end() + ); + + diesel::sql_query(query).execute(&mut conn).await.unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/event_sender.sh b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/event_sender.sh new file mode 100644 index 0000000000000..ea883107b31ca --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/event_sender.sh @@ -0,0 +1,6 @@ +# Copyright (c) Mysten Labs, Inc. 
+# SPDX-License-Identifier: Apache-2.0 + +INDEXER=${INDEXER:-"sui-mvr-indexer"} +DB=${DB:-"postgres://postgres:postgrespw@localhost:5432/postgres"} +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "UPDATE events SET sender = CASE WHEN cardinality(senders) > 0 THEN senders[1] ELSE NULL END" checkpoint_sequence_number diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/full_objects_history.sh b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/full_objects_history.sh new file mode 100644 index 0000000000000..18a0e3b9e84de --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/full_objects_history.sh @@ -0,0 +1,6 @@ +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +INDEXER=${INDEXER:-"sui-mvr-indexer"} +DB=${DB:-"postgres://postgres:postgrespw@localhost:5432/postgres"} +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "INSERT INTO full_objects_history (object_id, object_version, serialized_object) SELECT object_id, object_version, serialized_object FROM objects_history" checkpoint_sequence_number diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/tx_affected_addresses.sh b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/tx_affected_addresses.sh new file mode 100644 index 0000000000000..da0dc0915a0b4 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/tx_affected_addresses.sh @@ -0,0 +1,7 @@ +# Copyright (c) Mysten Labs, Inc. 
+# SPDX-License-Identifier: Apache-2.0 + +INDEXER=${INDEXER:-"sui-mvr-indexer"} +DB=${DB:-"postgres://postgres:postgrespw@localhost:5432/postgres"} +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "INSERT INTO tx_affected_addresses SELECT tx_sequence_number, sender AS affected, sender FROM tx_senders" tx_sequence_number +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "INSERT INTO tx_affected_addresses SELECT tx_sequence_number, recipient AS affected, sender FROM tx_recipients" tx_sequence_number diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/system_state_summary_json.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/system_state_summary_json.rs new file mode 100644 index 0000000000000..912abdd871a1c --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/system_state_summary_json.rs @@ -0,0 +1,56 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_task::BackfillTask; +use crate::database::ConnectionPool; +use crate::schema::epochs; +use async_trait::async_trait; +use diesel::{ExpressionMethods, QueryDsl}; +use diesel_async::{AsyncConnection, RunQueryDsl}; +use std::ops::RangeInclusive; +use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; + +pub struct SystemStateSummaryJsonBackfill; + +#[async_trait] +impl BackfillTask for SystemStateSummaryJsonBackfill { + async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive) { + let mut conn = pool.get().await.unwrap(); + + let results: Vec>> = epochs::table + .select(epochs::system_state) + .filter(epochs::epoch.between(*range.start() as i64, *range.end() as i64)) + .load(&mut conn) + .await + .unwrap(); + + let mut system_states = vec![]; + for bytes in results { + let Some(bytes) = bytes else { + continue; + }; + let system_state_summary: SuiSystemStateSummary = bcs::from_bytes(&bytes).unwrap(); + let json_ser = 
serde_json::to_value(&system_state_summary).unwrap(); + if system_state_summary.epoch == 1 { + // Each existing system state's epoch is off by 1. + // This means there won't be any row with a system state summary for epoch 0. + // We need to manually insert a row for epoch 0. + system_states.push((0, json_ser.clone())); + } + system_states.push((system_state_summary.epoch, json_ser)); + } + conn.transaction::<_, diesel::result::Error, _>(|conn| { + Box::pin(async move { + for (epoch, json_ser) in system_states { + diesel::update(epochs::table.filter(epochs::epoch.eq(epoch as i64))) + .set(epochs::system_state_summary_json.eq(Some(json_ser))) + .execute(conn) + .await?; + } + Ok(()) + }) + }) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_runner.rs b/crates/sui-mvr-indexer/src/backfill/backfill_runner.rs new file mode 100644 index 0000000000000..3126dc90fe35f --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_runner.rs @@ -0,0 +1,94 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::get_backfill_task; +use crate::backfill::backfill_task::BackfillTask; +use crate::backfill::BackfillTaskKind; +use crate::config::BackFillConfig; +use crate::database::ConnectionPool; +use futures::StreamExt; +use std::collections::BTreeSet; +use std::ops::RangeInclusive; +use std::sync::Arc; +use std::time::Instant; +use tokio::sync::{mpsc, Mutex}; +use tokio_stream::wrappers::ReceiverStream; + +pub struct BackfillRunner {} + +impl BackfillRunner { + pub async fn run( + runner_kind: BackfillTaskKind, + pool: ConnectionPool, + backfill_config: BackFillConfig, + total_range: RangeInclusive, + ) { + let task = get_backfill_task(runner_kind, *total_range.start()).await; + Self::run_impl(pool, backfill_config, total_range, task).await; + } + + /// Main function to run the parallel queries and batch processing. 
+ async fn run_impl( + pool: ConnectionPool, + config: BackFillConfig, + total_range: RangeInclusive, + task: Arc, + ) { + let cur_time = Instant::now(); + // Keeps track of the checkpoint ranges (using starting checkpoint number) + // that are in progress. + let in_progress = Arc::new(Mutex::new(BTreeSet::new())); + + let concurrency = config.max_concurrency; + let (tx, rx) = mpsc::channel(concurrency * 10); + // Spawn a task to send chunks lazily over the channel + tokio::spawn(async move { + for chunk in create_chunk_iter(total_range, config.chunk_size) { + if tx.send(chunk).await.is_err() { + // Channel closed, stop producing chunks + break; + } + } + }); + // Convert the receiver into a stream + let stream = ReceiverStream::new(rx); + + // Process chunks in parallel, limiting the number of concurrent query tasks + stream + .for_each_concurrent(concurrency, move |range| { + let pool_clone = pool.clone(); + let in_progress_clone = in_progress.clone(); + let task = task.clone(); + + async move { + in_progress_clone.lock().await.insert(*range.start()); + task.backfill_range(pool_clone, &range).await; + println!("Finished range: {:?}.", range); + in_progress_clone.lock().await.remove(range.start()); + let cur_min_in_progress = in_progress_clone.lock().await.iter().next().cloned(); + if let Some(cur_min_in_progress) = cur_min_in_progress { + println!( + "Average backfill speed: {} checkpoints/s. Minimum range start number still in progress: {:?}.", + cur_min_in_progress as f64 / cur_time.elapsed().as_secs_f64(), + cur_min_in_progress + ); + } + } + }) + .await; + + println!("Finished backfilling in {:?}", cur_time.elapsed()); + } +} + +/// Creates chunks based on the total range and chunk size. 
+fn create_chunk_iter( + total_range: RangeInclusive, + chunk_size: usize, +) -> impl Iterator> { + let end = *total_range.end(); + total_range.step_by(chunk_size).map(move |chunk_start| { + let chunk_end = std::cmp::min(chunk_start + chunk_size - 1, end); + chunk_start..=chunk_end + }) +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_task.rs b/crates/sui-mvr-indexer/src/backfill/backfill_task.rs new file mode 100644 index 0000000000000..008bfa5b482c0 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_task.rs @@ -0,0 +1,12 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::database::ConnectionPool; +use async_trait::async_trait; +use std::ops::RangeInclusive; + +#[async_trait] +pub trait BackfillTask: Send + Sync { + /// Backfill the database for a specific range. + async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive); +} diff --git a/crates/sui-mvr-indexer/src/backfill/mod.rs b/crates/sui-mvr-indexer/src/backfill/mod.rs new file mode 100644 index 0000000000000..e17ba40628ef1 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/mod.rs @@ -0,0 +1,35 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use clap::{Subcommand, ValueEnum}; + +pub mod backfill_instances; +pub mod backfill_runner; +pub mod backfill_task; + +#[derive(Subcommand, Clone, Debug)] +pub enum BackfillTaskKind { + SystemStateSummaryJson, + /// \sql is the SQL string to run, appended with the range between the start and end, + /// as well as conflict resolution (see sql_backfill.rs). + /// \key_column is the primary key column to use for the range. + Sql { + sql: String, + key_column: String, + }, + /// Starts a backfill pipeline from the ingestion engine. + /// \remote_store_url is the URL of the remote store to ingest from. + /// Any `IngestionBackfillKind` will need to map to a type that + /// implements `IngestionBackfillTrait`. 
+ Ingestion { + kind: IngestionBackfillKind, + remote_store_url: String, + }, +} + +#[derive(ValueEnum, Clone, Debug)] +pub enum IngestionBackfillKind { + Digest, + RawCheckpoints, + TxAffectedObjects, +} diff --git a/crates/sui-mvr-indexer/src/benchmark.rs b/crates/sui-mvr-indexer/src/benchmark.rs new file mode 100644 index 0000000000000..96df25cba9fa6 --- /dev/null +++ b/crates/sui-mvr-indexer/src/benchmark.rs @@ -0,0 +1,130 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::config::{BenchmarkConfig, IngestionConfig, IngestionSources, UploadOptions}; +use crate::database::ConnectionPool; +use crate::db::{reset_database, run_migrations}; +use crate::errors::IndexerError; +use crate::indexer::Indexer; +use crate::metrics::IndexerMetrics; +use crate::store::PgIndexerStore; +use std::path::PathBuf; +use sui_synthetic_ingestion::benchmark::{run_benchmark, BenchmarkableIndexer}; +use sui_synthetic_ingestion::{IndexerProgress, SyntheticIngestionConfig}; +use tokio::sync::watch; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + +pub async fn run_indexer_benchmark( + config: BenchmarkConfig, + pool: ConnectionPool, + metrics: IndexerMetrics, +) { + if config.reset_db { + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } else { + run_migrations(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } + let store = PgIndexerStore::new(pool, UploadOptions::default(), metrics.clone()); + let ingestion_dir = config + .workload_dir + .clone() + .unwrap_or_else(|| tempfile::tempdir().unwrap().into_path()); + // If we are using a non-temp directory, we should not delete the ingestion directory. 
+ let gc_checkpoint_files = config.workload_dir.is_none(); + let synthetic_ingestion_config = SyntheticIngestionConfig { + ingestion_dir: ingestion_dir.clone(), + checkpoint_size: config.checkpoint_size, + num_checkpoints: config.num_checkpoints, + starting_checkpoint: config.starting_checkpoint, + }; + let indexer = BenchmarkIndexer::new(store, metrics, ingestion_dir, gc_checkpoint_files); + run_benchmark(synthetic_ingestion_config, indexer).await; +} + +pub struct BenchmarkIndexer { + inner: Option, + cancel: CancellationToken, + committed_checkpoints_rx: watch::Receiver>, + handle: Option>>, +} + +struct BenchmarkIndexerInner { + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + store: PgIndexerStore, + metrics: IndexerMetrics, + committed_checkpoints_tx: watch::Sender>, +} + +impl BenchmarkIndexer { + pub fn new( + store: PgIndexerStore, + metrics: IndexerMetrics, + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + ) -> Self { + let cancel = CancellationToken::new(); + let (committed_checkpoints_tx, committed_checkpoints_rx) = watch::channel(None); + Self { + inner: Some(BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + }), + cancel, + committed_checkpoints_rx, + handle: None, + } + } +} + +#[async_trait::async_trait] +impl BenchmarkableIndexer for BenchmarkIndexer { + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver> { + self.committed_checkpoints_rx.clone() + } + + async fn start(&mut self) { + let BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + } = self.inner.take().unwrap(); + let ingestion_config = IngestionConfig { + sources: IngestionSources { + data_ingestion_path: Some(ingestion_dir), + ..Default::default() + }, + gc_checkpoint_files, + ..Default::default() + }; + let cancel = self.cancel.clone(); + let handle = tokio::task::spawn(async move { + Indexer::start_writer( + ingestion_config, + store, + 
metrics, + Default::default(), + None, + cancel, + Some(committed_checkpoints_tx), + ) + .await + }); + self.handle = Some(handle); + } + + async fn stop(mut self) { + self.cancel.cancel(); + self.handle.unwrap().await.unwrap().unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/config.rs b/crates/sui-mvr-indexer/src/config.rs new file mode 100644 index 0000000000000..6db349aa64747 --- /dev/null +++ b/crates/sui-mvr-indexer/src/config.rs @@ -0,0 +1,633 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::db::ConnectionPoolConfig; +use crate::{backfill::BackfillTaskKind, handlers::pruner::PrunableTable}; +use clap::{Args, Parser, Subcommand}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, net::SocketAddr, path::PathBuf}; +use strum::IntoEnumIterator; +use sui_json_rpc::name_service::NameServiceConfig; +use sui_types::base_types::{ObjectID, SuiAddress}; +use url::Url; + +/// The primary purpose of objects_history is to serve consistency query. +/// A short retention is sufficient. 
+const OBJECTS_HISTORY_EPOCHS_TO_KEEP: u64 = 2; + +#[derive(Parser, Clone, Debug)] +#[clap( + name = "Sui indexer", + about = "An off-fullnode service serving data from Sui protocol" +)] +pub struct IndexerConfig { + #[clap(long, alias = "db-url")] + pub database_url: Url, + + #[clap(flatten)] + pub connection_pool_config: ConnectionPoolConfig, + + #[clap(long, default_value = "0.0.0.0:9184")] + pub metrics_address: SocketAddr, + + #[command(subcommand)] + pub command: Command, +} + +#[derive(Args, Debug, Clone)] +pub struct NameServiceOptions { + #[arg(default_value_t = NameServiceConfig::default().package_address)] + #[arg(long = "name-service-package-address")] + pub package_address: SuiAddress, + #[arg(default_value_t = NameServiceConfig::default().registry_id)] + #[arg(long = "name-service-registry-id")] + pub registry_id: ObjectID, + #[arg(default_value_t = NameServiceConfig::default().reverse_registry_id)] + #[arg(long = "name-service-reverse-registry-id")] + pub reverse_registry_id: ObjectID, +} + +impl NameServiceOptions { + pub fn to_config(&self) -> NameServiceConfig { + let Self { + package_address, + registry_id, + reverse_registry_id, + } = self.clone(); + NameServiceConfig { + package_address, + registry_id, + reverse_registry_id, + } + } +} + +impl Default for NameServiceOptions { + fn default() -> Self { + let NameServiceConfig { + package_address, + registry_id, + reverse_registry_id, + } = NameServiceConfig::default(); + Self { + package_address, + registry_id, + reverse_registry_id, + } + } +} + +#[derive(Args, Debug, Clone)] +pub struct JsonRpcConfig { + #[command(flatten)] + pub name_service_options: NameServiceOptions, + + #[clap(long, default_value = "0.0.0.0:9000")] + pub rpc_address: SocketAddr, + + #[clap(long)] + pub rpc_client_url: String, +} + +#[derive(Args, Debug, Default, Clone)] +#[group(required = true, multiple = true)] +pub struct IngestionSources { + #[arg(long)] + pub data_ingestion_path: Option, + + #[arg(long)] + pub 
remote_store_url: Option, + + #[arg(long)] + pub rpc_client_url: Option, +} + +#[derive(Args, Debug, Clone)] +pub struct IngestionConfig { + #[clap(flatten)] + pub sources: IngestionSources, + + #[arg( + long, + default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE, + env = "DOWNLOAD_QUEUE_SIZE", + )] + pub checkpoint_download_queue_size: usize, + + /// Start checkpoint to ingest from, this is optional and if not provided, the ingestion will + /// start from the next checkpoint after the latest committed checkpoint. + #[arg(long, env = "START_CHECKPOINT")] + pub start_checkpoint: Option, + + /// End checkpoint to ingest until, this is optional and if not provided, the ingestion will + /// continue until u64::MAX. + #[arg(long, env = "END_CHECKPOINT")] + pub end_checkpoint: Option, + + #[arg( + long, + default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, + env = "INGESTION_READER_TIMEOUT_SECS", + )] + pub checkpoint_download_timeout: u64, + + /// Limit indexing parallelism on big checkpoints to avoid OOMing by limiting the total size of + /// the checkpoint download queue. + #[arg( + long, + default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES, + env = "CHECKPOINT_PROCESSING_BATCH_DATA_LIMIT", + )] + pub checkpoint_download_queue_size_bytes: usize, + + /// Whether to delete processed checkpoint files from the local directory, + /// when running Fullnode-colocated indexer. 
+ #[arg(long, default_value_t = true)] + pub gc_checkpoint_files: bool, +} + +impl IngestionConfig { + const DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE: usize = 200; + const DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES: usize = 20_000_000; + const DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT: u64 = 20; +} + +impl Default for IngestionConfig { + fn default() -> Self { + Self { + sources: Default::default(), + start_checkpoint: None, + end_checkpoint: None, + checkpoint_download_queue_size: Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE, + checkpoint_download_timeout: Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, + checkpoint_download_queue_size_bytes: + Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES, + gc_checkpoint_files: true, + } + } +} + +#[derive(Args, Debug, Clone)] +pub struct BackFillConfig { + /// Maximum number of concurrent tasks to run. + #[arg( + long, + default_value_t = Self::DEFAULT_MAX_CONCURRENCY, + )] + pub max_concurrency: usize, + /// Number of checkpoints to backfill in a single SQL command. + #[arg( + long, + default_value_t = Self::DEFAULT_CHUNK_SIZE, + )] + pub chunk_size: usize, +} + +impl BackFillConfig { + const DEFAULT_MAX_CONCURRENCY: usize = 10; + const DEFAULT_CHUNK_SIZE: usize = 1000; +} + +#[derive(Subcommand, Clone, Debug)] +pub enum Command { + Indexer { + #[command(flatten)] + ingestion_config: IngestionConfig, + #[command(flatten)] + snapshot_config: SnapshotLagConfig, + #[command(flatten)] + pruning_options: PruningOptions, + #[command(flatten)] + upload_options: UploadOptions, + }, + JsonRpcService(JsonRpcConfig), + ResetDatabase { + #[clap(long)] + force: bool, + /// If true, only drop all tables but do not run the migrations. + /// That is, no tables will exist in the DB after the reset. + #[clap(long, default_value_t = false)] + skip_migrations: bool, + }, + /// Run through the migration scripts. + RunMigrations, + /// Backfill DB tables for some ID range [\start, \end]. 
+ /// The tool will automatically slice it into smaller ranges and for each range, + /// it first makes a read query to the DB to get data needed for backfil if needed, + /// which then can be processed and written back to the DB. + /// To add a new backfill, add a new module and implement the `BackfillTask` trait. + /// full_objects_history.rs provides an example to do SQL-only backfills. + /// system_state_summary_json.rs provides an example to do SQL + processing backfills. + RunBackFill { + /// Start of the range to backfill, inclusive. + /// It can be a checkpoint number or an epoch or any other identifier that can be used to + /// slice the backfill range. + start: usize, + /// End of the range to backfill, inclusive. + end: usize, + #[clap(subcommand)] + runner_kind: BackfillTaskKind, + #[command(flatten)] + backfill_config: BackFillConfig, + }, + /// Restore the database from formal snaphots. + Restore(RestoreConfig), + Benchmark(BenchmarkConfig), +} + +#[derive(Args, Default, Debug, Clone)] +pub struct PruningOptions { + /// Path to TOML file containing configuration for retention policies. + #[arg(long)] + pub pruning_config_path: Option, +} + +/// Represents the default retention policy and overrides for prunable tables. Instantiated only if +/// `PruningOptions` is provided on indexer start. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RetentionConfig { + /// Default retention policy for all tables. + pub epochs_to_keep: u64, + /// A map of tables to their respective retention policies that will override the default. + /// Prunable tables not named here will use the default retention policy. + #[serde(default)] + pub overrides: HashMap, +} + +impl PruningOptions { + /// Load default retention policy and overrides from file. 
+ pub fn load_from_file(&self) -> Option { + let config_path = self.pruning_config_path.as_ref()?; + + let contents = std::fs::read_to_string(config_path) + .expect("Failed to read default retention policy and overrides from file"); + let retention_with_overrides = toml::de::from_str::(&contents) + .expect("Failed to parse into RetentionConfig struct"); + + let default_retention = retention_with_overrides.epochs_to_keep; + + assert!( + default_retention > 0, + "Default retention must be greater than 0" + ); + assert!( + retention_with_overrides + .overrides + .values() + .all(|&policy| policy > 0), + "All retention overrides must be greater than 0" + ); + + Some(retention_with_overrides) + } +} + +impl RetentionConfig { + /// Create a new `RetentionConfig` with the specified default retention and overrides. Call + /// `finalize()` on the instance to update the `policies` field with the default retention + /// policy for all tables that do not have an override specified. + pub fn new(epochs_to_keep: u64, overrides: HashMap) -> Self { + Self { + epochs_to_keep, + overrides, + } + } + + pub fn new_with_default_retention_only_for_testing(epochs_to_keep: u64) -> Self { + let mut overrides = HashMap::new(); + overrides.insert( + PrunableTable::ObjectsHistory, + OBJECTS_HISTORY_EPOCHS_TO_KEEP, + ); + + Self::new(epochs_to_keep, HashMap::new()) + } + + /// Consumes this struct to produce a full mapping of every prunable table and its retention + /// policy. By default, every prunable table will have the default retention policy from + /// `epochs_to_keep`. Some tables like `objects_history` will observe a different default + /// retention policy. These default values are overridden by any entries in `overrides`. 
+ pub fn retention_policies(self) -> HashMap { + let RetentionConfig { + epochs_to_keep, + mut overrides, + } = self; + + for table in PrunableTable::iter() { + let default_retention = match table { + PrunableTable::ObjectsHistory => OBJECTS_HISTORY_EPOCHS_TO_KEEP, + _ => epochs_to_keep, + }; + + overrides.entry(table).or_insert(default_retention); + } + + overrides + } +} + +#[derive(Args, Debug, Clone)] +pub struct SnapshotLagConfig { + #[arg( + long = "objects-snapshot-min-checkpoint-lag", + default_value_t = Self::DEFAULT_MIN_LAG, + env = "OBJECTS_SNAPSHOT_MIN_CHECKPOINT_LAG", + )] + pub snapshot_min_lag: usize, + + #[arg( + long = "objects-snapshot-sleep-duration", + default_value_t = Self::DEFAULT_SLEEP_DURATION_SEC, + )] + pub sleep_duration: u64, +} + +impl SnapshotLagConfig { + const DEFAULT_MIN_LAG: usize = 300; + const DEFAULT_SLEEP_DURATION_SEC: u64 = 5; +} + +impl Default for SnapshotLagConfig { + fn default() -> Self { + SnapshotLagConfig { + snapshot_min_lag: Self::DEFAULT_MIN_LAG, + sleep_duration: Self::DEFAULT_SLEEP_DURATION_SEC, + } + } +} + +#[derive(Args, Debug, Clone, Default)] +pub struct UploadOptions { + #[arg(long, env = "GCS_DISPLAY_BUCKET")] + pub gcs_display_bucket: Option, + #[arg(long, env = "GCS_CRED_PATH")] + pub gcs_cred_path: Option, +} + +#[derive(Args, Debug, Clone)] +pub struct RestoreConfig { + #[arg(long, env = "START_EPOCH", required = true)] + pub start_epoch: u64, + #[arg(long, env = "SNAPSHOT_ENDPOINT")] + pub snapshot_endpoint: String, + #[arg(long, env = "SNAPSHOT_BUCKET")] + pub snapshot_bucket: String, + #[arg(long, env = "SNAPSHOT_DOWNLOAD_DIR", required = true)] + pub snapshot_download_dir: String, + + #[arg(long, env = "GCS_ARCHIVE_BUCKET")] + pub gcs_archive_bucket: String, + #[arg(long, env = "GCS_DISPLAY_BUCKET")] + pub gcs_display_bucket: String, + + #[arg(env = "OBJECT_STORE_CONCURRENT_LIMIT")] + pub object_store_concurrent_limit: usize, + #[arg(env = "OBJECT_STORE_MAX_TIMEOUT_SECS")] + pub 
object_store_max_timeout_secs: u64, +} + +impl Default for RestoreConfig { + fn default() -> Self { + Self { + start_epoch: 0, // not used b/c it's required + snapshot_endpoint: "https://formal-snapshot.mainnet.sui.io".to_string(), + snapshot_bucket: "mysten-mainnet-formal".to_string(), + snapshot_download_dir: "".to_string(), // not used b/c it's required + gcs_archive_bucket: "mysten-mainnet-archives".to_string(), + gcs_display_bucket: "mysten-mainnet-display-table".to_string(), + object_store_concurrent_limit: 50, + object_store_max_timeout_secs: 512, + } + } +} + +#[derive(Args, Debug, Clone)] +pub struct BenchmarkConfig { + #[arg( + long, + default_value_t = 200, + help = "Number of transactions in a checkpoint." + )] + pub checkpoint_size: u64, + #[arg( + long, + default_value_t = 2000, + help = "Total number of synthetic checkpoints to generate." + )] + pub num_checkpoints: u64, + #[arg( + long, + default_value_t = 1, + help = "Customize the first checkpoint sequence number to be committed, must be non-zero." + )] + pub starting_checkpoint: u64, + #[arg( + long, + default_value_t = false, + help = "Whether to reset the database before running." + )] + pub reset_db: bool, + #[arg( + long, + help = "Path to workload directory. If not provided, a temporary directory will be created.\ + If provided, synthetic workload generator will either load data from it if it exists or generate new data.\ + This avoids repeat generation of the same data." 
+ )] + pub workload_dir: Option, +} + +#[cfg(test)] +mod test { + use super::*; + use std::io::Write; + use tap::Pipe; + use tempfile::NamedTempFile; + + fn parse_args<'a, T>(args: impl IntoIterator) -> Result + where + T: clap::Args + clap::FromArgMatches, + { + clap::Command::new("test") + .no_binary_name(true) + .pipe(T::augment_args) + .try_get_matches_from(args) + .and_then(|matches| T::from_arg_matches(&matches)) + } + + #[test] + fn name_service() { + parse_args::(["--name-service-registry-id=0x1"]).unwrap(); + parse_args::([ + "--name-service-package-address", + "0x0000000000000000000000000000000000000000000000000000000000000001", + ]) + .unwrap(); + parse_args::(["--name-service-reverse-registry-id=0x1"]).unwrap(); + parse_args::([ + "--name-service-registry-id=0x1", + "--name-service-package-address", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "--name-service-reverse-registry-id=0x3", + ]) + .unwrap(); + parse_args::([]).unwrap(); + } + + #[test] + fn ingestion_sources() { + parse_args::(["--data-ingestion-path=/tmp/foo"]).unwrap(); + parse_args::(["--remote-store-url=http://example.com"]).unwrap(); + parse_args::(["--rpc-client-url=http://example.com"]).unwrap(); + + parse_args::([ + "--data-ingestion-path=/tmp/foo", + "--remote-store-url=http://example.com", + "--rpc-client-url=http://example.com", + ]) + .unwrap(); + + // At least one must be present + parse_args::([]).unwrap_err(); + } + + #[test] + fn json_rpc_config() { + parse_args::(["--rpc-client-url=http://example.com"]).unwrap(); + + // Can include name service options and bind address + parse_args::([ + "--rpc-address=127.0.0.1:8080", + "--name-service-registry-id=0x1", + "--rpc-client-url=http://example.com", + ]) + .unwrap(); + + // fullnode rpc url must be present + parse_args::([]).unwrap_err(); + } + + #[test] + fn pruning_options_with_objects_history_override() { + let mut temp_file = NamedTempFile::new().unwrap(); + let toml_content = r#" + epochs_to_keep 
= 5 + + [overrides] + objects_history = 10 + transactions = 20 + "#; + temp_file.write_all(toml_content.as_bytes()).unwrap(); + let temp_path: PathBuf = temp_file.path().to_path_buf(); + let pruning_options = PruningOptions { + pruning_config_path: Some(temp_path.clone()), + }; + let retention_config = pruning_options.load_from_file().unwrap(); + + // Assert the parsed values + assert_eq!(retention_config.epochs_to_keep, 5); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::ObjectsHistory) + .copied(), + Some(10) + ); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::Transactions) + .copied(), + Some(20) + ); + assert_eq!(retention_config.overrides.len(), 2); + + let retention_policies = retention_config.retention_policies(); + + for table in PrunableTable::iter() { + let Some(retention) = retention_policies.get(&table).copied() else { + panic!("Expected a retention policy for table {:?}", table); + }; + + match table { + PrunableTable::ObjectsHistory => assert_eq!(retention, 10), + PrunableTable::Transactions => assert_eq!(retention, 20), + _ => assert_eq!(retention, 5), + }; + } + } + + #[test] + fn pruning_options_no_objects_history_override() { + let mut temp_file = NamedTempFile::new().unwrap(); + let toml_content = r#" + epochs_to_keep = 5 + + [overrides] + tx_affected_addresses = 10 + transactions = 20 + "#; + temp_file.write_all(toml_content.as_bytes()).unwrap(); + let temp_path: PathBuf = temp_file.path().to_path_buf(); + let pruning_options = PruningOptions { + pruning_config_path: Some(temp_path.clone()), + }; + let retention_config = pruning_options.load_from_file().unwrap(); + + // Assert the parsed values + assert_eq!(retention_config.epochs_to_keep, 5); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::TxAffectedAddresses) + .copied(), + Some(10) + ); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::Transactions) + .copied(), + Some(20) + ); + 
assert_eq!(retention_config.overrides.len(), 2); + + let retention_policies = retention_config.retention_policies(); + + for table in PrunableTable::iter() { + let Some(retention) = retention_policies.get(&table).copied() else { + panic!("Expected a retention policy for table {:?}", table); + }; + + match table { + PrunableTable::ObjectsHistory => { + assert_eq!(retention, OBJECTS_HISTORY_EPOCHS_TO_KEEP) + } + PrunableTable::TxAffectedAddresses => assert_eq!(retention, 10), + PrunableTable::Transactions => assert_eq!(retention, 20), + _ => assert_eq!(retention, 5), + }; + } + } + + #[test] + fn test_invalid_pruning_config_file() { + let toml_str = r#" + epochs_to_keep = 5 + + [overrides] + objects_history = 10 + transactions = 20 + invalid_table = 30 + "#; + + let result = toml::from_str::(toml_str); + assert!(result.is_err(), "Expected an error, but parsing succeeded"); + + if let Err(e) = result { + assert!( + e.to_string().contains("unknown variant `invalid_table`"), + "Error message doesn't mention the invalid table" + ); + } + } +} diff --git a/crates/sui-mvr-indexer/src/database.rs b/crates/sui-mvr-indexer/src/database.rs new file mode 100644 index 0000000000000..9c1446ff9c8ed --- /dev/null +++ b/crates/sui-mvr-indexer/src/database.rs @@ -0,0 +1,161 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use diesel::prelude::ConnectionError; +use diesel_async::pooled_connection::bb8::Pool; +use diesel_async::pooled_connection::bb8::PooledConnection; +use diesel_async::pooled_connection::bb8::RunError; +use diesel_async::pooled_connection::AsyncDieselConnectionManager; +use diesel_async::pooled_connection::PoolError; +use diesel_async::RunQueryDsl; +use diesel_async::{AsyncConnection, AsyncPgConnection}; +use futures::FutureExt; +use url::Url; + +use crate::db::ConnectionConfig; +use crate::db::ConnectionPoolConfig; + +#[derive(Clone, Debug)] +pub struct ConnectionPool { + database_url: Arc, + pool: Pool, +} + +impl ConnectionPool { + pub async fn new(database_url: Url, config: ConnectionPoolConfig) -> Result { + let database_url = Arc::new(database_url); + let connection_config = config.connection_config(); + let mut manager_config = diesel_async::pooled_connection::ManagerConfig::default(); + manager_config.custom_setup = + Box::new(move |url| establish_connection(url, connection_config).boxed()); + let manager = + AsyncDieselConnectionManager::new_with_config(database_url.as_str(), manager_config); + + Pool::builder() + .max_size(config.pool_size) + .connection_timeout(config.connection_timeout) + .build(manager) + .await + .map(|pool| Self { database_url, pool }) + } + + /// Retrieves a connection from the pool. + pub async fn get(&self) -> Result, RunError> { + self.pool.get().await.map(Connection::PooledConnection) + } + + /// Get a new dedicated connection that will not be managed by the pool. + /// An application may want a persistent connection (e.g. to do a + /// postgres LISTEN) that will not be closed or repurposed by the pool. 
+ /// + /// This method allows reusing the manager's configuration but otherwise + /// bypassing the pool + pub async fn dedicated_connection(&self) -> Result, PoolError> { + self.pool + .dedicated_connection() + .await + .map(Connection::Dedicated) + } + + /// Returns information about the current state of the pool. + pub fn state(&self) -> bb8::State { + self.pool.state() + } + + /// Returns the database url that this pool is configured with + pub fn url(&self) -> &Url { + &self.database_url + } +} + +pub enum Connection<'a> { + PooledConnection(PooledConnection<'a, AsyncPgConnection>), + Dedicated(AsyncPgConnection), +} + +impl Connection<'static> { + pub async fn dedicated(database_url: &Url) -> Result { + AsyncPgConnection::establish(database_url.as_str()) + .await + .map(Connection::Dedicated) + } + + /// Run the provided Migrations + pub async fn run_pending_migrations( + self, + migrations: M, + ) -> diesel::migration::Result>> + where + M: diesel::migration::MigrationSource + Send + 'static, + { + use diesel::migration::MigrationVersion; + use diesel_migrations::MigrationHarness; + + let mut connection = + diesel_async::async_connection_wrapper::AsyncConnectionWrapper::::from(self); + + tokio::task::spawn_blocking(move || { + connection + .run_pending_migrations(migrations) + .map(|versions| versions.iter().map(MigrationVersion::as_owned).collect()) + }) + .await + .unwrap() + } +} + +impl<'a> std::ops::Deref for Connection<'a> { + type Target = AsyncPgConnection; + + fn deref(&self) -> &Self::Target { + match self { + Connection::PooledConnection(pooled) => pooled.deref(), + Connection::Dedicated(dedicated) => dedicated, + } + } +} + +impl<'a> std::ops::DerefMut for Connection<'a> { + fn deref_mut(&mut self) -> &mut AsyncPgConnection { + match self { + Connection::PooledConnection(pooled) => pooled.deref_mut(), + Connection::Dedicated(dedicated) => dedicated, + } + } +} + +impl ConnectionConfig { + async fn apply(&self, connection: &mut AsyncPgConnection) 
-> Result<(), diesel::result::Error> { + diesel::sql_query(format!( + "SET statement_timeout = {}", + self.statement_timeout.as_millis(), + )) + .execute(connection) + .await?; + + if self.read_only { + diesel::sql_query("SET default_transaction_read_only = 'on'") + .execute(connection) + .await?; + } + + Ok(()) + } +} + +/// Function used by the Connection Pool Manager to establish and setup new connections +async fn establish_connection( + url: &str, + config: ConnectionConfig, +) -> Result { + let mut connection = AsyncPgConnection::establish(url).await?; + + config + .apply(&mut connection) + .await + .map_err(ConnectionError::CouldntSetupConfiguration)?; + + Ok(connection) +} diff --git a/crates/sui-mvr-indexer/src/db.rs b/crates/sui-mvr-indexer/src/db.rs new file mode 100644 index 0000000000000..4a2893603bb10 --- /dev/null +++ b/crates/sui-mvr-indexer/src/db.rs @@ -0,0 +1,395 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::database::Connection; +use crate::errors::IndexerError; +use crate::handlers::pruner::PrunableTable; +use clap::Args; +use diesel::migration::{Migration, MigrationSource, MigrationVersion}; +use diesel::pg::Pg; +use diesel::prelude::QueryableByName; +use diesel::table; +use diesel::QueryDsl; +use diesel_migrations::{embed_migrations, EmbeddedMigrations}; +use std::collections::{BTreeSet, HashSet}; +use std::time::Duration; +use strum::IntoEnumIterator; +use tracing::info; + +table! 
{ + __diesel_schema_migrations (version) { + version -> VarChar, + run_on -> Timestamp, + } +} + +const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/pg"); + +#[derive(Args, Debug, Clone)] +pub struct ConnectionPoolConfig { + #[arg(long, default_value_t = 100)] + #[arg(env = "DB_POOL_SIZE")] + pub pool_size: u32, + #[arg(long, value_parser = parse_duration, default_value = "30")] + #[arg(env = "DB_CONNECTION_TIMEOUT")] + pub connection_timeout: Duration, + #[arg(long, value_parser = parse_duration, default_value = "3600")] + #[arg(env = "DB_STATEMENT_TIMEOUT")] + pub statement_timeout: Duration, +} + +fn parse_duration(arg: &str) -> Result { + let seconds = arg.parse()?; + Ok(std::time::Duration::from_secs(seconds)) +} + +impl ConnectionPoolConfig { + const DEFAULT_POOL_SIZE: u32 = 100; + const DEFAULT_CONNECTION_TIMEOUT: u64 = 30; + const DEFAULT_STATEMENT_TIMEOUT: u64 = 3600; + + pub(crate) fn connection_config(&self) -> ConnectionConfig { + ConnectionConfig { + statement_timeout: self.statement_timeout, + read_only: false, + } + } + + pub fn set_pool_size(&mut self, size: u32) { + self.pool_size = size; + } + + pub fn set_connection_timeout(&mut self, timeout: Duration) { + self.connection_timeout = timeout; + } + + pub fn set_statement_timeout(&mut self, timeout: Duration) { + self.statement_timeout = timeout; + } +} + +impl Default for ConnectionPoolConfig { + fn default() -> Self { + Self { + pool_size: Self::DEFAULT_POOL_SIZE, + connection_timeout: Duration::from_secs(Self::DEFAULT_CONNECTION_TIMEOUT), + statement_timeout: Duration::from_secs(Self::DEFAULT_STATEMENT_TIMEOUT), + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct ConnectionConfig { + pub statement_timeout: Duration, + pub read_only: bool, +} + +/// Checks that the local migration scripts is a prefix of the records in the database. +/// This allows us run migration scripts against a DB at anytime, without worrying about +/// existing readers fail over. 
+/// We do however need to make sure that whenever we are deploying a new version of either reader or writer, +/// we must first run migration scripts to ensure that there is not more local scripts than in the DB record. +pub async fn check_db_migration_consistency(conn: &mut Connection<'_>) -> Result<(), IndexerError> { + info!("Starting compatibility check"); + let migrations: Vec>> = MIGRATIONS.migrations().map_err(|err| { + IndexerError::DbMigrationError(format!( + "Failed to fetch local migrations from schema: {err}" + )) + })?; + let local_migrations: Vec<_> = migrations + .into_iter() + .map(|m| m.name().version().as_owned()) + .collect(); + check_db_migration_consistency_impl(conn, local_migrations).await?; + info!("Compatibility check passed"); + Ok(()) +} + +async fn check_db_migration_consistency_impl( + conn: &mut Connection<'_>, + local_migrations: Vec>, +) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + // Unfortunately we cannot call applied_migrations() directly on the connection, + // since it implicitly creates the __diesel_schema_migrations table if it doesn't exist, + // which is a write operation that we don't want to do in this function. + let applied_migrations: BTreeSet> = BTreeSet::from_iter( + __diesel_schema_migrations::table + .select(__diesel_schema_migrations::version) + .load(conn) + .await?, + ); + + // We check that the local migrations is a subset of the applied migrations. + let unapplied_migrations: Vec<_> = local_migrations + .into_iter() + .filter(|m| !applied_migrations.contains(m)) + .collect(); + + if unapplied_migrations.is_empty() { + return Ok(()); + } + + Err(IndexerError::DbMigrationError(format!( + "This binary expected the following migrations to have been run, and they were not: {:?}", + unapplied_migrations + ))) +} + +/// Check that prunable tables exist in the database. 
+pub async fn check_prunable_tables_valid(conn: &mut Connection<'_>) -> Result<(), IndexerError> { + info!("Starting compatibility check"); + + use diesel_async::RunQueryDsl; + + let select_parent_tables = r#" + SELECT c.relname AS table_name + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + LEFT JOIN pg_partitioned_table pt ON pt.partrelid = c.oid + WHERE c.relkind IN ('r', 'p') -- 'r' for regular tables, 'p' for partitioned tables + AND n.nspname = 'public' + AND ( + pt.partrelid IS NOT NULL -- This is a partitioned (parent) table + OR NOT EXISTS ( -- This is not a partition (child table) + SELECT 1 + FROM pg_inherits i + WHERE i.inhrelid = c.oid + ) + ); + "#; + + #[derive(QueryableByName)] + struct TableName { + #[diesel(sql_type = diesel::sql_types::Text)] + table_name: String, + } + + let result: Vec = diesel::sql_query(select_parent_tables) + .load(conn) + .await + .map_err(|e| IndexerError::DbMigrationError(format!("Failed to fetch tables: {e}")))?; + + let parent_tables_from_db: HashSet<_> = result.into_iter().map(|t| t.table_name).collect(); + + for key in PrunableTable::iter() { + if !parent_tables_from_db.contains(key.as_ref()) { + return Err(IndexerError::GenericError(format!( + "Invalid retention policy override provided for table {}: does not exist in the database", + key + ))); + } + } + + info!("Compatibility check passed"); + Ok(()) +} + +pub use setup_postgres::{reset_database, run_migrations}; + +pub mod setup_postgres { + use crate::{database::Connection, db::MIGRATIONS}; + use anyhow::anyhow; + use diesel_async::RunQueryDsl; + use tracing::info; + + pub async fn reset_database(mut conn: Connection<'static>) -> Result<(), anyhow::Error> { + info!("Resetting PG database ..."); + clear_database(&mut conn).await?; + run_migrations(conn).await?; + info!("Reset database complete."); + Ok(()) + } + + pub async fn clear_database(conn: &mut Connection<'static>) -> Result<(), anyhow::Error> { + info!("Clearing the database..."); + let 
drop_all_tables = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public') + LOOP + EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_tables).execute(conn).await?; + info!("Dropped all tables."); + + let drop_all_procedures = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes + FROM pg_proc INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid) + WHERE ns.nspname = 'public' AND prokind = 'p') + LOOP + EXECUTE 'DROP PROCEDURE IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_procedures).execute(conn).await?; + info!("Dropped all procedures."); + + let drop_all_functions = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes + FROM pg_proc INNER JOIN pg_namespace ON (pg_proc.pronamespace = pg_namespace.oid) + WHERE pg_namespace.nspname = 'public' AND prokind = 'f') + LOOP + EXECUTE 'DROP FUNCTION IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_functions).execute(conn).await?; + info!("Database cleared."); + Ok(()) + } + + pub async fn run_migrations(conn: Connection<'static>) -> Result<(), anyhow::Error> { + info!("Running migrations ..."); + conn.run_pending_migrations(MIGRATIONS) + .await + .map_err(|e| anyhow!("Failed to run migrations {e}"))?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::database::{Connection, ConnectionPool}; + use crate::db::{ + check_db_migration_consistency, check_db_migration_consistency_impl, reset_database, + ConnectionPoolConfig, MIGRATIONS, + }; + use crate::tempdb::TempDb; + use diesel::migration::{Migration, MigrationSource}; + use diesel::pg::Pg; + use diesel_migrations::MigrationHarness; + + // Check that the migration 
records in the database created from the local schema + // pass the consistency check. + #[tokio::test] + async fn db_migration_consistency_smoke_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + check_db_migration_consistency(&mut pool.get().await.unwrap()) + .await + .unwrap(); + } + + #[tokio::test] + async fn db_migration_consistency_non_prefix_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + let mut connection = pool.get().await.unwrap(); + + let mut sync_connection_wrapper = + diesel_async::async_connection_wrapper::AsyncConnectionWrapper::::from( + pool.dedicated_connection().await.unwrap(), + ); + + tokio::task::spawn_blocking(move || { + sync_connection_wrapper + .revert_migration(MIGRATIONS.migrations().unwrap().last().unwrap()) + .unwrap(); + }) + .await + .unwrap(); + // Local migrations is one record more than the applied migrations. + // This will fail the consistency check since it's not a prefix. + assert!(check_db_migration_consistency(&mut connection) + .await + .is_err()); + + pool.dedicated_connection() + .await + .unwrap() + .run_pending_migrations(MIGRATIONS) + .await + .unwrap(); + // After running pending migrations they should be consistent. 
+ check_db_migration_consistency(&mut connection) + .await + .unwrap(); + } + + #[tokio::test] + async fn db_migration_consistency_prefix_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + + let migrations: Vec>> = MIGRATIONS.migrations().unwrap(); + let mut local_migrations: Vec<_> = migrations.iter().map(|m| m.name().version()).collect(); + local_migrations.pop(); + // Local migrations is one record less than the applied migrations. + // This should pass the consistency check since it's still a prefix. + check_db_migration_consistency_impl(&mut pool.get().await.unwrap(), local_migrations) + .await + .unwrap(); + } + + #[tokio::test] + async fn db_migration_consistency_subset_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + + let migrations: Vec>> = MIGRATIONS.migrations().unwrap(); + let mut local_migrations: Vec<_> = migrations.iter().map(|m| m.name().version()).collect(); + local_migrations.remove(2); + + // Local migrations are missing one record compared to the applied migrations, which should + // still be okay. + check_db_migration_consistency_impl(&mut pool.get().await.unwrap(), local_migrations) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/errors.rs b/crates/sui-mvr-indexer/src/errors.rs new file mode 100644 index 0000000000000..c8971e39781ad --- /dev/null +++ b/crates/sui-mvr-indexer/src/errors.rs @@ -0,0 +1,172 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use fastcrypto::error::FastCryptoError; +use jsonrpsee::core::Error as RpcError; +use jsonrpsee::types::error::CallError; +use sui_json_rpc::name_service::NameServiceError; +use thiserror::Error; + +use sui_types::base_types::ObjectIDParseError; +use sui_types::error::{SuiError, SuiObjectResponseError, UserInputError}; + +#[derive(Debug, Error)] +pub struct DataDownloadError { + pub error: IndexerError, + pub next_checkpoint_sequence_number: u64, +} + +impl std::fmt::Display for DataDownloadError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "next_checkpoint_seq: {}, error: {}", + self.next_checkpoint_sequence_number, self.error + ) + } +} + +#[derive(Debug, Error)] +pub enum IndexerError { + #[error("Indexer failed to read from archives store with error: `{0}`")] + ArchiveReaderError(String), + + #[error("Stream closed unexpectedly with error: `{0}`")] + ChannelClosed(String), + + #[error("Indexer failed to convert timestamp to NaiveDateTime with error: `{0}`")] + DateTimeParsingError(String), + + #[error("Indexer failed to deserialize event from events table with error: `{0}`")] + EventDeserializationError(String), + + #[error("Fullnode returns unexpected responses, which may block indexers from proceeding, with error: `{0}`")] + UnexpectedFullnodeResponseError(String), + + #[error("Indexer failed to transform data with error: `{0}`")] + DataTransformationError(String), + + #[error("Indexer failed to read fullnode with error: `{0}`")] + FullNodeReadingError(String), + + #[error("Indexer failed to convert structs to diesel Insertable with error: `{0}`")] + InsertableParsingError(String), + + #[error("Indexer failed to build JsonRpcServer with error: `{0}`")] + JsonRpcServerError(#[from] sui_json_rpc::error::Error), + + #[error("Indexer failed to find object mutations, which should never happen.")] + ObjectMutationNotAvailable, + + #[error("Indexer failed to build PG connection 
pool with error: `{0}`")] + PgConnectionPoolInitError(String), + + #[error("Indexer failed to get a pool connection from PG connection pool with error: `{0}`")] + PgPoolConnectionError(String), + + #[error("Indexer failed to read PostgresDB with error: `{0}`")] + PostgresReadError(String), + + #[error("Indexer failed to reset PostgresDB with error: `{0}`")] + PostgresResetError(String), + + #[error("Indexer failed to commit changes to PostgresDB with error: `{0}`")] + PostgresWriteError(String), + + #[error(transparent)] + PostgresError(#[from] diesel::result::Error), + + #[error("Indexer failed to initialize fullnode Http client with error: `{0}`")] + HttpClientInitError(String), + + #[error("Indexer failed to serialize/deserialize with error: `{0}`")] + SerdeError(String), + + #[error("Indexer error related to dynamic field: `{0}`")] + DynamicFieldError(String), + + #[error("Indexer does not support the feature with error: `{0}`")] + NotSupportedError(String), + + #[error("Indexer read corrupted/incompatible data from persistent storage: `{0}`")] + PersistentStorageDataCorruptionError(String), + + #[error("Indexer generic error: `{0}`")] + GenericError(String), + + #[error("GCS error: `{0}`")] + GcsError(String), + + #[error("Indexer failed to resolve object to move struct with error: `{0}`")] + ResolveMoveStructError(String), + + #[error(transparent)] + UncategorizedError(#[from] anyhow::Error), + + #[error(transparent)] + ObjectIdParseError(#[from] ObjectIDParseError), + + #[error("Invalid transaction digest with error: `{0}`")] + InvalidTransactionDigestError(String), + + #[error(transparent)] + SuiError(#[from] SuiError), + + #[error(transparent)] + BcsError(#[from] bcs::Error), + + #[error("Invalid argument with error: `{0}`")] + InvalidArgumentError(String), + + #[error(transparent)] + UserInputError(#[from] UserInputError), + + #[error("Indexer failed to resolve module with error: `{0}`")] + ModuleResolutionError(String), + + #[error(transparent)] + 
ObjectResponseError(#[from] SuiObjectResponseError), + + #[error(transparent)] + FastCryptoError(#[from] FastCryptoError), + + #[error("`{0}`: `{1}`")] + ErrorWithContext(String, Box), + + #[error("Indexer failed to send item to channel with error: `{0}`")] + MpscChannelError(String), + + #[error(transparent)] + NameServiceError(#[from] NameServiceError), + + #[error("Inconsistent migration records: {0}")] + DbMigrationError(String), +} + +pub trait Context { + fn context(self, context: &str) -> Result; +} + +impl Context for Result { + fn context(self, context: &str) -> Result { + self.map_err(|e| IndexerError::ErrorWithContext(context.to_string(), Box::new(e))) + } +} + +impl From for RpcError { + fn from(e: IndexerError) -> Self { + RpcError::Call(CallError::Failed(e.into())) + } +} + +impl From for IndexerError { + fn from(value: tokio::task::JoinError) -> Self { + IndexerError::UncategorizedError(anyhow::Error::from(value)) + } +} + +impl From for IndexerError { + fn from(value: diesel_async::pooled_connection::bb8::RunError) -> Self { + Self::PgPoolConnectionError(value.to_string()) + } +} diff --git a/crates/sui-mvr-indexer/src/handlers/checkpoint_handler.rs b/crates/sui-mvr-indexer/src/handlers/checkpoint_handler.rs new file mode 100644 index 0000000000000..170bda5ff6108 --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/checkpoint_handler.rs @@ -0,0 +1,653 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; +use std::sync::Arc; + +use async_trait::async_trait; +use itertools::Itertools; +use sui_types::dynamic_field::DynamicFieldInfo; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + +use move_core_types::language_storage::{StructTag, TypeTag}; +use mysten_metrics::{get_metrics, spawn_monitored_task}; +use sui_data_ingestion_core::Worker; +use sui_rest_api::{CheckpointData, CheckpointTransaction}; +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::dynamic_field::DynamicFieldType; +use sui_types::effects::{ObjectChange, TransactionEffectsAPI}; +use sui_types::event::SystemEpochInfoEvent; +use sui_types::messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointContents, CheckpointSequenceNumber, +}; +use sui_types::object::Object; +use sui_types::object::Owner; +use sui_types::sui_system_state::{get_sui_system_state, SuiSystemStateTrait}; +use sui_types::transaction::TransactionDataAPI; +use tokio::sync::watch; + +use crate::errors::IndexerError; +use crate::handlers::committer::start_tx_checkpoint_commit_task; +use crate::metrics::IndexerMetrics; +use crate::models::display::StoredDisplay; +use crate::models::epoch::{EndOfEpochUpdate, EpochEndInfo, EpochStartInfo, StartOfEpochUpdate}; +use crate::models::obj_indices::StoredObjectVersion; +use crate::store::{IndexerStore, PgIndexerStore}; +use crate::types::{ + EventIndex, IndexedCheckpoint, IndexedDeletedObject, IndexedEvent, IndexedObject, + IndexedPackage, IndexedTransaction, IndexerResult, TransactionKind, TxIndex, +}; + +use super::tx_processor::EpochEndIndexingObjectStore; +use super::tx_processor::TxChangesProcessor; +use super::CheckpointDataToCommit; +use super::EpochToCommit; +use super::TransactionObjectChangesToCommit; + +const CHECKPOINT_QUEUE_SIZE: usize = 100; + +pub async fn new_handlers( + state: PgIndexerStore, + metrics: IndexerMetrics, + cancel: CancellationToken, + 
committed_checkpoints_tx: Option>>, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, +) -> Result<(CheckpointHandler, u64), IndexerError> { + let start_checkpoint = match start_checkpoint_opt { + Some(start_checkpoint) => start_checkpoint, + None => state + .get_latest_checkpoint_sequence_number() + .await? + .map(|seq| seq.saturating_add(1)) + .unwrap_or_default(), + }; + + let checkpoint_queue_size = std::env::var("CHECKPOINT_QUEUE_SIZE") + .unwrap_or(CHECKPOINT_QUEUE_SIZE.to_string()) + .parse::() + .unwrap(); + let global_metrics = get_metrics().unwrap(); + let (indexed_checkpoint_sender, indexed_checkpoint_receiver) = + mysten_metrics::metered_channel::channel( + checkpoint_queue_size, + &global_metrics + .channel_inflight + .with_label_values(&["checkpoint_indexing"]), + ); + + let state_clone = state.clone(); + let metrics_clone = metrics.clone(); + spawn_monitored_task!(start_tx_checkpoint_commit_task( + state_clone, + metrics_clone, + indexed_checkpoint_receiver, + cancel.clone(), + committed_checkpoints_tx, + start_checkpoint, + end_checkpoint_opt, + )); + Ok(( + CheckpointHandler::new(state, metrics, indexed_checkpoint_sender), + start_checkpoint, + )) +} + +pub struct CheckpointHandler { + state: PgIndexerStore, + metrics: IndexerMetrics, + indexed_checkpoint_sender: mysten_metrics::metered_channel::Sender, +} + +#[async_trait] +impl Worker for CheckpointHandler { + type Result = (); + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let time_now_ms = chrono::Utc::now().timestamp_millis(); + let cp_download_lag = time_now_ms - checkpoint.checkpoint_summary.timestamp_ms as i64; + info!( + "checkpoint download lag for cp {}: {} ms", + checkpoint.checkpoint_summary.sequence_number, cp_download_lag + ); + self.metrics.download_lag_ms.set(cp_download_lag); + self.metrics + .max_downloaded_checkpoint_sequence_number + .set(checkpoint.checkpoint_summary.sequence_number as i64); + self.metrics + 
.downloaded_checkpoint_timestamp_ms + .set(checkpoint.checkpoint_summary.timestamp_ms as i64); + info!( + "Indexer lag: downloaded checkpoint {} with time now {} and checkpoint time {}", + checkpoint.checkpoint_summary.sequence_number, + time_now_ms, + checkpoint.checkpoint_summary.timestamp_ms + ); + let checkpoint_data = Self::index_checkpoint( + &self.state, + checkpoint, + Arc::new(self.metrics.clone()), + Self::index_packages(std::slice::from_ref(checkpoint), &self.metrics), + ) + .await?; + self.indexed_checkpoint_sender.send(checkpoint_data).await?; + Ok(()) + } +} + +impl CheckpointHandler { + fn new( + state: PgIndexerStore, + metrics: IndexerMetrics, + indexed_checkpoint_sender: mysten_metrics::metered_channel::Sender, + ) -> Self { + Self { + state, + metrics, + indexed_checkpoint_sender, + } + } + + async fn index_epoch( + state: &PgIndexerStore, + data: &CheckpointData, + ) -> Result, IndexerError> { + let checkpoint_object_store = EpochEndIndexingObjectStore::new(data); + + let CheckpointData { + transactions, + checkpoint_summary, + checkpoint_contents: _, + } = data; + + // Genesis epoch + if *checkpoint_summary.sequence_number() == 0 { + info!("Processing genesis epoch"); + let system_state_summary = + get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); + return Ok(Some(EpochToCommit { + last_epoch: None, + new_epoch: StartOfEpochUpdate::new(system_state_summary, EpochStartInfo::default()), + })); + } + + // If not end of epoch, return + if checkpoint_summary.end_of_epoch_data.is_none() { + return Ok(None); + } + + let system_state_summary = + get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); + + let epoch_event_opt = transactions + .iter() + .find_map(|t| { + t.events.as_ref()?.data.iter().find_map(|ev| { + if ev.is_system_epoch_info_event() { + Some(bcs::from_bytes::(&ev.contents)) + } else { + None + } + }) + }) + .transpose()?; + if epoch_event_opt.is_none() { + warn!( + "No 
SystemEpochInfoEvent found at end of epoch {}, some epoch data will be set to default.", + checkpoint_summary.epoch, + ); + assert!( + system_state_summary.safe_mode, + "Sui is not in safe mode but no SystemEpochInfoEvent found at end of epoch {}", + checkpoint_summary.epoch + ); + } + + // At some point while committing data in epoch X - 1, we will encounter a new epoch X. We + // want to retrieve X - 2's network total transactions to calculate the number of + // transactions that occurred in epoch X - 1. + let first_tx_sequence_number = match system_state_summary.epoch { + // If first epoch change, this number is 0 + 1 => Ok(0), + _ => { + let last_epoch = system_state_summary.epoch - 2; + state + .get_network_total_transactions_by_end_of_epoch(last_epoch) + .await? + .ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Network total transactions for epoch {} not found", + last_epoch + )) + }) + } + }?; + + let epoch_end_info = EpochEndInfo::new(epoch_event_opt.as_ref()); + let epoch_start_info = EpochStartInfo::new( + checkpoint_summary.sequence_number.saturating_add(1), + checkpoint_summary.network_total_transactions, + epoch_event_opt.as_ref(), + ); + + Ok(Some(EpochToCommit { + last_epoch: Some(EndOfEpochUpdate::new( + checkpoint_summary, + first_tx_sequence_number, + epoch_end_info, + )), + new_epoch: StartOfEpochUpdate::new(system_state_summary, epoch_start_info), + })) + } + + fn derive_object_versions( + object_history_changes: &TransactionObjectChangesToCommit, + ) -> Vec { + let mut object_versions = vec![]; + for changed_obj in object_history_changes.changed_objects.iter() { + object_versions.push(StoredObjectVersion { + object_id: changed_obj.object.id().to_vec(), + object_version: changed_obj.object.version().value() as i64, + cp_sequence_number: changed_obj.checkpoint_sequence_number as i64, + }); + } + for deleted_obj in object_history_changes.deleted_objects.iter() { + object_versions.push(StoredObjectVersion { + 
object_id: deleted_obj.object_id.to_vec(), + object_version: deleted_obj.object_version as i64, + cp_sequence_number: deleted_obj.checkpoint_sequence_number as i64, + }); + } + object_versions + } + + async fn index_checkpoint( + state: &PgIndexerStore, + data: &CheckpointData, + metrics: Arc, + packages: Vec, + ) -> Result { + let checkpoint_seq = data.checkpoint_summary.sequence_number; + info!(checkpoint_seq, "Indexing checkpoint data blob"); + + // Index epoch + let epoch = Self::index_epoch(state, data).await?; + + // Index Objects + let object_changes: TransactionObjectChangesToCommit = + Self::index_objects(data, &metrics).await?; + let object_history_changes: TransactionObjectChangesToCommit = + Self::index_objects_history(data).await?; + let object_versions = Self::derive_object_versions(&object_history_changes); + + let (checkpoint, db_transactions, db_events, db_tx_indices, db_event_indices, db_displays) = { + let CheckpointData { + transactions, + checkpoint_summary, + checkpoint_contents, + } = data; + + let (db_transactions, db_events, db_tx_indices, db_event_indices, db_displays) = + Self::index_transactions( + transactions, + checkpoint_summary, + checkpoint_contents, + &metrics, + ) + .await?; + + let successful_tx_num: u64 = db_transactions.iter().map(|t| t.successful_tx_num).sum(); + ( + IndexedCheckpoint::from_sui_checkpoint( + checkpoint_summary, + checkpoint_contents, + successful_tx_num as usize, + ), + db_transactions, + db_events, + db_tx_indices, + db_event_indices, + db_displays, + ) + }; + let time_now_ms = chrono::Utc::now().timestamp_millis(); + metrics + .index_lag_ms + .set(time_now_ms - checkpoint.timestamp_ms as i64); + metrics + .max_indexed_checkpoint_sequence_number + .set(checkpoint.sequence_number as i64); + metrics + .indexed_checkpoint_timestamp_ms + .set(checkpoint.timestamp_ms as i64); + info!( + "Indexer lag: indexed checkpoint {} with time now {} and checkpoint time {}", + checkpoint.sequence_number, time_now_ms, 
checkpoint.timestamp_ms + ); + + Ok(CheckpointDataToCommit { + checkpoint, + transactions: db_transactions, + events: db_events, + tx_indices: db_tx_indices, + event_indices: db_event_indices, + display_updates: db_displays, + object_changes, + object_history_changes, + object_versions, + packages, + epoch, + }) + } + + async fn index_transactions( + transactions: &[CheckpointTransaction], + checkpoint_summary: &CertifiedCheckpointSummary, + checkpoint_contents: &CheckpointContents, + metrics: &IndexerMetrics, + ) -> IndexerResult<( + Vec, + Vec, + Vec, + Vec, + BTreeMap, + )> { + let checkpoint_seq = checkpoint_summary.sequence_number(); + + let mut tx_seq_num_iter = checkpoint_contents + .enumerate_transactions(checkpoint_summary) + .map(|(seq, execution_digest)| (execution_digest.transaction, seq)); + + if checkpoint_contents.size() != transactions.len() { + return Err(IndexerError::FullNodeReadingError(format!( + "CheckpointContents has different size {} compared to Transactions {} for checkpoint {}", + checkpoint_contents.size(), + transactions.len(), + checkpoint_seq + ))); + } + + let mut db_transactions = Vec::new(); + let mut db_events = Vec::new(); + let mut db_displays = BTreeMap::new(); + let mut db_tx_indices = Vec::new(); + let mut db_event_indices = Vec::new(); + + for tx in transactions { + let CheckpointTransaction { + transaction: sender_signed_data, + effects: fx, + events, + input_objects, + output_objects, + } = tx; + // Unwrap safe - we checked they have equal length above + let (tx_digest, tx_sequence_number) = tx_seq_num_iter.next().unwrap(); + if tx_digest != *sender_signed_data.digest() { + return Err(IndexerError::FullNodeReadingError(format!( + "Transactions has different ordering from CheckpointContents, for checkpoint {}, Mismatch found at {} v.s. 
{}", + checkpoint_seq, tx_digest, sender_signed_data.digest() + ))); + } + + let tx = sender_signed_data.transaction_data(); + let events = events + .as_ref() + .map(|events| events.data.clone()) + .unwrap_or_default(); + + let transaction_kind = if tx.is_system_tx() { + TransactionKind::SystemTransaction + } else { + TransactionKind::ProgrammableTransaction + }; + + db_events.extend(events.iter().enumerate().map(|(idx, event)| { + IndexedEvent::from_event( + tx_sequence_number, + idx as u64, + *checkpoint_seq, + tx_digest, + event, + checkpoint_summary.timestamp_ms, + ) + })); + + db_event_indices.extend( + events.iter().enumerate().map(|(idx, event)| { + EventIndex::from_event(tx_sequence_number, idx as u64, event) + }), + ); + + db_displays.extend( + events + .iter() + .flat_map(StoredDisplay::try_from_event) + .map(|display| (display.object_type.clone(), display)), + ); + + let objects: Vec<_> = input_objects.iter().chain(output_objects.iter()).collect(); + + let (balance_change, object_changes) = + TxChangesProcessor::new(&objects, metrics.clone()) + .get_changes(tx, fx, &tx_digest) + .await?; + + let db_txn = IndexedTransaction { + tx_sequence_number, + tx_digest, + checkpoint_sequence_number: *checkpoint_summary.sequence_number(), + timestamp_ms: checkpoint_summary.timestamp_ms, + sender_signed_data: sender_signed_data.data().clone(), + effects: fx.clone(), + object_changes, + balance_change, + events, + transaction_kind: transaction_kind.clone(), + successful_tx_num: if fx.status().is_ok() { + tx.kind().tx_count() as u64 + } else { + 0 + }, + }; + + db_transactions.push(db_txn); + + // Input Objects + let input_objects = tx + .input_objects() + .expect("committed txns have been validated") + .into_iter() + .map(|obj_kind| obj_kind.object_id()) + .collect(); + + // Changed Objects + let changed_objects = fx + .all_changed_objects() + .into_iter() + .map(|(object_ref, _owner, _write_kind)| object_ref.0) + .collect(); + + // Affected Objects + let 
affected_objects = fx + .object_changes() + .into_iter() + .map(|ObjectChange { id, .. }| id) + .collect(); + + // Payers + let payers = vec![tx.gas_owner()]; + + // Sender + let sender = tx.sender(); + + // Recipients + let recipients = fx + .all_changed_objects() + .into_iter() + .filter_map(|(_object_ref, owner, _write_kind)| match owner { + Owner::AddressOwner(address) => Some(address), + _ => None, + }) + .unique() + .collect(); + + // Move Calls + let move_calls = tx + .move_calls() + .into_iter() + .map(|(p, m, f)| (*p, m.to_string(), f.to_string())) + .collect(); + + db_tx_indices.push(TxIndex { + tx_sequence_number, + transaction_digest: tx_digest, + checkpoint_sequence_number: *checkpoint_seq, + input_objects, + changed_objects, + affected_objects, + sender, + payers, + recipients, + move_calls, + tx_kind: transaction_kind, + }); + } + Ok(( + db_transactions, + db_events, + db_tx_indices, + db_event_indices, + db_displays, + )) + } + + pub(crate) async fn index_objects( + data: &CheckpointData, + metrics: &IndexerMetrics, + ) -> Result { + let _timer = metrics.indexing_objects_latency.start_timer(); + let checkpoint_seq = data.checkpoint_summary.sequence_number; + + let eventually_removed_object_refs_post_version = + data.eventually_removed_object_refs_post_version(); + let indexed_eventually_removed_objects = eventually_removed_object_refs_post_version + .into_iter() + .map(|obj_ref| IndexedDeletedObject { + object_id: obj_ref.0, + object_version: obj_ref.1.into(), + checkpoint_sequence_number: checkpoint_seq, + }) + .collect(); + + let latest_live_output_objects = data.latest_live_output_objects(); + let changed_objects = latest_live_output_objects + .into_iter() + .map(|o| { + try_extract_df_kind(o) + .map(|df_kind| IndexedObject::from_object(checkpoint_seq, o.clone(), df_kind)) + }) + .collect::, _>>()?; + + Ok(TransactionObjectChangesToCommit { + changed_objects, + deleted_objects: indexed_eventually_removed_objects, + }) + } + + // similar to 
index_objects, but objects_history keeps all versions of objects + async fn index_objects_history( + data: &CheckpointData, + ) -> Result { + let checkpoint_seq = data.checkpoint_summary.sequence_number; + let deleted_objects = data + .transactions + .iter() + .flat_map(|tx| tx.removed_object_refs_post_version()) + .collect::>(); + let indexed_deleted_objects: Vec = deleted_objects + .into_iter() + .map(|obj_ref| IndexedDeletedObject { + object_id: obj_ref.0, + object_version: obj_ref.1.into(), + checkpoint_sequence_number: checkpoint_seq, + }) + .collect(); + + let output_objects: Vec<_> = data + .transactions + .iter() + .flat_map(|tx| &tx.output_objects) + .collect(); + + // TODO(gegaowp): the current df_info implementation is not correct, + // but we have decided remove all df_* except df_kind. + let changed_objects = output_objects + .into_iter() + .map(|o| { + try_extract_df_kind(o) + .map(|df_kind| IndexedObject::from_object(checkpoint_seq, o.clone(), df_kind)) + }) + .collect::, _>>()?; + + Ok(TransactionObjectChangesToCommit { + changed_objects, + deleted_objects: indexed_deleted_objects, + }) + } + + fn index_packages( + checkpoint_data: &[CheckpointData], + metrics: &IndexerMetrics, + ) -> Vec { + let _timer = metrics.indexing_packages_latency.start_timer(); + checkpoint_data + .iter() + .flat_map(|data| { + let checkpoint_sequence_number = data.checkpoint_summary.sequence_number; + data.transactions + .iter() + .flat_map(|tx| &tx.output_objects) + .filter_map(|o| { + if let sui_types::object::Data::Package(p) = &o.data { + Some(IndexedPackage { + package_id: o.id(), + move_package: p.clone(), + checkpoint_sequence_number, + }) + } else { + None + } + }) + .collect::>() + }) + .collect() + } +} + +/// If `o` is a dynamic `Field`, determine whether it represents a Dynamic Field or a Dynamic +/// Object Field based on its type. 
+fn try_extract_df_kind(o: &Object) -> IndexerResult> { + // Skip if not a move object + let Some(move_object) = o.data.try_as_move() else { + return Ok(None); + }; + + if !move_object.type_().is_dynamic_field() { + return Ok(None); + } + + let type_: StructTag = move_object.type_().clone().into(); + let [name, _] = type_.type_params.as_slice() else { + return Ok(None); + }; + + Ok(Some( + if matches!(name, TypeTag::Struct(s) if DynamicFieldInfo::is_dynamic_object_field_wrapper(s)) + { + DynamicFieldType::DynamicObject + } else { + DynamicFieldType::DynamicField + }, + )) +} diff --git a/crates/sui-mvr-indexer/src/handlers/committer.rs b/crates/sui-mvr-indexer/src/handlers/committer.rs new file mode 100644 index 0000000000000..9cc174e8496fa --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/committer.rs @@ -0,0 +1,281 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::{BTreeMap, HashMap}; + +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use tap::tap::TapFallible; +use tokio::sync::watch; +use tokio_util::sync::CancellationToken; +use tracing::instrument; +use tracing::{error, info}; + +use crate::metrics::IndexerMetrics; +use crate::store::IndexerStore; +use crate::types::IndexerResult; + +use super::{CheckpointDataToCommit, CommitterTables, CommitterWatermark, EpochToCommit}; + +pub(crate) const CHECKPOINT_COMMIT_BATCH_SIZE: usize = 100; + +pub async fn start_tx_checkpoint_commit_task( + state: S, + metrics: IndexerMetrics, + tx_indexing_receiver: mysten_metrics::metered_channel::Receiver, + cancel: CancellationToken, + mut committed_checkpoints_tx: Option>>, + mut next_checkpoint_sequence_number: CheckpointSequenceNumber, + end_checkpoint_opt: Option, +) -> IndexerResult<()> +where + S: IndexerStore + Clone + Sync + Send + 'static, +{ + use futures::StreamExt; + + info!("Indexer checkpoint commit task started..."); + let 
checkpoint_commit_batch_size = std::env::var("CHECKPOINT_COMMIT_BATCH_SIZE") + .unwrap_or(CHECKPOINT_COMMIT_BATCH_SIZE.to_string()) + .parse::() + .unwrap(); + info!("Using checkpoint commit batch size {checkpoint_commit_batch_size}"); + + let mut stream = mysten_metrics::metered_channel::ReceiverStream::new(tx_indexing_receiver) + .ready_chunks(checkpoint_commit_batch_size); + + let mut unprocessed = HashMap::new(); + let mut batch = vec![]; + + while let Some(indexed_checkpoint_batch) = stream.next().await { + if cancel.is_cancelled() { + break; + } + + // split the batch into smaller batches per epoch to handle partitioning + for checkpoint in indexed_checkpoint_batch { + unprocessed.insert(checkpoint.checkpoint.sequence_number, checkpoint); + } + while let Some(checkpoint) = unprocessed.remove(&next_checkpoint_sequence_number) { + let epoch = checkpoint.epoch.clone(); + batch.push(checkpoint); + next_checkpoint_sequence_number += 1; + let epoch_number_option = epoch.as_ref().map(|epoch| epoch.new_epoch_id()); + // The batch will consist of contiguous checkpoints and at most one epoch boundary at + // the end. 
+ if batch.len() == checkpoint_commit_batch_size || epoch.is_some() { + commit_checkpoints( + &state, + batch, + epoch, + &metrics, + &mut committed_checkpoints_tx, + ) + .await; + batch = vec![]; + } + if let Some(epoch_number) = epoch_number_option { + state.upload_display(epoch_number).await.tap_err(|e| { + error!( + "Failed to upload display table before epoch {} with error: {}", + epoch_number, + e.to_string() + ); + })?; + } + // stop adding to the commit batch if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } + } + if !batch.is_empty() { + commit_checkpoints(&state, batch, None, &metrics, &mut committed_checkpoints_tx).await; + batch = vec![]; + } + + // stop the commit task if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } + } + Ok(()) +} + +/// Writes indexed checkpoint data to the database, and then update watermark upper bounds and +/// metrics. Expects `indexed_checkpoint_batch` to be non-empty, and contain contiguous checkpoints. +/// There can be at most one epoch boundary at the end. If an epoch boundary is detected, +/// epoch-partitioned tables must be advanced. 
+// Unwrap: Caller needs to make sure indexed_checkpoint_batch is not empty +#[instrument(skip_all, fields( + first = indexed_checkpoint_batch.first().as_ref().unwrap().checkpoint.sequence_number, + last = indexed_checkpoint_batch.last().as_ref().unwrap().checkpoint.sequence_number +))] +async fn commit_checkpoints( + state: &S, + indexed_checkpoint_batch: Vec, + epoch: Option, + metrics: &IndexerMetrics, + committed_checkpoints_tx: &mut Option>>, +) where + S: IndexerStore + Clone + Sync + Send + 'static, +{ + let mut checkpoint_batch = vec![]; + let mut tx_batch = vec![]; + let mut events_batch = vec![]; + let mut tx_indices_batch = vec![]; + let mut event_indices_batch = vec![]; + let mut display_updates_batch = BTreeMap::new(); + let mut object_changes_batch = vec![]; + let mut object_history_changes_batch = vec![]; + let mut object_versions_batch = vec![]; + let mut packages_batch = vec![]; + + for indexed_checkpoint in indexed_checkpoint_batch { + let CheckpointDataToCommit { + checkpoint, + transactions, + events, + event_indices, + tx_indices, + display_updates, + object_changes, + object_history_changes, + object_versions, + packages, + epoch: _, + } = indexed_checkpoint; + checkpoint_batch.push(checkpoint); + tx_batch.push(transactions); + events_batch.push(events); + tx_indices_batch.push(tx_indices); + event_indices_batch.push(event_indices); + display_updates_batch.extend(display_updates.into_iter()); + object_changes_batch.push(object_changes); + object_history_changes_batch.push(object_history_changes); + object_versions_batch.push(object_versions); + packages_batch.push(packages); + } + + let first_checkpoint_seq = checkpoint_batch.first().unwrap().sequence_number; + let last_checkpoint = checkpoint_batch.last().unwrap(); + let indexer_progress = IndexerProgress { + checkpoint: last_checkpoint.sequence_number, + network_total_transactions: last_checkpoint.network_total_transactions, + }; + let committer_watermark = 
CommitterWatermark::from(last_checkpoint); + + let guard = metrics.checkpoint_db_commit_latency.start_timer(); + let tx_batch = tx_batch.into_iter().flatten().collect::>(); + let packages_batch = packages_batch.into_iter().flatten().collect::>(); + let checkpoint_num = checkpoint_batch.len(); + let tx_count = tx_batch.len(); + + { + let _step_1_guard = metrics.checkpoint_db_commit_latency_step_1.start_timer(); + let mut persist_tasks = vec![ + state.persist_packages(packages_batch), + state.persist_object_history(object_history_changes_batch.clone()), + ]; + if let Some(epoch_data) = epoch.clone() { + persist_tasks.push(state.persist_epoch(epoch_data)); + } + futures::future::join_all(persist_tasks) + .await + .into_iter() + .map(|res| { + if res.is_err() { + error!("Failed to persist data with error: {:?}", res); + } + res + }) + .collect::>>() + .expect("Persisting data into DB should not fail."); + } + + let is_epoch_end = epoch.is_some(); + + // On epoch boundary, we need to modify the existing partitions' upper bound, and introduce a + // new partition for incoming data for the upcoming epoch. + if let Some(epoch_data) = epoch { + state + .advance_epoch(epoch_data) + .await + .tap_err(|e| { + error!("Failed to advance epoch with error: {}", e.to_string()); + }) + .expect("Advancing epochs in DB should not fail."); + metrics.total_epoch_committed.inc(); + } + + state + .persist_checkpoints(checkpoint_batch) + .await + .tap_err(|e| { + error!( + "Failed to persist checkpoint data with error: {}", + e.to_string() + ); + }) + .expect("Persisting data into DB should not fail."); + + if is_epoch_end { + // The epoch has advanced so we update the configs for the new protocol version, if it has changed. 
+ let chain_id = state + .get_chain_identifier() + .await + .expect("Failed to get chain identifier") + .expect("Chain identifier should have been indexed at this point"); + let _ = state + .persist_protocol_configs_and_feature_flags(chain_id) + .await; + } + + state + .update_watermarks_upper_bound::(committer_watermark) + .await + .tap_err(|e| { + error!( + "Failed to update watermark upper bound with error: {}", + e.to_string() + ); + }) + .expect("Updating watermark upper bound in DB should not fail."); + + let elapsed = guard.stop_and_record(); + + info!( + elapsed, + "Checkpoint {}-{} committed with {} transactions.", + first_checkpoint_seq, + committer_watermark.checkpoint_hi_inclusive, + tx_count, + ); + metrics + .latest_tx_checkpoint_sequence_number + .set(committer_watermark.checkpoint_hi_inclusive as i64); + metrics + .total_tx_checkpoint_committed + .inc_by(checkpoint_num as u64); + metrics.total_transaction_committed.inc_by(tx_count as u64); + metrics.transaction_per_checkpoint.observe( + tx_count as f64 + / (committer_watermark.checkpoint_hi_inclusive - first_checkpoint_seq + 1) as f64, + ); + // 1000.0 is not necessarily the batch size, it's to roughly map average tx commit latency to [0.1, 1] seconds, + // which is well covered by DB_COMMIT_LATENCY_SEC_BUCKETS. + metrics + .thousand_transaction_avg_db_commit_latency + .observe(elapsed * 1000.0 / tx_count as f64); + + if let Some(committed_checkpoints_tx) = committed_checkpoints_tx.as_mut() { + if let Err(err) = committed_checkpoints_tx.send(Some(indexer_progress)) { + error!( + "Failed to send committed checkpoints to the watch channel: {}", + err + ); + } + } +} diff --git a/crates/sui-mvr-indexer/src/handlers/mod.rs b/crates/sui-mvr-indexer/src/handlers/mod.rs new file mode 100644 index 0000000000000..403ee8e22706c --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/mod.rs @@ -0,0 +1,316 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use async_trait::async_trait; +use futures::{FutureExt, StreamExt}; + +use serde::{Deserialize, Serialize}; +use sui_rest_api::CheckpointData; +use tokio_util::sync::CancellationToken; + +use crate::{ + errors::IndexerError, + models::{ + display::StoredDisplay, + epoch::{EndOfEpochUpdate, StartOfEpochUpdate}, + obj_indices::StoredObjectVersion, + }, + types::{ + EventIndex, IndexedCheckpoint, IndexedDeletedObject, IndexedEvent, IndexedObject, + IndexedPackage, IndexedTransaction, IndexerResult, TxIndex, + }, +}; + +pub mod checkpoint_handler; +pub mod committer; +pub mod objects_snapshot_handler; +pub mod pruner; +pub mod tx_processor; + +pub(crate) const CHECKPOINT_COMMIT_BATCH_SIZE: usize = 100; +pub(crate) const UNPROCESSED_CHECKPOINT_SIZE_LIMIT: usize = 1000; + +#[derive(Debug)] +pub struct CheckpointDataToCommit { + pub checkpoint: IndexedCheckpoint, + pub transactions: Vec, + pub events: Vec, + pub event_indices: Vec, + pub tx_indices: Vec, + pub display_updates: BTreeMap, + pub object_changes: TransactionObjectChangesToCommit, + pub object_history_changes: TransactionObjectChangesToCommit, + pub object_versions: Vec, + pub packages: Vec, + pub epoch: Option, +} + +#[derive(Clone, Debug)] +pub struct TransactionObjectChangesToCommit { + pub changed_objects: Vec, + pub deleted_objects: Vec, +} + +#[derive(Clone, Debug)] +pub struct EpochToCommit { + pub last_epoch: Option, + pub new_epoch: StartOfEpochUpdate, +} + +impl EpochToCommit { + pub fn new_epoch_id(&self) -> u64 { + self.new_epoch.epoch as u64 + } + + pub fn new_epoch_first_checkpoint_id(&self) -> u64 { + self.new_epoch.first_checkpoint_id as u64 + } + + pub fn last_epoch_total_transactions(&self) -> Option { + self.last_epoch + .as_ref() + .map(|e| e.epoch_total_transactions as u64) + } + + pub fn new_epoch_first_tx_sequence_number(&self) -> u64 { + self.new_epoch.first_tx_sequence_number as u64 + } +} + +pub struct 
CommonHandler { + handler: Box>, +} + +impl CommonHandler { + pub fn new(handler: Box>) -> Self { + Self { handler } + } + + async fn start_transform_and_load( + &self, + cp_receiver: mysten_metrics::metered_channel::Receiver<(CommitterWatermark, T)>, + cancel: CancellationToken, + start_checkpoint: u64, + end_checkpoint_opt: Option, + ) -> IndexerResult<()> { + let checkpoint_commit_batch_size = std::env::var("CHECKPOINT_COMMIT_BATCH_SIZE") + .unwrap_or(CHECKPOINT_COMMIT_BATCH_SIZE.to_string()) + .parse::() + .unwrap(); + let mut stream = mysten_metrics::metered_channel::ReceiverStream::new(cp_receiver) + .ready_chunks(checkpoint_commit_batch_size); + + // Mapping of ordered checkpoint data to ensure that we process them in order. The key is + // just the checkpoint sequence number, and the tuple is (CommitterWatermark, T). + let mut unprocessed: BTreeMap = BTreeMap::new(); + let mut tuple_batch = vec![]; + let mut next_cp_to_process = start_checkpoint; + + loop { + if cancel.is_cancelled() { + return Ok(()); + } + + // Try to fetch new data tuple from the stream + if unprocessed.len() >= UNPROCESSED_CHECKPOINT_SIZE_LIMIT { + tracing::info!( + "Unprocessed checkpoint size reached limit {}, skip reading from stream...", + UNPROCESSED_CHECKPOINT_SIZE_LIMIT + ); + } else { + // Try to fetch new data tuple from the stream + match stream.next().now_or_never() { + Some(Some(tuple_chunk)) => { + if cancel.is_cancelled() { + return Ok(()); + } + for tuple in tuple_chunk { + unprocessed.insert(tuple.0.checkpoint_hi_inclusive, tuple); + } + } + Some(None) => break, // Stream has ended + None => {} // No new data tuple available right now + } + } + + // Process unprocessed checkpoints, even no new checkpoints from stream + let checkpoint_lag_limiter = self.handler.get_max_committable_checkpoint().await?; + let max_commitable_cp = std::cmp::min( + checkpoint_lag_limiter, + end_checkpoint_opt.unwrap_or(u64::MAX), + ); + // Stop pushing to tuple_batch if we've reached the end 
checkpoint. + while next_cp_to_process <= max_commitable_cp { + if let Some(data_tuple) = unprocessed.remove(&next_cp_to_process) { + tuple_batch.push(data_tuple); + next_cp_to_process += 1; + } else { + break; + } + } + + if !tuple_batch.is_empty() { + let committer_watermark = tuple_batch.last().unwrap().0; + let batch = tuple_batch.into_iter().map(|t| t.1).collect(); + self.handler.load(batch).await.map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to load transformed data into DB for handler {}: {}", + self.handler.name(), + e + )) + })?; + self.handler.set_watermark_hi(committer_watermark).await?; + tuple_batch = vec![]; + } + + if let Some(end_checkpoint) = end_checkpoint_opt { + if next_cp_to_process > end_checkpoint { + tracing::info!( + "Reached end checkpoint, stopping handler {}...", + self.handler.name() + ); + return Ok(()); + } + } + } + Err(IndexerError::ChannelClosed(format!( + "Checkpoint channel is closed unexpectedly for handler {}", + self.handler.name() + ))) + } +} + +#[async_trait] +pub trait Handler: Send + Sync { + /// return handler name + fn name(&self) -> String; + + /// commit batch of transformed data to DB + async fn load(&self, batch: Vec) -> IndexerResult<()>; + + /// read high watermark of the table DB + async fn get_watermark_hi(&self) -> IndexerResult>; + + /// Updates the relevant entries on the `watermarks` table with the full `CommitterWatermark`, + /// which tracks the latest epoch, cp, and tx sequence number of the committed batch. + async fn set_watermark_hi(&self, watermark: CommitterWatermark) -> IndexerResult<()>; + + /// By default, return u64::MAX, which means no extra waiting is needed before commiting; + /// get max committable checkpoint, for handlers that want to wait for some condition before commiting, + /// one use-case is the objects snapshot handler, + /// which waits for the lag between snapshot and latest checkpoint to reach a certain threshold. 
+ async fn get_max_committable_checkpoint(&self) -> IndexerResult { + Ok(u64::MAX) + } +} + +/// The indexer writer operates on checkpoint data, which contains information on the current epoch, +/// checkpoint, and transaction. These three numbers form the watermark upper bound for each +/// committed table. The reader and pruner are responsible for determining which of the three units +/// will be used for a particular table. +#[derive(Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] +pub struct CommitterWatermark { + pub epoch_hi_inclusive: u64, + pub checkpoint_hi_inclusive: u64, + pub tx_hi: u64, +} + +impl From<&IndexedCheckpoint> for CommitterWatermark { + fn from(checkpoint: &IndexedCheckpoint) -> Self { + Self { + epoch_hi_inclusive: checkpoint.epoch, + checkpoint_hi_inclusive: checkpoint.sequence_number, + tx_hi: checkpoint.network_total_transactions, + } + } +} + +impl From<&CheckpointData> for CommitterWatermark { + fn from(checkpoint: &CheckpointData) -> Self { + Self { + epoch_hi_inclusive: checkpoint.checkpoint_summary.epoch, + checkpoint_hi_inclusive: checkpoint.checkpoint_summary.sequence_number, + tx_hi: checkpoint.checkpoint_summary.network_total_transactions, + } + } +} + +/// Enum representing tables that the committer handler writes to. 
+#[derive( + Debug, + Eq, + PartialEq, + strum_macros::Display, + strum_macros::EnumString, + strum_macros::EnumIter, + strum_macros::AsRefStr, + Hash, + Serialize, + Deserialize, + Clone, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum CommitterTables { + // Unpruned tables + ChainIdentifier, + Display, + Epochs, + FeatureFlags, + FullObjectsHistory, + Objects, + ObjectsVersion, + Packages, + ProtocolConfigs, + RawCheckpoints, + + // Prunable tables + ObjectsHistory, + Transactions, + Events, + + EventEmitPackage, + EventEmitModule, + EventSenders, + EventStructInstantiation, + EventStructModule, + EventStructName, + EventStructPackage, + + TxAffectedAddresses, + TxAffectedObjects, + TxCallsPkg, + TxCallsMod, + TxCallsFun, + TxChangedObjects, + TxDigests, + TxInputObjects, + TxKinds, + TxRecipients, + TxSenders, + + Checkpoints, + PrunerCpWatermark, +} + +/// Enum representing tables that the objects snapshot handler writes to. +#[derive( + Debug, + Eq, + PartialEq, + strum_macros::Display, + strum_macros::EnumString, + strum_macros::EnumIter, + strum_macros::AsRefStr, + Hash, + Serialize, + Deserialize, + Clone, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum ObjectsSnapshotHandlerTables { + ObjectsSnapshot, +} diff --git a/crates/sui-mvr-indexer/src/handlers/objects_snapshot_handler.rs b/crates/sui-mvr-indexer/src/handlers/objects_snapshot_handler.rs new file mode 100644 index 0000000000000..d37d532827947 --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/objects_snapshot_handler.rs @@ -0,0 +1,139 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use mysten_metrics::get_metrics; +use mysten_metrics::metered_channel::Sender; +use mysten_metrics::spawn_monitored_task; +use sui_data_ingestion_core::Worker; +use sui_rest_api::CheckpointData; +use tokio_util::sync::CancellationToken; +use tracing::info; + +use crate::config::SnapshotLagConfig; +use crate::store::PgIndexerStore; +use crate::types::IndexerResult; +use crate::{metrics::IndexerMetrics, store::IndexerStore}; + +use super::checkpoint_handler::CheckpointHandler; +use super::{CommitterWatermark, ObjectsSnapshotHandlerTables, TransactionObjectChangesToCommit}; +use super::{CommonHandler, Handler}; + +#[derive(Clone)] +pub struct ObjectsSnapshotHandler { + pub store: PgIndexerStore, + pub sender: Sender<(CommitterWatermark, TransactionObjectChangesToCommit)>, + snapshot_config: SnapshotLagConfig, + metrics: IndexerMetrics, +} + +pub struct CheckpointObjectChanges { + pub checkpoint_sequence_number: u64, + pub object_changes: TransactionObjectChangesToCommit, +} + +#[async_trait] +impl Worker for ObjectsSnapshotHandler { + type Result = (); + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let transformed_data = CheckpointHandler::index_objects(checkpoint, &self.metrics).await?; + self.sender + .send((CommitterWatermark::from(checkpoint), transformed_data)) + .await?; + Ok(()) + } +} + +#[async_trait] +impl Handler for ObjectsSnapshotHandler { + fn name(&self) -> String { + "objects_snapshot_handler".to_string() + } + + async fn load( + &self, + transformed_data: Vec, + ) -> IndexerResult<()> { + self.store + .persist_objects_snapshot(transformed_data) + .await?; + Ok(()) + } + + async fn get_watermark_hi(&self) -> IndexerResult> { + self.store + .get_latest_object_snapshot_checkpoint_sequence_number() + .await + } + + async fn set_watermark_hi(&self, watermark: CommitterWatermark) -> IndexerResult<()> { + self.store + 
.update_watermarks_upper_bound::(watermark) + .await?; + + self.metrics + .latest_object_snapshot_sequence_number + .set(watermark.checkpoint_hi_inclusive as i64); + Ok(()) + } + + async fn get_max_committable_checkpoint(&self) -> IndexerResult { + let latest_checkpoint = self.store.get_latest_checkpoint_sequence_number().await?; + Ok(latest_checkpoint + .map(|seq| seq.saturating_sub(self.snapshot_config.snapshot_min_lag as u64)) + .unwrap_or_default()) // hold snapshot handler until at least one checkpoint is in DB + } +} + +pub async fn start_objects_snapshot_handler( + store: PgIndexerStore, + metrics: IndexerMetrics, + snapshot_config: SnapshotLagConfig, + cancel: CancellationToken, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, +) -> IndexerResult<(ObjectsSnapshotHandler, u64)> { + info!("Starting object snapshot handler..."); + + let global_metrics = get_metrics().unwrap(); + let (sender, receiver) = mysten_metrics::metered_channel::channel( + 600, + &global_metrics + .channel_inflight + .with_label_values(&["objects_snapshot_handler_checkpoint_data"]), + ); + + let objects_snapshot_handler = + ObjectsSnapshotHandler::new(store.clone(), sender, metrics.clone(), snapshot_config); + + let next_cp_from_db = objects_snapshot_handler + .get_watermark_hi() + .await? 
+ .map(|cp| cp.saturating_add(1)) + .unwrap_or_default(); + let start_checkpoint = start_checkpoint_opt.unwrap_or(next_cp_from_db); + let common_handler = CommonHandler::new(Box::new(objects_snapshot_handler.clone())); + spawn_monitored_task!(common_handler.start_transform_and_load( + receiver, + cancel, + start_checkpoint, + end_checkpoint_opt, + )); + Ok((objects_snapshot_handler, start_checkpoint)) +} + +impl ObjectsSnapshotHandler { + pub fn new( + store: PgIndexerStore, + sender: Sender<(CommitterWatermark, TransactionObjectChangesToCommit)>, + metrics: IndexerMetrics, + snapshot_config: SnapshotLagConfig, + ) -> ObjectsSnapshotHandler { + Self { + store, + sender, + metrics, + snapshot_config, + } + } +} diff --git a/crates/sui-mvr-indexer/src/handlers/pruner.rs b/crates/sui-mvr-indexer/src/handlers/pruner.rs new file mode 100644 index 0000000000000..85b6faa12f071 --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/pruner.rs @@ -0,0 +1,288 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use mysten_metrics::spawn_monitored_task; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Duration; +use strum_macros; +use tokio_util::sync::CancellationToken; +use tracing::{error, info}; + +use crate::config::RetentionConfig; +use crate::errors::IndexerError; +use crate::store::pg_partition_manager::PgPartitionManager; +use crate::store::PgIndexerStore; +use crate::{metrics::IndexerMetrics, store::IndexerStore, types::IndexerResult}; + +pub struct Pruner { + pub store: PgIndexerStore, + pub partition_manager: PgPartitionManager, + // TODO: (wlmyng) - we can remove this when pruner logic is updated to use `retention_policies`. + pub epochs_to_keep: u64, + pub retention_policies: HashMap, + pub metrics: IndexerMetrics, +} + +/// Enum representing tables that the pruner is allowed to prune. This corresponds to table names in +/// the database, and should be used in lieu of string literals. 
This enum is also meant to +/// facilitate the process of determining which unit (epoch, cp, or tx) should be used for the +/// table's range. Pruner will ignore any table that is not listed here. +#[derive( + Debug, + Eq, + PartialEq, + strum_macros::Display, + strum_macros::EnumString, + strum_macros::EnumIter, + strum_macros::AsRefStr, + Hash, + Serialize, + Deserialize, + Clone, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum PrunableTable { + ObjectsHistory, + Transactions, + Events, + + EventEmitPackage, + EventEmitModule, + EventSenders, + EventStructInstantiation, + EventStructModule, + EventStructName, + EventStructPackage, + + TxAffectedAddresses, + TxAffectedObjects, + TxCallsPkg, + TxCallsMod, + TxCallsFun, + TxChangedObjects, + TxDigests, + TxInputObjects, + TxKinds, + TxRecipients, + TxSenders, + + Checkpoints, + PrunerCpWatermark, +} + +impl PrunableTable { + pub fn select_reader_lo(&self, cp: u64, tx: u64) -> u64 { + match self { + PrunableTable::ObjectsHistory => cp, + PrunableTable::Transactions => tx, + PrunableTable::Events => tx, + + PrunableTable::EventEmitPackage => tx, + PrunableTable::EventEmitModule => tx, + PrunableTable::EventSenders => tx, + PrunableTable::EventStructInstantiation => tx, + PrunableTable::EventStructModule => tx, + PrunableTable::EventStructName => tx, + PrunableTable::EventStructPackage => tx, + + PrunableTable::TxAffectedAddresses => tx, + PrunableTable::TxAffectedObjects => tx, + PrunableTable::TxCallsPkg => tx, + PrunableTable::TxCallsMod => tx, + PrunableTable::TxCallsFun => tx, + PrunableTable::TxChangedObjects => tx, + PrunableTable::TxDigests => tx, + PrunableTable::TxInputObjects => tx, + PrunableTable::TxKinds => tx, + PrunableTable::TxRecipients => tx, + PrunableTable::TxSenders => tx, + + PrunableTable::Checkpoints => cp, + PrunableTable::PrunerCpWatermark => cp, + } + } +} + +impl Pruner { + /// Instantiates a pruner with default retention and overrides. 
Pruner will finalize the + /// retention policies so there is a value for every prunable table. + pub fn new( + store: PgIndexerStore, + retention_config: RetentionConfig, + metrics: IndexerMetrics, + ) -> Result { + let partition_manager = PgPartitionManager::new(store.pool())?; + let epochs_to_keep = retention_config.epochs_to_keep; + let retention_policies = retention_config.retention_policies(); + + Ok(Self { + store, + epochs_to_keep, + partition_manager, + retention_policies, + metrics, + }) + } + + /// Given a table name, return the number of epochs to keep for that table. Return `None` if the + /// table is not prunable. + fn table_retention(&self, table_name: &str) -> Option { + if let Ok(variant) = table_name.parse::() { + self.retention_policies.get(&variant).copied() + } else { + None + } + } + + pub async fn start(&self, cancel: CancellationToken) -> IndexerResult<()> { + let store_clone = self.store.clone(); + let retention_policies = self.retention_policies.clone(); + let cancel_clone = cancel.clone(); + spawn_monitored_task!(update_watermarks_lower_bounds_task( + store_clone, + retention_policies, + cancel_clone + )); + + let mut last_seen_max_epoch = 0; + // The first epoch that has not yet been pruned. + let mut next_prune_epoch = None; + while !cancel.is_cancelled() { + let (min_epoch, max_epoch) = self.store.get_available_epoch_range().await?; + if max_epoch == last_seen_max_epoch { + tokio::time::sleep(Duration::from_secs(5)).await; + continue; + } + last_seen_max_epoch = max_epoch; + + // Not all partitioned tables are epoch-partitioned, so we need to filter them out. + let table_partitions: HashMap<_, _> = self + .partition_manager + .get_table_partitions() + .await? 
+ .into_iter() + .filter(|(table_name, _)| { + self.partition_manager + .get_strategy(table_name) + .is_epoch_partitioned() + }) + .collect(); + + for (table_name, (min_partition, max_partition)) in &table_partitions { + if let Some(epochs_to_keep) = self.table_retention(table_name) { + if last_seen_max_epoch != *max_partition { + error!( + "Epochs are out of sync for table {}: max_epoch={}, max_partition={}", + table_name, last_seen_max_epoch, max_partition + ); + } + + for epoch in + *min_partition..last_seen_max_epoch.saturating_sub(epochs_to_keep - 1) + { + if cancel.is_cancelled() { + info!("Pruner task cancelled."); + return Ok(()); + } + self.partition_manager + .drop_table_partition(table_name.clone(), epoch) + .await?; + info!( + "Batch dropped table partition {} epoch {}", + table_name, epoch + ); + } + } + } + + // TODO: (wlmyng) Once we have the watermarks table, we can iterate through each row + // returned from `watermarks`, look it up against `retention_policies`, and process them + // independently. This also means that pruning overrides will only apply for + // epoch-partitioned tables right now. + let prune_to_epoch = last_seen_max_epoch.saturating_sub(self.epochs_to_keep - 1); + let prune_start_epoch = next_prune_epoch.unwrap_or(min_epoch); + for epoch in prune_start_epoch..prune_to_epoch { + if cancel.is_cancelled() { + info!("Pruner task cancelled."); + return Ok(()); + } + info!("Pruning epoch {}", epoch); + if let Err(err) = self.store.prune_epoch(epoch).await { + error!("Failed to prune epoch {}: {}", epoch, err); + break; + }; + self.metrics.last_pruned_epoch.set(epoch as i64); + info!("Pruned epoch {}", epoch); + next_prune_epoch = Some(epoch + 1); + } + } + info!("Pruner task cancelled."); + Ok(()) + } +} + +/// Task to periodically query the `watermarks` table and update the lower bounds for all watermarks +/// if the entry exceeds epoch-level retention policy. 
+async fn update_watermarks_lower_bounds_task( + store: PgIndexerStore, + retention_policies: HashMap, + cancel: CancellationToken, +) -> IndexerResult<()> { + let mut interval = tokio::time::interval(Duration::from_secs(5)); + loop { + tokio::select! { + _ = cancel.cancelled() => { + info!("Pruner watermark lower bound update task cancelled."); + return Ok(()); + } + _ = interval.tick() => { + update_watermarks_lower_bounds(&store, &retention_policies, &cancel).await?; + } + } + } +} + +/// Fetches all entries from the `watermarks` table, and updates the `reader_lo` for each entry if +/// its epoch range exceeds the respective retention policy. +async fn update_watermarks_lower_bounds( + store: &PgIndexerStore, + retention_policies: &HashMap, + cancel: &CancellationToken, +) -> IndexerResult<()> { + let (watermarks, _) = store.get_watermarks().await?; + let mut lower_bound_updates = vec![]; + + for watermark in watermarks.iter() { + if cancel.is_cancelled() { + info!("Pruner watermark lower bound update task cancelled."); + return Ok(()); + } + + let Some(prunable_table) = watermark.entity() else { + continue; + }; + + let Some(epochs_to_keep) = retention_policies.get(&prunable_table) else { + error!( + "No retention policy found for prunable table {}", + prunable_table + ); + continue; + }; + + if let Some(new_epoch_lo) = watermark.new_epoch_lo(*epochs_to_keep) { + lower_bound_updates.push((prunable_table, new_epoch_lo)); + }; + } + + if !lower_bound_updates.is_empty() { + store + .update_watermarks_lower_bound(lower_bound_updates) + .await?; + info!("Finished updating lower bounds for watermarks"); + } + + Ok(()) +} diff --git a/crates/sui-mvr-indexer/src/handlers/tx_processor.rs b/crates/sui-mvr-indexer/src/handlers/tx_processor.rs new file mode 100644 index 0000000000000..0a8051ee8eabb --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/tx_processor.rs @@ -0,0 +1,223 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; + +use async_trait::async_trait; +use sui_json_rpc::get_balance_changes_from_effect; +use sui_json_rpc::get_object_changes; +use sui_json_rpc::ObjectProvider; +use sui_rest_api::CheckpointData; +use sui_types::base_types::ObjectID; +use sui_types::base_types::SequenceNumber; +use sui_types::digests::TransactionDigest; +use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; +use sui_types::object::Object; +use sui_types::transaction::{TransactionData, TransactionDataAPI}; + +use crate::errors::IndexerError; +use crate::metrics::IndexerMetrics; +use crate::types::{IndexedObjectChange, IndexerResult}; + +pub struct InMemObjectCache { + id_map: HashMap, + seq_map: HashMap<(ObjectID, SequenceNumber), Object>, +} + +impl InMemObjectCache { + pub fn new() -> Self { + Self { + id_map: HashMap::new(), + seq_map: HashMap::new(), + } + } + + pub fn insert_object(&mut self, obj: Object) { + self.id_map.insert(obj.id(), obj.clone()); + self.seq_map.insert((obj.id(), obj.version()), obj); + } + + pub fn get(&self, id: &ObjectID, version: Option<&SequenceNumber>) -> Option<&Object> { + if let Some(version) = version { + self.seq_map.get(&(*id, *version)) + } else { + self.id_map.get(id) + } + } +} + +impl Default for InMemObjectCache { + fn default() -> Self { + Self::new() + } +} + +/// Along with InMemObjectCache, TxChangesProcessor implements ObjectProvider +/// so it can be used in indexing write path to get object/balance changes. +/// Its lifetime is per checkpoint. 
+pub struct TxChangesProcessor { + object_cache: InMemObjectCache, + metrics: IndexerMetrics, +} + +impl TxChangesProcessor { + pub fn new(objects: &[&Object], metrics: IndexerMetrics) -> Self { + let mut object_cache = InMemObjectCache::new(); + for obj in objects { + object_cache.insert_object(<&Object>::clone(obj).clone()); + } + Self { + object_cache, + metrics, + } + } + + pub(crate) async fn get_changes( + &self, + tx: &TransactionData, + effects: &TransactionEffects, + tx_digest: &TransactionDigest, + ) -> IndexerResult<( + Vec, + Vec, + )> { + let _timer = self + .metrics + .indexing_tx_object_changes_latency + .start_timer(); + let object_change: Vec<_> = get_object_changes( + self, + effects, + tx.sender(), + effects.modified_at_versions(), + effects.all_changed_objects(), + effects.all_removed_objects(), + ) + .await? + .into_iter() + .map(IndexedObjectChange::from) + .collect(); + let balance_change = get_balance_changes_from_effect( + self, + effects, + tx.input_objects().unwrap_or_else(|e| { + panic!( + "Checkpointed tx {:?} has inavlid input objects: {e}", + tx_digest, + ) + }), + None, + ) + .await?; + Ok((balance_change, object_change)) + } +} + +#[async_trait] +impl ObjectProvider for TxChangesProcessor { + type Error = IndexerError; + + async fn get_object( + &self, + id: &ObjectID, + version: &SequenceNumber, + ) -> Result { + let object = self + .object_cache + .get(id, Some(version)) + .as_ref() + .map(|o| <&Object>::clone(o).clone()); + if let Some(o) = object { + self.metrics.indexing_get_object_in_mem_hit.inc(); + return Ok(o); + } + + panic!( + "Object {} is not found in TxChangesProcessor as an ObjectProvider (fn get_object)", + id + ); + } + + async fn find_object_lt_or_eq_version( + &self, + id: &ObjectID, + version: &SequenceNumber, + ) -> Result, Self::Error> { + // First look up the exact version in object_cache. 
+ let object = self + .object_cache + .get(id, Some(version)) + .as_ref() + .map(|o| <&Object>::clone(o).clone()); + if let Some(o) = object { + self.metrics.indexing_get_object_in_mem_hit.inc(); + return Ok(Some(o)); + } + + // Second look up the latest version in object_cache. This may be + // called when the object is deleted hence the version at deletion + // is given. + let object = self + .object_cache + .get(id, None) + .as_ref() + .map(|o| <&Object>::clone(o).clone()); + if let Some(o) = object { + if o.version() > *version { + panic!( + "Found a higher version {} for object {}, expected lt_or_eq {}", + o.version(), + id, + *version + ); + } + if o.version() <= *version { + self.metrics.indexing_get_object_in_mem_hit.inc(); + return Ok(Some(o)); + } + } + + panic!("Object {} is not found in TxChangesProcessor as an ObjectProvider (fn find_object_lt_or_eq_version)", id); + } +} + +// This is a struct that is used to extract SuiSystemState and its dynamic children +// for end-of-epoch indexing. 
+pub(crate) struct EpochEndIndexingObjectStore<'a> { + objects: Vec<&'a Object>, +} + +impl<'a> EpochEndIndexingObjectStore<'a> { + pub fn new(data: &'a CheckpointData) -> Self { + Self { + objects: data.latest_live_output_objects(), + } + } +} + +impl<'a> sui_types::storage::ObjectStore for EpochEndIndexingObjectStore<'a> { + fn get_object( + &self, + object_id: &ObjectID, + ) -> Result, sui_types::storage::error::Error> { + Ok(self + .objects + .iter() + .find(|o| o.id() == *object_id) + .cloned() + .cloned()) + } + + fn get_object_by_key( + &self, + object_id: &ObjectID, + version: sui_types::base_types::VersionNumber, + ) -> Result, sui_types::storage::error::Error> { + Ok(self + .objects + .iter() + .find(|o| o.id() == *object_id && o.version() == version) + .cloned() + .cloned()) + } +} diff --git a/crates/sui-mvr-indexer/src/indexer.rs b/crates/sui-mvr-indexer/src/indexer.rs new file mode 100644 index 0000000000000..d1819a90a7416 --- /dev/null +++ b/crates/sui-mvr-indexer/src/indexer.rs @@ -0,0 +1,214 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; +use std::env; + +use anyhow::Result; +use prometheus::Registry; +use tokio::sync::{oneshot, watch}; +use tokio_util::sync::CancellationToken; +use tracing::info; + +use async_trait::async_trait; +use futures::future::try_join_all; +use mysten_metrics::spawn_monitored_task; +use sui_data_ingestion_core::{ + DataIngestionMetrics, IndexerExecutor, ProgressStore, ReaderOptions, WorkerPool, +}; +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; + +use crate::build_json_rpc_server; +use crate::config::{IngestionConfig, JsonRpcConfig, RetentionConfig, SnapshotLagConfig}; +use crate::database::ConnectionPool; +use crate::errors::IndexerError; +use crate::handlers::checkpoint_handler::new_handlers; +use crate::handlers::objects_snapshot_handler::start_objects_snapshot_handler; +use crate::handlers::pruner::Pruner; +use crate::indexer_reader::IndexerReader; +use crate::metrics::IndexerMetrics; +use crate::store::{IndexerStore, PgIndexerStore}; + +pub struct Indexer; + +impl Indexer { + pub async fn start_writer( + config: IngestionConfig, + store: PgIndexerStore, + metrics: IndexerMetrics, + snapshot_config: SnapshotLagConfig, + retention_config: Option, + cancel: CancellationToken, + committed_checkpoints_tx: Option>>, + ) -> Result<(), IndexerError> { + info!( + "Sui Indexer Writer (version {:?}) started...", + env!("CARGO_PKG_VERSION") + ); + info!("Sui Indexer Writer config: {config:?}",); + + let extra_reader_options = ReaderOptions { + batch_size: config.checkpoint_download_queue_size, + timeout_secs: config.checkpoint_download_timeout, + data_limit: config.checkpoint_download_queue_size_bytes, + gc_checkpoint_files: config.gc_checkpoint_files, + ..Default::default() + }; + + // Start objects snapshot processor, which is a separate pipeline with its ingestion pipeline. 
+ let (object_snapshot_worker, object_snapshot_watermark) = start_objects_snapshot_handler( + store.clone(), + metrics.clone(), + snapshot_config, + cancel.clone(), + config.start_checkpoint, + config.end_checkpoint, + ) + .await?; + + if let Some(retention_config) = retention_config { + let pruner = Pruner::new(store.clone(), retention_config, metrics.clone())?; + let cancel_clone = cancel.clone(); + spawn_monitored_task!(pruner.start(cancel_clone)); + } + + // If we already have chain identifier indexed (i.e. the first checkpoint has been indexed), + // then we persist protocol configs for protocol versions not yet in the db. + // Otherwise, we would do the persisting in `commit_checkpoint` while the first cp is + // being indexed. + if let Some(chain_id) = IndexerStore::get_chain_identifier(&store).await? { + store + .persist_protocol_configs_and_feature_flags(chain_id) + .await?; + } + + let mut exit_senders = vec![]; + let mut executors = vec![]; + + let (worker, primary_watermark) = new_handlers( + store, + metrics, + cancel.clone(), + committed_checkpoints_tx, + config.start_checkpoint, + config.end_checkpoint, + ) + .await?; + // Ingestion task watermarks are snapshotted once on indexer startup based on the + // corresponding watermark table before being handed off to the ingestion task. 
+ let progress_store = ShimIndexerProgressStore::new(vec![ + ("primary".to_string(), primary_watermark), + ("object_snapshot".to_string(), object_snapshot_watermark), + ]); + let mut executor = IndexerExecutor::new( + progress_store.clone(), + 2, + DataIngestionMetrics::new(&Registry::new()), + ); + + let worker_pool = WorkerPool::new( + worker, + "primary".to_string(), + config.checkpoint_download_queue_size, + ); + executor.register(worker_pool).await?; + let (exit_sender, exit_receiver) = oneshot::channel(); + executors.push((executor, exit_receiver)); + exit_senders.push(exit_sender); + + // in a non-colocated setup, start a separate indexer for processing object snapshots + if config.sources.data_ingestion_path.is_none() { + let executor = IndexerExecutor::new( + progress_store, + 1, + DataIngestionMetrics::new(&Registry::new()), + ); + let (exit_sender, exit_receiver) = oneshot::channel(); + exit_senders.push(exit_sender); + executors.push((executor, exit_receiver)); + } + + let worker_pool = WorkerPool::new( + object_snapshot_worker, + "object_snapshot".to_string(), + config.checkpoint_download_queue_size, + ); + let executor = executors.last_mut().expect("executors is not empty"); + executor.0.register(worker_pool).await?; + + // Spawn a task that links the cancellation token to the exit sender + spawn_monitored_task!(async move { + cancel.cancelled().await; + for exit_sender in exit_senders { + let _ = exit_sender.send(()); + } + }); + + info!("Starting data ingestion executor..."); + let futures = executors.into_iter().map(|(executor, exit_receiver)| { + executor.run( + config + .sources + .data_ingestion_path + .clone() + .unwrap_or(tempfile::tempdir().unwrap().into_path()), + config + .sources + .remote_store_url + .as_ref() + .map(|url| url.as_str().to_owned()), + vec![], + extra_reader_options.clone(), + exit_receiver, + ) + }); + try_join_all(futures).await?; + Ok(()) + } + + pub async fn start_reader( + config: &JsonRpcConfig, + registry: &Registry, 
+ pool: ConnectionPool, + cancel: CancellationToken, + ) -> Result<(), IndexerError> { + info!( + "Sui Indexer Reader (version {:?}) started...", + env!("CARGO_PKG_VERSION") + ); + let indexer_reader = IndexerReader::new(pool); + let handle = build_json_rpc_server(registry, indexer_reader, config, cancel) + .await + .expect("Json rpc server should not run into errors upon start."); + tokio::spawn(async move { handle.stopped().await }) + .await + .expect("Rpc server task failed"); + + Ok(()) + } +} + +#[derive(Clone)] +struct ShimIndexerProgressStore { + watermarks: HashMap, +} + +impl ShimIndexerProgressStore { + fn new(watermarks: Vec<(String, CheckpointSequenceNumber)>) -> Self { + Self { + watermarks: watermarks.into_iter().collect(), + } + } +} + +#[async_trait] +impl ProgressStore for ShimIndexerProgressStore { + async fn load(&mut self, task_name: String) -> Result { + Ok(*self.watermarks.get(&task_name).expect("missing watermark")) + } + + async fn save(&mut self, _: String, _: CheckpointSequenceNumber) -> Result<()> { + Ok(()) + } +} diff --git a/crates/sui-mvr-indexer/src/indexer_reader.rs b/crates/sui-mvr-indexer/src/indexer_reader.rs new file mode 100644 index 0000000000000..d0eed2ee4a461 --- /dev/null +++ b/crates/sui-mvr-indexer/src/indexer_reader.rs @@ -0,0 +1,1511 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; +use anyhow::Result; +use diesel::{ + dsl::sql, sql_types::Bool, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, + OptionalExtension, QueryDsl, SelectableHelper, TextExpressionMethods, +}; +use itertools::Itertools; +use std::sync::Arc; +use sui_types::dynamic_field::visitor as DFV; +use sui_types::object::bounded_visitor::BoundedVisitor; +use tap::{Pipe, TapFallible}; +use tracing::{debug, error, warn}; + +use fastcrypto::encoding::Encoding; +use fastcrypto::encoding::Hex; +use move_core_types::annotated_value::MoveStructLayout; +use move_core_types::language_storage::{StructTag, TypeTag}; +use sui_json_rpc_types::DisplayFieldsResponse; +use sui_json_rpc_types::{Balance, Coin as SuiCoin, SuiCoinMetadata, SuiMoveValue}; +use sui_json_rpc_types::{ + CheckpointId, EpochInfo, EventFilter, SuiEvent, SuiObjectDataFilter, + SuiTransactionBlockResponse, TransactionFilter, +}; +use sui_package_resolver::Package; +use sui_package_resolver::PackageStore; +use sui_package_resolver::{PackageStoreWithLruCache, Resolver}; +use sui_types::effects::TransactionEvents; +use sui_types::{balance::Supply, coin::TreasuryCap, dynamic_field::DynamicFieldName}; +use sui_types::{ + base_types::{ObjectID, SuiAddress, VersionNumber}, + committee::EpochId, + digests::TransactionDigest, + dynamic_field::DynamicFieldInfo, + object::{Object, ObjectRead}, + sui_system_state::{sui_system_state_summary::SuiSystemStateSummary, SuiSystemStateTrait}, +}; +use sui_types::{coin::CoinMetadata, event::EventID}; + +use crate::database::ConnectionPool; +use crate::db::ConnectionPoolConfig; +use crate::models::transactions::{stored_events_to_events, StoredTransactionEvents}; +use crate::schema::pruner_cp_watermark; +use crate::schema::tx_digests; +use crate::{ + errors::IndexerError, + models::{ + checkpoints::StoredCheckpoint, + display::StoredDisplay, + epoch::StoredEpochInfo, + events::StoredEvent, + objects::{CoinBalance, 
StoredObject}, + transactions::{tx_events_to_sui_tx_events, StoredTransaction}, + tx_indices::TxSequenceNumber, + }, + schema::{checkpoints, display, epochs, events, objects, transactions}, + store::package_resolver::IndexerStorePackageResolver, + types::{IndexerResult, OwnerType}, +}; + +pub const TX_SEQUENCE_NUMBER_STR: &str = "tx_sequence_number"; +pub const TRANSACTION_DIGEST_STR: &str = "transaction_digest"; +pub const EVENT_SEQUENCE_NUMBER_STR: &str = "event_sequence_number"; + +#[derive(Clone)] +pub struct IndexerReader { + pool: ConnectionPool, + package_resolver: PackageResolver, +} + +pub type PackageResolver = Arc>>; + +// Impl for common initialization and utilities +impl IndexerReader { + pub fn new(pool: ConnectionPool) -> Self { + let indexer_store_pkg_resolver = IndexerStorePackageResolver::new(pool.clone()); + let package_cache = PackageStoreWithLruCache::new(indexer_store_pkg_resolver); + let package_resolver = Arc::new(Resolver::new(package_cache)); + Self { + pool, + package_resolver, + } + } + + pub async fn new_with_config>( + db_url: T, + config: ConnectionPoolConfig, + ) -> Result { + let db_url = db_url.into(); + + let pool = ConnectionPool::new(db_url.parse()?, config).await?; + + let indexer_store_pkg_resolver = IndexerStorePackageResolver::new(pool.clone()); + let package_cache = PackageStoreWithLruCache::new(indexer_store_pkg_resolver); + let package_resolver = Arc::new(Resolver::new(package_cache)); + Ok(Self { + pool, + package_resolver, + }) + } + + pub fn pool(&self) -> &ConnectionPool { + &self.pool + } +} + +// Impl for reading data from the DB +impl IndexerReader { + async fn get_object_from_db( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = objects::table + .filter(objects::object_id.eq(object_id.to_vec())) + .into_boxed(); + if let Some(version) = version { + query = 
query.filter(objects::object_version.eq(version.value() as i64)) + } + + query + .first::(&mut connection) + .await + .optional() + .map_err(Into::into) + } + + pub async fn get_object( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + let Some(stored_package) = self.get_object_from_db(object_id, version).await? else { + return Ok(None); + }; + + let object = stored_package.try_into()?; + Ok(Some(object)) + } + + pub async fn get_object_read(&self, object_id: ObjectID) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let stored_object = objects::table + .filter(objects::object_id.eq(object_id.to_vec())) + .first::(&mut connection) + .await + .optional()?; + + if let Some(object) = stored_object { + object + .try_into_object_read(self.package_resolver.clone()) + .await + } else { + Ok(ObjectRead::NotExists(object_id)) + } + } + + pub async fn get_package(&self, package_id: ObjectID) -> Result { + let store = self.package_resolver.package_store(); + let pkg = store + .fetch(package_id.into()) + .await + .map_err(|e| { + IndexerError::PostgresReadError(format!( + "Fail to fetch package from package store with error {:?}", + e + )) + })? 
+ .as_ref() + .clone(); + Ok(pkg) + } + + async fn get_epoch_info_from_db( + &self, + epoch: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let stored_epoch = epochs::table + .into_boxed() + .pipe(|query| { + if let Some(epoch) = epoch { + query.filter(epochs::epoch.eq(epoch as i64)) + } else { + query.order_by(epochs::epoch.desc()) + } + }) + .first::(&mut connection) + .await + .optional()?; + + Ok(stored_epoch) + } + + pub async fn get_latest_epoch_info_from_db(&self) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let stored_epoch = epochs::table + .order_by(epochs::epoch.desc()) + .first::(&mut connection) + .await?; + + Ok(stored_epoch) + } + + pub async fn get_epoch_info( + &self, + epoch: Option, + ) -> Result, IndexerError> { + let stored_epoch = self.get_epoch_info_from_db(epoch).await?; + + let stored_epoch = match stored_epoch { + Some(stored_epoch) => stored_epoch, + None => return Ok(None), + }; + + let epoch_info = EpochInfo::try_from(stored_epoch)?; + Ok(Some(epoch_info)) + } + + async fn get_epochs_from_db( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = epochs::table.into_boxed(); + + if let Some(cursor) = cursor { + if descending_order { + query = query.filter(epochs::epoch.lt(cursor as i64)); + } else { + query = query.filter(epochs::epoch.gt(cursor as i64)); + } + } + + if descending_order { + query = query.order_by(epochs::epoch.desc()); + } else { + query = query.order_by(epochs::epoch.asc()); + } + + query + .limit(limit as i64) + .load(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_epochs( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + self.get_epochs_from_db(cursor, limit, 
descending_order) + .await? + .into_iter() + .map(EpochInfo::try_from) + .collect::, _>>() + .map_err(Into::into) + } + + pub async fn get_latest_sui_system_state(&self) -> Result { + let object_store = ConnectionAsObjectStore::from_pool(&self.pool) + .await + .map_err(|e| IndexerError::PgPoolConnectionError(e.to_string()))?; + + let system_state = tokio::task::spawn_blocking(move || { + sui_types::sui_system_state::get_sui_system_state(&object_store) + }) + .await + .unwrap()? + .into_sui_system_state_summary(); + + Ok(system_state) + } + + pub async fn get_validator_from_table( + &self, + table_id: ObjectID, + pool_id: sui_types::id::ID, + ) -> Result< + sui_types::sui_system_state::sui_system_state_summary::SuiValidatorSummary, + IndexerError, + > { + let object_store = ConnectionAsObjectStore::from_pool(&self.pool) + .await + .map_err(|e| IndexerError::PgPoolConnectionError(e.to_string()))?; + + let validator = tokio::task::spawn_blocking(move || { + sui_types::sui_system_state::get_validator_from_table(&object_store, table_id, &pool_id) + }) + .await + .unwrap()?; + Ok(validator) + } + + /// Retrieve the system state data for the given epoch. If no epoch is given, + /// it will retrieve the latest epoch's data and return the system state. + /// System state of the an epoch is written at the end of the epoch, so system state + /// of the current epoch is empty until the epoch ends. You can call + /// `get_latest_sui_system_state` for current epoch instead. 
+ pub async fn get_epoch_sui_system_state( + &self, + epoch: Option, + ) -> Result { + let stored_epoch = self.get_epoch_info_from_db(epoch).await?; + let stored_epoch = match stored_epoch { + Some(stored_epoch) => stored_epoch, + None => return Err(IndexerError::InvalidArgumentError("Invalid epoch".into())), + }; + stored_epoch.get_json_system_state_summary() + } + + async fn get_checkpoint_from_db( + &self, + checkpoint_id: CheckpointId, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + let stored_checkpoint = checkpoints::table + .into_boxed() + .pipe(|query| match checkpoint_id { + CheckpointId::SequenceNumber(seq) => { + query.filter(checkpoints::sequence_number.eq(seq as i64)) + } + CheckpointId::Digest(digest) => { + query.filter(checkpoints::checkpoint_digest.eq(digest.into_inner().to_vec())) + } + }) + .first::(&mut connection) + .await + .optional()?; + + Ok(stored_checkpoint) + } + + async fn get_latest_checkpoint_from_db(&self) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + checkpoints::table + .order_by(checkpoints::sequence_number.desc()) + .first::(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_checkpoint( + &self, + checkpoint_id: CheckpointId, + ) -> Result, IndexerError> { + let stored_checkpoint = match self.get_checkpoint_from_db(checkpoint_id).await? 
{ + Some(stored_checkpoint) => stored_checkpoint, + None => return Ok(None), + }; + + let checkpoint = sui_json_rpc_types::Checkpoint::try_from(stored_checkpoint)?; + Ok(Some(checkpoint)) + } + + pub async fn get_latest_checkpoint( + &self, + ) -> Result { + let stored_checkpoint = self.get_latest_checkpoint_from_db().await?; + + sui_json_rpc_types::Checkpoint::try_from(stored_checkpoint) + } + + async fn get_checkpoints_from_db( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = checkpoints::table.into_boxed(); + if let Some(cursor) = cursor { + if descending_order { + query = query.filter(checkpoints::sequence_number.lt(cursor as i64)); + } else { + query = query.filter(checkpoints::sequence_number.gt(cursor as i64)); + } + } + if descending_order { + query = query.order_by(checkpoints::sequence_number.desc()); + } else { + query = query.order_by(checkpoints::sequence_number.asc()); + } + + query + .limit(limit as i64) + .load::(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_checkpoints( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + self.get_checkpoints_from_db(cursor, limit, descending_order) + .await? 
+ .into_iter() + .map(sui_json_rpc_types::Checkpoint::try_from) + .collect() + } + + async fn multi_get_transactions( + &self, + digests: &[TransactionDigest], + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let digests = digests + .iter() + .map(|digest| digest.inner().to_vec()) + .collect::>(); + + transactions::table + .inner_join( + tx_digests::table + .on(transactions::tx_sequence_number.eq(tx_digests::tx_sequence_number)), + ) + .filter(tx_digests::tx_digest.eq_any(digests)) + .select(StoredTransaction::as_select()) + .load::(&mut connection) + .await + .map_err(Into::into) + } + + async fn stored_transaction_to_transaction_block( + &self, + stored_txes: Vec, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + ) -> IndexerResult> { + let mut tx_block_responses_futures = vec![]; + for stored_tx in stored_txes { + let package_resolver_clone = self.package_resolver(); + let options_clone = options.clone(); + tx_block_responses_futures.push(tokio::task::spawn( + stored_tx + .try_into_sui_transaction_block_response(options_clone, package_resolver_clone), + )); + } + + let tx_blocks = futures::future::join_all(tx_block_responses_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to join all tx block futures: {}", e))? 
+ .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to collect tx block futures: {}", e))?; + Ok(tx_blocks) + } + + async fn multi_get_transactions_with_sequence_numbers( + &self, + tx_sequence_numbers: Vec, + // Some(true) for desc, Some(false) for asc, None for undefined order + is_descending: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = transactions::table + .filter(transactions::tx_sequence_number.eq_any(tx_sequence_numbers)) + .into_boxed(); + match is_descending { + Some(true) => { + query = query.order(transactions::dsl::tx_sequence_number.desc()); + } + Some(false) => { + query = query.order(transactions::dsl::tx_sequence_number.asc()); + } + None => (), + } + + query + .load::(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_owned_objects( + &self, + address: SuiAddress, + filter: Option, + cursor: Option, + limit: usize, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = objects::table + .filter(objects::owner_type.eq(OwnerType::Address as i16)) + .filter(objects::owner_id.eq(address.to_vec())) + .order(objects::object_id.asc()) + .limit(limit as i64) + .into_boxed(); + if let Some(filter) = filter { + match filter { + SuiObjectDataFilter::StructType(struct_tag) => { + let object_type = struct_tag.to_canonical_string(/* with_prefix */ true); + query = query.filter(objects::object_type.like(format!("{}%", object_type))); + } + SuiObjectDataFilter::MatchAny(filters) => { + let mut condition = "(".to_string(); + for (i, filter) in filters.iter().enumerate() { + if let SuiObjectDataFilter::StructType(struct_tag) = filter { + let object_type = + struct_tag.to_canonical_string(/* with_prefix */ true); + if i == 0 { + condition += + format!("objects.object_type LIKE '{}%'", object_type).as_str(); + } else { + condition += + format!(" OR 
objects.object_type LIKE '{}%'", object_type) + .as_str(); + } + } else { + return Err(IndexerError::InvalidArgumentError( + "Invalid filter type. Only struct, MatchAny and MatchNone of struct filters are supported.".into(), + )); + } + } + condition += ")"; + query = query.filter(sql::(&condition)); + } + SuiObjectDataFilter::MatchNone(filters) => { + for filter in filters { + if let SuiObjectDataFilter::StructType(struct_tag) = filter { + let object_type = + struct_tag.to_canonical_string(/* with_prefix */ true); + query = query + .filter(objects::object_type.not_like(format!("{}%", object_type))); + } else { + return Err(IndexerError::InvalidArgumentError( + "Invalid filter type. Only struct, MatchAny and MatchNone of struct filters are supported.".into(), + )); + } + } + } + _ => { + return Err(IndexerError::InvalidArgumentError( + "Invalid filter type. Only struct, MatchAny and MatchNone of struct filters are supported.".into(), + )); + } + } + } + + if let Some(object_cursor) = cursor { + query = query.filter(objects::object_id.gt(object_cursor.to_vec())); + } + + query + .load::(&mut connection) + .await + .map_err(|e| IndexerError::PostgresReadError(e.to_string())) + } + + pub async fn multi_get_objects( + &self, + object_ids: Vec, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + let object_ids = object_ids.into_iter().map(|id| id.to_vec()).collect_vec(); + + objects::table + .filter(objects::object_id.eq_any(object_ids)) + .load::(&mut connection) + .await + .map_err(Into::into) + } + + async fn query_transaction_blocks_by_checkpoint( + &self, + checkpoint_seq: u64, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + cursor_tx_seq: Option, + limit: usize, + is_descending: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let tx_range: (i64, i64) = pruner_cp_watermark::dsl::pruner_cp_watermark + .select(( + 
pruner_cp_watermark::min_tx_sequence_number, + pruner_cp_watermark::max_tx_sequence_number, + )) + .filter(pruner_cp_watermark::checkpoint_sequence_number.eq(checkpoint_seq as i64)) + .first::<(i64, i64)>(&mut connection) + .await?; + + let mut query = transactions::table + .filter(transactions::tx_sequence_number.between(tx_range.0, tx_range.1)) + .into_boxed(); + + if let Some(cursor_tx_seq) = cursor_tx_seq { + if is_descending { + query = query.filter(transactions::tx_sequence_number.lt(cursor_tx_seq)); + } else { + query = query.filter(transactions::tx_sequence_number.gt(cursor_tx_seq)); + } + } + if is_descending { + query = query.order(transactions::tx_sequence_number.desc()); + } else { + query = query.order(transactions::tx_sequence_number.asc()); + } + let stored_txes = query + .limit(limit as i64) + .load::(&mut connection) + .await?; + self.stored_transaction_to_transaction_block(stored_txes, options) + .await + } + + pub async fn query_transaction_blocks( + &self, + filter: Option, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + cursor: Option, + limit: usize, + is_descending: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let cursor_tx_seq = if let Some(cursor) = cursor { + let tx_seq = tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(cursor.into_inner().to_vec())) + .first::(&mut connection) + .await?; + Some(tx_seq) + } else { + None + }; + let cursor_clause = if let Some(cursor_tx_seq) = cursor_tx_seq { + if is_descending { + format!("AND {TX_SEQUENCE_NUMBER_STR} < {}", cursor_tx_seq) + } else { + format!("AND {TX_SEQUENCE_NUMBER_STR} > {}", cursor_tx_seq) + } + } else { + "".to_string() + }; + let order_str = if is_descending { "DESC" } else { "ASC" }; + let (table_name, main_where_clause) = match filter { + // Processed above + Some(TransactionFilter::Checkpoint(seq)) => { + return self + 
.query_transaction_blocks_by_checkpoint( + seq, + options, + cursor_tx_seq, + limit, + is_descending, + ) + .await + } + // FIXME: sanitize module & function + Some(TransactionFilter::MoveFunction { + package, + module, + function, + }) => { + let package = Hex::encode(package.to_vec()); + match (module, function) { + (Some(module), Some(function)) => ( + "tx_calls_fun".to_owned(), + format!( + "package = '\\x{package}'::bytea AND module = '{module}' AND func = '{function}'", + ), + ), + (Some(module), None) => ( + "tx_calls_mod".to_owned(), + format!( + "package = '\\x{package}'::bytea AND module = '{module}'", + ), + ), + (None, Some(_)) => { + return Err(IndexerError::InvalidArgumentError( + "Function cannot be present without Module.".into(), + )); + } + (None, None) => ( + "tx_calls_pkg".to_owned(), + format!("package = '\\x{package}'::bytea"), + ), + } + } + Some(TransactionFilter::AffectedObject(object_id)) => { + let object_id = Hex::encode(object_id.to_vec()); + ( + "tx_affected_objects".to_owned(), + format!("affected = '\\x{object_id}'::bytea"), + ) + } + Some(TransactionFilter::FromAddress(from_address)) => { + let from_address = Hex::encode(from_address.to_vec()); + ( + "tx_affected_addresses".to_owned(), + format!("sender = '\\x{from_address}'::bytea AND affected = '\\x{from_address}'::bytea"), + ) + } + Some(TransactionFilter::FromAndToAddress { from, to }) => { + let from_address = Hex::encode(from.to_vec()); + let to_address = Hex::encode(to.to_vec()); + ( + "tx_affected_addresses".to_owned(), + format!("sender = '\\x{from_address}'::bytea AND affected = '\\x{to_address}'::bytea"), + ) + } + Some(TransactionFilter::FromOrToAddress { addr }) => { + let address = Hex::encode(addr.to_vec()); + ( + "tx_affected_addresses".to_owned(), + format!("affected = '\\x{address}'::bytea"), + ) + } + Some( + TransactionFilter::TransactionKind(_) | TransactionFilter::TransactionKindIn(_), + ) => { + return Err(IndexerError::NotSupportedError( + "TransactionKind 
filter is not supported.".into(), + )); + } + Some(TransactionFilter::InputObject(_) | TransactionFilter::ChangedObject(_)) => { + return Err(IndexerError::NotSupportedError( + "InputObject and OutputObject filters are not supported, please use AffectedObject instead.".into() + )) + } + Some(TransactionFilter::ToAddress(_)) => { + return Err(IndexerError::NotSupportedError( + "ToAddress filter is not supported, please use FromOrToAddress instead.".into() + )) + } + None => { + // apply no filter + ("transactions".to_owned(), "1 = 1".into()) + } + }; + + let query = format!( + "SELECT {TX_SEQUENCE_NUMBER_STR} FROM {} WHERE {} {} ORDER BY {TX_SEQUENCE_NUMBER_STR} {} LIMIT {}", + table_name, + main_where_clause, + cursor_clause, + order_str, + limit, + ); + + debug!("query transaction blocks: {}", query); + let tx_sequence_numbers = diesel::sql_query(query.clone()) + .load::(&mut connection) + .await? + .into_iter() + .map(|tsn| tsn.tx_sequence_number) + .collect::>(); + self.multi_get_transaction_block_response_by_sequence_numbers( + tx_sequence_numbers, + options, + Some(is_descending), + ) + .await + } + + async fn multi_get_transaction_block_response_in_blocking_task_impl( + &self, + digests: &[TransactionDigest], + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + ) -> Result, IndexerError> { + let stored_txes = self.multi_get_transactions(digests).await?; + self.stored_transaction_to_transaction_block(stored_txes, options) + .await + } + + async fn multi_get_transaction_block_response_by_sequence_numbers( + &self, + tx_sequence_numbers: Vec, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + // Some(true) for desc, Some(false) for asc, None for undefined order + is_descending: Option, + ) -> Result, IndexerError> { + let stored_txes: Vec = self + .multi_get_transactions_with_sequence_numbers(tx_sequence_numbers, is_descending) + .await?; + self.stored_transaction_to_transaction_block(stored_txes, options) + .await + } + + pub 
async fn multi_get_transaction_block_response_in_blocking_task( + &self, + digests: Vec, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + ) -> Result, IndexerError> { + self.multi_get_transaction_block_response_in_blocking_task_impl(&digests, options) + .await + } + + pub async fn get_transaction_events( + &self, + digest: TransactionDigest, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + // Use the tx_digests lookup table for the corresponding tx_sequence_number, and then fetch + // event-relevant data from the entry on the transactions table. + let (timestamp_ms, serialized_events) = transactions::table + .filter( + transactions::tx_sequence_number + .nullable() + .eq(tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(digest.into_inner().to_vec())) + .single_value()), + ) + .select((transactions::timestamp_ms, transactions::events)) + .first::<(i64, StoredTransactionEvents)>(&mut connection) + .await?; + + let events = stored_events_to_events(serialized_events)?; + let tx_events = TransactionEvents { data: events }; + + let sui_tx_events = tx_events_to_sui_tx_events( + tx_events, + self.package_resolver(), + digest, + timestamp_ms as u64, + ) + .await?; + Ok(sui_tx_events.map_or(vec![], |ste| ste.data)) + } + + async fn query_events_by_tx_digest( + &self, + tx_digest: TransactionDigest, + cursor: Option, + cursor_tx_seq: i64, + limit: usize, + descending_order: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = events::table.into_boxed(); + + if let Some(cursor) = cursor { + if cursor.tx_digest != tx_digest { + return Err(IndexerError::InvalidArgumentError( + "Cursor tx_digest does not match the tx_digest in the query.".into(), + )); + } + if descending_order { + query = query.filter(events::event_sequence_number.lt(cursor.event_seq as i64)); + } else { 
+ query = query.filter(events::event_sequence_number.gt(cursor.event_seq as i64)); + } + } else if descending_order { + query = query.filter(events::event_sequence_number.le(i64::MAX)); + } else { + query = query.filter(events::event_sequence_number.ge(0)); + }; + + if descending_order { + query = query.order(events::event_sequence_number.desc()); + } else { + query = query.order(events::event_sequence_number.asc()); + } + + // If the cursor is provided and matches tx_digest, we've already fetched the + // tx_sequence_number and can query events table directly. Otherwise, we can just consult + // the tx_digests table for the tx_sequence_number to key into events table. + if cursor.is_some() { + query = query.filter(events::tx_sequence_number.eq(cursor_tx_seq)); + } else { + query = query.filter( + events::tx_sequence_number.nullable().eq(tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(tx_digest.into_inner().to_vec())) + .single_value()), + ); + } + + let stored_events = query + .limit(limit as i64) + .load::(&mut connection) + .await?; + + let mut sui_event_futures = vec![]; + for stored_event in stored_events { + sui_event_futures.push(tokio::task::spawn( + stored_event.try_into_sui_event(self.package_resolver.clone()), + )); + } + + let sui_events = futures::future::join_all(sui_event_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to join sui event futures: {}", e))? 
+ .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to collect sui event futures: {}", e))?; + Ok(sui_events) + } + + pub async fn query_events( + &self, + filter: EventFilter, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let (tx_seq, event_seq) = if let Some(cursor) = cursor { + let EventID { + tx_digest, + event_seq, + } = cursor; + let tx_seq = transactions::table + .select(transactions::tx_sequence_number) + .filter( + transactions::tx_sequence_number + .nullable() + .eq(tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(tx_digest.into_inner().to_vec())) + .single_value()), + ) + .first::(&mut connection) + .await?; + (tx_seq, event_seq as i64) + } else if descending_order { + (i64::MAX, i64::MAX) + } else { + (-1, 0) + }; + + let query = if let EventFilter::Sender(sender) = &filter { + // Need to remove ambiguities for tx_sequence_number column + let cursor_clause = if descending_order { + format!("(e.{TX_SEQUENCE_NUMBER_STR} < {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} < {}))", tx_seq, tx_seq, event_seq) + } else { + format!("(e.{TX_SEQUENCE_NUMBER_STR} > {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} > {}))", tx_seq, tx_seq, event_seq) + }; + let order_clause = if descending_order { + format!("e.{TX_SEQUENCE_NUMBER_STR} DESC, e.{EVENT_SEQUENCE_NUMBER_STR} DESC") + } else { + format!("e.{TX_SEQUENCE_NUMBER_STR} ASC, e.{EVENT_SEQUENCE_NUMBER_STR} ASC") + }; + format!( + "( \ + SELECT * + FROM event_senders s + JOIN events e + USING (tx_sequence_number, event_sequence_number) + WHERE s.sender = '\\x{}'::bytea AND {} \ + ORDER BY {} \ + LIMIT {} + )", + Hex::encode(sender.to_vec()), + cursor_clause, + order_clause, + limit, + ) + } else if let EventFilter::Transaction(tx_digest) = filter { + return self + 
.query_events_by_tx_digest(tx_digest, cursor, tx_seq, limit, descending_order) + .await; + } else { + let main_where_clause = match filter { + EventFilter::All([]) => { + // No filter + "1 = 1".to_string() + } + EventFilter::MoveModule { package, module } => { + format!( + "package = '\\x{}'::bytea AND module = '{}'", + package.to_hex(), + module, + ) + } + EventFilter::MoveEventType(struct_tag) => { + format!("event_type = '{}'", struct_tag) + } + EventFilter::MoveEventModule { package, module } => { + let package_module_prefix = format!("{}::{}", package.to_hex_literal(), module); + format!("event_type LIKE '{package_module_prefix}::%'") + } + EventFilter::Sender(_) => { + // Processed above + unreachable!() + } + EventFilter::Transaction(_) => { + // Processed above + unreachable!() + } + EventFilter::TimeRange { .. } | EventFilter::Any(_) => { + return Err(IndexerError::NotSupportedError( + "This type of EventFilter is not supported.".to_owned(), + )); + } + }; + + let cursor_clause = if descending_order { + format!("AND ({TX_SEQUENCE_NUMBER_STR} < {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} < {}))", tx_seq, tx_seq, event_seq) + } else { + format!("AND ({TX_SEQUENCE_NUMBER_STR} > {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} > {}))", tx_seq, tx_seq, event_seq) + }; + let order_clause = if descending_order { + format!("{TX_SEQUENCE_NUMBER_STR} DESC, {EVENT_SEQUENCE_NUMBER_STR} DESC") + } else { + format!("{TX_SEQUENCE_NUMBER_STR} ASC, {EVENT_SEQUENCE_NUMBER_STR} ASC") + }; + + format!( + " + SELECT * FROM events \ + WHERE {} {} \ + ORDER BY {} \ + LIMIT {} + ", + main_where_clause, cursor_clause, order_clause, limit, + ) + }; + debug!("query events: {}", query); + let stored_events = diesel::sql_query(query) + .load::(&mut connection) + .await?; + + let mut sui_event_futures = vec![]; + for stored_event in stored_events { + sui_event_futures.push(tokio::task::spawn( + 
stored_event.try_into_sui_event(self.package_resolver.clone()), + )); + } + + let sui_events = futures::future::join_all(sui_event_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to join sui event futures: {}", e))? + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to collect sui event futures: {}", e))?; + Ok(sui_events) + } + + pub async fn get_dynamic_fields( + &self, + parent_object_id: ObjectID, + cursor: Option, + limit: usize, + ) -> Result, IndexerError> { + let stored_objects = self + .get_dynamic_fields_raw(parent_object_id, cursor, limit) + .await?; + let mut df_futures = vec![]; + let indexer_reader_arc = Arc::new(self.clone()); + for stored_object in stored_objects { + let indexer_reader_arc_clone = Arc::clone(&indexer_reader_arc); + df_futures.push(tokio::task::spawn(async move { + indexer_reader_arc_clone + .try_create_dynamic_field_info(stored_object) + .await + })); + } + let df_infos = futures::future::join_all(df_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Error joining DF futures: {:?}", e))? + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Error calling try_create_dynamic_field_info: {:?}", e))? 
+ .into_iter() + .flatten() + .collect::>(); + Ok(df_infos) + } + + pub async fn get_dynamic_fields_raw( + &self, + parent_object_id: ObjectID, + cursor: Option, + limit: usize, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = objects::table + .filter(objects::owner_type.eq(OwnerType::Object as i16)) + .filter(objects::owner_id.eq(parent_object_id.to_vec())) + .order(objects::object_id.asc()) + .limit(limit as i64) + .into_boxed(); + + if let Some(object_cursor) = cursor { + query = query.filter(objects::object_id.gt(object_cursor.to_vec())); + } + + query + .load::(&mut connection) + .await + .map_err(Into::into) + } + + async fn try_create_dynamic_field_info( + &self, + stored_object: StoredObject, + ) -> Result, IndexerError> { + if stored_object.df_kind.is_none() { + return Ok(None); + } + + let object: Object = stored_object.try_into()?; + let move_object = match object.data.try_as_move().cloned() { + Some(move_object) => move_object, + None => { + return Err(IndexerError::ResolveMoveStructError( + "Object is not a MoveObject".to_string(), + )); + } + }; + let type_tag: TypeTag = move_object.type_().clone().into(); + let layout = self + .package_resolver + .type_layout(type_tag.clone()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to get type layout for type {}: {e}", + type_tag.to_canonical_display(/* with_prefix */ true), + )) + })?; + + let field = DFV::FieldVisitor::deserialize(move_object.contents(), &layout) + .tap_err(|e| warn!("{e}"))?; + + let type_ = field.kind; + let name_type: TypeTag = field.name_layout.into(); + let bcs_name = field.name_bytes.to_owned(); + + let name_value = BoundedVisitor::deserialize_value(field.name_bytes, field.name_layout) + .tap_err(|e| warn!("{e}"))?; + + let name = DynamicFieldName { + type_: name_type, + value: SuiMoveValue::from(name_value).to_json_value(), + }; + + let value_metadata = 
field.value_metadata().map_err(|e| { + warn!("{e}"); + IndexerError::UncategorizedError(anyhow!(e)) + })?; + + Ok(Some(match value_metadata { + DFV::ValueMetadata::DynamicField(object_type) => DynamicFieldInfo { + name, + bcs_name, + type_, + object_type: object_type.to_canonical_string(/* with_prefix */ true), + object_id: object.id(), + version: object.version(), + digest: object.digest(), + }, + + DFV::ValueMetadata::DynamicObjectField(object_id) => { + let object = self.get_object(&object_id, None).await?.ok_or_else(|| { + IndexerError::UncategorizedError(anyhow!( + "Failed to find object_id {} when trying to create dynamic field info", + object_id.to_canonical_display(/* with_prefix */ true), + )) + })?; + + let object_type = object.data.type_().unwrap().clone(); + DynamicFieldInfo { + name, + bcs_name, + type_, + object_type: object_type.to_canonical_string(/* with_prefix */ true), + object_id, + version: object.version(), + digest: object.digest(), + } + } + })) + } + + pub async fn bcs_name_from_dynamic_field_name( + &self, + name: &DynamicFieldName, + ) -> Result, IndexerError> { + let move_type_layout = self + .package_resolver() + .type_layout(name.type_.clone()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to get type layout for type {}: {}", + name.type_, e + )) + })?; + let sui_json_value = sui_json::SuiJsonValue::new(name.value.clone())?; + let name_bcs_value = sui_json_value.to_bcs_bytes(&move_type_layout)?; + Ok(name_bcs_value) + } + + async fn get_display_object_by_type( + &self, + object_type: &move_core_types::language_storage::StructTag, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let object_type = object_type.to_canonical_string(/* with_prefix */ true); + let stored_display = display::table + .filter(display::object_type.eq(object_type)) + .first::(&mut connection) + .await + .optional()?; + + let stored_display = match 
stored_display { + Some(display) => display, + None => return Ok(None), + }; + + let display_update = stored_display.to_display_update_event()?; + + Ok(Some(display_update)) + } + + pub async fn get_owned_coins( + &self, + owner: SuiAddress, + // If coin_type is None, look for all coins. + coin_type: Option, + cursor: ObjectID, + limit: usize, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + let mut query = objects::dsl::objects + .filter(objects::dsl::owner_type.eq(OwnerType::Address as i16)) + .filter(objects::dsl::owner_id.eq(owner.to_vec())) + .filter(objects::dsl::object_id.gt(cursor.to_vec())) + .into_boxed(); + if let Some(coin_type) = coin_type { + query = query.filter(objects::dsl::coin_type.eq(Some(coin_type))); + } else { + query = query.filter(objects::dsl::coin_type.is_not_null()); + } + + query + .order((objects::dsl::coin_type.asc(), objects::dsl::object_id.asc())) + .limit(limit as i64) + .load::(&mut connection) + .await? + .into_iter() + .map(|o| o.try_into()) + .collect::>>() + } + + pub async fn get_coin_balances( + &self, + owner: SuiAddress, + // If coin_type is None, look for all coins. 
+ coin_type: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let coin_type_filter = if let Some(coin_type) = coin_type { + format!("= '{}'", coin_type) + } else { + "IS NOT NULL".to_string() + }; + // Note: important to cast to BIGINT to avoid deserialize confusion + let query = format!( + " + SELECT coin_type, \ + CAST(COUNT(*) AS BIGINT) AS coin_num, \ + CAST(SUM(coin_balance) AS BIGINT) AS coin_balance \ + FROM objects \ + WHERE owner_type = {} \ + AND owner_id = '\\x{}'::BYTEA \ + AND coin_type {} \ + GROUP BY coin_type \ + ORDER BY coin_type ASC + ", + OwnerType::Address as i16, + Hex::encode(owner.to_vec()), + coin_type_filter, + ); + + debug!("get coin balances query: {query}"); + diesel::sql_query(query) + .load::(&mut connection) + .await? + .into_iter() + .map(|cb| cb.try_into()) + .collect::>>() + } + + pub(crate) async fn get_display_fields( + &self, + original_object: &sui_types::object::Object, + original_layout: &Option, + ) -> Result { + let (object_type, layout) = if let Some((object_type, layout)) = + sui_json_rpc::read_api::get_object_type_and_struct(original_object, original_layout) + .map_err(|e| IndexerError::GenericError(e.to_string()))? + { + (object_type, layout) + } else { + return Ok(DisplayFieldsResponse { + data: None, + error: None, + }); + }; + + if let Some(display_object) = self.get_display_object_by_type(&object_type).await? 
{ + return sui_json_rpc::read_api::get_rendered_fields(display_object.fields, &layout) + .map_err(|e| IndexerError::GenericError(e.to_string())); + } + Ok(DisplayFieldsResponse { + data: None, + error: None, + }) + } + + pub async fn get_singleton_object(&self, type_: &StructTag) -> Result> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let object = match objects::table + .filter(objects::object_type_package.eq(type_.address.to_vec())) + .filter(objects::object_type_module.eq(type_.module.to_string())) + .filter(objects::object_type_name.eq(type_.name.to_string())) + .filter(objects::object_type.eq(type_.to_canonical_string(/* with_prefix */ true))) + .first::(&mut connection) + .await + .optional()? + { + Some(object) => object, + None => return Ok(None), + } + .try_into()?; + + Ok(Some(object)) + } + + pub async fn get_coin_metadata( + &self, + coin_struct: StructTag, + ) -> Result, IndexerError> { + let coin_metadata_type = CoinMetadata::type_(coin_struct); + + self.get_singleton_object(&coin_metadata_type) + .await? + .and_then(|o| SuiCoinMetadata::try_from(o).ok()) + .pipe(Ok) + } + + pub async fn get_total_supply(&self, coin_struct: StructTag) -> Result { + let treasury_cap_type = TreasuryCap::type_(coin_struct); + + self.get_singleton_object(&treasury_cap_type) + .await? + .and_then(|o| TreasuryCap::try_from(o).ok()) + .ok_or(IndexerError::GenericError(format!( + "Cannot find treasury cap object with type {}", + treasury_cap_type + )))? + .total_supply + .pipe(Ok) + } + + pub fn package_resolver(&self) -> PackageResolver { + self.package_resolver.clone() + } +} + +// NOTE: Do not make this public and easily accessible as we need to be careful that it is only +// used in non-async contexts via the use of tokio::task::spawn_blocking in order to avoid blocking +// the async runtime. +// +// Maybe we should look into introducing an async object store trait... 
+struct ConnectionAsObjectStore { + inner: std::sync::Mutex< + diesel_async::async_connection_wrapper::AsyncConnectionWrapper< + crate::database::Connection<'static>, + >, + >, +} + +impl ConnectionAsObjectStore { + async fn from_pool( + pool: &ConnectionPool, + ) -> Result { + let connection = std::sync::Mutex::new(pool.dedicated_connection().await?.into()); + + Ok(Self { inner: connection }) + } + + fn get_object_from_db( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + use diesel::RunQueryDsl; + + let mut guard = self.inner.lock().unwrap(); + let connection: &mut diesel_async::async_connection_wrapper::AsyncConnectionWrapper<_> = + &mut guard; + + let mut query = objects::table + .filter(objects::object_id.eq(object_id.to_vec())) + .into_boxed(); + if let Some(version) = version { + query = query.filter(objects::object_version.eq(version.value() as i64)) + } + + query + .first::(connection) + .optional() + .map_err(Into::into) + } + + fn get_object( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + let Some(stored_package) = self.get_object_from_db(object_id, version)? 
else { + return Ok(None); + }; + + let object = stored_package.try_into()?; + Ok(Some(object)) + } +} + +impl sui_types::storage::ObjectStore for ConnectionAsObjectStore { + fn get_object( + &self, + object_id: &ObjectID, + ) -> Result, sui_types::storage::error::Error> { + self.get_object(object_id, None) + .map_err(sui_types::storage::error::Error::custom) + } + + fn get_object_by_key( + &self, + object_id: &ObjectID, + version: sui_types::base_types::VersionNumber, + ) -> Result, sui_types::storage::error::Error> { + self.get_object(object_id, Some(version)) + .map_err(sui_types::storage::error::Error::custom) + } +} diff --git a/crates/sui-mvr-indexer/src/lib.rs b/crates/sui-mvr-indexer/src/lib.rs new file mode 100644 index 0000000000000..f40b0fdfcfb8a --- /dev/null +++ b/crates/sui-mvr-indexer/src/lib.rs @@ -0,0 +1,97 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +#![recursion_limit = "256"] + +use std::time::Duration; + +use anyhow::Result; +use config::JsonRpcConfig; +use jsonrpsee::http_client::{HeaderMap, HeaderValue, HttpClient, HttpClientBuilder}; +use metrics::IndexerMetrics; +use mysten_metrics::spawn_monitored_task; +use prometheus::Registry; +use system_package_task::SystemPackageTask; +use tokio_util::sync::CancellationToken; +use tracing::warn; + +use sui_json_rpc::ServerType; +use sui_json_rpc::{JsonRpcServerBuilder, ServerHandle}; +use sui_json_rpc_api::CLIENT_SDK_TYPE_HEADER; + +use crate::apis::{ + CoinReadApi, ExtendedApi, GovernanceReadApi, IndexerApi, MoveUtilsApi, ReadApi, + TransactionBuilderApi, WriteApi, +}; +use crate::indexer_reader::IndexerReader; +use errors::IndexerError; + +pub mod apis; +pub mod backfill; +pub mod benchmark; +pub mod config; +pub mod database; +pub mod db; +pub mod errors; +pub mod handlers; +pub mod indexer; +pub mod indexer_reader; +pub mod metrics; +pub mod models; +pub mod restorer; +pub mod schema; +pub mod store; +pub mod system_package_task; +pub mod tempdb; +pub mod 
test_utils; +pub mod types; + +pub async fn build_json_rpc_server( + prometheus_registry: &Registry, + reader: IndexerReader, + config: &JsonRpcConfig, + cancel: CancellationToken, +) -> Result { + let mut builder = + JsonRpcServerBuilder::new(env!("CARGO_PKG_VERSION"), prometheus_registry, None, None); + let http_client = crate::get_http_client(&config.rpc_client_url)?; + + builder.register_module(WriteApi::new(http_client.clone()))?; + builder.register_module(IndexerApi::new( + reader.clone(), + config.name_service_options.to_config(), + ))?; + builder.register_module(TransactionBuilderApi::new(reader.clone()))?; + builder.register_module(MoveUtilsApi::new(reader.clone()))?; + builder.register_module(GovernanceReadApi::new(reader.clone()))?; + builder.register_module(ReadApi::new(reader.clone()))?; + builder.register_module(CoinReadApi::new(reader.clone()))?; + builder.register_module(ExtendedApi::new(reader.clone()))?; + + let system_package_task = + SystemPackageTask::new(reader.clone(), cancel.clone(), Duration::from_secs(10)); + + tracing::info!("Starting system package task"); + spawn_monitored_task!(async move { system_package_task.run().await }); + + Ok(builder + .start(config.rpc_address, None, ServerType::Http, Some(cancel)) + .await?) 
+} + +fn get_http_client(rpc_client_url: &str) -> Result { + let mut headers = HeaderMap::new(); + headers.insert(CLIENT_SDK_TYPE_HEADER, HeaderValue::from_static("indexer")); + + HttpClientBuilder::default() + .max_request_body_size(2 << 30) + .max_concurrent_requests(usize::MAX) + .set_headers(headers.clone()) + .build(rpc_client_url) + .map_err(|e| { + warn!("Failed to get new Http client with error: {:?}", e); + IndexerError::HttpClientInitError(format!( + "Failed to initialize fullnode RPC client with error: {:?}", + e + )) + }) +} diff --git a/crates/sui-mvr-indexer/src/main.rs b/crates/sui-mvr-indexer/src/main.rs new file mode 100644 index 0000000000000..703ac457398a1 --- /dev/null +++ b/crates/sui-mvr-indexer/src/main.rs @@ -0,0 +1,117 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use clap::Parser; +use sui_mvr_indexer::backfill::backfill_runner::BackfillRunner; +use sui_mvr_indexer::benchmark::run_indexer_benchmark; +use sui_mvr_indexer::config::{Command, UploadOptions}; +use sui_mvr_indexer::database::ConnectionPool; +use sui_mvr_indexer::db::setup_postgres::clear_database; +use sui_mvr_indexer::db::{ + check_db_migration_consistency, check_prunable_tables_valid, reset_database, run_migrations, +}; +use sui_mvr_indexer::indexer::Indexer; +use sui_mvr_indexer::metrics::{ + spawn_connection_pool_metric_collector, start_prometheus_server, IndexerMetrics, +}; +use sui_mvr_indexer::restorer::formal_snapshot::IndexerFormalSnapshotRestorer; +use sui_mvr_indexer::store::PgIndexerStore; +use tokio_util::sync::CancellationToken; +use tracing::warn; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let opts = sui_mvr_indexer::config::IndexerConfig::parse(); + + // NOTE: this is to print out tracing like info, warn & error. 
+ let _guard = telemetry_subscribers::TelemetryConfig::new() + .with_env() + .init(); + warn!("WARNING: Sui indexer is still experimental and we expect occasional breaking changes that require backfills."); + + let (_registry_service, registry) = start_prometheus_server(opts.metrics_address)?; + mysten_metrics::init_metrics(®istry); + let indexer_metrics = IndexerMetrics::new(®istry); + + let pool = ConnectionPool::new( + opts.database_url.clone(), + opts.connection_pool_config.clone(), + ) + .await?; + spawn_connection_pool_metric_collector(indexer_metrics.clone(), pool.clone()); + + match opts.command { + Command::Indexer { + ingestion_config, + snapshot_config, + pruning_options, + upload_options, + } => { + // Make sure to run all migrations on startup, and also serve as a compatibility check. + run_migrations(pool.dedicated_connection().await?).await?; + let retention_config = pruning_options.load_from_file(); + if retention_config.is_some() { + check_prunable_tables_valid(&mut pool.get().await?).await?; + } + + let store = PgIndexerStore::new(pool, upload_options, indexer_metrics.clone()); + + Indexer::start_writer( + ingestion_config, + store, + indexer_metrics, + snapshot_config, + retention_config, + CancellationToken::new(), + None, + ) + .await?; + } + Command::JsonRpcService(json_rpc_config) => { + check_db_migration_consistency(&mut pool.get().await?).await?; + + Indexer::start_reader(&json_rpc_config, ®istry, pool, CancellationToken::new()) + .await?; + } + Command::ResetDatabase { + force, + skip_migrations, + } => { + if !force { + return Err(anyhow::anyhow!( + "Resetting the DB requires use of the `--force` flag", + )); + } + + if skip_migrations { + clear_database(&mut pool.dedicated_connection().await?).await?; + } else { + reset_database(pool.dedicated_connection().await?).await?; + } + } + Command::RunMigrations => { + run_migrations(pool.dedicated_connection().await?).await?; + } + Command::RunBackFill { + start, + end, + runner_kind, + 
backfill_config, + } => { + let total_range = start..=end; + BackfillRunner::run(runner_kind, pool, backfill_config, total_range).await; + } + Command::Restore(restore_config) => { + let store = + PgIndexerStore::new(pool, UploadOptions::default(), indexer_metrics.clone()); + let mut formal_restorer = + IndexerFormalSnapshotRestorer::new(store, restore_config).await?; + formal_restorer.restore().await?; + } + Command::Benchmark(benchmark_config) => { + run_indexer_benchmark(benchmark_config, pool, indexer_metrics).await; + } + } + + Ok(()) +} diff --git a/crates/sui-mvr-indexer/src/metrics.rs b/crates/sui-mvr-indexer/src/metrics.rs new file mode 100644 index 0000000000000..0b1f8c1e5bed5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/metrics.rs @@ -0,0 +1,813 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use axum::{extract::Extension, http::StatusCode, routing::get, Router}; +use mysten_metrics::RegistryService; +use prometheus::{ + register_histogram_with_registry, register_int_counter_with_registry, + register_int_gauge_with_registry, Histogram, IntCounter, IntGauge, +}; +use prometheus::{Registry, TextEncoder}; +use std::net::SocketAddr; +use tracing::info; + +const METRICS_ROUTE: &str = "/metrics"; + +pub fn start_prometheus_server( + addr: SocketAddr, +) -> Result<(RegistryService, Registry), anyhow::Error> { + info!(address =% addr, "Starting prometheus server"); + let registry = Registry::new_custom(Some("indexer".to_string()), None)?; + let registry_service = RegistryService::new(registry.clone()); + + let app = Router::new() + .route(METRICS_ROUTE, get(metrics)) + .layer(Extension(registry_service.clone())); + + tokio::spawn(async move { + let listener = tokio::net::TcpListener::bind(&addr).await.unwrap(); + axum::serve(listener, app).await.unwrap(); + }); + Ok((registry_service, registry)) +} + +async fn metrics(Extension(registry_service): Extension) -> (StatusCode, String) { + let metrics_families = 
registry_service.gather_all(); + match TextEncoder.encode_to_string(&metrics_families) { + Ok(metrics) => (StatusCode::OK, metrics), + Err(error) => ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("unable to encode metrics: {error}"), + ), + } +} + +/// NOTE: for various data ingestion steps, which are expected to be within [0.001, 100] seconds, +/// and high double digits usually means something is broken. +const DATA_INGESTION_LATENCY_SEC_BUCKETS: &[f64] = &[ + 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, +]; +/// NOTE: for objects_snapshot update and advance_epoch, which are expected to be within [0.1, 100] seconds, +/// and can go up to high hundreds of seconds when things go wrong. +const DB_UPDATE_QUERY_LATENCY_SEC_BUCKETS: &[f64] = &[ + 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0, + 10000.0, +]; +/// NOTE: for json_rpc calls, which are expected to be within [0.01, 100] seconds, +/// high hundreds of seconds usually means something is broken. 
+const JSON_RPC_LATENCY_SEC_BUCKETS: &[f64] = &[ + 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, +]; + +#[derive(Clone)] +pub struct IndexerMetrics { + pub total_checkpoint_received: IntCounter, + pub total_tx_checkpoint_committed: IntCounter, + pub total_object_checkpoint_committed: IntCounter, + pub total_transaction_committed: IntCounter, + pub total_object_change_committed: IntCounter, + pub total_transaction_chunk_committed: IntCounter, + pub total_object_change_chunk_committed: IntCounter, + pub total_epoch_committed: IntCounter, + pub latest_fullnode_checkpoint_sequence_number: IntGauge, + pub latest_tx_checkpoint_sequence_number: IntGauge, + pub latest_indexer_object_checkpoint_sequence_number: IntGauge, + pub latest_object_snapshot_sequence_number: IntGauge, + // max checkpoint sequence numbers on various stages of indexer data ingestion + pub max_downloaded_checkpoint_sequence_number: IntGauge, + pub max_indexed_checkpoint_sequence_number: IntGauge, + pub max_committed_checkpoint_sequence_number: IntGauge, + // the related timestamps of max checkpoint ^ on various stages + pub downloaded_checkpoint_timestamp_ms: IntGauge, + pub indexed_checkpoint_timestamp_ms: IntGauge, + pub committed_checkpoint_timestamp_ms: IntGauge, + // lag starting from the timestamp of the latest checkpoint to the current time + pub download_lag_ms: IntGauge, + pub index_lag_ms: IntGauge, + pub db_commit_lag_ms: IntGauge, + // latencies of various steps of data ingestion. 
+ // checkpoint E2E latency is: fullnode_download_latency + checkpoint_index_latency + db_commit_latency + pub checkpoint_download_bytes_size: IntGauge, + pub tokio_blocking_task_wait_latency: Histogram, + pub fullnode_checkpoint_data_download_latency: Histogram, + pub fullnode_checkpoint_wait_and_download_latency: Histogram, + pub fullnode_transaction_download_latency: Histogram, + pub fullnode_object_download_latency: Histogram, + pub checkpoint_index_latency: Histogram, + pub indexing_batch_size: IntGauge, + pub indexing_tx_object_changes_latency: Histogram, + pub indexing_objects_latency: Histogram, + pub indexing_get_object_in_mem_hit: IntCounter, + pub indexing_get_object_db_hit: IntCounter, + pub indexing_module_resolver_in_mem_hit: IntCounter, + pub indexing_package_resolver_in_mem_hit: IntCounter, + pub indexing_packages_latency: Histogram, + pub checkpoint_objects_index_latency: Histogram, + pub checkpoint_db_commit_latency: Histogram, + pub checkpoint_db_commit_latency_step_1: Histogram, + pub checkpoint_db_commit_latency_transactions: Histogram, + pub checkpoint_db_commit_latency_transactions_chunks: Histogram, + pub checkpoint_db_commit_latency_transactions_chunks_transformation: Histogram, + pub checkpoint_db_commit_latency_objects: Histogram, + pub checkpoint_db_commit_latency_objects_snapshot: Histogram, + pub checkpoint_db_commit_latency_objects_version: Histogram, + pub checkpoint_db_commit_latency_objects_history: Histogram, + pub checkpoint_db_commit_latency_full_objects_history: Histogram, + pub checkpoint_db_commit_latency_objects_chunks: Histogram, + pub checkpoint_db_commit_latency_objects_snapshot_chunks: Histogram, + pub checkpoint_db_commit_latency_objects_version_chunks: Histogram, + pub checkpoint_db_commit_latency_objects_history_chunks: Histogram, + pub checkpoint_db_commit_latency_full_objects_history_chunks: Histogram, + pub checkpoint_db_commit_latency_events: Histogram, + pub checkpoint_db_commit_latency_events_chunks: Histogram, 
+ pub checkpoint_db_commit_latency_event_indices: Histogram, + pub checkpoint_db_commit_latency_event_indices_chunks: Histogram, + pub checkpoint_db_commit_latency_packages: Histogram, + pub checkpoint_db_commit_latency_tx_indices: Histogram, + pub checkpoint_db_commit_latency_tx_indices_chunks: Histogram, + pub checkpoint_db_commit_latency_checkpoints: Histogram, + pub checkpoint_db_commit_latency_epoch: Histogram, + pub checkpoint_db_commit_latency_watermarks: Histogram, + pub thousand_transaction_avg_db_commit_latency: Histogram, + pub object_db_commit_latency: Histogram, + pub object_mutation_db_commit_latency: Histogram, + pub object_deletion_db_commit_latency: Histogram, + pub epoch_db_commit_latency: Histogram, + // latencies of slow DB update queries, now only advance epoch and objects_snapshot update + pub advance_epoch_latency: Histogram, + // latencies of RPC endpoints in read.rs + pub get_transaction_block_latency: Histogram, + pub multi_get_transaction_blocks_latency: Histogram, + pub get_object_latency: Histogram, + pub multi_get_objects_latency: Histogram, + pub try_get_past_object_latency: Histogram, + pub try_multi_get_past_objects_latency: Histogram, + pub get_checkpoint_latency: Histogram, + pub get_checkpoints_latency: Histogram, + pub get_events_latency: Histogram, + pub get_loaded_child_objects_latency: Histogram, + pub get_total_transaction_blocks_latency: Histogram, + pub get_latest_checkpoint_sequence_number_latency: Histogram, + // latencies of RPC endpoints in indexer.rs + pub get_owned_objects_latency: Histogram, + pub query_transaction_blocks_latency: Histogram, + pub query_events_latency: Histogram, + pub get_dynamic_fields_latency: Histogram, + pub get_dynamic_field_object_latency: Histogram, + pub get_protocol_config_latency: Histogram, + // latency of event websocket subscription + pub subscription_process_latency: Histogram, + pub transaction_per_checkpoint: Histogram, + // indexer state metrics + pub db_conn_pool_size: IntGauge, + 
pub idle_db_conn: IntGauge, + pub address_processor_failure: IntCounter, + pub checkpoint_metrics_processor_failure: IntCounter, + // pruner metrics + pub last_pruned_epoch: IntGauge, + pub last_pruned_checkpoint: IntGauge, + pub last_pruned_transaction: IntGauge, + pub epoch_pruning_latency: Histogram, +} + +impl IndexerMetrics { + pub fn new(registry: &Registry) -> Self { + Self { + total_checkpoint_received: register_int_counter_with_registry!( + "total_checkpoint_received", + "Total number of checkpoint received", + registry, + ) + .unwrap(), + total_tx_checkpoint_committed: register_int_counter_with_registry!( + "total_checkpoint_committed", + "Total number of checkpoint committed", + registry, + ) + .unwrap(), + total_object_checkpoint_committed: register_int_counter_with_registry!( + "total_object_checkpoint_committed", + "Total number of object checkpoint committed", + registry, + ) + .unwrap(), + total_transaction_committed: register_int_counter_with_registry!( + "total_transaction_committed", + "Total number of transaction committed", + registry, + ) + .unwrap(), + total_object_change_committed: register_int_counter_with_registry!( + "total_object_change_committed", + "Total number of object change committed", + registry, + ) + .unwrap(), + total_transaction_chunk_committed: register_int_counter_with_registry!( + "total_transaction_chunk_committed", + "Total number of transaction chunk committed", + registry, + ) + .unwrap(), + total_object_change_chunk_committed: register_int_counter_with_registry!( + "total_object_change_chunk_committed", + "Total number of object change chunk committed", + registry, + ) + .unwrap(), + total_epoch_committed: register_int_counter_with_registry!( + "total_epoch_committed", + "Total number of epoch committed", + registry, + ) + .unwrap(), + latest_fullnode_checkpoint_sequence_number: register_int_gauge_with_registry!( + "latest_fullnode_checkpoint_sequence_number", + "Latest checkpoint sequence number from the Full Node", 
+ registry, + ) + .unwrap(), + latest_tx_checkpoint_sequence_number: register_int_gauge_with_registry!( + "latest_indexer_checkpoint_sequence_number", + "Latest checkpoint sequence number from the Indexer", + registry, + ) + .unwrap(), + latest_indexer_object_checkpoint_sequence_number: register_int_gauge_with_registry!( + "latest_indexer_object_checkpoint_sequence_number", + "Latest object checkpoint sequence number from the Indexer", + registry, + ) + .unwrap(), + latest_object_snapshot_sequence_number: register_int_gauge_with_registry!( + "latest_object_snapshot_sequence_number", + "Latest object snapshot sequence number from the Indexer", + registry, + ).unwrap(), + max_downloaded_checkpoint_sequence_number: register_int_gauge_with_registry!( + "max_downloaded_checkpoint_sequence_number", + "Max downloaded checkpoint sequence number", + registry, + ).unwrap(), + max_indexed_checkpoint_sequence_number: register_int_gauge_with_registry!( + "max_indexed_checkpoint_sequence_number", + "Max indexed checkpoint sequence number", + registry, + ).unwrap(), + max_committed_checkpoint_sequence_number: register_int_gauge_with_registry!( + "max_committed_checkpoint_sequence_number", + "Max committed checkpoint sequence number", + registry, + ).unwrap(), + downloaded_checkpoint_timestamp_ms: register_int_gauge_with_registry!( + "downloaded_checkpoint_timestamp_ms", + "Timestamp of the downloaded checkpoint", + registry, + ).unwrap(), + indexed_checkpoint_timestamp_ms: register_int_gauge_with_registry!( + "indexed_checkpoint_timestamp_ms", + "Timestamp of the indexed checkpoint", + registry, + ).unwrap(), + committed_checkpoint_timestamp_ms: register_int_gauge_with_registry!( + "committed_checkpoint_timestamp_ms", + "Timestamp of the committed checkpoint", + registry, + ).unwrap(), + download_lag_ms: register_int_gauge_with_registry!( + "download_lag_ms", + "Lag of the latest checkpoint in milliseconds", + registry, + ).unwrap(), + index_lag_ms: 
register_int_gauge_with_registry!( + "index_lag_ms", + "Lag of the latest checkpoint in milliseconds", + registry, + ).unwrap(), + db_commit_lag_ms: register_int_gauge_with_registry!( + "db_commit_lag_ms", + "Lag of the latest checkpoint in milliseconds", + registry, + ).unwrap(), + checkpoint_download_bytes_size: register_int_gauge_with_registry!( + "checkpoint_download_bytes_size", + "Size of the downloaded checkpoint in bytes", + registry, + ).unwrap(), + fullnode_checkpoint_data_download_latency: register_histogram_with_registry!( + "fullnode_checkpoint_data_download_latency", + "Time spent in downloading checkpoint and transaction for a new checkpoint from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + fullnode_checkpoint_wait_and_download_latency: register_histogram_with_registry!( + "fullnode_checkpoint_wait_and_download_latency", + "Time spent in waiting for a new checkpoint from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + + fullnode_transaction_download_latency: register_histogram_with_registry!( + "fullnode_transaction_download_latency", + "Time spent in waiting for a new transaction from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + fullnode_object_download_latency: register_histogram_with_registry!( + "fullnode_object_download_latency", + "Time spent in waiting for a new epoch from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_index_latency: register_histogram_with_registry!( + "checkpoint_index_latency", + "Time spent in indexing a checkpoint", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_batch_size: register_int_gauge_with_registry!( + "indexing_batch_size", + "Size of the indexing batch", + registry, + ).unwrap(), + indexing_tx_object_changes_latency: register_histogram_with_registry!( + 
"indexing_tx_object_changes_latency", + "Time spent in indexing object changes for a transaction", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_objects_latency: register_histogram_with_registry!( + "indexing_objects_latency", + "Time spent in indexing objects", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_packages_latency: register_histogram_with_registry!( + "indexing_packages_latency", + "Time spent in indexing packages", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_get_object_in_mem_hit: register_int_counter_with_registry!( + "indexing_get_object_in_mem_hit", + "Total number get object hit in mem", + registry, + ) + .unwrap(), + indexing_get_object_db_hit: register_int_counter_with_registry!( + "indexing_get_object_db_hit", + "Total number get object hit in db", + registry, + ) + .unwrap(), + indexing_module_resolver_in_mem_hit: register_int_counter_with_registry!( + "indexing_module_resolver_in_mem_hit", + "Total number module resolver hit in mem", + registry, + ) + .unwrap(), + indexing_package_resolver_in_mem_hit: register_int_counter_with_registry!( + "indexing_package_resolver_in_mem_hit", + "Total number package resolver hit in mem", + registry, + ) + .unwrap(), + checkpoint_objects_index_latency: register_histogram_with_registry!( + "checkpoint_object_index_latency", + "Time spent in indexing a checkpoint objects", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency: register_histogram_with_registry!( + "checkpoint_db_commit_latency", + "Time spent committing a checkpoint to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + + checkpoint_db_commit_latency_step_1: register_histogram_with_registry!( + "checkpoint_db_commit_latency_step_1", + "Time spent committing a checkpoint to the db, step 1", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + 
registry, + ) + .unwrap(), + checkpoint_db_commit_latency_transactions: register_histogram_with_registry!( + "checkpoint_db_commit_latency_transactions", + "Time spent committing transactions", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_transactions_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_transactions_chunks", + "Time spent committing transactions chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_transactions_chunks_transformation: register_histogram_with_registry!( + "checkpoint_db_commit_latency_transactions_transaformation", + "Time spent in transactions chunks transformation prior to commit", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects", + "Time spent committing objects", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_snapshot: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_snapshot", + "Time spent committing objects snapshots", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_version: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_version", + "Time spent committing objects version", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_objects_history: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_history", + "Time spent committing objects history", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_full_objects_history: register_histogram_with_registry!( + "checkpoint_db_commit_latency_full_objects_history", + "Time spent committing full 
objects history", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_objects_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_chunks", + "Time spent committing objects chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_snapshot_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_snapshot_chunks", + "Time spent committing objects snapshot chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_version_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_version_chunks", + "Time spent committing objects version chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_objects_history_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_history_chunks", + "Time spent committing objects history chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_full_objects_history_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_full_objects_history_chunks", + "Time spent committing full objects history chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_events: register_histogram_with_registry!( + "checkpoint_db_commit_latency_events", + "Time spent committing events", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_events_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_events_chunks", + "Time spent committing events chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_event_indices: register_histogram_with_registry!( + 
"checkpoint_db_commit_latency_event_indices", + "Time spent committing event indices", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_event_indices_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_event_indices_chunks", + "Time spent committing event indices chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_packages: register_histogram_with_registry!( + "checkpoint_db_commit_latency_packages", + "Time spent committing packages", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_tx_indices: register_histogram_with_registry!( + "checkpoint_db_commit_latency_tx_indices", + "Time spent committing tx indices", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_tx_indices_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_tx_indices_chunks", + "Time spent committing tx_indices chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_checkpoints: register_histogram_with_registry!( + "checkpoint_db_commit_latency_checkpoints", + "Time spent committing checkpoints", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_epoch: register_histogram_with_registry!( + "checkpoint_db_commit_latency_epochs", + "Time spent committing epochs", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_watermarks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_watermarks", + "Time spent committing watermarks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + tokio_blocking_task_wait_latency: register_histogram_with_registry!( + "tokio_blocking_task_wait_latency", + "Time spent to wait for tokio 
blocking task pool", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + thousand_transaction_avg_db_commit_latency: register_histogram_with_registry!( + "transaction_db_commit_latency", + "Average time spent committing 1000 transactions to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + object_db_commit_latency: register_histogram_with_registry!( + "object_db_commit_latency", + "Time spent committing a object to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + object_mutation_db_commit_latency: register_histogram_with_registry!( + "object_mutation_db_commit_latency", + "Time spent committing a object mutation to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + object_deletion_db_commit_latency: register_histogram_with_registry!( + "object_deletion_db_commit_latency", + "Time spent committing a object deletion to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + epoch_db_commit_latency: register_histogram_with_registry!( + "epoch_db_commit_latency", + "Time spent committing a epoch to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + advance_epoch_latency: register_histogram_with_registry!( + "advance_epoch_latency", + "Time spent in advancing epoch", + DB_UPDATE_QUERY_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + subscription_process_latency: register_histogram_with_registry!( + "subscription_process_latency", + "Time spent in process Websocket subscription", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + transaction_per_checkpoint: register_histogram_with_registry!( + "transaction_per_checkpoint", + "Number of transactions per checkpoint", + vec![1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0], + registry, + ) + .unwrap(), + get_transaction_block_latency: register_histogram_with_registry!( 
+ "get_transaction_block_latency", + "Time spent in get_transaction_block on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + multi_get_transaction_blocks_latency: register_histogram_with_registry!( + "multi_get_transaction_blocks_latency", + "Time spent in multi_get_transaction_blocks on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_object_latency: register_histogram_with_registry!( + "get_object_latency", + "Time spent in get_object on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + multi_get_objects_latency: register_histogram_with_registry!( + "multi_get_objects_latency", + "Time spent in multi_get_objects on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + try_get_past_object_latency: register_histogram_with_registry!( + "try_get_past_object_latency", + "Time spent in try_get_past_object on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + try_multi_get_past_objects_latency: register_histogram_with_registry!( + "try_multi_get_past_objects_latency", + "Time spent in try_multi_get_past_objects on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_checkpoint_latency: register_histogram_with_registry!( + "get_checkpoint_latency", + "Time spent in get_checkpoint on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_checkpoints_latency: register_histogram_with_registry!( + "get_checkpoints_latency", + "Time spent in get_checkpoints on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_events_latency: register_histogram_with_registry!( + "get_events_latency", + "Time spent in get_events on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + 
get_total_transaction_blocks_latency: register_histogram_with_registry!( + "get_total_transaction_blocks_latency", + "Time spent in get_total_transaction_blocks on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_latest_checkpoint_sequence_number_latency: register_histogram_with_registry!( + "get_latest_checkpoint_sequence_number_latency", + "Time spent in get_latest_checkpoint_sequence_number on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_owned_objects_latency: register_histogram_with_registry!( + "get_owned_objects_latency", + "Time spent in get_owned_objects on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + query_transaction_blocks_latency: register_histogram_with_registry!( + "query_transaction_blocks_latency", + "Time spent in query_transaction_blocks on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + query_events_latency: register_histogram_with_registry!( + "query_events_latency", + "Time spent in query_events on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_dynamic_fields_latency: register_histogram_with_registry!( + "get_dynamic_fields_latency", + "Time spent in get_dynamic_fields on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_dynamic_field_object_latency: register_histogram_with_registry!( + "get_dynamic_field_object_latency", + "Time spent in get_dynamic_field_object on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_loaded_child_objects_latency: register_histogram_with_registry!( + "get_loaded_child_objects_latency", + "Time spent in get_loaded_child_objects_latency on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_protocol_config_latency: 
register_histogram_with_registry!( + "get_protocol_config_latency", + "Time spent in get_protocol_config_latency on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + db_conn_pool_size: register_int_gauge_with_registry!( + "db_conn_pool_size", + "Size of the database connection pool", + registry + ).unwrap(), + idle_db_conn: register_int_gauge_with_registry!( + "idle_db_conn", + "Number of idle database connections", + registry + ).unwrap(), + address_processor_failure: register_int_counter_with_registry!( + "address_processor_failure", + "Total number of address processor failure", + registry, + ) + .unwrap(), + checkpoint_metrics_processor_failure: register_int_counter_with_registry!( + "checkpoint_metrics_processor_failure", + "Total number of checkpoint metrics processor failure", + registry, + ) + .unwrap(), + last_pruned_epoch: register_int_gauge_with_registry!( + "last_pruned_epoch", + "Last pruned epoch number", + registry, + ) + .unwrap(), + last_pruned_checkpoint: register_int_gauge_with_registry!( + "last_pruned_checkpoint", + "Last pruned checkpoint sequence number", + registry, + ) + .unwrap(), + last_pruned_transaction: register_int_gauge_with_registry!( + "last_pruned_transaction", + "Last pruned transaction sequence number", + registry, + ).unwrap(), + epoch_pruning_latency: register_histogram_with_registry!( + "epoch_pruning_latency", + "Time spent in pruning one epoch", + DB_UPDATE_QUERY_LATENCY_SEC_BUCKETS.to_vec(), + registry + ).unwrap(), + } + } +} + +pub fn spawn_connection_pool_metric_collector( + metrics: IndexerMetrics, + connection_pool: crate::database::ConnectionPool, +) { + tokio::spawn(async move { + loop { + let cp_state = connection_pool.state(); + tracing::debug!( + connection_pool_size =% cp_state.connections, + idle_connections =% cp_state.idle_connections, + ); + metrics.db_conn_pool_size.set(cp_state.connections as i64); + metrics.idle_db_conn.set(cp_state.idle_connections as i64); + 
tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; + } + }); +} diff --git a/crates/sui-mvr-indexer/src/models/checkpoints.rs b/crates/sui-mvr-indexer/src/models/checkpoints.rs new file mode 100644 index 0000000000000..d18c1a1a7ce9e --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/checkpoints.rs @@ -0,0 +1,186 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use diesel::prelude::*; + +use sui_json_rpc_types::Checkpoint as RpcCheckpoint; +use sui_types::base_types::TransactionDigest; +use sui_types::digests::CheckpointDigest; +use sui_types::gas::GasCostSummary; + +use crate::errors::IndexerError; +use crate::schema::{chain_identifier, checkpoints, pruner_cp_watermark}; +use crate::types::IndexedCheckpoint; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = chain_identifier)] +pub struct StoredChainIdentifier { + pub checkpoint_digest: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = checkpoints)] +pub struct StoredCheckpoint { + pub sequence_number: i64, + pub checkpoint_digest: Vec, + pub epoch: i64, + pub network_total_transactions: i64, + pub previous_checkpoint_digest: Option>, + pub end_of_epoch: bool, + pub tx_digests: Vec>>, + pub timestamp_ms: i64, + pub total_gas_cost: i64, + pub computation_cost: i64, + pub storage_cost: i64, + pub storage_rebate: i64, + pub non_refundable_storage_fee: i64, + pub checkpoint_commitments: Vec, + pub validator_signature: Vec, + pub end_of_epoch_data: Option>, + pub min_tx_sequence_number: Option, + pub max_tx_sequence_number: Option, +} + +impl From<&IndexedCheckpoint> for StoredCheckpoint { + fn from(c: &IndexedCheckpoint) -> Self { + Self { + sequence_number: c.sequence_number as i64, + checkpoint_digest: c.checkpoint_digest.into_inner().to_vec(), + epoch: c.epoch as i64, + tx_digests: c + .tx_digests + .iter() + .map(|tx| Some(tx.into_inner().to_vec())) + .collect(), + 
network_total_transactions: c.network_total_transactions as i64, + previous_checkpoint_digest: c + .previous_checkpoint_digest + .as_ref() + .map(|d| (*d).into_inner().to_vec()), + timestamp_ms: c.timestamp_ms as i64, + total_gas_cost: c.total_gas_cost, + computation_cost: c.computation_cost as i64, + storage_cost: c.storage_cost as i64, + storage_rebate: c.storage_rebate as i64, + non_refundable_storage_fee: c.non_refundable_storage_fee as i64, + checkpoint_commitments: bcs::to_bytes(&c.checkpoint_commitments).unwrap(), + validator_signature: bcs::to_bytes(&c.validator_signature).unwrap(), + end_of_epoch_data: c + .end_of_epoch_data + .as_ref() + .map(|d| bcs::to_bytes(d).unwrap()), + end_of_epoch: c.end_of_epoch_data.is_some(), + min_tx_sequence_number: Some(c.min_tx_sequence_number as i64), + max_tx_sequence_number: Some(c.max_tx_sequence_number as i64), + } + } +} + +impl TryFrom for RpcCheckpoint { + type Error = IndexerError; + fn try_from(checkpoint: StoredCheckpoint) -> Result { + let parsed_digest = CheckpointDigest::try_from(checkpoint.checkpoint_digest.clone()) + .map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode checkpoint digest: {:?} with err: {:?}", + checkpoint.checkpoint_digest, e + )) + })?; + + let parsed_previous_digest: Option = checkpoint + .previous_checkpoint_digest + .map(|digest| { + CheckpointDigest::try_from(digest.clone()).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode previous checkpoint digest: {:?} with err: {:?}", + digest, e + )) + }) + }) + .transpose()?; + + let transactions: Vec = { + checkpoint + .tx_digests + .into_iter() + .map(|tx_digest| match tx_digest { + None => Err(IndexerError::PersistentStorageDataCorruptionError( + "tx_digests should not contain null elements".to_string(), + )), + Some(tx_digest) => { + TransactionDigest::try_from(tx_digest.as_slice()).map_err(|e| { + 
IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode transaction digest: {:?} with err: {:?}", + tx_digest, e + )) + }) + } + }) + .collect::, IndexerError>>()? + }; + let validator_signature = + bcs::from_bytes(&checkpoint.validator_signature).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode validator signature: {:?} with err: {:?}", + checkpoint.validator_signature, e + )) + })?; + + let checkpoint_commitments = + bcs::from_bytes(&checkpoint.checkpoint_commitments).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode checkpoint commitments: {:?} with err: {:?}", + checkpoint.checkpoint_commitments, e + )) + })?; + + let end_of_epoch_data = checkpoint + .end_of_epoch_data + .map(|data| { + bcs::from_bytes(&data).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode end of epoch data: {:?} with err: {:?}", + data, e + )) + }) + }) + .transpose()?; + + Ok(RpcCheckpoint { + epoch: checkpoint.epoch as u64, + sequence_number: checkpoint.sequence_number as u64, + digest: parsed_digest, + previous_digest: parsed_previous_digest, + end_of_epoch_data, + epoch_rolling_gas_cost_summary: GasCostSummary { + computation_cost: checkpoint.computation_cost as u64, + storage_cost: checkpoint.storage_cost as u64, + storage_rebate: checkpoint.storage_rebate as u64, + non_refundable_storage_fee: checkpoint.non_refundable_storage_fee as u64, + }, + network_total_transactions: checkpoint.network_total_transactions as u64, + timestamp_ms: checkpoint.timestamp_ms as u64, + transactions, + validator_signature, + checkpoint_commitments, + }) + } +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = pruner_cp_watermark)] +pub struct StoredCpTx { + pub checkpoint_sequence_number: i64, + pub min_tx_sequence_number: i64, + pub max_tx_sequence_number: i64, +} + +impl From<&IndexedCheckpoint> for 
StoredCpTx { + fn from(c: &IndexedCheckpoint) -> Self { + Self { + checkpoint_sequence_number: c.sequence_number as i64, + min_tx_sequence_number: c.min_tx_sequence_number as i64, + max_tx_sequence_number: c.max_tx_sequence_number as i64, + } + } +} diff --git a/crates/sui-mvr-indexer/src/models/display.rs b/crates/sui-mvr-indexer/src/models/display.rs new file mode 100644 index 0000000000000..33a1c7c7cb0cb --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/display.rs @@ -0,0 +1,35 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use diesel::prelude::*; +use serde::Deserialize; + +use sui_types::display::DisplayVersionUpdatedEvent; + +use crate::schema::display; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Deserialize)] +#[diesel(table_name = display)] +pub struct StoredDisplay { + pub object_type: String, + pub id: Vec, + pub version: i16, + pub bcs: Vec, +} + +impl StoredDisplay { + pub fn try_from_event(event: &sui_types::event::Event) -> Option { + let (ty, display_event) = DisplayVersionUpdatedEvent::try_from_event(event)?; + + Some(Self { + object_type: ty.to_canonical_string(/* with_prefix */ true), + id: display_event.id.bytes.to_vec(), + version: display_event.version as i16, + bcs: event.contents.clone(), + }) + } + + pub fn to_display_update_event(&self) -> Result { + bcs::from_bytes(&self.bcs) + } +} diff --git a/crates/sui-mvr-indexer/src/models/epoch.rs b/crates/sui-mvr-indexer/src/models/epoch.rs new file mode 100644 index 0000000000000..d8e943f4c245c --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/epoch.rs @@ -0,0 +1,278 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::schema::epochs; +use crate::{errors::IndexerError, schema::feature_flags, schema::protocol_configs}; +use diesel::prelude::{AsChangeset, Identifiable}; +use diesel::{Insertable, Queryable, Selectable}; +use sui_json_rpc_types::{EndOfEpochInfo, EpochInfo}; +use sui_types::event::SystemEpochInfoEvent; +use sui_types::messages_checkpoint::CertifiedCheckpointSummary; +use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; + +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = epochs)] +pub struct StoredEpochInfo { + pub epoch: i64, + pub first_checkpoint_id: i64, + pub epoch_start_timestamp: i64, + pub reference_gas_price: i64, + pub protocol_version: i64, + pub total_stake: i64, + pub storage_fund_balance: i64, + pub system_state: Option>, + pub epoch_total_transactions: Option, + pub last_checkpoint_id: Option, + pub epoch_end_timestamp: Option, + pub storage_fund_reinvestment: Option, + pub storage_charge: Option, + pub storage_rebate: Option, + pub stake_subsidy_amount: Option, + pub total_gas_fees: Option, + pub total_stake_rewards_distributed: Option, + pub leftover_storage_fund_inflow: Option, + pub epoch_commitments: Option>, + /// This is the system state summary at the beginning of the epoch, serialized as JSON. + pub system_state_summary_json: Option, + /// First transaction sequence number of this epoch. 
+ pub first_tx_sequence_number: Option, +} + +#[derive(Insertable, Identifiable, AsChangeset, Clone, Debug)] +#[diesel(primary_key(epoch))] +#[diesel(table_name = epochs)] +pub struct StartOfEpochUpdate { + pub epoch: i64, + pub first_checkpoint_id: i64, + pub first_tx_sequence_number: i64, + pub epoch_start_timestamp: i64, + pub reference_gas_price: i64, + pub protocol_version: i64, + pub total_stake: i64, + pub storage_fund_balance: i64, + pub system_state_summary_json: serde_json::Value, +} + +#[derive(Identifiable, AsChangeset, Clone, Debug)] +#[diesel(primary_key(epoch))] +#[diesel(table_name = epochs)] +pub struct EndOfEpochUpdate { + pub epoch: i64, + pub epoch_total_transactions: i64, + pub last_checkpoint_id: i64, + pub epoch_end_timestamp: i64, + pub storage_fund_reinvestment: i64, + pub storage_charge: i64, + pub storage_rebate: i64, + pub stake_subsidy_amount: i64, + pub total_gas_fees: i64, + pub total_stake_rewards_distributed: i64, + pub leftover_storage_fund_inflow: i64, + pub epoch_commitments: Vec, +} + +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = protocol_configs)] +pub struct StoredProtocolConfig { + pub protocol_version: i64, + pub config_name: String, + pub config_value: Option, +} + +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = feature_flags)] +pub struct StoredFeatureFlag { + pub protocol_version: i64, + pub flag_name: String, + pub flag_value: bool, +} + +#[derive(Queryable, Selectable, Clone)] +#[diesel(table_name = epochs)] +pub struct QueryableEpochInfo { + pub epoch: i64, + pub first_checkpoint_id: i64, + pub epoch_start_timestamp: i64, + pub reference_gas_price: i64, + pub protocol_version: i64, + pub total_stake: i64, + pub storage_fund_balance: i64, + pub epoch_total_transactions: Option, + pub first_tx_sequence_number: Option, + pub last_checkpoint_id: Option, + pub epoch_end_timestamp: Option, + pub storage_fund_reinvestment: Option, + pub storage_charge: Option, 
+ pub storage_rebate: Option, + pub stake_subsidy_amount: Option, + pub total_gas_fees: Option, + pub total_stake_rewards_distributed: Option, + pub leftover_storage_fund_inflow: Option, + pub epoch_commitments: Option>, +} + +#[derive(Queryable)] +pub struct QueryableEpochSystemState { + pub epoch: i64, + pub system_state: Vec, +} + +#[derive(Default)] +pub struct EpochStartInfo { + pub first_checkpoint_id: u64, + pub first_tx_sequence_number: u64, + pub total_stake: u64, + pub storage_fund_balance: u64, +} + +impl EpochStartInfo { + pub fn new( + first_checkpoint_id: u64, + first_tx_sequence_number: u64, + epoch_event_opt: Option<&SystemEpochInfoEvent>, + ) -> Self { + Self { + first_checkpoint_id, + first_tx_sequence_number, + total_stake: epoch_event_opt.map(|e| e.total_stake).unwrap_or_default(), + storage_fund_balance: epoch_event_opt + .map(|e| e.storage_fund_balance) + .unwrap_or_default(), + } + } +} + +impl StartOfEpochUpdate { + pub fn new( + new_system_state_summary: SuiSystemStateSummary, + epoch_start_info: EpochStartInfo, + ) -> Self { + Self { + epoch: new_system_state_summary.epoch as i64, + system_state_summary_json: serde_json::to_value(new_system_state_summary.clone()) + .unwrap(), + first_checkpoint_id: epoch_start_info.first_checkpoint_id as i64, + first_tx_sequence_number: epoch_start_info.first_tx_sequence_number as i64, + epoch_start_timestamp: new_system_state_summary.epoch_start_timestamp_ms as i64, + reference_gas_price: new_system_state_summary.reference_gas_price as i64, + protocol_version: new_system_state_summary.protocol_version as i64, + total_stake: epoch_start_info.total_stake as i64, + storage_fund_balance: epoch_start_info.storage_fund_balance as i64, + } + } +} + +#[derive(Default)] +pub struct EpochEndInfo { + pub storage_fund_reinvestment: u64, + pub storage_charge: u64, + pub storage_rebate: u64, + pub leftover_storage_fund_inflow: u64, + pub stake_subsidy_amount: u64, + pub total_gas_fees: u64, + pub 
total_stake_rewards_distributed: u64, +} + +impl EpochEndInfo { + pub fn new(epoch_event_opt: Option<&SystemEpochInfoEvent>) -> Self { + epoch_event_opt.map_or_else(Self::default, |epoch_event| Self { + storage_fund_reinvestment: epoch_event.storage_fund_reinvestment, + storage_charge: epoch_event.storage_charge, + storage_rebate: epoch_event.storage_rebate, + leftover_storage_fund_inflow: epoch_event.leftover_storage_fund_inflow, + stake_subsidy_amount: epoch_event.stake_subsidy_amount, + total_gas_fees: epoch_event.total_gas_fees, + total_stake_rewards_distributed: epoch_event.total_stake_rewards_distributed, + }) + } +} + +impl EndOfEpochUpdate { + pub fn new( + last_checkpoint_summary: &CertifiedCheckpointSummary, + first_tx_sequence_number: u64, + epoch_end_info: EpochEndInfo, + ) -> Self { + Self { + epoch: last_checkpoint_summary.epoch as i64, + epoch_total_transactions: (last_checkpoint_summary.network_total_transactions + - first_tx_sequence_number) as i64, + last_checkpoint_id: *last_checkpoint_summary.sequence_number() as i64, + epoch_end_timestamp: last_checkpoint_summary.timestamp_ms as i64, + storage_fund_reinvestment: epoch_end_info.storage_fund_reinvestment as i64, + storage_charge: epoch_end_info.storage_charge as i64, + storage_rebate: epoch_end_info.storage_rebate as i64, + leftover_storage_fund_inflow: epoch_end_info.leftover_storage_fund_inflow as i64, + stake_subsidy_amount: epoch_end_info.stake_subsidy_amount as i64, + total_gas_fees: epoch_end_info.total_gas_fees as i64, + total_stake_rewards_distributed: epoch_end_info.total_stake_rewards_distributed as i64, + epoch_commitments: bcs::to_bytes( + &last_checkpoint_summary + .end_of_epoch_data + .clone() + .unwrap() + .epoch_commitments, + ) + .unwrap(), + } + } +} + +impl StoredEpochInfo { + pub fn get_json_system_state_summary(&self) -> Result { + let Some(system_state_summary_json) = self.system_state_summary_json.clone() else { + return 
Err(IndexerError::PersistentStorageDataCorruptionError( + "System state summary is null for the given epoch".into(), + )); + }; + let system_state_summary: SuiSystemStateSummary = + serde_json::from_value(system_state_summary_json).map_err(|_| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to deserialize `system_state` for epoch {:?}", + self.epoch, + )) + })?; + debug_assert_eq!(system_state_summary.epoch, self.epoch as u64); + Ok(system_state_summary) + } +} + +impl From<&StoredEpochInfo> for Option { + fn from(info: &StoredEpochInfo) -> Option { + Some(EndOfEpochInfo { + reference_gas_price: (info.reference_gas_price as u64), + protocol_version: (info.protocol_version as u64), + last_checkpoint_id: info.last_checkpoint_id.map(|v| v as u64)?, + total_stake: info.total_stake as u64, + storage_fund_balance: info.storage_fund_balance as u64, + epoch_end_timestamp: info.epoch_end_timestamp.map(|v| v as u64)?, + storage_fund_reinvestment: info.storage_fund_reinvestment.map(|v| v as u64)?, + storage_charge: info.storage_charge.map(|v| v as u64)?, + storage_rebate: info.storage_rebate.map(|v| v as u64)?, + stake_subsidy_amount: info.stake_subsidy_amount.map(|v| v as u64)?, + total_gas_fees: info.total_gas_fees.map(|v| v as u64)?, + total_stake_rewards_distributed: info + .total_stake_rewards_distributed + .map(|v| v as u64)?, + leftover_storage_fund_inflow: info.leftover_storage_fund_inflow.map(|v| v as u64)?, + }) + } +} + +impl TryFrom for EpochInfo { + type Error = IndexerError; + + fn try_from(value: StoredEpochInfo) -> Result { + let end_of_epoch_info = (&value).into(); + let system_state_summary = value.get_json_system_state_summary()?; + Ok(EpochInfo { + epoch: value.epoch as u64, + validators: system_state_summary.active_validators, + epoch_total_transactions: value.epoch_total_transactions.unwrap_or(0) as u64, + first_checkpoint_id: value.first_checkpoint_id as u64, + epoch_start_timestamp: value.epoch_start_timestamp as u64, + 
end_of_epoch_info, + reference_gas_price: Some(value.reference_gas_price as u64), + }) + } +} diff --git a/crates/sui-mvr-indexer/src/models/event_indices.rs b/crates/sui-mvr-indexer/src/models/event_indices.rs new file mode 100644 index 0000000000000..08f17cce339d5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/event_indices.rs @@ -0,0 +1,145 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + schema::{ + event_emit_module, event_emit_package, event_senders, event_struct_instantiation, + event_struct_module, event_struct_name, event_struct_package, + }, + types::EventIndex, +}; +use diesel::prelude::*; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_emit_package)] +pub struct StoredEventEmitPackage { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_emit_module)] +pub struct StoredEventEmitModule { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_senders)] +pub struct StoredEventSenders { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_package)] +pub struct StoredEventStructPackage { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_module)] +pub struct StoredEventStructModule { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, 
Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_name)] +pub struct StoredEventStructName { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub type_name: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_instantiation)] +pub struct StoredEventStructInstantiation { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub type_instantiation: String, + pub sender: Vec, +} + +impl EventIndex { + pub fn split( + self: EventIndex, + ) -> ( + StoredEventEmitPackage, + StoredEventEmitModule, + StoredEventSenders, + StoredEventStructPackage, + StoredEventStructModule, + StoredEventStructName, + StoredEventStructInstantiation, + ) { + let tx_sequence_number = self.tx_sequence_number as i64; + let event_sequence_number = self.event_sequence_number as i64; + ( + StoredEventEmitPackage { + tx_sequence_number, + event_sequence_number, + package: self.emit_package.to_vec(), + sender: self.sender.to_vec(), + }, + StoredEventEmitModule { + tx_sequence_number, + event_sequence_number, + package: self.emit_package.to_vec(), + module: self.emit_module.clone(), + sender: self.sender.to_vec(), + }, + StoredEventSenders { + tx_sequence_number, + event_sequence_number, + sender: self.sender.to_vec(), + }, + StoredEventStructPackage { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + sender: self.sender.to_vec(), + }, + StoredEventStructModule { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + sender: self.sender.to_vec(), + }, + StoredEventStructName { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + type_name: self.type_name.clone(), + sender: self.sender.to_vec(), + 
}, + StoredEventStructInstantiation { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + type_instantiation: self.type_instantiation.clone(), + sender: self.sender.to_vec(), + }, + ) + } +} diff --git a/crates/sui-mvr-indexer/src/models/events.rs b/crates/sui-mvr-indexer/src/models/events.rs new file mode 100644 index 0000000000000..6b9c5044c3ba5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/events.rs @@ -0,0 +1,156 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; +use std::sync::Arc; + +use diesel::prelude::*; +use move_core_types::identifier::Identifier; + +use sui_json_rpc_types::{type_and_fields_from_move_event_data, SuiEvent}; +use sui_package_resolver::{PackageStore, Resolver}; +use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::digests::TransactionDigest; +use sui_types::event::EventID; +use sui_types::object::bounded_visitor::BoundedVisitor; +use sui_types::parse_sui_struct_tag; + +use crate::errors::IndexerError; +use crate::schema::events; +use crate::types::IndexedEvent; + +#[derive(Queryable, QueryableByName, Selectable, Insertable, Debug, Clone)] +#[diesel(table_name = events)] +pub struct StoredEvent { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub transaction_digest: Vec, + pub senders: Vec>>, + pub package: Vec, + pub module: String, + pub event_type: String, + pub timestamp_ms: i64, + pub bcs: Vec, + pub sender: Option>, +} + +pub type SendersType = Vec>>; + +impl From for StoredEvent { + fn from(event: IndexedEvent) -> Self { + Self { + tx_sequence_number: event.tx_sequence_number as i64, + event_sequence_number: event.event_sequence_number as i64, + transaction_digest: event.transaction_digest.into_inner().to_vec(), + senders: vec![Some(event.sender.to_vec())], + package: event.package.to_vec(), + module: event.module.clone(), + event_type: event.event_type.clone(), + bcs: 
event.bcs.clone(), + timestamp_ms: event.timestamp_ms as i64, + sender: Some(event.sender.to_vec()), + } + } +} + +impl StoredEvent { + pub async fn try_into_sui_event( + self, + package_resolver: Arc>, + ) -> Result { + let package_id = ObjectID::from_bytes(self.package.clone()).map_err(|_e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to parse event package ID: {:?}", + self.package + )) + })?; + // Note: SuiEvent only has one sender today, so we always use the first one. + let sender = { + self.senders.first().ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError( + "Event senders should contain at least one address".to_string(), + ) + })? + }; + let sender = match sender { + Some(ref s) => SuiAddress::from_bytes(s).map_err(|_e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to parse event sender address: {:?}", + sender + )) + })?, + None => { + return Err(IndexerError::PersistentStorageDataCorruptionError( + "Event senders element should not be null".to_string(), + )) + } + }; + + let type_ = parse_sui_struct_tag(&self.event_type)?; + let move_type_layout = package_resolver + .type_layout(type_.clone().into()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to convert to sui event with Error: {e}", + )) + })?; + let move_object = BoundedVisitor::deserialize_value(&self.bcs, &move_type_layout) + .map_err(|e| IndexerError::SerdeError(e.to_string()))?; + let (_, parsed_json) = type_and_fields_from_move_event_data(move_object) + .map_err(|e| IndexerError::SerdeError(e.to_string()))?; + let tx_digest = + TransactionDigest::try_from(self.transaction_digest.as_slice()).map_err(|e| { + IndexerError::SerdeError(format!( + "Failed to parse transaction digest: {:?}, error: {}", + self.transaction_digest, e + )) + })?; + Ok(SuiEvent { + id: EventID { + tx_digest, + event_seq: self.event_sequence_number as u64, + }, + package_id, + transaction_module: 
Identifier::from_str(&self.module)?, + sender, + type_, + bcs: self.bcs, + parsed_json, + timestamp_ms: Some(self.timestamp_ms as u64), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use move_core_types::{account_address::AccountAddress, language_storage::StructTag}; + use sui_types::event::Event; + + #[test] + fn test_canonical_string_of_event_type() { + let tx_digest = TransactionDigest::default(); + let event = Event { + package_id: ObjectID::random(), + transaction_module: Identifier::new("test").unwrap(), + sender: AccountAddress::random().into(), + type_: StructTag { + address: AccountAddress::TWO, + module: Identifier::new("test").unwrap(), + name: Identifier::new("test").unwrap(), + type_params: vec![], + }, + contents: vec![], + }; + + let indexed_event = IndexedEvent::from_event(1, 1, 1, tx_digest, &event, 100); + + let stored_event = StoredEvent::from(indexed_event); + + assert_eq!( + stored_event.event_type, + "0x0000000000000000000000000000000000000000000000000000000000000002::test::test" + ); + } +} diff --git a/crates/sui-mvr-indexer/src/models/mod.rs b/crates/sui-mvr-indexer/src/models/mod.rs new file mode 100644 index 0000000000000..84e8b308bc0d5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/mod.rs @@ -0,0 +1,15 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub mod checkpoints; +pub mod display; +pub mod epoch; +pub mod event_indices; +pub mod events; +pub mod obj_indices; +pub mod objects; +pub mod packages; +pub mod raw_checkpoints; +pub mod transactions; +pub mod tx_indices; +pub mod watermarks; diff --git a/crates/sui-mvr-indexer/src/models/obj_indices.rs b/crates/sui-mvr-indexer/src/models/obj_indices.rs new file mode 100644 index 0000000000000..4acc554565522 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/obj_indices.rs @@ -0,0 +1,16 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use diesel::prelude::*; + +use crate::schema::objects_version; +/// Model types related to tables that support efficient execution of queries on the `objects`, +/// `objects_history` and `objects_snapshot` tables. + +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName, Selectable)] +#[diesel(table_name = objects_version, primary_key(object_id, object_version))] +pub struct StoredObjectVersion { + pub object_id: Vec, + pub object_version: i64, + pub cp_sequence_number: i64, +} diff --git a/crates/sui-mvr-indexer/src/models/objects.rs b/crates/sui-mvr-indexer/src/models/objects.rs new file mode 100644 index 0000000000000..321aaebe2d4ed --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/objects.rs @@ -0,0 +1,579 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; +use std::sync::Arc; + +use diesel::prelude::*; +use serde::de::DeserializeOwned; + +use move_core_types::annotated_value::MoveTypeLayout; +use sui_json_rpc::coin_api::parse_to_struct_tag; +use sui_json_rpc_types::{Balance, Coin as SuiCoin}; +use sui_package_resolver::{PackageStore, Resolver}; +use sui_types::base_types::{ObjectID, ObjectRef}; +use sui_types::digests::ObjectDigest; +use sui_types::dynamic_field::{DynamicFieldType, Field}; +use sui_types::object::{Object, ObjectRead}; + +use crate::errors::IndexerError; +use crate::schema::{full_objects_history, objects, objects_history, objects_snapshot}; +use crate::types::{owner_to_owner_info, IndexedDeletedObject, IndexedObject, ObjectStatus}; + +#[derive(Queryable)] +pub struct DynamicFieldColumn { + pub object_id: Vec, + pub object_version: i64, + pub object_digest: Vec, + pub df_kind: Option, + pub df_name: Option>, + pub df_object_type: Option, + pub df_object_id: Option>, +} + +#[derive(Queryable)] +pub struct ObjectRefColumn { + pub object_id: Vec, + pub object_version: i64, + pub object_digest: Vec, +} + +// NOTE: please 
add updating statement like below in pg_indexer_store.rs, +// if new columns are added here: +// objects::epoch.eq(excluded(objects::epoch)) +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects, primary_key(object_id))] +pub struct StoredObject { + pub object_id: Vec, + pub object_version: i64, + pub object_digest: Vec, + pub owner_type: i16, + pub owner_id: Option>, + /// The full type of this object, including package id, module, name and type parameters. + /// This and following three fields will be None if the object is a Package + pub object_type: Option, + pub object_type_package: Option>, + pub object_type_module: Option, + /// Name of the object type, e.g., "Coin", without type parameters. + pub object_type_name: Option, + pub serialized_object: Vec, + pub coin_type: Option, + // TODO deal with overflow + pub coin_balance: Option, + pub df_kind: Option, +} + +impl From for StoredObject { + fn from(o: IndexedObject) -> Self { + let IndexedObject { + checkpoint_sequence_number: _, + object, + df_kind, + } = o; + let (owner_type, owner_id) = owner_to_owner_info(&object.owner); + let coin_type = object + .coin_type_maybe() + .map(|t| t.to_canonical_string(/* with_prefix */ true)); + let coin_balance = if coin_type.is_some() { + Some(object.get_coin_value_unsafe()) + } else { + None + }; + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + object_digest: object.digest().into_inner().to_vec(), + owner_type: owner_type as i16, + owner_id: owner_id.map(|id| id.to_vec()), + object_type: object + .type_() + .map(|t| t.to_canonical_string(/* with_prefix */ true)), + object_type_package: object.type_().map(|t| t.address().to_vec()), + object_type_module: object.type_().map(|t| t.module().to_string()), + object_type_name: object.type_().map(|t| t.name().to_string()), + serialized_object: bcs::to_bytes(&object).unwrap(), + coin_type, + coin_balance: coin_balance.map(|b| b 
as i64), + df_kind: df_kind.map(|k| match k { + DynamicFieldType::DynamicField => 0, + DynamicFieldType::DynamicObject => 1, + }), + } + } +} + +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects, primary_key(object_id))] +pub struct StoredDeletedObject { + pub object_id: Vec, + pub object_version: i64, +} + +impl From for StoredDeletedObject { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + } + } +} + +#[derive(Queryable, Insertable, Selectable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects_snapshot, primary_key(object_id))] +pub struct StoredObjectSnapshot { + pub object_id: Vec, + pub object_version: i64, + pub object_status: i16, + pub object_digest: Option>, + pub checkpoint_sequence_number: i64, + pub owner_type: Option, + pub owner_id: Option>, + pub object_type: Option, + pub object_type_package: Option>, + pub object_type_module: Option, + pub object_type_name: Option, + pub serialized_object: Option>, + pub coin_type: Option, + pub coin_balance: Option, + pub df_kind: Option, +} + +impl From for StoredObjectSnapshot { + fn from(o: IndexedObject) -> Self { + let IndexedObject { + checkpoint_sequence_number, + object, + df_kind, + } = o; + let (owner_type, owner_id) = owner_to_owner_info(&object.owner); + let coin_type = object + .coin_type_maybe() + .map(|t| t.to_canonical_string(/* with_prefix */ true)); + let coin_balance = if coin_type.is_some() { + Some(object.get_coin_value_unsafe()) + } else { + None + }; + + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + object_status: ObjectStatus::Active as i16, + object_digest: Some(object.digest().into_inner().to_vec()), + checkpoint_sequence_number: checkpoint_sequence_number as i64, + owner_type: Some(owner_type as i16), + owner_id: owner_id.map(|id| id.to_vec()), + object_type: object + 
.type_() + .map(|t| t.to_canonical_string(/* with_prefix */ true)), + object_type_package: object.type_().map(|t| t.address().to_vec()), + object_type_module: object.type_().map(|t| t.module().to_string()), + object_type_name: object.type_().map(|t| t.name().to_string()), + serialized_object: Some(bcs::to_bytes(&object).unwrap()), + coin_type, + coin_balance: coin_balance.map(|b| b as i64), + df_kind: df_kind.map(|k| match k { + DynamicFieldType::DynamicField => 0, + DynamicFieldType::DynamicObject => 1, + }), + } + } +} + +impl From for StoredObjectSnapshot { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + object_status: ObjectStatus::WrappedOrDeleted as i16, + object_digest: None, + checkpoint_sequence_number: o.checkpoint_sequence_number as i64, + owner_type: None, + owner_id: None, + object_type: None, + object_type_package: None, + object_type_module: None, + object_type_name: None, + serialized_object: None, + coin_type: None, + coin_balance: None, + df_kind: None, + } + } +} + +#[derive(Queryable, Insertable, Selectable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects_history, primary_key(object_id, object_version, checkpoint_sequence_number))] +pub struct StoredHistoryObject { + pub object_id: Vec, + pub object_version: i64, + pub object_status: i16, + pub object_digest: Option>, + pub checkpoint_sequence_number: i64, + pub owner_type: Option, + pub owner_id: Option>, + pub object_type: Option, + pub object_type_package: Option>, + pub object_type_module: Option, + pub object_type_name: Option, + pub serialized_object: Option>, + pub coin_type: Option, + pub coin_balance: Option, + pub df_kind: Option, +} + +impl From for StoredHistoryObject { + fn from(o: IndexedObject) -> Self { + let IndexedObject { + checkpoint_sequence_number, + object, + df_kind, + } = o; + let (owner_type, owner_id) = owner_to_owner_info(&object.owner); + let coin_type = 
object + .coin_type_maybe() + .map(|t| t.to_canonical_string(/* with_prefix */ true)); + let coin_balance = if coin_type.is_some() { + Some(object.get_coin_value_unsafe()) + } else { + None + }; + + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + object_status: ObjectStatus::Active as i16, + object_digest: Some(object.digest().into_inner().to_vec()), + checkpoint_sequence_number: checkpoint_sequence_number as i64, + owner_type: Some(owner_type as i16), + owner_id: owner_id.map(|id| id.to_vec()), + object_type: object + .type_() + .map(|t| t.to_canonical_string(/* with_prefix */ true)), + object_type_package: object.type_().map(|t| t.address().to_vec()), + object_type_module: object.type_().map(|t| t.module().to_string()), + object_type_name: object.type_().map(|t| t.name().to_string()), + serialized_object: Some(bcs::to_bytes(&object).unwrap()), + coin_type, + coin_balance: coin_balance.map(|b| b as i64), + df_kind: df_kind.map(|k| match k { + DynamicFieldType::DynamicField => 0, + DynamicFieldType::DynamicObject => 1, + }), + } + } +} + +impl From for StoredHistoryObject { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + object_status: ObjectStatus::WrappedOrDeleted as i16, + object_digest: None, + checkpoint_sequence_number: o.checkpoint_sequence_number as i64, + owner_type: None, + owner_id: None, + object_type: None, + object_type_package: None, + object_type_module: None, + object_type_name: None, + serialized_object: None, + coin_type: None, + coin_balance: None, + df_kind: None, + } + } +} + +impl TryFrom for Object { + type Error = IndexerError; + + fn try_from(o: StoredObject) -> Result { + bcs::from_bytes(&o.serialized_object).map_err(|e| { + IndexerError::SerdeError(format!( + "Failed to deserialize object: {:?}, error: {}", + o.object_id, e + )) + }) + } +} + +impl StoredObject { + pub async fn try_into_object_read( + self, 
+ package_resolver: Arc>, + ) -> Result { + let oref = self.get_object_ref()?; + let object: sui_types::object::Object = self.try_into()?; + let Some(move_object) = object.data.try_as_move().cloned() else { + return Err(IndexerError::PostgresReadError(format!( + "Object {:?} is not a Move object", + oref, + ))); + }; + + let move_type_layout = package_resolver + .type_layout(move_object.type_().clone().into()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to convert into object read for obj {}:{}, type: {}. Error: {e}", + object.id(), + object.version(), + move_object.type_(), + )) + })?; + let move_struct_layout = match move_type_layout { + MoveTypeLayout::Struct(s) => Ok(s), + _ => Err(IndexerError::ResolveMoveStructError( + "MoveTypeLayout is not Struct".to_string(), + )), + }?; + + Ok(ObjectRead::Exists(oref, object, Some(*move_struct_layout))) + } + + pub fn get_object_ref(&self) -> Result { + let object_id = ObjectID::from_bytes(self.object_id.clone()).map_err(|_| { + IndexerError::SerdeError(format!("Can't convert {:?} to object_id", self.object_id)) + })?; + let object_digest = + ObjectDigest::try_from(self.object_digest.as_slice()).map_err(|_| { + IndexerError::SerdeError(format!( + "Can't convert {:?} to object_digest", + self.object_digest + )) + })?; + Ok(( + object_id, + (self.object_version as u64).into(), + object_digest, + )) + } + + pub fn to_dynamic_field(&self) -> Option> + where + K: DeserializeOwned, + V: DeserializeOwned, + { + let object: Object = bcs::from_bytes(&self.serialized_object).ok()?; + + let object = object.data.try_as_move()?; + let ty = object.type_(); + + if !ty.is_dynamic_field() { + return None; + } + + bcs::from_bytes(object.contents()).ok() + } +} + +impl TryFrom for SuiCoin { + type Error = IndexerError; + + fn try_from(o: StoredObject) -> Result { + let object: Object = o.clone().try_into()?; + let (coin_object_id, version, digest) = o.get_object_ref()?; + let coin_type_canonical = + 
o.coin_type + .ok_or(IndexerError::PersistentStorageDataCorruptionError(format!( + "Object {} is supposed to be a coin but has an empty coin_type column", + coin_object_id, + )))?; + let coin_type = parse_to_struct_tag(coin_type_canonical.as_str()) + .map_err(|_| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "The type of object {} cannot be parsed as a struct tag", + coin_object_id, + )) + })? + .to_string(); + let balance = o + .coin_balance + .ok_or(IndexerError::PersistentStorageDataCorruptionError(format!( + "Object {} is supposed to be a coin but has an empty coin_balance column", + coin_object_id, + )))?; + Ok(SuiCoin { + coin_type, + coin_object_id, + version, + digest, + balance: balance as u64, + previous_transaction: object.previous_transaction, + }) + } +} + +#[derive(QueryableByName)] +pub struct CoinBalance { + #[diesel(sql_type = diesel::sql_types::Text)] + pub coin_type: String, + #[diesel(sql_type = diesel::sql_types::BigInt)] + pub coin_num: i64, + #[diesel(sql_type = diesel::sql_types::BigInt)] + pub coin_balance: i64, +} + +impl TryFrom for Balance { + type Error = IndexerError; + + fn try_from(c: CoinBalance) -> Result { + let coin_type = parse_to_struct_tag(c.coin_type.as_str()) + .map_err(|_| { + IndexerError::PersistentStorageDataCorruptionError( + "The type of coin balance cannot be parsed as a struct tag".to_string(), + ) + })? 
+ .to_string(); + Ok(Self { + coin_type, + coin_object_count: c.coin_num as usize, + // TODO: deal with overflow + total_balance: c.coin_balance as u128, + locked_balance: HashMap::default(), + }) + } +} + +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName, Selectable)] +#[diesel(table_name = full_objects_history, primary_key(object_id, object_version))] +pub struct StoredFullHistoryObject { + pub object_id: Vec, + pub object_version: i64, + pub serialized_object: Option>, +} + +impl From for StoredFullHistoryObject { + fn from(o: IndexedObject) -> Self { + let object = o.object; + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + serialized_object: Some(bcs::to_bytes(&object).unwrap()), + } + } +} + +impl From for StoredFullHistoryObject { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + serialized_object: None, + } + } +} + +#[cfg(test)] +mod tests { + use move_core_types::{account_address::AccountAddress, language_storage::StructTag}; + use sui_types::{ + coin::Coin, + digests::TransactionDigest, + gas_coin::{GasCoin, GAS}, + object::{Data, MoveObject, ObjectInner, Owner}, + Identifier, TypeTag, + }; + + use super::*; + + #[test] + fn test_canonical_string_of_object_type_for_coin() { + let test_obj = Object::new_gas_for_testing(); + let indexed_obj = IndexedObject::from_object(1, test_obj, None); + + let stored_obj = StoredObject::from(indexed_obj); + + match stored_obj.object_type { + Some(t) => { + assert_eq!(t, "0x0000000000000000000000000000000000000000000000000000000000000002::coin::Coin<0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI>"); + } + None => { + panic!("object_type should not be none"); + } + } + } + + #[test] + fn test_convert_stored_obj_to_sui_coin() { + let test_obj = Object::new_gas_for_testing(); + let indexed_obj = IndexedObject::from_object(1, test_obj, 
None); + + let stored_obj = StoredObject::from(indexed_obj); + + let sui_coin = SuiCoin::try_from(stored_obj).unwrap(); + assert_eq!(sui_coin.coin_type, "0x2::sui::SUI"); + } + + #[test] + fn test_output_format_coin_balance() { + let test_obj = Object::new_gas_for_testing(); + let indexed_obj = IndexedObject::from_object(1, test_obj, None); + + let stored_obj = StoredObject::from(indexed_obj); + let test_balance = CoinBalance { + coin_type: stored_obj.coin_type.unwrap(), + coin_num: 1, + coin_balance: 100, + }; + let balance = Balance::try_from(test_balance).unwrap(); + assert_eq!(balance.coin_type, "0x2::sui::SUI"); + } + + #[test] + fn test_vec_of_coin_sui_conversion() { + // 0xe7::vec_coin::VecCoin>> + let vec_coins_type = TypeTag::Vector(Box::new( + Coin::type_(TypeTag::Struct(Box::new(GAS::type_()))).into(), + )); + let object_type = StructTag { + address: AccountAddress::from_hex_literal("0xe7").unwrap(), + module: Identifier::new("vec_coin").unwrap(), + name: Identifier::new("VecCoin").unwrap(), + type_params: vec![vec_coins_type], + }; + + let id = ObjectID::ZERO; + let gas = 10; + + let contents = bcs::to_bytes(&vec![GasCoin::new(id, gas)]).unwrap(); + let data = Data::Move( + unsafe { + MoveObject::new_from_execution_with_limit( + object_type.into(), + true, + 1.into(), + contents, + 256, + ) + } + .unwrap(), + ); + + let owner = AccountAddress::from_hex_literal("0x1").unwrap(); + + let object = ObjectInner { + owner: Owner::AddressOwner(owner.into()), + data, + previous_transaction: TransactionDigest::genesis_marker(), + storage_rebate: 0, + } + .into(); + + let indexed_obj = IndexedObject::from_object(1, object, None); + + let stored_obj = StoredObject::from(indexed_obj); + + match stored_obj.object_type { + Some(t) => { + assert_eq!(t, "0x00000000000000000000000000000000000000000000000000000000000000e7::vec_coin::VecCoin>>"); + } + None => { + panic!("object_type should not be none"); + } + } + } +} diff --git 
a/crates/sui-mvr-indexer/src/models/packages.rs b/crates/sui-mvr-indexer/src/models/packages.rs new file mode 100644 index 0000000000000..97c8e8fc5b459 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/packages.rs @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::schema::packages; +use crate::types::IndexedPackage; + +use diesel::prelude::*; + +#[derive(Queryable, Insertable, Selectable, Clone, Debug, Identifiable)] +#[diesel(table_name = packages, primary_key(package_id))] +pub struct StoredPackage { + pub package_id: Vec, + pub original_id: Vec, + pub package_version: i64, + pub move_package: Vec, + pub checkpoint_sequence_number: i64, +} + +impl From for StoredPackage { + fn from(p: IndexedPackage) -> Self { + Self { + package_id: p.package_id.to_vec(), + original_id: p.move_package.original_package_id().to_vec(), + package_version: p.move_package.version().value() as i64, + move_package: bcs::to_bytes(&p.move_package).unwrap(), + checkpoint_sequence_number: p.checkpoint_sequence_number as i64, + } + } +} diff --git a/crates/sui-mvr-indexer/src/models/raw_checkpoints.rs b/crates/sui-mvr-indexer/src/models/raw_checkpoints.rs new file mode 100644 index 0000000000000..98fafba928705 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/raw_checkpoints.rs @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::schema::raw_checkpoints; +use crate::types::IndexedCheckpoint; +use diesel::prelude::*; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = raw_checkpoints)] +pub struct StoredRawCheckpoint { + pub sequence_number: i64, + /// BCS serialized CertifiedCheckpointSummary + pub certified_checkpoint: Vec, + /// BCS serialized CheckpointContents + pub checkpoint_contents: Vec, +} + +impl From<&IndexedCheckpoint> for StoredRawCheckpoint { + fn from(c: &IndexedCheckpoint) -> Self { + Self { + sequence_number: c.sequence_number as i64, + certified_checkpoint: bcs::to_bytes(c.certified_checkpoint.as_ref().unwrap()).unwrap(), + checkpoint_contents: bcs::to_bytes(c.checkpoint_contents.as_ref().unwrap()).unwrap(), + } + } +} diff --git a/crates/sui-mvr-indexer/src/models/transactions.rs b/crates/sui-mvr-indexer/src/models/transactions.rs new file mode 100644 index 0000000000000..1856025c5be4d --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/transactions.rs @@ -0,0 +1,353 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use diesel::prelude::*; + +use move_core_types::annotated_value::{MoveDatatypeLayout, MoveTypeLayout}; +use move_core_types::language_storage::TypeTag; +use sui_json_rpc_types::{ + BalanceChange, ObjectChange, SuiEvent, SuiTransactionBlock, SuiTransactionBlockEffects, + SuiTransactionBlockEvents, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, +}; +use sui_package_resolver::{PackageStore, Resolver}; +use sui_types::digests::TransactionDigest; +use sui_types::effects::TransactionEffects; +use sui_types::effects::TransactionEvents; +use sui_types::event::Event; +use sui_types::transaction::SenderSignedData; + +use crate::errors::IndexerError; +use crate::schema::transactions; +use crate::types::IndexedObjectChange; +use crate::types::IndexedTransaction; +use crate::types::IndexerResult; + +#[derive(Clone, Debug, Queryable, Insertable, QueryableByName, Selectable)] +#[diesel(table_name = transactions)] +pub struct StoredTransaction { + pub tx_sequence_number: i64, + pub transaction_digest: Vec, + pub raw_transaction: Vec, + pub raw_effects: Vec, + pub checkpoint_sequence_number: i64, + pub timestamp_ms: i64, + pub object_changes: Vec>>, + pub balance_changes: Vec>>, + pub events: Vec>>, + pub transaction_kind: i16, + pub success_command_count: i16, +} + +pub type StoredTransactionEvents = Vec>>; + +#[derive(Debug, Queryable)] +pub struct TxSeq { + pub seq: i64, +} + +impl Default for TxSeq { + fn default() -> Self { + Self { seq: -1 } + } +} + +#[derive(Clone, Debug, Queryable)] +pub struct StoredTransactionTimestamp { + pub tx_sequence_number: i64, + pub timestamp_ms: i64, +} + +#[derive(Clone, Debug, Queryable)] +pub struct StoredTransactionCheckpoint { + pub tx_sequence_number: i64, + pub checkpoint_sequence_number: i64, +} + +#[derive(Clone, Debug, Queryable)] +pub struct StoredTransactionSuccessCommandCount { + pub tx_sequence_number: i64, + pub checkpoint_sequence_number: i64, + 
pub success_command_count: i16, + pub timestamp_ms: i64, +} + +impl From<&IndexedTransaction> for StoredTransaction { + fn from(tx: &IndexedTransaction) -> Self { + StoredTransaction { + tx_sequence_number: tx.tx_sequence_number as i64, + transaction_digest: tx.tx_digest.into_inner().to_vec(), + raw_transaction: bcs::to_bytes(&tx.sender_signed_data).unwrap(), + raw_effects: bcs::to_bytes(&tx.effects).unwrap(), + checkpoint_sequence_number: tx.checkpoint_sequence_number as i64, + object_changes: tx + .object_changes + .iter() + .map(|oc| Some(bcs::to_bytes(&oc).unwrap())) + .collect(), + balance_changes: tx + .balance_change + .iter() + .map(|bc| Some(bcs::to_bytes(&bc).unwrap())) + .collect(), + events: tx + .events + .iter() + .map(|e| Some(bcs::to_bytes(&e).unwrap())) + .collect(), + timestamp_ms: tx.timestamp_ms as i64, + transaction_kind: tx.transaction_kind.clone() as i16, + success_command_count: tx.successful_tx_num as i16, + } + } +} + +impl StoredTransaction { + pub fn get_balance_len(&self) -> usize { + self.balance_changes.len() + } + + pub fn get_balance_at_idx(&self, idx: usize) -> Option> { + self.balance_changes.get(idx).cloned().flatten() + } + + pub fn get_object_len(&self) -> usize { + self.object_changes.len() + } + + pub fn get_object_at_idx(&self, idx: usize) -> Option> { + self.object_changes.get(idx).cloned().flatten() + } + + pub fn get_event_len(&self) -> usize { + self.events.len() + } + + pub fn get_event_at_idx(&self, idx: usize) -> Option> { + self.events.get(idx).cloned().flatten() + } + + pub async fn try_into_sui_transaction_block_response( + self, + options: SuiTransactionBlockResponseOptions, + package_resolver: Arc>, + ) -> IndexerResult { + let options = options.clone(); + let tx_digest = + TransactionDigest::try_from(self.transaction_digest.as_slice()).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert {:?} as tx_digest. 
Error: {e}", + self.transaction_digest + )) + })?; + + let transaction = if options.show_input { + let sender_signed_data = self.try_into_sender_signed_data()?; + let tx_block = SuiTransactionBlock::try_from_with_package_resolver( + sender_signed_data, + package_resolver.clone(), + ) + .await?; + Some(tx_block) + } else { + None + }; + + let effects = if options.show_effects { + let effects = self.try_into_sui_transaction_effects()?; + Some(effects) + } else { + None + }; + + let raw_transaction = if options.show_raw_input { + self.raw_transaction + } else { + Vec::new() + }; + + let events = if options.show_events { + let events = { + self + .events + .into_iter() + .map(|event| match event { + Some(event) => { + let event: Event = bcs::from_bytes(&event).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert event bytes into Event. tx_digest={:?} Error: {e}", + tx_digest + )) + })?; + Ok(event) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError(format!( + "Event should not be null, tx_digest={:?}", + tx_digest + ))), + }) + .collect::, IndexerError>>()? + }; + let timestamp = self.timestamp_ms as u64; + let tx_events = TransactionEvents { data: events }; + + tx_events_to_sui_tx_events(tx_events, package_resolver, tx_digest, timestamp).await? + } else { + None + }; + + let object_changes = if options.show_object_changes { + let object_changes = { + self.object_changes.into_iter().map(|object_change| { + match object_change { + Some(object_change) => { + let object_change: IndexedObjectChange = bcs::from_bytes(&object_change) + .map_err(|e| IndexerError::PersistentStorageDataCorruptionError( + format!("Can't convert object_change bytes into IndexedObjectChange. 
tx_digest={:?} Error: {e}", tx_digest) + ))?; + Ok(ObjectChange::from(object_change)) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError(format!("object_change should not be null, tx_digest={:?}", tx_digest))), + } + }).collect::, IndexerError>>()? + }; + Some(object_changes) + } else { + None + }; + + let balance_changes = if options.show_balance_changes { + let balance_changes = { + self.balance_changes.into_iter().map(|balance_change| { + match balance_change { + Some(balance_change) => { + let balance_change: BalanceChange = bcs::from_bytes(&balance_change) + .map_err(|e| IndexerError::PersistentStorageDataCorruptionError( + format!("Can't convert balance_change bytes into BalanceChange. tx_digest={:?} Error: {e}", tx_digest) + ))?; + Ok(balance_change) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError(format!("object_change should not be null, tx_digest={:?}", tx_digest))), + } + }).collect::, IndexerError>>()? + }; + Some(balance_changes) + } else { + None + }; + + Ok(SuiTransactionBlockResponse { + digest: tx_digest, + transaction, + raw_transaction, + effects, + events, + object_changes, + balance_changes, + timestamp_ms: Some(self.timestamp_ms as u64), + checkpoint: Some(self.checkpoint_sequence_number as u64), + confirmed_local_execution: None, + errors: vec![], + raw_effects: self.raw_effects, + }) + } + fn try_into_sender_signed_data(&self) -> IndexerResult { + let sender_signed_data: SenderSignedData = + bcs::from_bytes(&self.raw_transaction).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert raw_transaction of {} into SenderSignedData. 
Error: {e}", + self.tx_sequence_number + )) + })?; + Ok(sender_signed_data) + } + + pub fn try_into_sui_transaction_effects(&self) -> IndexerResult { + let effects: TransactionEffects = bcs::from_bytes(&self.raw_effects).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert raw_effects of {} into TransactionEffects. Error: {e}", + self.tx_sequence_number + )) + })?; + let effects = SuiTransactionBlockEffects::try_from(effects)?; + Ok(effects) + } +} + +pub fn stored_events_to_events( + stored_events: StoredTransactionEvents, +) -> Result, IndexerError> { + stored_events + .into_iter() + .map(|event| match event { + Some(event) => { + let event: Event = bcs::from_bytes(&event).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert event bytes into Event. Error: {e}", + )) + })?; + Ok(event) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError( + "Event should not be null".to_string(), + )), + }) + .collect::, IndexerError>>() +} + +pub async fn tx_events_to_sui_tx_events( + tx_events: TransactionEvents, + package_resolver: Arc>, + tx_digest: TransactionDigest, + timestamp: u64, +) -> Result, IndexerError> { + let mut sui_event_futures = vec![]; + let tx_events_data_len = tx_events.data.len(); + for tx_event in tx_events.data.clone() { + let package_resolver_clone = package_resolver.clone(); + sui_event_futures.push(tokio::task::spawn(async move { + let resolver = package_resolver_clone; + resolver + .type_layout(TypeTag::Struct(Box::new(tx_event.type_.clone()))) + .await + })); + } + let event_move_type_layouts = futures::future::join_all(sui_event_futures) + .await + .into_iter() + .collect::, _>>()? 
+ .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to convert to sui event with Error: {e}", + )) + })?; + let event_move_datatype_layouts = event_move_type_layouts + .into_iter() + .filter_map(|move_type_layout| match move_type_layout { + MoveTypeLayout::Struct(s) => Some(MoveDatatypeLayout::Struct(s)), + MoveTypeLayout::Enum(e) => Some(MoveDatatypeLayout::Enum(e)), + _ => None, + }) + .collect::>(); + assert!(tx_events_data_len == event_move_datatype_layouts.len()); + let sui_events = tx_events + .data + .into_iter() + .enumerate() + .zip(event_move_datatype_layouts) + .map(|((seq, tx_event), move_datatype_layout)| { + SuiEvent::try_from( + tx_event, + tx_digest, + seq as u64, + Some(timestamp), + move_datatype_layout, + ) + }) + .collect::, _>>()?; + let sui_tx_events = SuiTransactionBlockEvents { data: sui_events }; + Ok(Some(sui_tx_events)) +} diff --git a/crates/sui-mvr-indexer/src/models/tx_indices.rs b/crates/sui-mvr-indexer/src/models/tx_indices.rs new file mode 100644 index 0000000000000..a00b715eedf98 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/tx_indices.rs @@ -0,0 +1,225 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + schema::{ + tx_affected_addresses, tx_affected_objects, tx_calls_fun, tx_calls_mod, tx_calls_pkg, + tx_changed_objects, tx_digests, tx_input_objects, tx_kinds, + }, + types::TxIndex, +}; +use diesel::prelude::*; +use itertools::Itertools; + +#[derive(QueryableByName)] +pub struct TxSequenceNumber { + #[diesel(sql_type = diesel::sql_types::BigInt)] + pub tx_sequence_number: i64, +} + +#[derive(QueryableByName)] +pub struct TxDigest { + #[diesel(sql_type = diesel::sql_types::Binary)] + pub transaction_digest: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_affected_addresses)] +pub struct StoredTxAffectedAddresses { + pub tx_sequence_number: i64, + pub affected: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_affected_objects)] +pub struct StoredTxAffectedObjects { + pub tx_sequence_number: i64, + pub affected: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_input_objects)] +pub struct StoredTxInputObject { + pub tx_sequence_number: i64, + pub object_id: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_changed_objects)] +pub struct StoredTxChangedObject { + pub tx_sequence_number: i64, + pub object_id: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_calls_pkg)] +pub struct StoredTxPkg { + pub tx_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_calls_mod)] +pub struct StoredTxMod { + pub tx_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] 
+#[diesel(table_name = tx_calls_fun)] +pub struct StoredTxFun { + pub tx_sequence_number: i64, + pub package: Vec, + pub module: String, + pub func: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_digests)] +pub struct StoredTxDigest { + pub tx_digest: Vec, + pub tx_sequence_number: i64, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_kinds)] +pub struct StoredTxKind { + pub tx_kind: i16, + pub tx_sequence_number: i64, +} + +#[allow(clippy::type_complexity)] +impl TxIndex { + pub fn split( + self: TxIndex, + ) -> ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + ) { + let tx_sequence_number = self.tx_sequence_number as i64; + + let tx_affected_addresses = self + .recipients + .iter() + .chain(self.payers.iter()) + .chain(std::iter::once(&self.sender)) + .unique() + .map(|a| StoredTxAffectedAddresses { + tx_sequence_number, + affected: a.to_vec(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_affected_objects = self + .affected_objects + .iter() + .map(|o| StoredTxAffectedObjects { + tx_sequence_number, + affected: o.to_vec(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_input_objects = self + .input_objects + .iter() + .map(|o| StoredTxInputObject { + tx_sequence_number, + object_id: bcs::to_bytes(&o).unwrap(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_changed_objects = self + .changed_objects + .iter() + .map(|o| StoredTxChangedObject { + tx_sequence_number, + object_id: bcs::to_bytes(&o).unwrap(), + sender: self.sender.to_vec(), + }) + .collect(); + + let mut packages = Vec::new(); + let mut packages_modules = Vec::new(); + let mut packages_modules_funcs = Vec::new(); + + for (pkg, pkg_mod, pkg_mod_func) in self + .move_calls + .iter() + .map(|(p, m, f)| (*p, (*p, m.clone()), (*p, m.clone(), f.clone()))) + { + packages.push(pkg); + packages_modules.push(pkg_mod); + 
packages_modules_funcs.push(pkg_mod_func); + } + + let tx_pkgs = packages + .iter() + .map(|p| StoredTxPkg { + tx_sequence_number, + package: p.to_vec(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_mods = packages_modules + .iter() + .map(|(p, m)| StoredTxMod { + tx_sequence_number, + package: p.to_vec(), + module: m.to_string(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_calls = packages_modules_funcs + .iter() + .map(|(p, m, f)| StoredTxFun { + tx_sequence_number, + package: p.to_vec(), + module: m.to_string(), + func: f.to_string(), + sender: self.sender.to_vec(), + }) + .collect(); + + let stored_tx_digest = StoredTxDigest { + tx_digest: self.transaction_digest.into_inner().to_vec(), + tx_sequence_number, + }; + + let tx_kind = StoredTxKind { + tx_kind: self.tx_kind as i16, + tx_sequence_number, + }; + + ( + tx_affected_addresses, + tx_affected_objects, + tx_input_objects, + tx_changed_objects, + tx_pkgs, + tx_mods, + tx_calls, + vec![stored_tx_digest], + vec![tx_kind], + ) + } +} diff --git a/crates/sui-mvr-indexer/src/models/watermarks.rs b/crates/sui-mvr-indexer/src/models/watermarks.rs new file mode 100644 index 0000000000000..1ff3d3cfe52ac --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/watermarks.rs @@ -0,0 +1,76 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; + +use diesel::prelude::*; + +use crate::{ + handlers::{pruner::PrunableTable, CommitterWatermark}, + schema::watermarks::{self}, +}; + +/// Represents a row in the `watermarks` table. +#[derive(Queryable, Insertable, Default, QueryableByName, Clone)] +#[diesel(table_name = watermarks, primary_key(entity))] +pub struct StoredWatermark { + /// The table governed by this watermark, i.e `epochs`, `checkpoints`, `transactions`. + pub pipeline: String, + /// Inclusive upper epoch bound for this entity's data. Committer updates this field. 
Pruner uses + /// this to determine if pruning is necessary based on the retention policy. + pub epoch_hi_inclusive: i64, + /// Inclusive upper checkpoint bound for this entity's data. Committer updates this field. All + /// data of this entity in the checkpoint must be persisted before advancing this watermark. The + /// committer refers to this on disaster recovery to resume writing. + pub checkpoint_hi_inclusive: i64, + /// Exclusive upper transaction sequence number bound for this entity's data. Committer updates + /// this field. + pub tx_hi: i64, + /// Inclusive lower epoch bound for this entity's data. Pruner updates this field when the epoch range exceeds the retention policy. + pub epoch_lo: i64, + /// Inclusive low watermark that the pruner advances. Corresponds to the epoch id, checkpoint + /// sequence number, or tx sequence number depending on the entity. Data before this watermark is + /// considered pruned by a reader. The underlying data may still exist in the db instance. + pub reader_lo: i64, + /// Updated using the database's current timestamp when the pruner sees that some data needs to + /// be dropped. The pruner uses this column to determine whether to prune or wait long enough + /// that all in-flight reads complete or timeout before it acts on an updated watermark. + pub timestamp_ms: i64, + /// Column used by the pruner to track its true progress. Data below this watermark can be + /// immediately pruned. 
+ pub pruner_hi: i64, +} + +impl StoredWatermark { + pub fn from_upper_bound_update(entity: &str, watermark: CommitterWatermark) -> Self { + StoredWatermark { + pipeline: entity.to_string(), + epoch_hi_inclusive: watermark.epoch_hi_inclusive as i64, + checkpoint_hi_inclusive: watermark.checkpoint_hi_inclusive as i64, + tx_hi: watermark.tx_hi as i64, + ..StoredWatermark::default() + } + } + + pub fn from_lower_bound_update(entity: &str, epoch_lo: u64, reader_lo: u64) -> Self { + StoredWatermark { + pipeline: entity.to_string(), + epoch_lo: epoch_lo as i64, + reader_lo: reader_lo as i64, + ..StoredWatermark::default() + } + } + + pub fn entity(&self) -> Option { + PrunableTable::from_str(&self.pipeline).ok() + } + + /// Determine whether to set a new epoch lower bound based on the retention policy. + pub fn new_epoch_lo(&self, retention: u64) -> Option { + if self.epoch_lo as u64 + retention <= self.epoch_hi_inclusive as u64 { + Some((self.epoch_hi_inclusive as u64).saturating_sub(retention - 1)) + } else { + None + } + } +} diff --git a/crates/sui-mvr-indexer/src/restorer/archives.rs b/crates/sui-mvr-indexer/src/restorer/archives.rs new file mode 100644 index 0000000000000..f70336f76d3c5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/restorer/archives.rs @@ -0,0 +1,60 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::num::NonZeroUsize; + +use prometheus::Registry; +use sui_types::digests::CheckpointDigest; +use tracing::info; + +use sui_archival::reader::{ArchiveReader, ArchiveReaderMetrics}; +use sui_config::node::ArchiveReaderConfig; +use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; + +use crate::errors::IndexerError; +use crate::types::IndexerResult; + +#[derive(Clone, Debug)] +pub struct RestoreCheckpointInfo { + pub next_checkpoint_after_epoch: u64, + pub chain_identifier: CheckpointDigest, +} + +pub async fn read_restore_checkpoint_info( + archive_bucket: Option, + epoch: u64, +) -> IndexerResult { + let archive_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::GCS), + bucket: archive_bucket, + object_store_connection_limit: 50, + no_sign_request: false, + ..Default::default() + }; + let archive_reader_config = ArchiveReaderConfig { + remote_store_config: archive_store_config, + download_concurrency: NonZeroUsize::new(50).unwrap(), + use_for_pruning_watermark: false, + }; + let metrics = ArchiveReaderMetrics::new(&Registry::default()); + let archive_reader = ArchiveReader::new(archive_reader_config, &metrics)?; + archive_reader.sync_manifest_once().await?; + let manifest = archive_reader.get_manifest().await?; + let next_checkpoint_after_epoch = manifest.next_checkpoint_after_epoch(epoch); + info!( + "Read from archives: next checkpoint sequence after epoch {} is: {}", + epoch, next_checkpoint_after_epoch + ); + let cp_summaries = archive_reader + .get_summaries_for_list_no_verify(vec![0]) + .await + .map_err(|e| IndexerError::ArchiveReaderError(format!("Failed to get summaries: {}", e)))?; + let first_cp = cp_summaries + .first() + .ok_or_else(|| IndexerError::ArchiveReaderError("No checkpoint found".to_string()))?; + let chain_identifier = *first_cp.digest(); + Ok(RestoreCheckpointInfo { + next_checkpoint_after_epoch, + chain_identifier, + }) +} diff --git 
a/crates/sui-mvr-indexer/src/restorer/formal_snapshot.rs b/crates/sui-mvr-indexer/src/restorer/formal_snapshot.rs new file mode 100644 index 0000000000000..bab43c7303f38 --- /dev/null +++ b/crates/sui-mvr-indexer/src/restorer/formal_snapshot.rs @@ -0,0 +1,283 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; +use std::fs; +use std::num::NonZeroUsize; +use std::path::PathBuf; +use std::sync::Arc; + +use futures::future::{AbortHandle, AbortRegistration, Abortable}; +use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; +use object_store::path::Path; +use tokio::sync::{Mutex, Semaphore}; +use tokio::task; +use tracing::info; + +use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; +use sui_core::authority::authority_store_tables::LiveObject; +use sui_snapshot::reader::{download_bytes, LiveObjectIter, StateSnapshotReaderV1}; +use sui_snapshot::FileMetadata; +use sui_storage::object_store::util::get; +use sui_storage::object_store::ObjectStoreGetExt; +use sui_types::accumulator::Accumulator; + +use crate::config::RestoreConfig; +use crate::errors::IndexerError; +use crate::handlers::TransactionObjectChangesToCommit; +use crate::restorer::archives::{read_restore_checkpoint_info, RestoreCheckpointInfo}; +use crate::store::{indexer_store::IndexerStore, PgIndexerStore}; +use crate::types::{IndexedCheckpoint, IndexedObject}; + +pub type DigestByBucketAndPartition = BTreeMap>; +pub type SnapshotChecksums = (DigestByBucketAndPartition, Accumulator); +pub type Sha3DigestType = Arc>>>; + +pub struct IndexerFormalSnapshotRestorer { + store: PgIndexerStore, + reader: StateSnapshotReaderV1, + restore_config: RestoreConfig, +} + +impl IndexerFormalSnapshotRestorer { + pub async fn new( + store: PgIndexerStore, + restore_config: RestoreConfig, + ) -> Result { + let remote_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::S3), + aws_endpoint: 
Some(restore_config.snapshot_endpoint.clone()), + aws_virtual_hosted_style_request: true, + object_store_connection_limit: restore_config.object_store_concurrent_limit, + no_sign_request: true, + ..Default::default() + }; + + let base_path = PathBuf::from(restore_config.snapshot_download_dir.clone()); + let snapshot_dir = base_path.join("snapshot"); + if snapshot_dir.exists() { + fs::remove_dir_all(snapshot_dir.clone()).unwrap(); + info!( + "Deleted all files from snapshot directory: {:?}", + snapshot_dir + ); + } else { + fs::create_dir(snapshot_dir.clone()).unwrap(); + info!("Created snapshot directory: {:?}", snapshot_dir); + } + + let local_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::File), + directory: Some(snapshot_dir.clone().to_path_buf()), + ..Default::default() + }; + + let m = MultiProgress::new(); + let reader = StateSnapshotReaderV1::new( + restore_config.start_epoch, + &remote_store_config, + &local_store_config, + usize::MAX, + NonZeroUsize::new(restore_config.object_store_concurrent_limit).unwrap(), + m.clone(), + ) + .await + .unwrap_or_else(|err| panic!("Failed to create reader: {}", err)); + info!( + "Initialized formal snapshot reader at epoch {}", + restore_config.start_epoch + ); + + Ok(Self { + store, + reader, + restore_config: restore_config.clone(), + }) + } + + pub async fn restore(&mut self) -> Result<(), IndexerError> { + let (sha3_digests, num_part_files) = self.reader.compute_checksum().await?; + let (_abort_handle, abort_registration) = AbortHandle::new_pair(); + let (input_files, epoch_dir, remote_object_store, _concurrency) = + self.reader.export_metadata().await?; + let owned_input_files: Vec<(u32, (u32, FileMetadata))> = input_files + .into_iter() + .map(|(bucket, (part_num, metadata))| (*bucket, (part_num, metadata.clone()))) + .collect(); + self.restore_move_objects( + abort_registration, + owned_input_files, + epoch_dir, + remote_object_store, + sha3_digests, + num_part_files, + ) + .await?; + 
info!("Finished restoring move objects"); + self.restore_display_table().await?; + info!("Finished restoring display table"); + self.restore_cp_watermark_and_chain_id().await?; + info!("Finished restoring checkpoint info"); + Ok(()) + } + + async fn restore_move_objects( + &self, + abort_registration: AbortRegistration, + input_files: Vec<(u32, (u32, FileMetadata))>, + epoch_dir: Path, + remote_object_store: Arc, + sha3_digests: Arc>, + num_part_files: usize, + ) -> std::result::Result<(), anyhow::Error> { + let move_object_progress_bar = Arc::new(self.reader.get_multi_progress().add( + ProgressBar::new(num_part_files as u64).with_style( + ProgressStyle::with_template( + "[{elapsed_precise}] {wide_bar} {pos} out of {len} move object files restored ({msg})", + ) + .unwrap(), + ), + )); + + Abortable::new( + async move { + let sema_limit = Arc::new(Semaphore::new( + self.restore_config.object_store_concurrent_limit, + )); + let mut restore_tasks = vec![]; + + for (bucket, (part_num, file_metadata)) in input_files.into_iter() { + let sema_limit_clone = sema_limit.clone(); + let epoch_dir_clone = epoch_dir.clone(); + let remote_object_store_clone = remote_object_store.clone(); + let sha3_digests_clone = sha3_digests.clone(); + let store_clone = self.store.clone(); + let bar_clone = move_object_progress_bar.clone(); + let restore_config = self.restore_config.clone(); + + let restore_task = task::spawn(async move { + let _permit = sema_limit_clone.acquire().await.unwrap(); + let object_file_path = file_metadata.file_path(&epoch_dir_clone); + let (bytes, _) = download_bytes( + remote_object_store_clone, + &file_metadata, + epoch_dir_clone, + sha3_digests_clone, + &&bucket, + &part_num, + Some(restore_config.object_store_max_timeout_secs), + ) + .await; + info!( + "Finished downloading move object file {:?}", + object_file_path + ); + let mut move_objects = vec![]; + let _result: Result<(), anyhow::Error> = + LiveObjectIter::new(&file_metadata, 
bytes.clone()).map(|obj_iter| { + for object in obj_iter { + match object { + LiveObject::Normal(obj) => { + // TODO: placeholder values for df_info and checkpoint_seq_num, + // will clean it up when the column cleanup is done. + let indexed_object = + IndexedObject::from_object(0, obj, None); + move_objects.push(indexed_object); + } + LiveObject::Wrapped(_) => {} + } + } + }); + + let live_obj_cnt = move_objects.len(); + let object_changes = TransactionObjectChangesToCommit { + changed_objects: move_objects.clone(), + deleted_objects: vec![], + }; + info!( + "Start persisting {} objects to objects table from {}", + live_obj_cnt, object_file_path + ); + store_clone + .persist_objects(vec![object_changes]) + .await + .expect("Failed to persist to objects from restore"); + info!( + "Finished persisting {} objects to objects table from {}", + live_obj_cnt, object_file_path + ); + + let objects_snapshot_changes = TransactionObjectChangesToCommit { + changed_objects: move_objects, + deleted_objects: vec![], + }; + store_clone + .persist_objects_snapshot(vec![objects_snapshot_changes]) + .await + .expect("Failed to persist objects snapshot"); + + bar_clone.inc(1); + bar_clone.set_message(format!( + "Restored {} live move objects from {}", + live_obj_cnt, object_file_path + )); + Ok::<(), anyhow::Error>(()) + }); + restore_tasks.push(restore_task); + } + + let restore_task_results = futures::future::join_all(restore_tasks).await; + for restore_task_result in restore_task_results { + restore_task_result??; + } + Ok(()) + }, + abort_registration, + ) + .await? 
+ } + + async fn restore_display_table(&self) -> std::result::Result<(), anyhow::Error> { + let bucket = self.restore_config.gcs_display_bucket.clone(); + let start_epoch = self.restore_config.start_epoch; + + let remote_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::GCS), + bucket: Some(bucket), + object_store_connection_limit: 200, + no_sign_request: false, + ..Default::default() + }; + let remote_store = remote_store_config.make().map_err(|e| { + IndexerError::GcsError(format!("Failed to make GCS remote store: {}", e)) + })?; + let path = Path::from(format!("display_{}.csv", start_epoch).as_str()); + let bytes: bytes::Bytes = get(&remote_store, &path).await?; + self.store.restore_display(bytes).await?; + Ok(()) + } + + async fn restore_cp_watermark_and_chain_id(&self) -> Result<(), IndexerError> { + let restore_checkpoint_info = read_restore_checkpoint_info( + Some(self.restore_config.gcs_archive_bucket.clone()), + self.restore_config.start_epoch, + ) + .await?; + let RestoreCheckpointInfo { + next_checkpoint_after_epoch, + chain_identifier, + } = restore_checkpoint_info; + self.store + .persist_chain_identifier(chain_identifier.into_inner().to_vec()) + .await?; + assert!(next_checkpoint_after_epoch > 0); + // FIXME: This is a temporary hack to add a checkpoint watermark. + // Once we have proper watermark tables, we should remove the following code. + let last_cp = IndexedCheckpoint { + sequence_number: next_checkpoint_after_epoch - 1, + ..Default::default() + }; + self.store.persist_checkpoints(vec![last_cp]).await?; + Ok(()) + } +} diff --git a/crates/sui-mvr-indexer/src/restorer/mod.rs b/crates/sui-mvr-indexer/src/restorer/mod.rs new file mode 100644 index 0000000000000..1899227725b62 --- /dev/null +++ b/crates/sui-mvr-indexer/src/restorer/mod.rs @@ -0,0 +1,5 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +pub mod archives; +pub mod formal_snapshot; diff --git a/crates/sui-mvr-indexer/src/schema.patch b/crates/sui-mvr-indexer/src/schema.patch new file mode 100644 index 0000000000000..c935f4d862fe0 --- /dev/null +++ b/crates/sui-mvr-indexer/src/schema.patch @@ -0,0 +1,7 @@ +diff --git a/crates/sui-mvr-indexer/src/schema.rs b/crates/sui-mvr-indexer/src/schema.rs +--- a/crates/sui-mvr-indexer/src/schema.rs ++++ b/crates/sui-mvr-indexer/src/schema.rs +@@ -1 +1,3 @@ ++// Copyright (c) Mysten Labs, Inc. ++// SPDX-License-Identifier: Apache-2.0 + // @generated automatically by Diesel CLI. diff --git a/crates/sui-mvr-indexer/src/schema.rs b/crates/sui-mvr-indexer/src/schema.rs new file mode 100644 index 0000000000000..447b45557922c --- /dev/null +++ b/crates/sui-mvr-indexer/src/schema.rs @@ -0,0 +1,404 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +// @generated automatically by Diesel CLI. + +diesel::table! { + chain_identifier (checkpoint_digest) { + checkpoint_digest -> Bytea, + } +} + +diesel::table! { + checkpoints (sequence_number) { + sequence_number -> Int8, + checkpoint_digest -> Bytea, + epoch -> Int8, + network_total_transactions -> Int8, + previous_checkpoint_digest -> Nullable, + end_of_epoch -> Bool, + tx_digests -> Array>, + timestamp_ms -> Int8, + total_gas_cost -> Int8, + computation_cost -> Int8, + storage_cost -> Int8, + storage_rebate -> Int8, + non_refundable_storage_fee -> Int8, + checkpoint_commitments -> Bytea, + validator_signature -> Bytea, + end_of_epoch_data -> Nullable, + min_tx_sequence_number -> Nullable, + max_tx_sequence_number -> Nullable, + } +} + +diesel::table! { + display (object_type) { + object_type -> Text, + id -> Bytea, + version -> Int2, + bcs -> Bytea, + } +} + +diesel::table! 
{ + epochs (epoch) { + epoch -> Int8, + first_checkpoint_id -> Int8, + epoch_start_timestamp -> Int8, + reference_gas_price -> Int8, + protocol_version -> Int8, + total_stake -> Int8, + storage_fund_balance -> Int8, + system_state -> Nullable, + epoch_total_transactions -> Nullable, + last_checkpoint_id -> Nullable, + epoch_end_timestamp -> Nullable, + storage_fund_reinvestment -> Nullable, + storage_charge -> Nullable, + storage_rebate -> Nullable, + stake_subsidy_amount -> Nullable, + total_gas_fees -> Nullable, + total_stake_rewards_distributed -> Nullable, + leftover_storage_fund_inflow -> Nullable, + epoch_commitments -> Nullable, + system_state_summary_json -> Nullable, + first_tx_sequence_number -> Nullable, + } +} + +diesel::table! { + event_emit_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_emit_package (package, tx_sequence_number, event_sequence_number) { + package -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_senders (sender, tx_sequence_number, event_sequence_number) { + sender -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + } +} + +diesel::table! { + event_struct_instantiation (package, module, type_instantiation, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + type_instantiation -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_struct_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! 
{ + event_struct_name (package, module, type_name, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + type_name -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_struct_package (package, tx_sequence_number, event_sequence_number) { + package -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + events (tx_sequence_number, event_sequence_number) { + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + transaction_digest -> Bytea, + senders -> Array>, + package -> Bytea, + module -> Text, + event_type -> Text, + timestamp_ms -> Int8, + bcs -> Bytea, + sender -> Nullable, + } +} + +diesel::table! { + feature_flags (protocol_version, flag_name) { + protocol_version -> Int8, + flag_name -> Text, + flag_value -> Bool, + } +} + +diesel::table! { + full_objects_history (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + serialized_object -> Nullable, + } +} + +diesel::table! { + objects (object_id) { + object_id -> Bytea, + object_version -> Int8, + object_digest -> Bytea, + owner_type -> Int2, + owner_id -> Nullable, + object_type -> Nullable, + object_type_package -> Nullable, + object_type_module -> Nullable, + object_type_name -> Nullable, + serialized_object -> Bytea, + coin_type -> Nullable, + coin_balance -> Nullable, + df_kind -> Nullable, + } +} + +diesel::table! 
{ + objects_history (checkpoint_sequence_number, object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + object_status -> Int2, + object_digest -> Nullable, + checkpoint_sequence_number -> Int8, + owner_type -> Nullable, + owner_id -> Nullable, + object_type -> Nullable, + object_type_package -> Nullable, + object_type_module -> Nullable, + object_type_name -> Nullable, + serialized_object -> Nullable, + coin_type -> Nullable, + coin_balance -> Nullable, + df_kind -> Nullable, + } +} + +diesel::table! { + objects_snapshot (object_id) { + object_id -> Bytea, + object_version -> Int8, + object_status -> Int2, + object_digest -> Nullable, + checkpoint_sequence_number -> Int8, + owner_type -> Nullable, + owner_id -> Nullable, + object_type -> Nullable, + object_type_package -> Nullable, + object_type_module -> Nullable, + object_type_name -> Nullable, + serialized_object -> Nullable, + coin_type -> Nullable, + coin_balance -> Nullable, + df_kind -> Nullable, + } +} + +diesel::table! { + objects_version (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + packages (package_id, original_id, package_version) { + package_id -> Bytea, + original_id -> Bytea, + package_version -> Int8, + move_package -> Bytea, + checkpoint_sequence_number -> Int8, + } +} + +diesel::table! { + protocol_configs (protocol_version, config_name) { + protocol_version -> Int8, + config_name -> Text, + config_value -> Nullable, + } +} + +diesel::table! { + pruner_cp_watermark (checkpoint_sequence_number) { + checkpoint_sequence_number -> Int8, + min_tx_sequence_number -> Int8, + max_tx_sequence_number -> Int8, + } +} + +diesel::table! { + raw_checkpoints (sequence_number) { + sequence_number -> Int8, + certified_checkpoint -> Bytea, + checkpoint_contents -> Bytea, + } +} + +diesel::table! 
{ + transactions (tx_sequence_number) { + tx_sequence_number -> Int8, + transaction_digest -> Bytea, + raw_transaction -> Bytea, + raw_effects -> Bytea, + checkpoint_sequence_number -> Int8, + timestamp_ms -> Int8, + object_changes -> Array>, + balance_changes -> Array>, + events -> Array>, + transaction_kind -> Int2, + success_command_count -> Int2, + } +} + +diesel::table! { + tx_affected_addresses (affected, tx_sequence_number) { + tx_sequence_number -> Int8, + affected -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_affected_objects (affected, tx_sequence_number) { + tx_sequence_number -> Int8, + affected -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_calls_fun (package, module, func, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + module -> Text, + func -> Text, + sender -> Bytea, + } +} + +diesel::table! { + tx_calls_mod (package, module, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + module -> Text, + sender -> Bytea, + } +} + +diesel::table! { + tx_calls_pkg (package, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_changed_objects (object_id, tx_sequence_number) { + tx_sequence_number -> Int8, + object_id -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_digests (tx_digest) { + tx_digest -> Bytea, + tx_sequence_number -> Int8, + } +} + +diesel::table! { + tx_input_objects (object_id, tx_sequence_number) { + tx_sequence_number -> Int8, + object_id -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_kinds (tx_kind, tx_sequence_number) { + tx_sequence_number -> Int8, + tx_kind -> Int2, + } +} + +diesel::table! 
{ + watermarks (pipeline) { + pipeline -> Text, + epoch_hi_inclusive -> Int8, + checkpoint_hi_inclusive -> Int8, + tx_hi -> Int8, + epoch_lo -> Int8, + reader_lo -> Int8, + timestamp_ms -> Int8, + pruner_hi -> Int8, + } +} + +diesel::allow_tables_to_appear_in_same_query!( + chain_identifier, + checkpoints, + display, + epochs, + event_emit_module, + event_emit_package, + event_senders, + event_struct_instantiation, + event_struct_module, + event_struct_name, + event_struct_package, + events, + feature_flags, + full_objects_history, + objects, + objects_history, + objects_snapshot, + objects_version, + packages, + protocol_configs, + pruner_cp_watermark, + raw_checkpoints, + transactions, + tx_affected_addresses, + tx_affected_objects, + tx_calls_fun, + tx_calls_mod, + tx_calls_pkg, + tx_changed_objects, + tx_digests, + tx_input_objects, + tx_kinds, + watermarks, +); diff --git a/crates/sui-mvr-indexer/src/store/indexer_store.rs b/crates/sui-mvr-indexer/src/store/indexer_store.rs new file mode 100644 index 0000000000000..998b37f286b1e --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/indexer_store.rs @@ -0,0 +1,140 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use async_trait::async_trait; +use strum::IntoEnumIterator; + +use crate::errors::IndexerError; +use crate::handlers::pruner::PrunableTable; +use crate::handlers::{CommitterWatermark, EpochToCommit, TransactionObjectChangesToCommit}; +use crate::models::display::StoredDisplay; +use crate::models::obj_indices::StoredObjectVersion; +use crate::models::objects::{StoredDeletedObject, StoredObject}; +use crate::models::raw_checkpoints::StoredRawCheckpoint; +use crate::models::watermarks::StoredWatermark; +use crate::types::{ + EventIndex, IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex, +}; + +#[allow(clippy::large_enum_variant)] +pub enum ObjectsToCommit { + MutatedObject(StoredObject), + DeletedObject(StoredDeletedObject), +} + +#[async_trait] +pub trait IndexerStore: Clone + Sync + Send + 'static { + async fn get_latest_checkpoint_sequence_number(&self) -> Result, IndexerError>; + + async fn get_available_epoch_range(&self) -> Result<(u64, u64), IndexerError>; + + async fn get_available_checkpoint_range(&self) -> Result<(u64, u64), IndexerError>; + + async fn get_latest_object_snapshot_checkpoint_sequence_number( + &self, + ) -> Result, IndexerError>; + + async fn get_chain_identifier(&self) -> Result>, IndexerError>; + + async fn persist_protocol_configs_and_feature_flags( + &self, + chain_id: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_objects( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_object_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_full_objects_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_objects_version( + &self, + object_versions: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_objects_snapshot( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn 
persist_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_chain_identifier( + &self, + checkpoint_digest: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_transactions( + &self, + transactions: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_tx_indices(&self, indices: Vec) -> Result<(), IndexerError>; + + async fn persist_events(&self, events: Vec) -> Result<(), IndexerError>; + async fn persist_event_indices( + &self, + event_indices: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_displays( + &self, + display_updates: BTreeMap, + ) -> Result<(), IndexerError>; + + async fn persist_packages(&self, packages: Vec) -> Result<(), IndexerError>; + + /// Updates the current epoch with end-of-epoch data, and writes a new epoch to the database. + async fn persist_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError>; + + /// Updates epoch-partitioned tables to accept data from the new epoch. + async fn advance_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError>; + + async fn prune_epoch(&self, epoch: u64) -> Result<(), IndexerError>; + + async fn get_network_total_transactions_by_end_of_epoch( + &self, + epoch: u64, + ) -> Result, IndexerError>; + + async fn upload_display(&self, epoch: u64) -> Result<(), IndexerError>; + + async fn restore_display(&self, bytes: bytes::Bytes) -> Result<(), IndexerError>; + + async fn persist_raw_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError>; + + /// Update the upper bound of the watermarks for the given tables. + async fn update_watermarks_upper_bound( + &self, + watermark: CommitterWatermark, + ) -> Result<(), IndexerError> + where + E::Iterator: Iterator>; + + /// Updates each watermark entry's lower bounds per the list of tables and their new epoch lower + /// bounds. 
+ async fn update_watermarks_lower_bound( + &self, + watermarks: Vec<(PrunableTable, u64)>, + ) -> Result<(), IndexerError>; + + /// Load all watermark entries from the store, and the latest timestamp from the db. + async fn get_watermarks(&self) -> Result<(Vec, i64), IndexerError>; +} diff --git a/crates/sui-mvr-indexer/src/store/mod.rs b/crates/sui-mvr-indexer/src/store/mod.rs new file mode 100644 index 0000000000000..9d6bf65cc26b4 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/mod.rs @@ -0,0 +1,93 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::time::Duration; + +use diesel_async::{scoped_futures::ScopedBoxFuture, AsyncPgConnection}; +pub(crate) use indexer_store::*; +pub use pg_indexer_store::PgIndexerStore; + +use crate::{database::ConnectionPool, errors::IndexerError}; + +pub mod indexer_store; +pub mod package_resolver; +mod pg_indexer_store; +pub mod pg_partition_manager; + +pub async fn transaction_with_retry<'a, Q, T>( + pool: &ConnectionPool, + timeout: Duration, + query: Q, +) -> Result +where + Q: for<'r> FnOnce( + &'r mut AsyncPgConnection, + ) -> ScopedBoxFuture<'a, 'r, Result> + + Send, + Q: Clone, + T: 'a, +{ + let backoff = backoff::ExponentialBackoff { + max_elapsed_time: Some(timeout), + ..Default::default() + }; + backoff::future::retry(backoff, || async { + let mut connection = pool.get().await.map_err(|e| backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + })?; + + connection + .build_transaction() + .read_write() + .run(query.clone()) + .await + .map_err(|e| { + tracing::error!("Error with persisting data into DB: {:?}, retrying...", e); + backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + } + }) + }) + .await +} + +pub async fn read_with_retry<'a, Q, T>( + pool: &ConnectionPool, + timeout: Duration, + query: Q, +) -> Result +where + Q: for<'r> FnOnce( + &'r mut AsyncPgConnection, + ) 
-> ScopedBoxFuture<'a, 'r, Result> + + Send, + Q: Clone, + T: 'a, +{ + let backoff = backoff::ExponentialBackoff { + max_elapsed_time: Some(timeout), + ..Default::default() + }; + backoff::future::retry(backoff, || async { + let mut connection = pool.get().await.map_err(|e| backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + })?; + + connection + .build_transaction() + .read_only() + .run(query.clone()) + .await + .map_err(|e| { + tracing::error!("Error with reading data from DB: {:?}, retrying...", e); + backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + } + }) + }) + .await +} diff --git a/crates/sui-mvr-indexer/src/store/package_resolver.rs b/crates/sui-mvr-indexer/src/store/package_resolver.rs new file mode 100644 index 0000000000000..f4cedd6500871 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/package_resolver.rs @@ -0,0 +1,58 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use crate::database::ConnectionPool; +use crate::schema::objects; +use anyhow::anyhow; +use async_trait::async_trait; +use diesel::ExpressionMethods; +use diesel::QueryDsl; +use diesel_async::RunQueryDsl; +use move_core_types::account_address::AccountAddress; +use sui_package_resolver::{error::Error as PackageResolverError, Package, PackageStore}; +use sui_types::object::Object; + +/// A package resolver that reads packages from the database. 
+#[derive(Clone)] +pub struct IndexerStorePackageResolver { + pool: ConnectionPool, +} + +impl IndexerStorePackageResolver { + pub fn new(pool: ConnectionPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl PackageStore for IndexerStorePackageResolver { + async fn fetch(&self, id: AccountAddress) -> Result, PackageResolverError> { + let pkg = self + .get_package_from_db(id) + .await + .map_err(|e| PackageResolverError::Store { + store: "PostgresDB", + error: e.to_string(), + })?; + Ok(Arc::new(pkg)) + } +} + +impl IndexerStorePackageResolver { + async fn get_package_from_db(&self, id: AccountAddress) -> Result { + let mut connection = self.pool.get().await?; + + let bcs = objects::dsl::objects + .select(objects::dsl::serialized_object) + .filter(objects::dsl::object_id.eq(id.to_vec())) + .get_result::>(&mut connection) + .await + .map_err(|e| anyhow!("Package not found in DB: {e}"))?; + + let object = bcs::from_bytes::(&bcs)?; + Package::read_from_object(&object) + .map_err(|e| anyhow!("Failed parsing object to package: {e}")) + } +} diff --git a/crates/sui-mvr-indexer/src/store/pg_indexer_store.rs b/crates/sui-mvr-indexer/src/store/pg_indexer_store.rs new file mode 100644 index 0000000000000..b1d1af7b31ed6 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/pg_indexer_store.rs @@ -0,0 +1,2495 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::{BTreeMap, HashMap}; +use std::io::Cursor; +use std::time::Duration; + +use async_trait::async_trait; +use core::result::Result::Ok; +use csv::{ReaderBuilder, Writer}; +use diesel::dsl::{max, min}; +use diesel::ExpressionMethods; +use diesel::OptionalExtension; +use diesel::QueryDsl; +use diesel_async::scoped_futures::ScopedFutureExt; +use futures::future::Either; +use itertools::Itertools; +use object_store::path::Path; +use strum::IntoEnumIterator; +use sui_types::base_types::ObjectID; +use tap::TapFallible; +use tracing::{info, warn}; + +use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; +use sui_protocol_config::ProtocolConfig; +use sui_storage::object_store::util::put; + +use crate::config::UploadOptions; +use crate::database::ConnectionPool; +use crate::errors::{Context, IndexerError}; +use crate::handlers::pruner::PrunableTable; +use crate::handlers::TransactionObjectChangesToCommit; +use crate::handlers::{CommitterWatermark, EpochToCommit}; +use crate::metrics::IndexerMetrics; +use crate::models::checkpoints::StoredChainIdentifier; +use crate::models::checkpoints::StoredCheckpoint; +use crate::models::checkpoints::StoredCpTx; +use crate::models::display::StoredDisplay; +use crate::models::epoch::StoredEpochInfo; +use crate::models::epoch::{StoredFeatureFlag, StoredProtocolConfig}; +use crate::models::events::StoredEvent; +use crate::models::obj_indices::StoredObjectVersion; +use crate::models::objects::{ + StoredDeletedObject, StoredFullHistoryObject, StoredHistoryObject, StoredObject, + StoredObjectSnapshot, +}; +use crate::models::packages::StoredPackage; +use crate::models::transactions::StoredTransaction; +use crate::models::watermarks::StoredWatermark; +use crate::schema::{ + chain_identifier, checkpoints, display, epochs, event_emit_module, event_emit_package, + event_senders, event_struct_instantiation, event_struct_module, event_struct_name, + 
event_struct_package, events, feature_flags, full_objects_history, objects, objects_history, + objects_snapshot, objects_version, packages, protocol_configs, pruner_cp_watermark, + raw_checkpoints, transactions, tx_affected_addresses, tx_affected_objects, tx_calls_fun, + tx_calls_mod, tx_calls_pkg, tx_changed_objects, tx_digests, tx_input_objects, tx_kinds, + watermarks, +}; +use crate::store::{read_with_retry, transaction_with_retry}; +use crate::types::{EventIndex, IndexedDeletedObject, IndexedObject}; +use crate::types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}; + +use super::pg_partition_manager::{EpochPartitionData, PgPartitionManager}; +use super::IndexerStore; + +use crate::models::raw_checkpoints::StoredRawCheckpoint; +use diesel::upsert::excluded; +use sui_types::digests::{ChainIdentifier, CheckpointDigest}; + +#[macro_export] +macro_rules! chunk { + ($data: expr, $size: expr) => {{ + $data + .into_iter() + .chunks($size) + .into_iter() + .map(|c| c.collect()) + .collect::>>() + }}; +} + +// In one DB transaction, the update could be chunked into +// a few statements, this is the amount of rows to update in one statement +// TODO: I think with the `per_db_tx` params, `PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX` +// is now less relevant. We should do experiments and remove it if it's true. +const PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX: usize = 1000; +// The amount of rows to update in one DB transaction +const PG_COMMIT_PARALLEL_CHUNK_SIZE: usize = 100; +// The amount of rows to update in one DB transaction, for objects particularly +// Having this number too high may cause many db deadlocks because of +// optimistic locking. 
+const PG_COMMIT_OBJECTS_PARALLEL_CHUNK_SIZE: usize = 500; +const PG_DB_COMMIT_SLEEP_DURATION: Duration = Duration::from_secs(3600); + +#[derive(Clone)] +pub struct PgIndexerStoreConfig { + pub parallel_chunk_size: usize, + pub parallel_objects_chunk_size: usize, + pub gcs_cred_path: Option, + pub gcs_display_bucket: Option, +} + +#[derive(Clone)] +pub struct PgIndexerStore { + pool: ConnectionPool, + metrics: IndexerMetrics, + partition_manager: PgPartitionManager, + config: PgIndexerStoreConfig, +} + +impl PgIndexerStore { + pub fn new( + pool: ConnectionPool, + upload_options: UploadOptions, + metrics: IndexerMetrics, + ) -> Self { + let parallel_chunk_size = std::env::var("PG_COMMIT_PARALLEL_CHUNK_SIZE") + .unwrap_or_else(|_e| PG_COMMIT_PARALLEL_CHUNK_SIZE.to_string()) + .parse::() + .unwrap(); + let parallel_objects_chunk_size = std::env::var("PG_COMMIT_OBJECTS_PARALLEL_CHUNK_SIZE") + .unwrap_or_else(|_e| PG_COMMIT_OBJECTS_PARALLEL_CHUNK_SIZE.to_string()) + .parse::() + .unwrap(); + let partition_manager = + PgPartitionManager::new(pool.clone()).expect("Failed to initialize partition manager"); + let config = PgIndexerStoreConfig { + parallel_chunk_size, + parallel_objects_chunk_size, + gcs_cred_path: upload_options.gcs_cred_path, + gcs_display_bucket: upload_options.gcs_display_bucket, + }; + + Self { + pool, + metrics, + partition_manager, + config, + } + } + + pub fn pool(&self) -> ConnectionPool { + self.pool.clone() + } + + /// Get the range of the protocol versions that need to be indexed. + pub async fn get_protocol_version_index_range(&self) -> Result<(i64, i64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + // We start indexing from the next protocol version after the latest one stored in the db. 
+ let start = protocol_configs::table + .select(max(protocol_configs::protocol_version)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .context("Failed reading latest protocol version from PostgresDB")? + .map_or(1, |v| v + 1); + + // We end indexing at the protocol version of the latest epoch stored in the db. + let end = epochs::table + .select(max(epochs::protocol_version)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .context("Failed reading latest epoch protocol version from PostgresDB")? + .unwrap_or(1); + Ok((start, end)) + } + + async fn get_chain_identifier(&self) -> Result>, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + chain_identifier::table + .select(chain_identifier::checkpoint_digest) + .first::>(&mut connection) + .await + .optional() + .map_err(Into::into) + .context("Failed reading chain id from PostgresDB") + } + + // `pub` is needed for wait_for_checkpoint in tests + pub async fn get_latest_checkpoint_sequence_number(&self) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + checkpoints::table + .select(max(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .map(|v| v.map(|v| v as u64)) + .context("Failed reading latest checkpoint sequence number from PostgresDB") + } + + async fn get_available_checkpoint_range(&self) -> Result<(u64, u64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + checkpoints::table + .select(( + min(checkpoints::sequence_number), + max(checkpoints::sequence_number), + )) + .first::<(Option, Option)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| { + ( + min.unwrap_or_default() as u64, + max.unwrap_or_default() as u64, + ) + }) + .context("Failed reading min and max checkpoint sequence numbers from PostgresDB") + } + + async fn 
get_prunable_epoch_range(&self) -> Result<(u64, u64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + epochs::table + .select((min(epochs::epoch), max(epochs::epoch))) + .first::<(Option, Option)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| { + ( + min.unwrap_or_default() as u64, + max.unwrap_or_default() as u64, + ) + }) + .context("Failed reading min and max epoch numbers from PostgresDB") + } + + async fn get_min_prunable_checkpoint(&self) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + pruner_cp_watermark::table + .select(min(pruner_cp_watermark::checkpoint_sequence_number)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .map(|v| v.unwrap_or_default() as u64) + .context("Failed reading min prunable checkpoint sequence number from PostgresDB") + } + + pub async fn get_checkpoint_range_for_epoch( + &self, + epoch: u64, + ) -> Result<(u64, Option), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + epochs::table + .select((epochs::first_checkpoint_id, epochs::last_checkpoint_id)) + .filter(epochs::epoch.eq(epoch as i64)) + .first::<(i64, Option)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| (min as u64, max.map(|v| v as u64))) + .context("Failed reading checkpoint range from PostgresDB") + } + + pub async fn get_transaction_range_for_checkpoint( + &self, + checkpoint: u64, + ) -> Result<(u64, u64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + pruner_cp_watermark::table + .select(( + pruner_cp_watermark::min_tx_sequence_number, + pruner_cp_watermark::max_tx_sequence_number, + )) + .filter(pruner_cp_watermark::checkpoint_sequence_number.eq(checkpoint as i64)) + .first::<(i64, i64)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| (min as u64, max as u64)) + .context("Failed 
reading transaction range from PostgresDB") + } + + pub async fn get_latest_object_snapshot_checkpoint_sequence_number( + &self, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + objects_snapshot::table + .select(max(objects_snapshot::checkpoint_sequence_number)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .map(|v| v.map(|v| v as u64)) + .context( + "Failed reading latest object snapshot checkpoint sequence number from PostgresDB", + ) + } + + async fn persist_display_updates( + &self, + display_updates: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(display::table) + .values(display_updates) + .on_conflict(display::object_type) + .do_update() + .set(( + display::id.eq(excluded(display::id)), + display::version.eq(excluded(display::version)), + display::bcs.eq(excluded(display::bcs)), + )) + .execute(conn) + .await?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await?; + + Ok(()) + } + + async fn persist_object_mutation_chunk( + &self, + mutated_object_mutation_chunk: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(objects::table) + .values(mutated_object_mutation_chunk.clone()) + .on_conflict(objects::object_id) + .do_update() + .set(( + objects::object_id.eq(excluded(objects::object_id)), + objects::object_version.eq(excluded(objects::object_version)), + objects::object_digest.eq(excluded(objects::object_digest)), + objects::owner_type.eq(excluded(objects::owner_type)), + objects::owner_id.eq(excluded(objects::owner_id)), + objects::object_type.eq(excluded(objects::object_type)), + 
objects::serialized_object.eq(excluded(objects::serialized_object)), + objects::coin_type.eq(excluded(objects::coin_type)), + objects::coin_balance.eq(excluded(objects::coin_balance)), + objects::df_kind.eq(excluded(objects::df_kind)), + )) + .execute(conn) + .await?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object mutations with error: {}", e); + }) + } + + async fn persist_object_deletion_chunk( + &self, + deleted_objects_chunk: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + objects::table.filter( + objects::object_id.eq_any( + deleted_objects_chunk + .iter() + .map(|o| o.object_id.clone()) + .collect::>(), + ), + ), + ) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write object deletion to PostgresDB")?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object deletions with error: {}", e); + }) + } + + async fn persist_object_snapshot_mutation_chunk( + &self, + objects_snapshot_mutations: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_snapshot_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for mutation_chunk in + objects_snapshot_mutations.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(objects_snapshot::table) + .values(mutation_chunk) + .on_conflict(objects_snapshot::object_id) + .do_update() + .set(( + objects_snapshot::object_version + .eq(excluded(objects_snapshot::object_version)), + 
objects_snapshot::object_status + .eq(excluded(objects_snapshot::object_status)), + objects_snapshot::object_digest + .eq(excluded(objects_snapshot::object_digest)), + objects_snapshot::owner_type.eq(excluded(objects_snapshot::owner_type)), + objects_snapshot::owner_id.eq(excluded(objects_snapshot::owner_id)), + objects_snapshot::object_type_package + .eq(excluded(objects_snapshot::object_type_package)), + objects_snapshot::object_type_module + .eq(excluded(objects_snapshot::object_type_module)), + objects_snapshot::object_type_name + .eq(excluded(objects_snapshot::object_type_name)), + objects_snapshot::object_type + .eq(excluded(objects_snapshot::object_type)), + objects_snapshot::serialized_object + .eq(excluded(objects_snapshot::serialized_object)), + objects_snapshot::coin_type.eq(excluded(objects_snapshot::coin_type)), + objects_snapshot::coin_balance + .eq(excluded(objects_snapshot::coin_balance)), + objects_snapshot::df_kind.eq(excluded(objects_snapshot::df_kind)), + objects_snapshot::checkpoint_sequence_number + .eq(excluded(objects_snapshot::checkpoint_sequence_number)), + )) + .execute(conn) + .await?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object snapshot with error: {}", e); + }) + } + + async fn persist_object_snapshot_deletion_chunk( + &self, + objects_snapshot_deletions: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_snapshot_chunks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for deletion_chunk in + objects_snapshot_deletions.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::delete( + objects_snapshot::table.filter( + objects_snapshot::object_id.eq_any( + deletion_chunk + .iter() + .map(|o| o.object_id.clone()) + .collect::>(), + ), + ), + ) + .execute(conn) + 
.await + .map_err(IndexerError::from) + .context("Failed to write object deletion to PostgresDB")?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Deleted {} chunked object snapshots", + objects_snapshot_deletions.len(), + ); + }) + .tap_err(|e| { + tracing::error!( + "Failed to persist object snapshot deletions with error: {}", + e + ); + }) + } + + async fn persist_objects_history_chunk( + &self, + stored_objects_history: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_history_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for stored_objects_history_chunk in + stored_objects_history.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + let error_message = concat!( + "Failed to write to ", + stringify!((objects_history::table)), + " DB" + ); + diesel::insert_into(objects_history::table) + .values(stored_objects_history_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object history with error: {}", e); + }) + } + + async fn persist_full_objects_history_chunk( + &self, + objects: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_full_objects_history_chunks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for objects_chunk in objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(full_objects_history::table) + .values(objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + 
.context("Failed to write to full_objects_history table")?; + } + + Ok(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} chunked full objects history", + objects.len(), + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist full object history with error: {}", e); + }) + } + + async fn persist_objects_version_chunk( + &self, + object_versions: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_version_chunks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for object_version_chunk in object_versions.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(objects_version::table) + .values(object_version_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to objects_version table")?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} chunked object versions", + object_versions.len(), + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object versions with error: {}", e); + }) + } + + async fn persist_raw_checkpoints_impl( + &self, + raw_checkpoints: &[StoredRawCheckpoint], + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(raw_checkpoints::table) + .values(raw_checkpoints) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to raw_checkpoints table")?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + } + + async fn persist_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + 
let Some(first_checkpoint) = checkpoints.as_slice().first() else { + return Ok(()); + }; + + // If the first checkpoint has sequence number 0, we need to persist the digest as + // chain identifier. + if first_checkpoint.sequence_number == 0 { + let checkpoint_digest = first_checkpoint.checkpoint_digest.into_inner().to_vec(); + self.persist_protocol_configs_and_feature_flags(checkpoint_digest.clone()) + .await?; + self.persist_chain_identifier(checkpoint_digest).await?; + } + let guard = self + .metrics + .checkpoint_db_commit_latency_checkpoints + .start_timer(); + + let stored_cp_txs = checkpoints.iter().map(StoredCpTx::from).collect::>(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for stored_cp_tx_chunk in stored_cp_txs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(pruner_cp_watermark::table) + .values(stored_cp_tx_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to pruner_cp_watermark table")?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + info!( + "Persisted {} pruner_cp_watermark rows.", + stored_cp_txs.len(), + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist pruner_cp_watermark with error: {}", e); + })?; + + let stored_checkpoints = checkpoints + .iter() + .map(StoredCheckpoint::from) + .collect::>(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for stored_checkpoint_chunk in + stored_checkpoints.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(checkpoints::table) + .values(stored_checkpoint_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to checkpoints table")?; + let time_now_ms = chrono::Utc::now().timestamp_millis(); + for stored_checkpoint in stored_checkpoint_chunk { + self.metrics + .db_commit_lag_ms + .set(time_now_ms - 
stored_checkpoint.timestamp_ms); + self.metrics + .max_committed_checkpoint_sequence_number + .set(stored_checkpoint.sequence_number); + self.metrics + .committed_checkpoint_timestamp_ms + .set(stored_checkpoint.timestamp_ms); + } + + for stored_checkpoint in stored_checkpoint_chunk { + info!( + "Indexer lag: \ + persisted checkpoint {} with time now {} and checkpoint time {}", + stored_checkpoint.sequence_number, + time_now_ms, + stored_checkpoint.timestamp_ms + ); + } + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} checkpoints", + stored_checkpoints.len() + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist checkpoints with error: {}", e); + }) + } + + async fn persist_transactions_chunk( + &self, + transactions: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_transactions_chunks + .start_timer(); + let transformation_guard = self + .metrics + .checkpoint_db_commit_latency_transactions_chunks_transformation + .start_timer(); + let transactions = transactions + .iter() + .map(StoredTransaction::from) + .collect::>(); + drop(transformation_guard); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for transaction_chunk in transactions.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + let error_message = concat!( + "Failed to write to ", + stringify!((transactions::table)), + " DB" + ); + diesel::insert_into(transactions::table) + .values(transaction_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} chunked transactions", + transactions.len() + ); + }) + .tap_err(|e| { + tracing::error!("Failed to 
persist transactions with error: {}", e); + }) + } + + async fn persist_events_chunk(&self, events: Vec) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_events_chunks + .start_timer(); + let len = events.len(); + let events = events + .into_iter() + .map(StoredEvent::from) + .collect::>(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for event_chunk in events.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + let error_message = + concat!("Failed to write to ", stringify!((events::table)), " DB"); + diesel::insert_into(events::table) + .values(event_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} chunked events", len); + }) + .tap_err(|e| { + tracing::error!("Failed to persist events with error: {}", e); + }) + } + + async fn persist_packages(&self, packages: Vec) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + if packages.is_empty() { + return Ok(()); + } + let guard = self + .metrics + .checkpoint_db_commit_latency_packages + .start_timer(); + let packages = packages + .into_iter() + .map(StoredPackage::from) + .collect::>(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for packages_chunk in packages.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(packages::table) + .values(packages_chunk) + .on_conflict(packages::package_id) + .do_update() + .set(( + packages::package_id.eq(excluded(packages::package_id)), + packages::package_version.eq(excluded(packages::package_version)), + packages::move_package.eq(excluded(packages::move_package)), + packages::checkpoint_sequence_number + .eq(excluded(packages::checkpoint_sequence_number)), + )) + .execute(conn) + 
.await?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} packages", packages.len()); + }) + .tap_err(|e| { + tracing::error!("Failed to persist packages with error: {}", e); + }) + } + + async fn persist_event_indices_chunk( + &self, + indices: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_event_indices_chunks + .start_timer(); + let len = indices.len(); + let ( + event_emit_packages, + event_emit_modules, + event_senders, + event_struct_packages, + event_struct_modules, + event_struct_names, + event_struct_instantiations, + ) = indices.into_iter().map(|i| i.split()).fold( + ( + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + ), + |( + mut event_emit_packages, + mut event_emit_modules, + mut event_senders, + mut event_struct_packages, + mut event_struct_modules, + mut event_struct_names, + mut event_struct_instantiations, + ), + index| { + event_emit_packages.push(index.0); + event_emit_modules.push(index.1); + event_senders.push(index.2); + event_struct_packages.push(index.3); + event_struct_modules.push(index.4); + event_struct_names.push(index.5); + event_struct_instantiations.push(index.6); + ( + event_emit_packages, + event_emit_modules, + event_senders, + event_struct_packages, + event_struct_modules, + event_struct_names, + event_struct_instantiations, + ) + }, + ); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for event_emit_packages_chunk in + event_emit_packages.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_emit_package::table) + .values(event_emit_packages_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_emit_modules_chunk in + event_emit_modules.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + 
diesel::insert_into(event_emit_module::table) + .values(event_emit_modules_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_senders_chunk in event_senders.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(event_senders::table) + .values(event_senders_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_packages_chunk in + event_struct_packages.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_package::table) + .values(event_struct_packages_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_modules_chunk in + event_struct_modules.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_module::table) + .values(event_struct_modules_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_names_chunk in + event_struct_names.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_name::table) + .values(event_struct_names_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_instantiations_chunk in + event_struct_instantiations.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_instantiation::table) + .values(event_struct_instantiations_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + Ok(()) + } + .scope_boxed() + }) + .await?; + + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} chunked event indices", len); + Ok(()) + } + + async fn persist_tx_indices_chunk(&self, indices: Vec) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_tx_indices_chunks + .start_timer(); + let len = indices.len(); + let ( + affected_addresses, + affected_objects, + input_objects, + changed_objects, + pkgs, + mods, + funs, + digests, + kinds, + ) = indices.into_iter().map(|i| i.split()).fold( + 
( + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + ), + |( + mut tx_affected_addresses, + mut tx_affected_objects, + mut tx_input_objects, + mut tx_changed_objects, + mut tx_pkgs, + mut tx_mods, + mut tx_funs, + mut tx_digests, + mut tx_kinds, + ), + index| { + tx_affected_addresses.extend(index.0); + tx_affected_objects.extend(index.1); + tx_input_objects.extend(index.2); + tx_changed_objects.extend(index.3); + tx_pkgs.extend(index.4); + tx_mods.extend(index.5); + tx_funs.extend(index.6); + tx_digests.extend(index.7); + tx_kinds.extend(index.8); + ( + tx_affected_addresses, + tx_affected_objects, + tx_input_objects, + tx_changed_objects, + tx_pkgs, + tx_mods, + tx_funs, + tx_digests, + tx_kinds, + ) + }, + ); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for affected_addresses_chunk in + affected_addresses.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(tx_affected_addresses::table) + .values(affected_addresses_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for affected_objects_chunk in + affected_objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(tx_affected_objects::table) + .values(affected_objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for input_objects_chunk in input_objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_input_objects::table) + .values(input_objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for changed_objects_chunk in + changed_objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(tx_changed_objects::table) + .values(changed_objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for pkgs_chunk in pkgs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_calls_pkg::table) + .values(pkgs_chunk) + 
.on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for mods_chunk in mods.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_calls_mod::table) + .values(mods_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for funs_chunk in funs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_calls_fun::table) + .values(funs_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for digests_chunk in digests.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_digests::table) + .values(digests_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for kinds_chunk in kinds.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_kinds::table) + .values(kinds_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + Ok(()) + } + .scope_boxed() + }) + .await?; + + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} chunked tx_indices", len); + Ok(()) + } + + async fn persist_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_epoch + .start_timer(); + let epoch_id = epoch.new_epoch.epoch; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + if let Some(last_epoch) = &epoch.last_epoch { + let last_epoch_id = last_epoch.epoch; + + info!(last_epoch_id, "Persisting epoch end data."); + diesel::update(epochs::table.filter(epochs::epoch.eq(last_epoch_id))) + .set(last_epoch) + .execute(conn) + .await?; + } + + let epoch_id = epoch.new_epoch.epoch; + info!(epoch_id, "Persisting epoch beginning info"); + let error_message = + concat!("Failed to write to ", stringify!((epochs::table)), " DB"); + diesel::insert_into(epochs::table) + .values(epoch.new_epoch) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + Ok::<(), IndexerError>(()) 
+ } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, epoch_id, "Persisted epoch beginning info"); + }) + .tap_err(|e| { + tracing::error!("Failed to persist epoch with error: {}", e); + }) + } + + async fn advance_epoch(&self, epoch_to_commit: EpochToCommit) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let last_epoch_id = epoch_to_commit.last_epoch.as_ref().map(|e| e.epoch); + // partition_0 has been created, so no need to advance it. + if let Some(last_epoch_id) = last_epoch_id { + let last_db_epoch: Option = epochs::table + .filter(epochs::epoch.eq(last_epoch_id)) + .first::(&mut connection) + .await + .optional() + .map_err(Into::into) + .context("Failed to read last epoch from PostgresDB")?; + if let Some(last_epoch) = last_db_epoch { + let epoch_partition_data = + EpochPartitionData::compose_data(epoch_to_commit, last_epoch); + let table_partitions = self.partition_manager.get_table_partitions().await?; + for (table, (_, last_partition)) in table_partitions { + // Only advance epoch partition for epoch partitioned tables. 
+ if !self + .partition_manager + .get_strategy(&table) + .is_epoch_partitioned() + { + continue; + } + let guard = self.metrics.advance_epoch_latency.start_timer(); + self.partition_manager + .advance_epoch(table.clone(), last_partition, &epoch_partition_data) + .await?; + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Advanced epoch partition {} for table {}", + last_partition, + table.clone() + ); + } + } else { + tracing::error!("Last epoch: {} from PostgresDB is None.", last_epoch_id); + } + } + + Ok(()) + } + + async fn prune_checkpoints_table(&self, cp: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + checkpoints::table.filter(checkpoints::sequence_number.eq(cp as i64)), + ) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to prune checkpoints table")?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + } + + async fn prune_event_indices_table( + &self, + min_tx: u64, + max_tx: u64, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let (min_tx, max_tx) = (min_tx as i64, max_tx as i64); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + event_emit_module::table + .filter(event_emit_module::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_emit_package::table + .filter(event_emit_package::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_senders::table + .filter(event_senders::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete(event_struct_instantiation::table.filter( + event_struct_instantiation::tx_sequence_number.between(min_tx, max_tx), + )) + .execute(conn) + .await?; + + diesel::delete( + event_struct_module::table + 
.filter(event_struct_module::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_struct_name::table + .filter(event_struct_name::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_struct_package::table + .filter(event_struct_package::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + Ok(()) + } + .scope_boxed() + }) + .await + } + + async fn prune_tx_indices_table(&self, min_tx: u64, max_tx: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let (min_tx, max_tx) = (min_tx as i64, max_tx as i64); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + tx_affected_addresses::table + .filter(tx_affected_addresses::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_affected_objects::table + .filter(tx_affected_objects::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_input_objects::table + .filter(tx_input_objects::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_changed_objects::table + .filter(tx_changed_objects::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_calls_pkg::table + .filter(tx_calls_pkg::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_calls_mod::table + .filter(tx_calls_mod::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_calls_fun::table + .filter(tx_calls_fun::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_digests::table + .filter(tx_digests::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + Ok(()) + } + .scope_boxed() + }) + .await + } + + async fn 
prune_cp_tx_table(&self, cp: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + pruner_cp_watermark::table + .filter(pruner_cp_watermark::checkpoint_sequence_number.eq(cp as i64)), + ) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to prune pruner_cp_watermark table")?; + Ok(()) + } + .scope_boxed() + }) + .await + } + + async fn get_network_total_transactions_by_end_of_epoch( + &self, + epoch: u64, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + // TODO: (wlmyng) update to read from epochs::network_total_transactions + + Ok(Some( + checkpoints::table + .filter(checkpoints::epoch.eq(epoch as i64)) + .select(checkpoints::network_total_transactions) + .order_by(checkpoints::sequence_number.desc()) + .first::(&mut connection) + .await + .map_err(Into::into) + .context("Failed to get network total transactions in epoch") + .map(|v| v as u64)?, + )) + } + + async fn update_watermarks_upper_bound( + &self, + watermark: CommitterWatermark, + ) -> Result<(), IndexerError> + where + E::Iterator: Iterator>, + { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_watermarks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + let upper_bound_updates = E::iter() + .map(|table| StoredWatermark::from_upper_bound_update(table.as_ref(), watermark)) + .collect::>(); + async { + diesel::insert_into(watermarks::table) + .values(upper_bound_updates) + .on_conflict(watermarks::pipeline) + .do_update() + .set(( + watermarks::epoch_hi_inclusive.eq(excluded(watermarks::epoch_hi_inclusive)), + watermarks::checkpoint_hi_inclusive + .eq(excluded(watermarks::checkpoint_hi_inclusive)), + watermarks::tx_hi.eq(excluded(watermarks::tx_hi)), + )) + .execute(conn) + .await + 
.map_err(IndexerError::from) + .context("Failed to update watermarks upper bound")?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted watermarks"); + }) + .tap_err(|e| { + tracing::error!("Failed to persist watermarks with error: {}", e); + }) + } + + async fn map_epochs_to_cp_tx( + &self, + epochs: &[u64], + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let results: Vec<(i64, i64, Option)> = epochs::table + .filter(epochs::epoch.eq_any(epochs.iter().map(|&e| e as i64))) + .select(( + epochs::epoch, + epochs::first_checkpoint_id, + epochs::first_tx_sequence_number, + )) + .load::<(i64, i64, Option)>(&mut connection) + .await + .map_err(Into::into) + .context("Failed to fetch first checkpoint and tx seq num for epochs")?; + + Ok(results + .into_iter() + .map(|(epoch, checkpoint, tx)| { + ( + epoch as u64, + (checkpoint as u64, tx.unwrap_or_default() as u64), + ) + }) + .collect()) + } + + async fn update_watermarks_lower_bound( + &self, + watermarks: Vec<(PrunableTable, u64)>, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let epochs: Vec = watermarks.iter().map(|(_table, epoch)| *epoch).collect(); + let epoch_mapping = self.map_epochs_to_cp_tx(&epochs).await?; + let lookups: Result, IndexerError> = watermarks + .into_iter() + .map(|(table, epoch)| { + let (checkpoint, tx) = epoch_mapping.get(&epoch).ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Epoch {} not found in epoch mapping", + epoch + )) + })?; + + Ok(StoredWatermark::from_lower_bound_update( + table.as_ref(), + epoch, + table.select_reader_lo(*checkpoint, *tx), + )) + }) + .collect(); + let lower_bound_updates = lookups?; + + let guard = self + .metrics + .checkpoint_db_commit_latency_watermarks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, 
|conn| { + async { + use diesel::dsl::sql; + use diesel::query_dsl::methods::FilterDsl; + + diesel::insert_into(watermarks::table) + .values(lower_bound_updates) + .on_conflict(watermarks::pipeline) + .do_update() + .set(( + watermarks::reader_lo.eq(excluded(watermarks::reader_lo)), + watermarks::epoch_lo.eq(excluded(watermarks::epoch_lo)), + watermarks::timestamp_ms.eq(sql::( + "(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)::bigint", + )), + )) + .filter(excluded(watermarks::reader_lo).gt(watermarks::reader_lo)) + .filter(excluded(watermarks::epoch_lo).gt(watermarks::epoch_lo)) + .filter( + diesel::dsl::sql::( + "(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)::bigint", + ) + .gt(watermarks::timestamp_ms), + ) + .execute(conn) + .await?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted watermarks"); + }) + .tap_err(|e| { + tracing::error!("Failed to persist watermarks with error: {}", e); + }) + } + + async fn get_watermarks(&self) -> Result<(Vec, i64), IndexerError> { + use diesel_async::RunQueryDsl; + + // read_only transaction, otherwise this will block and get blocked by write transactions to + // the same table. 
+ read_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + let stored = watermarks::table + .load::(conn) + .await + .map_err(Into::into) + .context("Failed reading watermarks from PostgresDB")?; + + let timestamp = diesel::select(diesel::dsl::sql::( + "(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)::bigint", + )) + .get_result(conn) + .await + .map_err(Into::into) + .context("Failed reading current timestamp from PostgresDB")?; + + Ok((stored, timestamp)) + } + .scope_boxed() + }) + .await + } +} + +#[async_trait] +impl IndexerStore for PgIndexerStore { + async fn get_latest_checkpoint_sequence_number(&self) -> Result, IndexerError> { + self.get_latest_checkpoint_sequence_number().await + } + + async fn get_available_epoch_range(&self) -> Result<(u64, u64), IndexerError> { + self.get_prunable_epoch_range().await + } + + async fn get_available_checkpoint_range(&self) -> Result<(u64, u64), IndexerError> { + self.get_available_checkpoint_range().await + } + + async fn get_chain_identifier(&self) -> Result>, IndexerError> { + self.get_chain_identifier().await + } + + async fn get_latest_object_snapshot_checkpoint_sequence_number( + &self, + ) -> Result, IndexerError> { + self.get_latest_object_snapshot_checkpoint_sequence_number() + .await + } + + async fn persist_objects( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + if object_changes.is_empty() { + return Ok(()); + } + let guard = self + .metrics + .checkpoint_db_commit_latency_objects + .start_timer(); + let (indexed_mutations, indexed_deletions) = retain_latest_indexed_objects(object_changes); + let object_mutations = indexed_mutations + .into_iter() + .map(StoredObject::from) + .collect::>(); + let object_deletions = indexed_deletions + .into_iter() + .map(StoredDeletedObject::from) + .collect::>(); + let mutation_len = object_mutations.len(); + let deletion_len = object_deletions.len(); + + let object_mutation_chunks = + chunk!(object_mutations, 
self.config.parallel_objects_chunk_size); + let object_deletion_chunks = + chunk!(object_deletions, self.config.parallel_objects_chunk_size); + let mutation_futures = object_mutation_chunks + .into_iter() + .map(|c| self.persist_object_mutation_chunk(c)) + .map(Either::Left); + let deletion_futures = object_deletion_chunks + .into_iter() + .map(|c| self.persist_object_deletion_chunk(c)) + .map(Either::Right); + let all_futures = mutation_futures.chain(deletion_futures).collect::>(); + + futures::future::join_all(all_futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all object mutation or deletion chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} objects mutations and {} deletions", mutation_len, deletion_len + ); + Ok(()) + } + + async fn persist_objects_snapshot( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + if object_changes.is_empty() { + return Ok(()); + } + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_snapshot + .start_timer(); + let (indexed_mutations, indexed_deletions) = retain_latest_indexed_objects(object_changes); + let object_snapshot_mutations: Vec = indexed_mutations + .into_iter() + .map(StoredObjectSnapshot::from) + .collect(); + let object_snapshot_deletions: Vec = indexed_deletions + .into_iter() + .map(StoredObjectSnapshot::from) + .collect(); + let mutation_len = object_snapshot_mutations.len(); + let deletion_len = object_snapshot_deletions.len(); + let object_snapshot_mutation_chunks = chunk!( + object_snapshot_mutations, + self.config.parallel_objects_chunk_size + ); + let object_snapshot_deletion_chunks = chunk!( + object_snapshot_deletions, + self.config.parallel_objects_chunk_size + ); + let mutation_futures = object_snapshot_mutation_chunks + .into_iter() + .map(|c| self.persist_object_snapshot_mutation_chunk(c)) + .map(Either::Left) + .collect::>(); + 
let deletion_futures = object_snapshot_deletion_chunks + .into_iter() + .map(|c| self.persist_object_snapshot_deletion_chunk(c)) + .map(Either::Right) + .collect::>(); + let all_futures = mutation_futures + .into_iter() + .chain(deletion_futures) + .collect::>(); + futures::future::join_all(all_futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist object snapshot mutation or deletion chunks: {:?}", + e + )) + }) + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} objects snapshot mutations and {} deletions", + mutation_len, + deletion_len + ); + }) + .tap_err(|e| { + tracing::error!( + "Failed to persist object snapshot mutation or deletion chunks: {:?}", + e + ) + })?; + Ok(()) + } + + async fn persist_object_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + let skip_history = std::env::var("SKIP_OBJECT_HISTORY") + .map(|val| val.eq_ignore_ascii_case("true")) + .unwrap_or(false); + if skip_history { + info!("skipping object history"); + return Ok(()); + } + + if object_changes.is_empty() { + return Ok(()); + } + let objects = make_objects_history_to_commit(object_changes); + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_history + .start_timer(); + + let len = objects.len(); + let chunks = chunk!(objects, self.config.parallel_objects_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_objects_history_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all objects history chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} objects history", len); + Ok(()) + } + + // TODO: There are quite some shared boiler-plate code in all functions. + // We should clean them up eventually. 
+ async fn persist_full_objects_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + let skip_history = std::env::var("SKIP_OBJECT_HISTORY") + .map(|val| val.eq_ignore_ascii_case("true")) + .unwrap_or(false); + if skip_history { + info!("skipping object history"); + return Ok(()); + } + + if object_changes.is_empty() { + return Ok(()); + } + let objects: Vec = object_changes + .into_iter() + .flat_map(|c| { + let TransactionObjectChangesToCommit { + changed_objects, + deleted_objects, + } = c; + changed_objects + .into_iter() + .map(|o| o.into()) + .chain(deleted_objects.into_iter().map(|o| o.into())) + }) + .collect(); + let guard = self + .metrics + .checkpoint_db_commit_latency_full_objects_history + .start_timer(); + + let len = objects.len(); + let chunks = chunk!(objects, self.config.parallel_objects_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_full_objects_history_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all full objects history chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} full objects history", len); + Ok(()) + } + + async fn persist_objects_version( + &self, + object_versions: Vec, + ) -> Result<(), IndexerError> { + if object_versions.is_empty() { + return Ok(()); + } + + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_version + .start_timer(); + + let len = object_versions.len(); + let chunks = chunk!(object_versions, self.config.parallel_objects_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_objects_version_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all objects version chunks: {:?}", + e + )) + })?; + 
+ let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} object versions", len); + Ok(()) + } + + async fn persist_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError> { + self.persist_checkpoints(checkpoints).await + } + + async fn persist_transactions( + &self, + transactions: Vec, + ) -> Result<(), IndexerError> { + let guard = self + .metrics + .checkpoint_db_commit_latency_transactions + .start_timer(); + let len = transactions.len(); + + let chunks = chunk!(transactions, self.config.parallel_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_transactions_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all transactions chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} transactions", len); + Ok(()) + } + + async fn persist_events(&self, events: Vec) -> Result<(), IndexerError> { + if events.is_empty() { + return Ok(()); + } + let len = events.len(); + let guard = self + .metrics + .checkpoint_db_commit_latency_events + .start_timer(); + let chunks = chunk!(events, self.config.parallel_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_events_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all events chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} events", len); + Ok(()) + } + + async fn persist_displays( + &self, + display_updates: BTreeMap, + ) -> Result<(), IndexerError> { + if display_updates.is_empty() { + return Ok(()); + } + self.persist_display_updates(display_updates.values().cloned().collect::>()) + .await + } + + async fn persist_packages(&self, packages: Vec) -> Result<(), 
IndexerError> { + if packages.is_empty() { + return Ok(()); + } + self.persist_packages(packages).await + } + + async fn persist_event_indices(&self, indices: Vec) -> Result<(), IndexerError> { + if indices.is_empty() { + return Ok(()); + } + let len = indices.len(); + let guard = self + .metrics + .checkpoint_db_commit_latency_event_indices + .start_timer(); + let chunks = chunk!(indices, self.config.parallel_chunk_size); + + let futures = chunks + .into_iter() + .map(|chunk| self.persist_event_indices_chunk(chunk)) + .collect::>(); + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all event_indices chunks: {:?}", + e + )) + }) + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} event_indices chunks", len); + }) + .tap_err(|e| tracing::error!("Failed to persist all event_indices chunks: {:?}", e))?; + Ok(()) + } + + async fn persist_tx_indices(&self, indices: Vec) -> Result<(), IndexerError> { + if indices.is_empty() { + return Ok(()); + } + let len = indices.len(); + let guard = self + .metrics + .checkpoint_db_commit_latency_tx_indices + .start_timer(); + let chunks = chunk!(indices, self.config.parallel_chunk_size); + + let futures = chunks + .into_iter() + .map(|chunk| self.persist_tx_indices_chunk(chunk)) + .collect::>(); + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all tx_indices chunks: {:?}", + e + )) + }) + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} tx_indices chunks", len); + }) + .tap_err(|e| tracing::error!("Failed to persist all tx_indices chunks: {:?}", e))?; + Ok(()) + } + + async fn persist_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError> { + self.persist_epoch(epoch).await + } + + async fn advance_epoch(&self, epoch: 
EpochToCommit) -> Result<(), IndexerError> { + self.advance_epoch(epoch).await + } + + async fn prune_epoch(&self, epoch: u64) -> Result<(), IndexerError> { + let (mut min_cp, max_cp) = match self.get_checkpoint_range_for_epoch(epoch).await? { + (min_cp, Some(max_cp)) => Ok((min_cp, max_cp)), + _ => Err(IndexerError::PostgresReadError(format!( + "Failed to get checkpoint range for epoch {}", + epoch + ))), + }?; + + // NOTE: for disaster recovery, min_cp is the min cp of the current epoch, which is likely + // partially pruned already. min_prunable_cp is the min cp to be pruned. + // By std::cmp::max, we will resume the pruning process from the next checkpoint, instead of + // the first cp of the current epoch. + let min_prunable_cp = self.get_min_prunable_checkpoint().await?; + min_cp = std::cmp::max(min_cp, min_prunable_cp); + for cp in min_cp..=max_cp { + // NOTE: the order of pruning tables is crucial: + // 1. prune checkpoints table, checkpoints table is the source table of available range, + // we prune it first to make sure that we always have full data for checkpoints within the available range; + // 2. then prune tx_* tables; + // 3. then prune pruner_cp_watermark table, which is the checkpoint pruning watermark table and also tx seq source + // of a checkpoint to prune tx_* tables; + // 4. lastly we prune epochs table when all checkpoints of the epoch have been pruned. 
+ info!( + "Pruning checkpoint {} of epoch {} (min_prunable_cp: {})", + cp, epoch, min_prunable_cp + ); + self.prune_checkpoints_table(cp).await?; + + let (min_tx, max_tx) = self.get_transaction_range_for_checkpoint(cp).await?; + self.prune_tx_indices_table(min_tx, max_tx).await?; + info!( + "Pruned transactions for checkpoint {} from tx {} to tx {}", + cp, min_tx, max_tx + ); + self.prune_event_indices_table(min_tx, max_tx).await?; + info!( + "Pruned events of transactions for checkpoint {} from tx {} to tx {}", + cp, min_tx, max_tx + ); + self.metrics.last_pruned_transaction.set(max_tx as i64); + + self.prune_cp_tx_table(cp).await?; + info!("Pruned checkpoint {} of epoch {}", cp, epoch); + self.metrics.last_pruned_checkpoint.set(cp as i64); + } + + Ok(()) + } + + async fn upload_display(&self, epoch_number: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let mut connection = self.pool.get().await?; + let mut buffer = Cursor::new(Vec::new()); + { + let mut writer = Writer::from_writer(&mut buffer); + let displays = display::table + .load::(&mut connection) + .await + .map_err(Into::into) + .context("Failed to get display from database")?; + info!("Read {} displays", displays.len()); + writer + .write_record(["object_type", "id", "version", "bcs"]) + .map_err(|_| { + IndexerError::GcsError("Failed to write display to csv".to_string()) + })?; + for display in displays { + writer + .write_record(&[ + display.object_type, + hex::encode(display.id), + display.version.to_string(), + hex::encode(display.bcs), + ]) + .map_err(|_| IndexerError::GcsError("Failed to write to csv".to_string()))?; + } + writer + .flush() + .map_err(|_| IndexerError::GcsError("Failed to flush csv".to_string()))?; + } + + if let (Some(cred_path), Some(bucket)) = ( + self.config.gcs_cred_path.clone(), + self.config.gcs_display_bucket.clone(), + ) { + let remote_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::GCS), + bucket: Some(bucket), + 
google_service_account: Some(cred_path), + object_store_connection_limit: 200, + no_sign_request: false, + ..Default::default() + }; + let remote_store = remote_store_config.make().map_err(|e| { + IndexerError::GcsError(format!("Failed to make GCS remote store: {}", e)) + })?; + let path = Path::from(format!("display_{}.csv", epoch_number).as_str()); + put(&remote_store, &path, buffer.into_inner().into()) + .await + .map_err(|e| IndexerError::GcsError(format!("Failed to put to GCS: {}", e)))?; + } else { + warn!("Either GCS cred path or bucket is not set, skipping display upload."); + } + Ok(()) + } + + async fn restore_display(&self, bytes: bytes::Bytes) -> Result<(), IndexerError> { + let cursor = Cursor::new(bytes); + let mut csv_reader = ReaderBuilder::new().has_headers(true).from_reader(cursor); + let displays = csv_reader + .deserialize() + .collect::, csv::Error>>() + .map_err(|e| { + IndexerError::GcsError(format!("Failed to deserialize display records: {}", e)) + })?; + self.persist_display_updates(displays).await + } + + async fn get_network_total_transactions_by_end_of_epoch( + &self, + epoch: u64, + ) -> Result, IndexerError> { + self.get_network_total_transactions_by_end_of_epoch(epoch) + .await + } + + /// Persist protocol configs and feature flags until the protocol version for the latest epoch + /// we have stored in the db, inclusive. 
+ async fn persist_protocol_configs_and_feature_flags( + &self, + chain_id: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let chain_id = ChainIdentifier::from( + CheckpointDigest::try_from(chain_id).expect("Unable to convert chain id"), + ); + + let mut all_configs = vec![]; + let mut all_flags = vec![]; + + let (start_version, end_version) = self.get_protocol_version_index_range().await?; + info!( + "Persisting protocol configs with start_version: {}, end_version: {}", + start_version, end_version + ); + + // Gather all protocol configs and feature flags for all versions between start and end. + for version in start_version..=end_version { + let protocol_configs = ProtocolConfig::get_for_version_if_supported( + (version as u64).into(), + chain_id.chain(), + ) + .ok_or(IndexerError::GenericError(format!( + "Unable to fetch protocol version {} and chain {:?}", + version, + chain_id.chain() + )))?; + let configs_vec = protocol_configs + .attr_map() + .into_iter() + .map(|(k, v)| StoredProtocolConfig { + protocol_version: version, + config_name: k, + config_value: v.map(|v| v.to_string()), + }) + .collect::>(); + all_configs.extend(configs_vec); + + let feature_flags = protocol_configs + .feature_map() + .into_iter() + .map(|(k, v)| StoredFeatureFlag { + protocol_version: version, + flag_name: k, + flag_value: v, + }) + .collect::>(); + all_flags.extend(feature_flags); + } + + // Now insert all of them into the db. + // TODO: right now the size of these updates is manageable but later we may consider batching. 
+ transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for config_chunk in all_configs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(protocol_configs::table) + .values(config_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to protocol_configs table")?; + } + + diesel::insert_into(feature_flags::table) + .values(all_flags.clone()) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to feature_flags table")?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await?; + Ok(()) + } + + async fn persist_chain_identifier( + &self, + checkpoint_digest: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(chain_identifier::table) + .values(StoredChainIdentifier { checkpoint_digest }) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("failed to write to chain_identifier table")?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await?; + Ok(()) + } + + async fn persist_raw_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError> { + self.persist_raw_checkpoints_impl(&checkpoints).await + } + + async fn update_watermarks_upper_bound( + &self, + watermark: CommitterWatermark, + ) -> Result<(), IndexerError> + where + E::Iterator: Iterator>, + { + self.update_watermarks_upper_bound::(watermark).await + } + + async fn update_watermarks_lower_bound( + &self, + watermarks: Vec<(PrunableTable, u64)>, + ) -> Result<(), IndexerError> { + self.update_watermarks_lower_bound(watermarks).await + } + + async fn get_watermarks(&self) -> Result<(Vec, i64), IndexerError> { + self.get_watermarks().await + } +} + +fn make_objects_history_to_commit( + tx_object_changes: Vec, +) -> Vec { + let deleted_objects: 
Vec = tx_object_changes + .clone() + .into_iter() + .flat_map(|changes| changes.deleted_objects) + .map(|o| o.into()) + .collect(); + let mutated_objects: Vec = tx_object_changes + .into_iter() + .flat_map(|changes| changes.changed_objects) + .map(|o| o.into()) + .collect(); + deleted_objects.into_iter().chain(mutated_objects).collect() +} + +// Partition object changes into deletions and mutations, +// within partition of mutations or deletions, retain the latest with highest version; +// For overlappings of mutations and deletions, only keep one with higher version. +// This is necessary b/c after this step, DB commit will be done in parallel and not in order. +fn retain_latest_indexed_objects( + tx_object_changes: Vec, +) -> (Vec, Vec) { + // Only the last deleted / mutated object will be in the map, + // b/c tx_object_changes are in order and versions always increment, + let (mutations, deletions) = tx_object_changes + .into_iter() + .flat_map(|change| { + change + .changed_objects + .into_iter() + .map(Either::Left) + .chain( + change + .deleted_objects + .into_iter() + .map(Either::Right), + ) + }) + .fold( + (HashMap::::new(), HashMap::::new()), + |(mut mutations, mut deletions), either_change| { + match either_change { + // Remove mutation / deletion with a following deletion / mutation, + // b/c following deletion / mutation always has a higher version. + // Technically, assertions below are not required, double check just in case. 
+ Either::Left(mutation) => { + let id = mutation.object.id(); + let mutation_version = mutation.object.version(); + if let Some(existing) = deletions.remove(&id) { + assert!( + existing.object_version < mutation_version.value(), + "Mutation version ({:?}) should be greater than existing deletion version ({:?}) for object {:?}", + mutation_version, + existing.object_version, + id + ); + } + if let Some(existing) = mutations.insert(id, mutation) { + assert!( + existing.object.version() < mutation_version, + "Mutation version ({:?}) should be greater than existing mutation version ({:?}) for object {:?}", + mutation_version, + existing.object.version(), + id + ); + } + } + Either::Right(deletion) => { + let id = deletion.object_id; + let deletion_version = deletion.object_version; + if let Some(existing) = mutations.remove(&id) { + assert!( + existing.object.version().value() < deletion_version, + "Deletion version ({:?}) should be greater than existing mutation version ({:?}) for object {:?}", + deletion_version, + existing.object.version(), + id + ); + } + if let Some(existing) = deletions.insert(id, deletion) { + assert!( + existing.object_version < deletion_version, + "Deletion version ({:?}) should be greater than existing deletion version ({:?}) for object {:?}", + deletion_version, + existing.object_version, + id + ); + } + } + } + (mutations, deletions) + }, + ); + ( + mutations.into_values().collect(), + deletions.into_values().collect(), + ) +} diff --git a/crates/sui-mvr-indexer/src/store/pg_partition_manager.rs b/crates/sui-mvr-indexer/src/store/pg_partition_manager.rs new file mode 100644 index 0000000000000..876a1b9c56146 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/pg_partition_manager.rs @@ -0,0 +1,224 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use diesel::sql_types::{BigInt, VarChar}; +use diesel::QueryableByName; +use diesel_async::scoped_futures::ScopedFutureExt; +use std::collections::{BTreeMap, HashMap}; +use std::time::Duration; +use tracing::{error, info}; + +use crate::database::ConnectionPool; +use crate::errors::IndexerError; +use crate::handlers::EpochToCommit; +use crate::models::epoch::StoredEpochInfo; +use crate::store::transaction_with_retry; + +const GET_PARTITION_SQL: &str = r" +SELECT parent.relname AS table_name, + MIN(CAST(SUBSTRING(child.relname FROM '\d+$') AS BIGINT)) AS first_partition, + MAX(CAST(SUBSTRING(child.relname FROM '\d+$') AS BIGINT)) AS last_partition +FROM pg_inherits + JOIN pg_class parent ON pg_inherits.inhparent = parent.oid + JOIN pg_class child ON pg_inherits.inhrelid = child.oid + JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace + JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace +WHERE parent.relkind = 'p' +GROUP BY table_name; +"; + +#[derive(Clone)] +pub struct PgPartitionManager { + pool: ConnectionPool, + + partition_strategies: HashMap<&'static str, PgPartitionStrategy>, +} + +#[derive(Clone, Copy)] +pub enum PgPartitionStrategy { + CheckpointSequenceNumber, + TxSequenceNumber, + ObjectId, +} + +impl PgPartitionStrategy { + pub fn is_epoch_partitioned(&self) -> bool { + matches!( + self, + Self::CheckpointSequenceNumber | Self::TxSequenceNumber + ) + } +} + +#[derive(Clone, Debug)] +pub struct EpochPartitionData { + last_epoch: u64, + next_epoch: u64, + last_epoch_start_cp: u64, + next_epoch_start_cp: u64, + last_epoch_start_tx: u64, + next_epoch_start_tx: u64, +} + +impl EpochPartitionData { + pub fn compose_data(epoch: EpochToCommit, last_db_epoch: StoredEpochInfo) -> Self { + let last_epoch = last_db_epoch.epoch as u64; + let last_epoch_start_cp = last_db_epoch.first_checkpoint_id as u64; + let next_epoch = epoch.new_epoch_id(); + let next_epoch_start_cp = 
epoch.new_epoch_first_checkpoint_id(); + let next_epoch_start_tx = epoch.new_epoch_first_tx_sequence_number(); + let last_epoch_start_tx = + next_epoch_start_tx - epoch.last_epoch_total_transactions().unwrap(); + + Self { + last_epoch, + next_epoch, + last_epoch_start_cp, + next_epoch_start_cp, + last_epoch_start_tx, + next_epoch_start_tx, + } + } +} + +impl PgPartitionManager { + pub fn new(pool: ConnectionPool) -> Result { + let mut partition_strategies = HashMap::new(); + partition_strategies.insert("events", PgPartitionStrategy::TxSequenceNumber); + partition_strategies.insert("transactions", PgPartitionStrategy::TxSequenceNumber); + partition_strategies.insert("objects_version", PgPartitionStrategy::ObjectId); + let manager = Self { + pool, + partition_strategies, + }; + Ok(manager) + } + + pub async fn get_table_partitions(&self) -> Result, IndexerError> { + #[derive(QueryableByName, Debug, Clone)] + struct PartitionedTable { + #[diesel(sql_type = VarChar)] + table_name: String, + #[diesel(sql_type = BigInt)] + first_partition: i64, + #[diesel(sql_type = BigInt)] + last_partition: i64, + } + + let mut connection = self.pool.get().await?; + + Ok( + diesel_async::RunQueryDsl::load(diesel::sql_query(GET_PARTITION_SQL), &mut connection) + .await? + .into_iter() + .map(|table: PartitionedTable| { + ( + table.table_name, + (table.first_partition as u64, table.last_partition as u64), + ) + }) + .collect(), + ) + } + + /// Tries to fetch the partitioning strategy for the given partitioned table. Defaults to + /// `CheckpointSequenceNumber` as the majority of our tables are partitioned on an epoch's + /// checkpoints today. 
+ pub fn get_strategy(&self, table_name: &str) -> PgPartitionStrategy { + self.partition_strategies + .get(table_name) + .copied() + .unwrap_or(PgPartitionStrategy::CheckpointSequenceNumber) + } + + pub fn determine_epoch_partition_range( + &self, + table_name: &str, + data: &EpochPartitionData, + ) -> Option<(u64, u64)> { + match self.get_strategy(table_name) { + PgPartitionStrategy::CheckpointSequenceNumber => { + Some((data.last_epoch_start_cp, data.next_epoch_start_cp)) + } + PgPartitionStrategy::TxSequenceNumber => { + Some((data.last_epoch_start_tx, data.next_epoch_start_tx)) + } + PgPartitionStrategy::ObjectId => None, + } + } + + pub async fn advance_epoch( + &self, + table: String, + last_partition: u64, + data: &EpochPartitionData, + ) -> Result<(), IndexerError> { + let Some(partition_range) = self.determine_epoch_partition_range(&table, data) else { + return Ok(()); + }; + if data.next_epoch == 0 { + tracing::info!("Epoch 0 partition has been created in the initial setup."); + return Ok(()); + } + if last_partition == data.last_epoch { + transaction_with_retry(&self.pool, Duration::from_secs(10), |conn| { + async { + diesel_async::RunQueryDsl::execute( + diesel::sql_query("CALL advance_partition($1, $2, $3, $4, $5)") + .bind::(table.clone()) + .bind::(data.last_epoch as i64) + .bind::(data.next_epoch as i64) + .bind::(partition_range.0 as i64) + .bind::(partition_range.1 as i64), + conn, + ) + .await?; + + Ok(()) + } + .scope_boxed() + }) + .await?; + + info!( + "Advanced epoch partition for table {} from {} to {}, prev partition upper bound {}", + table, last_partition, data.next_epoch, partition_range.0 + ); + } else if last_partition != data.next_epoch { + // skip when the partition is already advanced once, which is possible when indexer + // crashes and restarts; error otherwise. 
+ error!( + "Epoch partition for table {} is not in sync with the last epoch {}.", + table, data.last_epoch + ); + } else { + info!( + "Epoch has been advanced to {} already, skipping.", + data.next_epoch + ); + } + Ok(()) + } + + pub async fn drop_table_partition( + &self, + table: String, + partition: u64, + ) -> Result<(), IndexerError> { + transaction_with_retry(&self.pool, Duration::from_secs(10), |conn| { + async { + diesel_async::RunQueryDsl::execute( + diesel::sql_query("CALL drop_partition($1, $2)") + .bind::(table.clone()) + .bind::(partition as i64), + conn, + ) + .await?; + Ok(()) + } + .scope_boxed() + }) + .await?; + Ok(()) + } +} diff --git a/crates/sui-mvr-indexer/src/store/query.rs b/crates/sui-mvr-indexer/src/store/query.rs new file mode 100644 index 0000000000000..93d57b298044d --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/query.rs @@ -0,0 +1,329 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use sui_json_rpc_types::SuiObjectDataFilter; +use sui_types::base_types::ObjectID; + +pub trait DBFilter { + fn to_objects_history_sql(&self, cursor: Option, limit: usize, columns: Vec<&str>) + -> String; + fn to_latest_objects_sql(&self, cursor: Option, limit: usize, columns: Vec<&str>) -> String; +} + +impl DBFilter for SuiObjectDataFilter { + fn to_objects_history_sql( + &self, + cursor: Option, + limit: usize, + columns: Vec<&str>, + ) -> String { + let inner_clauses = to_clauses(self); + let inner_clauses = if let Some(inner_clauses) = inner_clauses { + format!("\n AND {inner_clauses}") + } else { + "".to_string() + }; + let outer_clauses = to_outer_clauses(self); + let outer_clauses = if let Some(outer_clauses) = outer_clauses { + format!("\nAND {outer_clauses}") + } else { + "".to_string() + }; + let cursor = if let Some(cursor) = cursor { + format!("\n AND o.object_id > '{cursor}'") + } else { + "".to_string() + }; + + let columns = columns + .iter() + .map(|c| format!("t1.{c}")) + .collect::>() + .join(", 
"); + // NOTE: order by checkpoint DESC so that whenever a row from checkpoint is available, + // we will pick that over the one from fast-path, which has checkpoint of -1. + format!( + "SELECT {columns} +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1{cursor}{inner_clauses} + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted'){outer_clauses} +LIMIT {limit};" + ) + } + + fn to_latest_objects_sql( + &self, + cursor: Option, + limit: usize, + columns: Vec<&str>, + ) -> String { + let columns = columns + .iter() + .map(|c| format!("o.{c}")) + .collect::>() + .join(", "); + + let cursor = if let Some(cursor) = cursor { + format!(" AND o.object_id > '{cursor}'") + } else { + "".to_string() + }; + + let inner_clauses = to_latest_objects_clauses(self); + let inner_clauses = if let Some(inner_clauses) = inner_clauses { + format!(" AND {inner_clauses}") + } else { + "".to_string() + }; + + format!( + "SELECT {columns} +FROM objects o WHERE o.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted'){cursor}{inner_clauses} +LIMIT {limit};" + ) + } +} + +fn to_latest_objects_clauses(filter: &SuiObjectDataFilter) -> Option { + match filter { + SuiObjectDataFilter::AddressOwner(a) => Some(format!( + "(o.owner_type = 'address_owner' AND o.owner_address = '{a}')" + )), + _ => None, + } +} + +fn to_clauses(filter: &SuiObjectDataFilter) -> Option { + match filter { + SuiObjectDataFilter::MatchAll(sub_filters) => { + let sub_filters = sub_filters.iter().flat_map(to_clauses).collect::>(); + if sub_filters.is_empty() { + None + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" AND "))) + } + } + SuiObjectDataFilter::MatchAny(sub_filters) => { + let sub_filters = sub_filters.iter().flat_map(to_clauses).collect::>(); + if sub_filters.is_empty() { + // Any default to false + 
Some("FALSE".to_string()) + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::MatchNone(sub_filters) => { + let sub_filters = sub_filters.iter().flat_map(to_clauses).collect::>(); + if sub_filters.is_empty() { + None + } else { + Some(format!("NOT ({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::Package(p) => Some(format!("o.object_type LIKE '{}::%'", p.to_hex_literal())), + SuiObjectDataFilter::MoveModule { package, module } => Some(format!( + "o.object_type LIKE '{}::{}::%'", + package.to_hex_literal(), + module + )), + SuiObjectDataFilter::StructType(s) => { + // If people do not provide type_params, we will match all type_params + // e.g. `0x2::coin::Coin` can match `0x2::coin::Coin<0x2::sui::SUI>` + if s.type_params.is_empty() { + Some(format!("o.object_type LIKE '{s}%'")) + } else { + Some(format!("o.object_type = '{s}'")) + } + }, + SuiObjectDataFilter::AddressOwner(a) => { + Some(format!("((o.owner_type = 'address_owner' AND o.owner_address = '{a}') OR (o.old_owner_type = 'address_owner' AND o.old_owner_address = '{a}'))")) + } + SuiObjectDataFilter::ObjectOwner(o) => { + Some(format!("((o.owner_type = 'object_owner' AND o.owner_address = '{o}') OR (o.old_owner_type = 'object_owner' AND o.old_owner_address = '{o}'))")) + } + SuiObjectDataFilter::ObjectId(id) => { + Some(format!("o.object_id = '{id}'")) + } + SuiObjectDataFilter::ObjectIds(ids) => { + if ids.is_empty() { + None + } else { + let ids = ids + .iter() + .map(|o| o.to_string()) + .collect::>() + .join(", "); + Some(format!("o.object_id IN '{ids}'")) + } + } + SuiObjectDataFilter::Version(v) => Some(format!("o.version = {v}")), + } +} + +fn to_outer_clauses(filter: &SuiObjectDataFilter) -> Option { + match filter { + SuiObjectDataFilter::MatchNone(sub_filters) => { + let sub_filters = sub_filters + .iter() + .flat_map(to_outer_clauses) + .collect::>(); + if 
sub_filters.is_empty() { + None + } else { + Some(format!("NOT ({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::MatchAll(sub_filters) => { + let sub_filters = sub_filters + .iter() + .flat_map(to_outer_clauses) + .collect::>(); + if sub_filters.is_empty() { + None + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" AND "))) + } + } + SuiObjectDataFilter::MatchAny(sub_filters) => { + let sub_filters = sub_filters + .iter() + .flat_map(to_outer_clauses) + .collect::>(); + if sub_filters.is_empty() { + None + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::AddressOwner(a) => Some(format!("t1.owner_address = '{a}'")), + _ => None, + } +} + +#[cfg(test)] +mod test { + use std::str::FromStr; + + use move_core_types::ident_str; + + use sui_json_rpc_types::SuiObjectDataFilter; + use sui_types::base_types::{ObjectID, SuiAddress}; + use sui_types::parse_sui_struct_tag; + + use crate::store::query::DBFilter; + + #[test] + fn test_address_filter() { + let address = SuiAddress::from_str( + "0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381", + ) + .unwrap(); + let filter = SuiObjectDataFilter::AddressOwner(address); + + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + AND ((o.owner_type = 'address_owner' AND o.owner_address = '0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381') OR (o.old_owner_type = 'address_owner' AND o.old_owner_address = '0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381')) + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +AND t1.owner_address = '0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381' +LIMIT 100;"; + assert_eq!( + 
expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } + + #[test] + fn test_move_module_filter() { + let filter = SuiObjectDataFilter::MoveModule { + package: ObjectID::from_str( + "0x485d947e293f07e659127dc5196146b49cdf2efbe4b233f4d293fc56aff2aa17", + ) + .unwrap(), + module: ident_str!("test_module").into(), + }; + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + AND o.object_type LIKE '0x485d947e293f07e659127dc5196146b49cdf2efbe4b233f4d293fc56aff2aa17::test_module::%' + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } + + #[test] + fn test_empty_all_filter() { + let filter = SuiObjectDataFilter::MatchAll(vec![]); + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } + + #[test] + fn test_empty_any_filter() { + let filter = SuiObjectDataFilter::MatchAny(vec![]); + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + AND FALSE + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } + + #[test] + fn test_all_filter() { + let filter = SuiObjectDataFilter::MatchAll(vec![ + SuiObjectDataFilter::ObjectId( + ObjectID::from_str( + "0xef9fb75a7b3d4cb5551ef0b08c83528b94d5f5cd8be28b1d08a87dbbf3731738", + ) + 
// --- tail of `test_all_filter` (the head of this #[test] lies before this chunk) ---
                .unwrap(),
            ),
            SuiObjectDataFilter::StructType(parse_sui_struct_tag("0x2::test::Test").unwrap()),
        ]);

        let expected_sql = "SELECT t1.*
FROM (SELECT DISTINCT ON (o.object_id) *
      FROM objects_history o
      WHERE o.checkpoint <= $1
      AND (o.object_id = '0xef9fb75a7b3d4cb5551ef0b08c83528b94d5f5cd8be28b1d08a87dbbf3731738' AND o.object_type LIKE '0x2::test::Test%')
      ORDER BY o.object_id, version, o.checkpoint DESC) AS t1
WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted')
LIMIT 100;";
        assert_eq!(
            expected_sql,
            filter.to_objects_history_sql(None, 100, vec!["*"])
        );
    }
}

// ==== file: crates/sui-mvr-indexer/src/system_package_task.rs ====
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

use crate::indexer_reader::IndexerReader;
use std::time::Duration;
use sui_types::SYSTEM_PACKAGE_ADDRESSES;
use tokio_util::sync::CancellationToken;

/// Background task responsible for evicting system packages from the package resolver's cache
/// after detecting an epoch boundary.
pub(crate) struct SystemPackageTask {
    /// Holds the DB connection and also the package resolver to evict packages from.
    reader: IndexerReader,
    /// Signal to cancel the task.
    cancel: CancellationToken,
    /// Interval to sleep for between checks.
    interval: Duration,
}

impl SystemPackageTask {
    pub(crate) fn new(
        reader: IndexerReader,
        cancel: CancellationToken,
        interval: Duration,
    ) -> Self {
        Self {
            reader,
            cancel,
            interval,
        }
    }

    /// Poll the DB every `interval` until cancelled; whenever a newer epoch is
    /// observed, evict the system packages from the resolver cache so the fresh
    /// post-upgrade versions are re-fetched on demand.
    pub(crate) async fn run(&self) {
        let mut last_epoch: i64 = 0;

        loop {
            tokio::select! {
                _ = self.cancel.cancelled() => {
                    tracing::info!(
                        "Shutdown signal received, terminating system package eviction task"
                    );
                    return;
                }
                _ = tokio::time::sleep(self.interval) => {
                    let current_epoch = match self.reader.get_latest_epoch_info_from_db().await {
                        Ok(epoch) => epoch.epoch,
                        Err(e) => {
                            tracing::error!("Failed to fetch latest epoch: {:?}", e);
                            continue;
                        }
                    };

                    // Only evict when an epoch boundary has actually been crossed.
                    if current_epoch > last_epoch {
                        last_epoch = current_epoch;
                        tracing::info!(
                            "Detected epoch boundary, evicting system packages from cache"
                        );
                        self.reader
                            .package_resolver()
                            .package_store()
                            .evict(SYSTEM_PACKAGE_ADDRESSES.iter().copied());
                    }
                }
            }
        }
    }
}

// ==== file: crates/sui-mvr-indexer/src/tempdb.rs ====
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use std::fs::OpenOptions;
use std::{
    path::{Path, PathBuf},
    process::{Child, Command},
    time::{Duration, Instant},
};
use tracing::trace;
use url::Url;

/// A temporary, local postgres database
pub struct TempDb {
    database: LocalDatabase,

    // Directory used for the ephemeral database.
    //
    // On drop the directory will be cleaned and its contents deleted.
    //
    // NOTE: This needs to be the last entry in this struct so that the database is dropped before
    // and has a chance to gracefully shutdown before the directory is deleted.
    dir: tempfile::TempDir,
}

impl TempDb {
    /// Create and start a new temporary postgres database.
    ///
    /// A fresh database will be initialized in a temporary directory that will be cleaned up on
    /// drop. The running `postgres` service will be serving traffic on an available,
    /// os-assigned port.
// --- impl TempDb (continued) ---
    pub fn new() -> Result<Self> {
        let dir = tempfile::TempDir::new()?;
        let port = get_available_port();

        let database = LocalDatabase::new_initdb(dir.path().to_owned(), port)?;

        Ok(Self { dir, database })
    }

    pub fn database(&self) -> &LocalDatabase {
        &self.database
    }

    pub fn database_mut(&mut self) -> &mut LocalDatabase {
        &mut self.database
    }

    pub fn dir(&self) -> &Path {
        self.dir.path()
    }
}

#[derive(Debug)]
struct PostgresProcess {
    dir: PathBuf,
    inner: Child,
}

impl PostgresProcess {
    /// Spawn a `postgres` server over the data directory `dir`, listening on `port`.
    /// stdout/stderr are appended to files inside the data directory for later inspection.
    fn start(dir: PathBuf, port: u16) -> Result<Self> {
        let child = Command::new("postgres")
            // Set the data directory to use
            .arg("-D")
            .arg(&dir)
            // Set the port to listen for incoming connections
            .args(["-p", &port.to_string()])
            // Disable creating and listening on a UDS
            .args(["-c", "unix_socket_directories="])
            // pipe stdout and stderr to files located in the data directory
            .stdout(
                OpenOptions::new()
                    .create(true)
                    .append(true)
                    .open(dir.join("stdout"))?,
            )
            .stderr(
                OpenOptions::new()
                    .create(true)
                    .append(true)
                    .open(dir.join("stderr"))?,
            )
            .spawn()
            .context("command not found: postgres")?;

        Ok(Self { dir, inner: child })
    }

    // https://www.postgresql.org/docs/16/app-pg-ctl.html
    fn pg_ctl_stop(&mut self) -> Result<()> {
        let output = Command::new("pg_ctl")
            .arg("stop")
            .arg("-D")
            .arg(&self.dir)
            .arg("-mfast")
            .output()
            .context("command not found: pg_ctl")?;

        if output.status.success() {
            Ok(())
        } else {
            Err(anyhow!("couldn't shut down postgres"))
        }
    }

    fn dump_stdout_stderr(&self) -> Result<(String, String)> {
        let stdout = std::fs::read_to_string(self.dir.join("stdout"))?;
        let stderr = std::fs::read_to_string(self.dir.join("stderr"))?;

        Ok((stdout, stderr))
    }
}

impl Drop for PostgresProcess {
    // When the Process struct goes out of scope we need to kill the child process
    fn drop(&mut self) {
        // FIX: this fired at `error!` level on every routine drop — leftover debug
        // logging; trace is the appropriate severity for expected teardown.
        tracing::trace!("dropping postgres");
        // check if the process has already been terminated
        match self.inner.try_wait() {
            // The child process has already terminated, perhaps due to a crash
            Ok(Some(_)) => {}

            // The process is still running so we need to attempt to kill it
            _ => {
                if self.pg_ctl_stop().is_err() {
                    // Couldn't gracefully stop server so we'll just kill it.
                    // FIX: never `expect`/`unwrap` inside Drop — a panic while already
                    // unwinding aborts the whole process. Log failures instead.
                    if let Err(e) = self.inner.kill() {
                        tracing::error!("postgres couldn't be killed: {e}");
                    }
                }
                if let Err(e) = self.inner.wait() {
                    tracing::error!("failed waiting for postgres to exit: {e}");
                }
            }
        }

        // Dump the contents of stdout/stderr if TRACE is enabled
        if tracing::event_enabled!(tracing::Level::TRACE) {
            if let Ok((stdout, stderr)) = self.dump_stdout_stderr() {
                trace!("stdout: {stdout}");
                trace!("stderr: {stderr}");
            }
        }
    }
}

/// Local instance of a `postgres` server.
///
/// See <https://www.postgresql.org/docs/16/app-postgres.html> for more info.
pub struct LocalDatabase {
    dir: PathBuf,
    port: u16,
    url: Url,
    process: Option<PostgresProcess>,
}

impl LocalDatabase {
    /// Start a local `postgres` database service.
    ///
    /// `dir`: The location of the on-disk postgres database. The database must already exist at
    /// the provided path. If you instead want to create a new database see `Self::new_initdb`.
    ///
    /// `port`: The port to listen for incoming connection on.
    pub fn new(dir: PathBuf, port: u16) -> Result<Self> {
        let url = format!(
            "postgres://postgres:postgrespw@localhost:{port}/{db_name}",
            db_name = "postgres"
        )
        .parse()
        .unwrap();
        let mut db = Self {
            dir,
            port,
            url,
            process: None,
        };
        db.start()?;
        Ok(db)
    }

    /// Initialize and start a local `postgres` database service.
    ///
    /// Unlike `Self::new`, this will initialize a clean database at the provided path.
+ pub fn new_initdb(dir: PathBuf, port: u16) -> Result { + initdb(&dir)?; + Self::new(dir, port) + } + + /// Return the url used to connect to the database + pub fn url(&self) -> &Url { + &self.url + } + + fn start(&mut self) -> Result<()> { + if self.process.is_none() { + self.process = Some(PostgresProcess::start(self.dir.clone(), self.port)?); + self.wait_till_ready() + .map_err(|e| anyhow!("unable to start postgres: {e:?}"))?; + } + + Ok(()) + } + + fn health_check(&mut self) -> Result<(), HealthCheckError> { + if let Some(p) = &mut self.process { + match p.inner.try_wait() { + // This would mean the child process has crashed + Ok(Some(_)) => Err(HealthCheckError::NotRunning), + + // This is the case where the process is still running + Ok(None) => pg_isready(self.port), + + // Some other unknown error + Err(e) => Err(HealthCheckError::Unknown(e.to_string())), + } + } else { + Err(HealthCheckError::NotRunning) + } + } + + fn wait_till_ready(&mut self) -> Result<(), HealthCheckError> { + let start = Instant::now(); + + while start.elapsed() < Duration::from_secs(10) { + match self.health_check() { + Ok(()) => return Ok(()), + Err(HealthCheckError::NotReady) => {} + Err(HealthCheckError::NotRunning | HealthCheckError::Unknown(_)) => break, + } + + std::thread::sleep(Duration::from_millis(50)); + } + + Err(HealthCheckError::Unknown( + "timeout reached when waiting for service to be ready".to_owned(), + )) + } +} + +#[derive(Debug)] +enum HealthCheckError { + NotRunning, + NotReady, + #[allow(unused)] + Unknown(String), +} + +/// Run the postgres `pg_isready` command to get the status of database +/// +/// See for more info +fn pg_isready(port: u16) -> Result<(), HealthCheckError> { + let output = Command::new("pg_isready") + .arg("--host=localhost") + .arg("-p") + .arg(port.to_string()) + .arg("--username=postgres") + .output() + .map_err(|e| HealthCheckError::Unknown(format!("command not found: pg_ctl: {e}")))?; + + trace!("pg_isready code: {:?}", 
output.status.code()); + trace!("pg_isready output: {}", output.stderr.escape_ascii()); + trace!("pg_isready output: {}", output.stdout.escape_ascii()); + if output.status.success() { + Ok(()) + } else { + Err(HealthCheckError::NotReady) + } +} + +/// Run the postgres `initdb` command to initialize a database at the provided path +/// +/// See for more info +fn initdb(dir: &Path) -> Result<()> { + let output = Command::new("initdb") + .arg("-D") + .arg(dir) + .arg("--no-instructions") + .arg("--username=postgres") + .output() + .context("command not found: initdb")?; + + if output.status.success() { + Ok(()) + } else { + Err(anyhow!( + "unable to initialize database: {:?}", + String::from_utf8(output.stderr) + )) + } +} + +/// Return an ephemeral, available port. On unix systems, the port returned will be in the +/// TIME_WAIT state ensuring that the OS won't hand out this port for some grace period. +/// Callers should be able to bind to this port given they use SO_REUSEADDR. +pub fn get_available_port() -> u16 { + const MAX_PORT_RETRIES: u32 = 1000; + + for _ in 0..MAX_PORT_RETRIES { + if let Ok(port) = get_ephemeral_port() { + return port; + } + } + + panic!("Error: could not find an available port"); +} + +fn get_ephemeral_port() -> std::io::Result { + // Request a random available port from the OS + let listener = std::net::TcpListener::bind(("127.0.0.1", 0))?; + let addr = listener.local_addr()?; + + // Create and accept a connection (which we'll promptly drop) in order to force the port + // into the TIME_WAIT state, ensuring that the port will be reserved from some limited + // amount of time (roughly 60s on some Linux systems) + let _sender = std::net::TcpStream::connect(addr)?; + let _incoming = listener.accept()?; + + Ok(addr.port()) +} + +#[cfg(test)] +mod test { + #[tokio::test] + async fn smoketest() { + use crate::database::Connection; + use crate::tempdb::TempDb; + use diesel_async::RunQueryDsl; + + telemetry_subscribers::init_for_testing(); + + let 
db = TempDb::new().unwrap(); + println!("dir: {:?}", db.dir.path()); + + let url = db.database.url(); + println!("url: {}", url.as_str()); + let mut connection = Connection::dedicated(url).await.unwrap(); + + // Run a simple query to verify the db can properly be queried + let resp = diesel::sql_query("SELECT datname FROM pg_database") + .execute(&mut connection) + .await + .unwrap(); + println!("resp: {:?}", resp); + } +} diff --git a/crates/sui-mvr-indexer/src/test_utils.rs b/crates/sui-mvr-indexer/src/test_utils.rs new file mode 100644 index 0000000000000..431d0dc5854bc --- /dev/null +++ b/crates/sui-mvr-indexer/src/test_utils.rs @@ -0,0 +1,341 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use mysten_metrics::init_metrics; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + +use simulacrum::Simulacrum; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; +use sui_json_rpc_types::SuiTransactionBlockResponse; + +use crate::config::{IngestionConfig, RetentionConfig, SnapshotLagConfig, UploadOptions}; +use crate::database::Connection; +use crate::database::ConnectionPool; +use crate::db::ConnectionPoolConfig; +use crate::errors::IndexerError; +use crate::indexer::Indexer; +use crate::store::PgIndexerStore; +use crate::tempdb::get_available_port; +use crate::tempdb::TempDb; +use crate::IndexerMetrics; + +/// Wrapper over `Indexer::start_reader` to make it easier to configure an indexer jsonrpc reader +/// for testing. 
+pub async fn start_indexer_jsonrpc_for_testing( + db_url: String, + fullnode_url: String, + json_rpc_url: String, + cancel: Option, +) -> (JoinHandle>, CancellationToken) { + let token = cancel.unwrap_or_default(); + + // Reduce the connection pool size to 10 for testing + // to prevent maxing out + let pool_config = ConnectionPoolConfig { + pool_size: 5, + connection_timeout: Duration::from_secs(10), + statement_timeout: Duration::from_secs(30), + }; + + println!("db_url: {db_url}"); + println!("pool_config: {pool_config:?}"); + + let registry = prometheus::Registry::default(); + init_metrics(®istry); + + let pool = ConnectionPool::new(db_url.parse().unwrap(), pool_config) + .await + .unwrap(); + + let handle = { + let config = crate::config::JsonRpcConfig { + name_service_options: crate::config::NameServiceOptions::default(), + rpc_address: json_rpc_url.parse().unwrap(), + rpc_client_url: fullnode_url, + }; + let token_clone = token.clone(); + tokio::spawn( + async move { Indexer::start_reader(&config, ®istry, pool, token_clone).await }, + ) + }; + + (handle, token) +} + +/// Wrapper over `Indexer::start_writer_with_config` to make it easier to configure an indexer +/// writer for testing. If the config options are null, default values that have historically worked +/// for testing will be used. 
+pub async fn start_indexer_writer_for_testing( + db_url: String, + snapshot_config: Option, + retention_config: Option, + data_ingestion_path: Option, + cancel: Option, + start_checkpoint: Option, + end_checkpoint: Option, +) -> ( + PgIndexerStore, + JoinHandle>, + CancellationToken, +) { + let token = cancel.unwrap_or_default(); + let snapshot_config = snapshot_config.unwrap_or(SnapshotLagConfig { + snapshot_min_lag: 5, + sleep_duration: 0, + }); + + // Reduce the connection pool size to 10 for testing to prevent maxing out + let pool_config = ConnectionPoolConfig { + pool_size: 5, + connection_timeout: Duration::from_secs(10), + statement_timeout: Duration::from_secs(30), + }; + + println!("db_url: {db_url}"); + println!("pool_config: {pool_config:?}"); + println!("{data_ingestion_path:?}"); + + let registry = prometheus::Registry::default(); + init_metrics(®istry); + let indexer_metrics = IndexerMetrics::new(®istry); + + let pool = ConnectionPool::new(db_url.parse().unwrap(), pool_config) + .await + .unwrap(); + let store = PgIndexerStore::new( + pool.clone(), + UploadOptions::default(), + indexer_metrics.clone(), + ); + + let handle = { + let connection = Connection::dedicated(&db_url.parse().unwrap()) + .await + .unwrap(); + crate::db::reset_database(connection).await.unwrap(); + + let store_clone = store.clone(); + let mut ingestion_config = IngestionConfig { + start_checkpoint, + end_checkpoint, + ..Default::default() + }; + ingestion_config.sources.data_ingestion_path = data_ingestion_path; + let token_clone = token.clone(); + + tokio::spawn(async move { + Indexer::start_writer( + ingestion_config, + store_clone, + indexer_metrics, + snapshot_config, + retention_config, + token_clone, + None, + ) + .await + }) + }; + + (store, handle, token) +} + +#[derive(Clone)] +pub struct SuiTransactionBlockResponseBuilder<'a> { + response: SuiTransactionBlockResponse, + full_response: &'a SuiTransactionBlockResponse, +} + +impl<'a> 
SuiTransactionBlockResponseBuilder<'a> { + pub fn new(full_response: &'a SuiTransactionBlockResponse) -> Self { + Self { + response: SuiTransactionBlockResponse::default(), + full_response, + } + } + + pub fn with_input(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + transaction: self.full_response.transaction.clone(), + ..self.response + }; + self + } + + pub fn with_raw_input(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + raw_transaction: self.full_response.raw_transaction.clone(), + ..self.response + }; + self + } + + pub fn with_effects(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + effects: self.full_response.effects.clone(), + ..self.response + }; + self + } + + pub fn with_events(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + events: self.full_response.events.clone(), + ..self.response + }; + self + } + + pub fn with_balance_changes(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + balance_changes: self.full_response.balance_changes.clone(), + ..self.response + }; + self + } + + pub fn with_object_changes(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + object_changes: self.full_response.object_changes.clone(), + ..self.response + }; + self + } + + pub fn with_input_and_changes(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + transaction: self.full_response.transaction.clone(), + balance_changes: self.full_response.balance_changes.clone(), + object_changes: self.full_response.object_changes.clone(), + ..self.response + }; + self + } + + pub fn build(self) -> SuiTransactionBlockResponse { + SuiTransactionBlockResponse { + transaction: self.response.transaction, + raw_transaction: self.response.raw_transaction, + effects: self.response.effects, + events: self.response.events, + balance_changes: self.response.balance_changes, + object_changes: self.response.object_changes, + // Use full response for any fields 
that aren't showable + ..self.full_response.clone() + } + } +} + +/// Set up a test indexer fetching from a REST endpoint served by the given Simulacrum. +pub async fn set_up( + sim: Arc, + data_ingestion_path: PathBuf, +) -> ( + JoinHandle<()>, + PgIndexerStore, + JoinHandle>, + TempDb, +) { + let database = TempDb::new().unwrap(); + let server_url: SocketAddr = format!("127.0.0.1:{}", get_available_port()) + .parse() + .unwrap(); + + let server_handle = tokio::spawn(async move { + sui_rest_api::RestService::new_without_version(sim) + .start_service(server_url) + .await; + }); + // Starts indexer + let (pg_store, pg_handle, _) = start_indexer_writer_for_testing( + database.database().url().as_str().to_owned(), + None, + None, + Some(data_ingestion_path), + None, /* cancel */ + None, /* start_checkpoint */ + None, /* end_checkpoint */ + ) + .await; + (server_handle, pg_store, pg_handle, database) +} + +pub async fn set_up_with_start_and_end_checkpoints( + sim: Arc, + data_ingestion_path: PathBuf, + start_checkpoint: u64, + end_checkpoint: u64, +) -> ( + JoinHandle<()>, + PgIndexerStore, + JoinHandle>, + TempDb, +) { + let database = TempDb::new().unwrap(); + let server_url: SocketAddr = format!("127.0.0.1:{}", get_available_port()) + .parse() + .unwrap(); + let server_handle = tokio::spawn(async move { + sui_rest_api::RestService::new_without_version(sim) + .start_service(server_url) + .await; + }); + // Starts indexer + let (pg_store, pg_handle, _) = start_indexer_writer_for_testing( + database.database().url().as_str().to_owned(), + None, + None, + Some(data_ingestion_path), + None, /* cancel */ + Some(start_checkpoint), + Some(end_checkpoint), + ) + .await; + (server_handle, pg_store, pg_handle, database) +} + +/// Wait for the indexer to catch up to the given checkpoint sequence number. 
+pub async fn wait_for_checkpoint( + pg_store: &PgIndexerStore, + checkpoint_sequence_number: u64, +) -> Result<(), IndexerError> { + tokio::time::timeout(Duration::from_secs(30), async { + while { + let cp_opt = pg_store + .get_latest_checkpoint_sequence_number() + .await + .unwrap(); + cp_opt.is_none() || (cp_opt.unwrap() < checkpoint_sequence_number) + } { + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Timeout waiting for indexer to catchup to checkpoint"); + Ok(()) +} + +/// Wait for the indexer to catch up to the given checkpoint sequence number for objects snapshot. +pub async fn wait_for_objects_snapshot( + pg_store: &PgIndexerStore, + checkpoint_sequence_number: u64, +) -> Result<(), IndexerError> { + tokio::time::timeout(Duration::from_secs(30), async { + while { + let cp_opt = pg_store + .get_latest_object_snapshot_checkpoint_sequence_number() + .await + .unwrap(); + cp_opt.is_none() || (cp_opt.unwrap() < checkpoint_sequence_number) + } { + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Timeout waiting for indexer to catchup to checkpoint for objects snapshot"); + Ok(()) +} diff --git a/crates/sui-mvr-indexer/src/types.rs b/crates/sui-mvr-indexer/src/types.rs new file mode 100644 index 0000000000000..6c88e3d27641a --- /dev/null +++ b/crates/sui-mvr-indexer/src/types.rs @@ -0,0 +1,671 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use move_core_types::language_storage::StructTag; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use sui_json_rpc_types::{ + ObjectChange, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, +}; +use sui_types::base_types::{ObjectDigest, SequenceNumber}; +use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::crypto::AggregateAuthoritySignature; +use sui_types::digests::TransactionDigest; +use sui_types::dynamic_field::DynamicFieldType; +use sui_types::effects::TransactionEffects; +use sui_types::messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointCommitment, CheckpointContents, CheckpointDigest, + CheckpointSequenceNumber, EndOfEpochData, +}; +use sui_types::move_package::MovePackage; +use sui_types::object::{Object, Owner}; +use sui_types::sui_serde::SuiStructTag; +use sui_types::transaction::SenderSignedData; + +use crate::errors::IndexerError; + +pub type IndexerResult = Result; + +#[derive(Debug, Default)] +pub struct IndexedCheckpoint { + // TODO: A lot of fields are now redundant with certified_checkpoint and checkpoint_contents. + pub sequence_number: u64, + pub checkpoint_digest: CheckpointDigest, + pub epoch: u64, + pub tx_digests: Vec, + pub network_total_transactions: u64, + pub previous_checkpoint_digest: Option, + pub timestamp_ms: u64, + pub total_gas_cost: i64, // total gas cost could be negative + pub computation_cost: u64, + pub storage_cost: u64, + pub storage_rebate: u64, + pub non_refundable_storage_fee: u64, + pub checkpoint_commitments: Vec, + pub validator_signature: AggregateAuthoritySignature, + pub successful_tx_num: usize, + pub end_of_epoch_data: Option, + pub end_of_epoch: bool, + pub min_tx_sequence_number: u64, + pub max_tx_sequence_number: u64, + // FIXME: Remove the Default derive and make these fields mandatory. 
+ pub certified_checkpoint: Option, + pub checkpoint_contents: Option, +} + +impl IndexedCheckpoint { + pub fn from_sui_checkpoint( + checkpoint: &CertifiedCheckpointSummary, + contents: &CheckpointContents, + successful_tx_num: usize, + ) -> Self { + let total_gas_cost = checkpoint.epoch_rolling_gas_cost_summary.computation_cost as i64 + + checkpoint.epoch_rolling_gas_cost_summary.storage_cost as i64 + - checkpoint.epoch_rolling_gas_cost_summary.storage_rebate as i64; + let tx_digests = contents.iter().map(|t| t.transaction).collect::>(); + let max_tx_sequence_number = checkpoint.network_total_transactions - 1; + // NOTE: + 1u64 first to avoid subtraction with overflow + let min_tx_sequence_number = max_tx_sequence_number + 1u64 - tx_digests.len() as u64; + let auth_sig = &checkpoint.auth_sig().signature; + Self { + sequence_number: checkpoint.sequence_number, + checkpoint_digest: *checkpoint.digest(), + epoch: checkpoint.epoch, + tx_digests, + previous_checkpoint_digest: checkpoint.previous_digest, + end_of_epoch_data: checkpoint.end_of_epoch_data.clone(), + end_of_epoch: checkpoint.end_of_epoch_data.clone().is_some(), + total_gas_cost, + computation_cost: checkpoint.epoch_rolling_gas_cost_summary.computation_cost, + storage_cost: checkpoint.epoch_rolling_gas_cost_summary.storage_cost, + storage_rebate: checkpoint.epoch_rolling_gas_cost_summary.storage_rebate, + non_refundable_storage_fee: checkpoint + .epoch_rolling_gas_cost_summary + .non_refundable_storage_fee, + successful_tx_num, + network_total_transactions: checkpoint.network_total_transactions, + timestamp_ms: checkpoint.timestamp_ms, + validator_signature: auth_sig.clone(), + checkpoint_commitments: checkpoint.checkpoint_commitments.clone(), + min_tx_sequence_number, + max_tx_sequence_number, + certified_checkpoint: Some(checkpoint.clone()), + checkpoint_contents: Some(contents.clone()), + } + } +} + +#[derive(Debug, Clone)] +pub struct IndexedEvent { + pub tx_sequence_number: u64, + pub 
event_sequence_number: u64, + pub checkpoint_sequence_number: u64, + pub transaction_digest: TransactionDigest, + pub sender: SuiAddress, + pub package: ObjectID, + pub module: String, + pub event_type: String, + pub event_type_package: ObjectID, + pub event_type_module: String, + /// Struct name of the event, without type parameters. + pub event_type_name: String, + pub bcs: Vec, + pub timestamp_ms: u64, +} + +impl IndexedEvent { + pub fn from_event( + tx_sequence_number: u64, + event_sequence_number: u64, + checkpoint_sequence_number: u64, + transaction_digest: TransactionDigest, + event: &sui_types::event::Event, + timestamp_ms: u64, + ) -> Self { + Self { + tx_sequence_number, + event_sequence_number, + checkpoint_sequence_number, + transaction_digest, + sender: event.sender, + package: event.package_id, + module: event.transaction_module.to_string(), + event_type: event.type_.to_canonical_string(/* with_prefix */ true), + event_type_package: event.type_.address.into(), + event_type_module: event.type_.module.to_string(), + event_type_name: event.type_.name.to_string(), + bcs: event.contents.clone(), + timestamp_ms, + } + } +} + +#[derive(Debug, Clone)] +pub struct EventIndex { + pub tx_sequence_number: u64, + pub event_sequence_number: u64, + pub sender: SuiAddress, + pub emit_package: ObjectID, + pub emit_module: String, + pub type_package: ObjectID, + pub type_module: String, + /// Struct name of the event, without type parameters. + pub type_name: String, + /// Type instantiation of the event, with type name and type parameters, if any. 
+ pub type_instantiation: String, +} + +// for ingestion test +impl EventIndex { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + EventIndex { + tx_sequence_number: rng.gen(), + event_sequence_number: rng.gen(), + sender: SuiAddress::random_for_testing_only(), + emit_package: ObjectID::random(), + emit_module: rng.gen::().to_string(), + type_package: ObjectID::random(), + type_module: rng.gen::().to_string(), + type_name: rng.gen::().to_string(), + type_instantiation: rng.gen::().to_string(), + } + } +} + +impl EventIndex { + pub fn from_event( + tx_sequence_number: u64, + event_sequence_number: u64, + event: &sui_types::event::Event, + ) -> Self { + let type_instantiation = event + .type_ + .to_canonical_string(/* with_prefix */ true) + .splitn(3, "::") + .collect::>()[2] + .to_string(); + Self { + tx_sequence_number, + event_sequence_number, + sender: event.sender, + emit_package: event.package_id, + emit_module: event.transaction_module.to_string(), + type_package: event.type_.address.into(), + type_module: event.type_.module.to_string(), + type_name: event.type_.name.to_string(), + type_instantiation, + } + } +} + +#[derive(Debug, Copy, Clone)] +pub enum OwnerType { + Immutable = 0, + Address = 1, + Object = 2, + Shared = 3, +} + +pub enum ObjectStatus { + Active = 0, + WrappedOrDeleted = 1, +} + +impl TryFrom for ObjectStatus { + type Error = IndexerError; + + fn try_from(value: i16) -> Result { + Ok(match value { + 0 => ObjectStatus::Active, + 1 => ObjectStatus::WrappedOrDeleted, + value => { + return Err(IndexerError::PersistentStorageDataCorruptionError(format!( + "{value} as ObjectStatus" + ))) + } + }) + } +} + +impl TryFrom for OwnerType { + type Error = IndexerError; + + fn try_from(value: i16) -> Result { + Ok(match value { + 0 => OwnerType::Immutable, + 1 => OwnerType::Address, + 2 => OwnerType::Object, + 3 => OwnerType::Shared, + value => { + return Err(IndexerError::PersistentStorageDataCorruptionError(format!( + "{value} as 
OwnerType" + ))) + } + }) + } +} + +// Returns owner_type, owner_address +pub fn owner_to_owner_info(owner: &Owner) -> (OwnerType, Option) { + match owner { + Owner::AddressOwner(address) => (OwnerType::Address, Some(*address)), + Owner::ObjectOwner(address) => (OwnerType::Object, Some(*address)), + Owner::Shared { .. } => (OwnerType::Shared, None), + Owner::Immutable => (OwnerType::Immutable, None), + } +} + +#[derive(Debug, Copy, Clone)] +pub enum DynamicFieldKind { + DynamicField = 0, + DynamicObject = 1, +} + +#[derive(Clone, Debug)] +pub struct IndexedObject { + pub checkpoint_sequence_number: CheckpointSequenceNumber, + pub object: Object, + pub df_kind: Option, +} + +impl IndexedObject { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + let random_address = SuiAddress::random_for_testing_only(); + IndexedObject { + checkpoint_sequence_number: rng.gen(), + object: Object::with_owner_for_testing(random_address), + df_kind: { + let random_value = rng.gen_range(0..3); + match random_value { + 0 => Some(DynamicFieldType::DynamicField), + 1 => Some(DynamicFieldType::DynamicObject), + _ => None, + } + }, + } + } +} + +impl IndexedObject { + pub fn from_object( + checkpoint_sequence_number: CheckpointSequenceNumber, + object: Object, + df_kind: Option, + ) -> Self { + Self { + checkpoint_sequence_number, + object, + df_kind, + } + } +} + +#[derive(Clone, Debug)] +pub struct IndexedDeletedObject { + pub object_id: ObjectID, + pub object_version: u64, + pub checkpoint_sequence_number: u64, +} + +impl IndexedDeletedObject { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + IndexedDeletedObject { + object_id: ObjectID::random(), + object_version: rng.gen(), + checkpoint_sequence_number: rng.gen(), + } + } +} + +#[derive(Debug)] +pub struct IndexedPackage { + pub package_id: ObjectID, + pub move_package: MovePackage, + pub checkpoint_sequence_number: u64, +} + +#[derive(Debug, Clone)] +pub enum TransactionKind { + SystemTransaction = 0, + 
ProgrammableTransaction = 1, +} + +#[derive(Debug, Clone)] +pub struct IndexedTransaction { + pub tx_sequence_number: u64, + pub tx_digest: TransactionDigest, + pub sender_signed_data: SenderSignedData, + pub effects: TransactionEffects, + pub checkpoint_sequence_number: u64, + pub timestamp_ms: u64, + pub object_changes: Vec, + pub balance_change: Vec, + pub events: Vec, + pub transaction_kind: TransactionKind, + pub successful_tx_num: u64, +} + +#[derive(Debug, Clone)] +pub struct TxIndex { + pub tx_sequence_number: u64, + pub tx_kind: TransactionKind, + pub transaction_digest: TransactionDigest, + pub checkpoint_sequence_number: u64, + pub input_objects: Vec, + pub changed_objects: Vec, + pub affected_objects: Vec, + pub payers: Vec, + pub sender: SuiAddress, + pub recipients: Vec, + pub move_calls: Vec<(ObjectID, String, String)>, +} + +impl TxIndex { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + TxIndex { + tx_sequence_number: rng.gen(), + tx_kind: if rng.gen_bool(0.5) { + TransactionKind::SystemTransaction + } else { + TransactionKind::ProgrammableTransaction + }, + transaction_digest: TransactionDigest::random(), + checkpoint_sequence_number: rng.gen(), + input_objects: (0..1000).map(|_| ObjectID::random()).collect(), + changed_objects: (0..1000).map(|_| ObjectID::random()).collect(), + affected_objects: (0..1000).map(|_| ObjectID::random()).collect(), + payers: (0..rng.gen_range(0..100)) + .map(|_| SuiAddress::random_for_testing_only()) + .collect(), + sender: SuiAddress::random_for_testing_only(), + recipients: (0..rng.gen_range(0..1000)) + .map(|_| SuiAddress::random_for_testing_only()) + .collect(), + move_calls: (0..rng.gen_range(0..1000)) + .map(|_| { + ( + ObjectID::random(), + rng.gen::().to_string(), + rng.gen::().to_string(), + ) + }) + .collect(), + } + } +} + +// ObjectChange is not bcs deserializable, IndexedObjectChange is. 
+#[serde_as] +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub enum IndexedObjectChange { + Published { + package_id: ObjectID, + version: SequenceNumber, + digest: ObjectDigest, + modules: Vec, + }, + Transferred { + sender: SuiAddress, + recipient: Owner, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + digest: ObjectDigest, + }, + /// Object mutated. + Mutated { + sender: SuiAddress, + owner: Owner, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + previous_version: SequenceNumber, + digest: ObjectDigest, + }, + /// Delete object + Deleted { + sender: SuiAddress, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + }, + /// Wrapped object + Wrapped { + sender: SuiAddress, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + }, + /// New object creation + Created { + sender: SuiAddress, + owner: Owner, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + digest: ObjectDigest, + }, +} + +impl From for IndexedObjectChange { + fn from(oc: ObjectChange) -> Self { + match oc { + ObjectChange::Published { + package_id, + version, + digest, + modules, + } => Self::Published { + package_id, + version, + digest, + modules, + }, + ObjectChange::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + } => Self::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + }, + ObjectChange::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + } => Self::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + }, + ObjectChange::Deleted { + sender, + object_type, + object_id, + version, + } => Self::Deleted { 
+ sender, + object_type, + object_id, + version, + }, + ObjectChange::Wrapped { + sender, + object_type, + object_id, + version, + } => Self::Wrapped { + sender, + object_type, + object_id, + version, + }, + ObjectChange::Created { + sender, + owner, + object_type, + object_id, + version, + digest, + } => Self::Created { + sender, + owner, + object_type, + object_id, + version, + digest, + }, + } + } +} + +impl From for ObjectChange { + fn from(val: IndexedObjectChange) -> Self { + match val { + IndexedObjectChange::Published { + package_id, + version, + digest, + modules, + } => ObjectChange::Published { + package_id, + version, + digest, + modules, + }, + IndexedObjectChange::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + } => ObjectChange::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + }, + IndexedObjectChange::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + } => ObjectChange::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + }, + IndexedObjectChange::Deleted { + sender, + object_type, + object_id, + version, + } => ObjectChange::Deleted { + sender, + object_type, + object_id, + version, + }, + IndexedObjectChange::Wrapped { + sender, + object_type, + object_id, + version, + } => ObjectChange::Wrapped { + sender, + object_type, + object_id, + version, + }, + IndexedObjectChange::Created { + sender, + owner, + object_type, + object_id, + version, + digest, + } => ObjectChange::Created { + sender, + owner, + object_type, + object_id, + version, + digest, + }, + } + } +} + +// SuiTransactionBlockResponseWithOptions is only used on the reading path +pub struct SuiTransactionBlockResponseWithOptions { + pub response: SuiTransactionBlockResponse, + pub options: SuiTransactionBlockResponseOptions, +} + +impl From for SuiTransactionBlockResponse { + fn from(value: 
SuiTransactionBlockResponseWithOptions) -> Self { + let SuiTransactionBlockResponseWithOptions { response, options } = value; + + SuiTransactionBlockResponse { + digest: response.digest, + transaction: options.show_input.then_some(response.transaction).flatten(), + raw_transaction: options + .show_raw_input + .then_some(response.raw_transaction) + .unwrap_or_default(), + effects: options.show_effects.then_some(response.effects).flatten(), + events: options.show_events.then_some(response.events).flatten(), + object_changes: options + .show_object_changes + .then_some(response.object_changes) + .flatten(), + balance_changes: options + .show_balance_changes + .then_some(response.balance_changes) + .flatten(), + timestamp_ms: response.timestamp_ms, + confirmed_local_execution: response.confirmed_local_execution, + checkpoint: response.checkpoint, + errors: vec![], + raw_effects: options + .show_raw_effects + .then_some(response.raw_effects) + .unwrap_or_default(), + } + } +} diff --git a/crates/sui-mvr-indexer/tests/ingestion_tests.rs b/crates/sui-mvr-indexer/tests/ingestion_tests.rs new file mode 100644 index 0000000000000..351b243594e4b --- /dev/null +++ b/crates/sui-mvr-indexer/tests/ingestion_tests.rs @@ -0,0 +1,242 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + +use diesel::ExpressionMethods; +use diesel::QueryDsl; +use diesel_async::RunQueryDsl; +use simulacrum::Simulacrum; +use sui_mvr_indexer::errors::IndexerError; +use sui_mvr_indexer::handlers::TransactionObjectChangesToCommit; +use sui_mvr_indexer::models::{checkpoints::StoredCheckpoint, objects::StoredObjectSnapshot}; +use sui_mvr_indexer::schema::{checkpoints, objects_snapshot}; +use sui_mvr_indexer::store::indexer_store::IndexerStore; +use sui_mvr_indexer::test_utils::{ + set_up, set_up_with_start_and_end_checkpoints, wait_for_checkpoint, wait_for_objects_snapshot, +}; +use sui_mvr_indexer::types::EventIndex; +use sui_mvr_indexer::types::IndexedDeletedObject; +use sui_mvr_indexer::types::IndexedObject; +use sui_mvr_indexer::types::TxIndex; +use sui_types::base_types::SuiAddress; +use tempfile::tempdir; + +#[tokio::test] +pub async fn test_checkpoint_range_ingestion() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + // Create multiple checkpoints + for _ in 0..10 { + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction).unwrap(); + assert!(err.is_none()); + sim.create_checkpoint(); + } + + // Set up indexer with specific start and end checkpoints + let start_checkpoint = 2; + let end_checkpoint = 4; + let (_, pg_store, _, _database) = set_up_with_start_and_end_checkpoints( + Arc::new(sim), + data_ingestion_path, + start_checkpoint, + end_checkpoint, + ) + .await; + + // Wait for the indexer to catch up to the end checkpoint + wait_for_checkpoint(&pg_store, end_checkpoint).await?; + + // Verify that only checkpoints within the specified range were ingested + let mut connection = 
pg_store.pool().dedicated_connection().await.unwrap(); + let checkpoint_count: i64 = checkpoints::table + .count() + .get_result(&mut connection) + .await + .expect("Failed to count checkpoints"); + assert_eq!(checkpoint_count, 3, "Expected 3 checkpoints to be ingested"); + + // Verify the range of ingested checkpoints + let min_checkpoint = checkpoints::table + .select(diesel::dsl::min(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get min checkpoint") + .expect("Min checkpoint should be Some"); + let max_checkpoint = checkpoints::table + .select(diesel::dsl::max(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get max checkpoint") + .expect("Max checkpoint should be Some"); + assert_eq!( + min_checkpoint, start_checkpoint as i64, + "Minimum ingested checkpoint should be {}", + start_checkpoint + ); + assert_eq!( + max_checkpoint, end_checkpoint as i64, + "Maximum ingested checkpoint should be {}", + end_checkpoint + ); + + Ok(()) +} + +#[tokio::test] +pub async fn test_objects_snapshot() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + // Run 10 transfer transactions and create 10 checkpoints + let mut last_transaction = None; + let total_checkpoint_sequence_number = 7usize; + for _ in 0..total_checkpoint_sequence_number { + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction.clone()).unwrap(); + assert!(err.is_none()); + last_transaction = Some(transaction); + let _ = sim.create_checkpoint(); + } + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + + // Wait for objects snapshot at checkpoint max_expected_checkpoint_sequence_number + let 
max_expected_checkpoint_sequence_number = total_checkpoint_sequence_number - 5; + wait_for_objects_snapshot(&pg_store, max_expected_checkpoint_sequence_number as u64).await?; + + let mut connection = pg_store.pool().dedicated_connection().await.unwrap(); + // Get max checkpoint_sequence_number from objects_snapshot table and assert it's expected + let max_checkpoint_sequence_number = objects_snapshot::table + .select(objects_snapshot::checkpoint_sequence_number) + .order(objects_snapshot::checkpoint_sequence_number.desc()) + .limit(1) + .first::(&mut connection) + .await + .expect("Failed to read max checkpoint_sequence_number from objects_snapshot"); + assert_eq!( + max_checkpoint_sequence_number, + max_expected_checkpoint_sequence_number as i64 + ); + + // Get the object state at max_expected_checkpoint_sequence_number and assert. + let last_tx = last_transaction.unwrap(); + let obj_id = last_tx.gas()[0].0; + let gas_owner_id = last_tx.sender_address(); + + let snapshot_object = objects_snapshot::table + .filter(objects_snapshot::object_id.eq(obj_id.to_vec())) + .filter( + objects_snapshot::checkpoint_sequence_number + .eq(max_expected_checkpoint_sequence_number as i64), + ) + .first::(&mut connection) + .await + .expect("Failed reading object from objects_snapshot"); + // Assert that the object state is as expected at checkpoint max_expected_checkpoint_sequence_number + assert_eq!(snapshot_object.object_id, obj_id.to_vec()); + assert_eq!( + snapshot_object.checkpoint_sequence_number, + max_expected_checkpoint_sequence_number as i64 + ); + assert_eq!(snapshot_object.owner_type, Some(1)); + assert_eq!(snapshot_object.owner_id, Some(gas_owner_id.to_vec())); + Ok(()) +} + +#[tokio::test] +pub async fn test_objects_ingestion() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let (_, pg_store, _, 
_database) = set_up(Arc::new(sim), data_ingestion_path).await; + + let mut objects = Vec::new(); + for _ in 0..1000 { + objects.push(TransactionObjectChangesToCommit { + changed_objects: vec![IndexedObject::random()], + deleted_objects: vec![IndexedDeletedObject::random()], + }); + } + pg_store.persist_objects(objects).await?; + Ok(()) +} + +// test insert large batch of tx_indices +#[tokio::test] +pub async fn test_insert_large_batch_tx_indices() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + + let mut v = Vec::new(); + for _ in 0..1000 { + v.push(TxIndex::random()); + } + pg_store.persist_tx_indices(v).await?; + Ok(()) +} + +// test insert large batch of event_indices +#[tokio::test] +pub async fn test_insert_large_batch_event_indices() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + + let mut v = Vec::new(); + for _ in 0..1000 { + v.push(EventIndex::random()); + } + pg_store.persist_event_indices(v).await?; + Ok(()) +} + +#[tokio::test] +pub async fn test_epoch_boundary() -> Result<(), IndexerError> { + println!("test_epoch_boundary"); + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction.clone()).unwrap(); + assert!(err.is_none()); + + 
sim.create_checkpoint(); // checkpoint 1 + sim.advance_epoch(true); // checkpoint 2 and epoch 1 + + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction.clone()).unwrap(); + sim.create_checkpoint(); // checkpoint 3 + assert!(err.is_none()); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + wait_for_checkpoint(&pg_store, 3).await?; + let mut connection = pg_store.pool().dedicated_connection().await.unwrap(); + let db_checkpoint: StoredCheckpoint = checkpoints::table + .order(checkpoints::sequence_number.desc()) + .first::(&mut connection) + .await + .expect("Failed reading checkpoint from PostgresDB"); + assert_eq!(db_checkpoint.sequence_number, 3); + assert_eq!(db_checkpoint.epoch, 1); + Ok(()) +} diff --git a/crates/sui-mvr-indexer/tests/json_rpc_tests.rs b/crates/sui-mvr-indexer/tests/json_rpc_tests.rs new file mode 100644 index 0000000000000..15e501a5f0aa2 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/json_rpc_tests.rs @@ -0,0 +1,243 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::path::PathBuf; + +use sui_json_rpc_api::{CoinReadApiClient, IndexerApiClient, ReadApiClient}; +use sui_json_rpc_types::{ + CoinPage, EventFilter, SuiObjectDataOptions, SuiObjectResponse, SuiObjectResponseQuery, +}; +use sui_swarm_config::genesis_config::DEFAULT_GAS_AMOUNT; +use sui_test_transaction_builder::publish_package; +use sui_types::{event::EventID, transaction::CallArg}; +use test_cluster::TestClusterBuilder; + +#[tokio::test] +async fn test_get_owned_objects() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let data_option = SuiObjectDataOptions::new().with_owner(); + let objects = http_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + let fullnode_objects = cluster + .fullnode_handle + .rpc_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + assert_eq!(5, objects.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. + assert_eq!(objects, fullnode_objects); + + for obj in &objects { + let oref = obj.clone().into_object().unwrap(); + let result = http_client + .get_object(oref.object_id, Some(data_option.clone())) + .await?; + assert!( + matches!(result, SuiObjectResponse { data: Some(object), .. } if oref.object_id == object.object_id && object.owner.unwrap().get_owner_address()? 
== address) + ); + } + + // Multiget objectIDs test + let object_ids: Vec<_> = objects + .iter() + .map(|o| o.object().unwrap().object_id) + .collect(); + + let object_resp = http_client + .multi_get_objects(object_ids.clone(), None) + .await?; + let fullnode_object_resp = cluster + .fullnode_handle + .rpc_client + .multi_get_objects(object_ids, None) + .await?; + assert_eq!(5, object_resp.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. + assert_eq!(object_resp, fullnode_object_resp); + Ok(()) +} + +#[tokio::test] +async fn test_get_coins() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let result: CoinPage = http_client.get_coins(address, None, None, None).await?; + assert_eq!(5, result.data.len()); + assert!(!result.has_next_page); + + // We should get 0 coins for a non-existent coin type. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::TestCoin".into()), None, None) + .await?; + assert_eq!(0, result.data.len()); + + // We should get all the 5 coins for SUI with the right balance. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, None) + .await?; + assert_eq!(5, result.data.len()); + assert_eq!(result.data[0].balance, DEFAULT_GAS_AMOUNT); + assert!(!result.has_next_page); + + // When we have more than 3 coins, we should get a next page. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, Some(3)) + .await?; + assert_eq!(3, result.data.len()); + assert!(result.has_next_page); + + // We should get the remaining 2 coins with the next page. 
+ let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + Some(3), + ) + .await?; + assert_eq!(2, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + // No more coins after the last page. + let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + None, + ) + .await?; + assert_eq!(0, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + Ok(()) +} + +#[tokio::test] +async fn test_events() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + // publish package + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests/move_test_code"); + let move_package = publish_package(&cluster.wallet, path).await.0; + + // execute a transaction to generate events + let function = "emit_3"; + let arguments = vec![CallArg::Pure(bcs::to_bytes(&5u64).unwrap())]; + let transaction = cluster + .test_transaction_builder() + .await + .move_call(move_package, "events_queries", function, arguments) + .build(); + let signed_transaction = cluster.wallet.sign_transaction(&transaction); + cluster.execute_transaction(signed_transaction).await; + + // query for events + let http_client = cluster.rpc_client(); + + // start with ascending order + let event_filter = EventFilter::All([]); + let mut cursor: Option = None; + let mut limit = None; + let mut descending_order = Some(false); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let forward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + 
assert_eq!(forward_paginated_events[0], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, result.data.len()); + assert_eq!(forward_paginated_events[1..], result.data[..]); + + // now descending order - make sure to reset parameters + cursor = None; + descending_order = Some(true); + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let backward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + assert_eq!(backward_paginated_events[0], result.data[0]); + assert_eq!(forward_paginated_events[2], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, result.data.len()); + assert_eq!(backward_paginated_events[1..], result.data[..]); + + // check that the forward and backward paginated events are in reverse order + assert_eq!( + forward_paginated_events + .into_iter() + .rev() + .collect::>(), + backward_paginated_events + ); + + Ok(()) +} diff --git a/crates/sui-mvr-indexer/tests/move_test_code/Move.toml b/crates/sui-mvr-indexer/tests/move_test_code/Move.toml new file mode 100644 index 0000000000000..09e9e50f000f0 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/move_test_code/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "move_test_code" +version = "0.0.1" +edition = "2024.beta" + +[dependencies] +Sui = { local = "../../../sui-framework/packages/sui-framework" } + +[addresses] +move_test_code 
= "0x0" diff --git a/crates/sui-mvr-indexer/tests/move_test_code/sources/events.move b/crates/sui-mvr-indexer/tests/move_test_code/sources/events.move new file mode 100644 index 0000000000000..f32cc7fe109f3 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/move_test_code/sources/events.move @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + + +module move_test_code::events_queries { + use sui::event; + + public struct EventA has copy, drop { + new_value: u64 + } + + public entry fun emit_1(value: u64) { + event::emit(EventA { new_value: value }) + } + + public entry fun emit_2(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}) + } + + public entry fun emit_3(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}); + event::emit(EventA { new_value: value + 2}); + } +} diff --git a/crates/sui-mvr-indexer/tests/read_api_tests.rs b/crates/sui-mvr-indexer/tests/read_api_tests.rs new file mode 100644 index 0000000000000..d17b431888f01 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/read_api_tests.rs @@ -0,0 +1,50 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use jsonrpsee::core::RpcResult; +use simulacrum::Simulacrum; +use std::sync::Arc; +use sui_json_rpc_api::ReadApiServer; +use sui_mvr_indexer::apis::read_api::ReadApi; +use sui_mvr_indexer::indexer_reader::IndexerReader; +use sui_mvr_indexer::test_utils::{set_up, wait_for_checkpoint}; +use tempfile::tempdir; + +#[tokio::test] +async fn test_checkpoint_apis() -> RpcResult<()> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + sim.create_checkpoint(); + sim.create_checkpoint(); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + wait_for_checkpoint(&pg_store, 2).await.unwrap(); + + // Test get_latest_checkpoint_sequence_number + let read_api = ReadApi::new(IndexerReader::new(pg_store.pool())); + let latest_checkpoint = read_api.get_latest_checkpoint_sequence_number().await?; + assert_eq!(latest_checkpoint.into_inner(), 2); + + // Test get_checkpoint + let checkpoint_id = sui_json_rpc_types::CheckpointId::SequenceNumber(1); + let checkpoint = read_api.get_checkpoint(checkpoint_id).await?; + assert_eq!(checkpoint.sequence_number, 1); + + // Test get_checkpoints + let checkpoints = read_api.get_checkpoints(None, Some(10), false).await?; + assert_eq!(checkpoints.data.len(), 3); // 0, 1, 2 + assert!(!checkpoints.has_next_page); + assert_eq!(checkpoints.next_cursor, Some(2.into())); + + let checkpoints = read_api + .get_checkpoints(Some(2.into()), Some(2), true) + .await?; + assert_eq!(checkpoints.data.len(), 2); + assert!(!checkpoints.has_next_page); + assert_eq!(checkpoints.next_cursor, Some(0.into())); + assert_eq!(checkpoints.data[0].sequence_number, 1); + assert_eq!(checkpoints.data[1].sequence_number, 0); + Ok(()) +} diff --git a/crates/sui-network/build.rs b/crates/sui-network/build.rs index 3b6926e0fae15..f082ba5f2d24a 100644 --- 
a/crates/sui-network/build.rs +++ b/crates/sui-network/build.rs @@ -141,15 +141,6 @@ fn build_anemo_services(out_dir: &Path) { let discovery = anemo_build::manual::Service::builder() .name("Discovery") .package("sui") - .method( - anemo_build::manual::Method::builder() - .name("get_known_peers") - .route_name("GetKnownPeers") - .request_type("()") - .response_type("crate::discovery::GetKnownPeersResponse") - .codec_path(codec_path) - .build(), - ) .method( anemo_build::manual::Method::builder() .name("get_known_peers_v2") diff --git a/crates/sui-network/src/discovery/builder.rs b/crates/sui-network/src/discovery/builder.rs index ef56e208f9567..b18c65ba9efe5 100644 --- a/crates/sui-network/src/discovery/builder.rs +++ b/crates/sui-network/src/discovery/builder.rs @@ -58,7 +58,7 @@ impl Builder { // Apply rate limits from configuration as needed. if let Some(limit) = discovery_config.get_known_peers_rate_limit { - discovery_server = discovery_server.add_layer_for_get_known_peers( + discovery_server = discovery_server.add_layer_for_get_known_peers_v2( InboundRequestLayer::new(rate_limit::RateLimitLayer::new( governor::Quota::per_second(limit), rate_limit::WaitMode::Block, diff --git a/crates/sui-network/src/discovery/mod.rs b/crates/sui-network/src/discovery/mod.rs index d701c4f3fb412..a63bc9ca8ca5b 100644 --- a/crates/sui-network/src/discovery/mod.rs +++ b/crates/sui-network/src/discovery/mod.rs @@ -47,7 +47,6 @@ pub use generated::{ discovery_client::DiscoveryClient, discovery_server::{Discovery, DiscoveryServer}, }; -pub use server::GetKnownPeersResponse; pub use server::GetKnownPeersResponseV2; use self::metrics::Metrics; @@ -268,7 +267,6 @@ impl DiscoveryEventLoop { // Query the new node for any peers self.tasks.spawn(query_peer_for_their_known_peers( peer, - self.discovery_config.clone(), self.state.clone(), self.metrics.clone(), self.allowlisted_peers.clone(), @@ -424,7 +422,6 @@ async fn try_to_connect_to_seed_peers( async fn query_peer_for_their_known_peers( 
peer: Peer, - config: Arc, state: Arc>, metrics: Metrics, allowlisted_peers: Arc>>, @@ -432,50 +429,24 @@ async fn query_peer_for_their_known_peers( let mut client = DiscoveryClient::new(peer); let request = Request::new(()).with_timeout(TIMEOUT); - let found_peers = if config.enable_node_info_signatures() { - client - .get_known_peers_v2(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponseV2 { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - }, - ) - } else { - client - .get_known_peers(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponse { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - .into_iter() - .map(|info| { - // SignedNodeInfo with fake default signatures will only work if - // signature verification is disabled. - SignedNodeInfo::new_from_data_and_sig(info, Ed25519Signature::default()) - }) - .collect() - }, - ) - }; + let found_peers = client + .get_known_peers_v2(request) + .await + .ok() + .map(Response::into_inner) + .map( + |GetKnownPeersResponseV2 { + own_info, + mut known_peers, + }| { + if !own_info.addresses.is_empty() { + known_peers.push(own_info) + } + known_peers + }, + ); if let Some(found_peers) = found_peers { - update_known_peers(config, state, metrics, found_peers, allowlisted_peers); + update_known_peers(state, metrics, found_peers, allowlisted_peers); } } @@ -494,57 +465,27 @@ async fn query_connected_peers_for_their_known_peers( .flat_map(|id| network.peer(id)) .choose_multiple(&mut rand::thread_rng(), config.peers_to_query()); - let enable_node_info_signatures = config.enable_node_info_signatures(); let found_peers = peers_to_query .into_iter() .map(DiscoveryClient::new) .map(|mut client| async move { let request = Request::new(()).with_timeout(TIMEOUT); - if enable_node_info_signatures { - client - 
.get_known_peers_v2(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponseV2 { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - }, - ) - } else { - client - .get_known_peers(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponse { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - .into_iter() - .map(|info| { - // SignedNodeInfo with fake default signatures will only work if - // signature verification is disabled. - SignedNodeInfo::new_from_data_and_sig( - info, - Ed25519Signature::default(), - ) - }) - .collect() - }, - ) - } + client + .get_known_peers_v2(request) + .await + .ok() + .map(Response::into_inner) + .map( + |GetKnownPeersResponseV2 { + own_info, + mut known_peers, + }| { + if !own_info.addresses.is_empty() { + known_peers.push(own_info) + } + known_peers + }, + ) }) .pipe(futures::stream::iter) .buffer_unordered(config.peers_to_query()) @@ -553,11 +494,10 @@ async fn query_connected_peers_for_their_known_peers( .collect::>() .await; - update_known_peers(config, state, metrics, found_peers, allowlisted_peers); + update_known_peers(state, metrics, found_peers, allowlisted_peers); } fn update_known_peers( - config: Arc, state: Arc>, metrics: Metrics, found_peers: Vec, @@ -602,24 +542,22 @@ fn update_known_peers( { continue; } - if config.enable_node_info_signatures() { - let Ok(public_key) = Ed25519PublicKey::from_bytes(&peer_info.peer_id.0) else { - debug_fatal!( - // This should never happen. 
- "Failed to convert anemo PeerId {:?} to Ed25519PublicKey", - peer_info.peer_id - ); - continue; - }; - let msg = bcs::to_bytes(peer_info.data()).expect("BCS serialization should not fail"); - if let Err(e) = public_key.verify(&msg, peer_info.auth_sig()) { - info!( - "Discovery failed to verify signature for NodeInfo for peer {:?}: {e:?}", - peer_info.peer_id - ); - // TODO: consider denylisting the source of bad NodeInfo from future requests. - continue; - } + let Ok(public_key) = Ed25519PublicKey::from_bytes(&peer_info.peer_id.0) else { + debug_fatal!( + // This should never happen. + "Failed to convert anemo PeerId {:?} to Ed25519PublicKey", + peer_info.peer_id + ); + continue; + }; + let msg = bcs::to_bytes(peer_info.data()).expect("BCS serialization should not fail"); + if let Err(e) = public_key.verify(&msg, peer_info.auth_sig()) { + info!( + "Discovery failed to verify signature for NodeInfo for peer {:?}: {e:?}", + peer_info.peer_id + ); + // TODO: consider denylisting the source of bad NodeInfo from future requests. + continue; } let peer = VerifiedSignedNodeInfo::new_from_verified(peer_info); diff --git a/crates/sui-network/src/discovery/server.rs b/crates/sui-network/src/discovery/server.rs index 1a1993273689a..20535a3d6dc4f 100644 --- a/crates/sui-network/src/discovery/server.rs +++ b/crates/sui-network/src/discovery/server.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::{Discovery, NodeInfo, SignedNodeInfo, State, MAX_PEERS_TO_SEND}; +use super::{Discovery, SignedNodeInfo, State, MAX_PEERS_TO_SEND}; use anemo::{Request, Response}; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; @@ -10,12 +10,6 @@ use std::{ sync::{Arc, RwLock}, }; -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct GetKnownPeersResponse { - pub own_info: NodeInfo, - pub known_peers: Vec, -} - #[derive(Clone, Debug, Serialize, Deserialize)] pub struct GetKnownPeersResponseV2 { pub own_info: SignedNodeInfo, @@ -28,21 +22,6 @@ pub(super) struct Server { #[anemo::async_trait] impl Discovery for Server { - async fn get_known_peers( - &self, - request: Request<()>, - ) -> Result, anemo::rpc::Status> { - let resp = self.get_known_peers_v2(request).await?; - Ok(resp.map(|body| GetKnownPeersResponse { - own_info: body.own_info.into_data(), - known_peers: body - .known_peers - .into_iter() - .map(|e| e.into_data()) - .collect(), - })) - } - async fn get_known_peers_v2( &self, _request: Request<()>, diff --git a/crates/sui-network/src/discovery/tests.rs b/crates/sui-network/src/discovery/tests.rs index 01704e7600f9d..5fdf479bea27a 100644 --- a/crates/sui-network/src/discovery/tests.rs +++ b/crates/sui-network/src/discovery/tests.rs @@ -19,7 +19,10 @@ async fn get_known_peers() -> Result<()> { .build_internal(); // Err when own_info not set - server.get_known_peers(Request::new(())).await.unwrap_err(); + server + .get_known_peers_v2(Request::new(())) + .await + .unwrap_err(); // Normal response with our_info let our_info = NodeInfo { @@ -33,11 +36,11 @@ async fn get_known_peers() -> Result<()> { Ed25519Signature::default(), )); let response = server - .get_known_peers(Request::new(())) + .get_known_peers_v2(Request::new(())) .await .unwrap() .into_inner(); - assert_eq!(response.own_info, our_info); + assert_eq!(response.own_info.data(), &our_info); assert!(response.known_peers.is_empty()); // 
Normal response with some known peers @@ -55,27 +58,32 @@ async fn get_known_peers() -> Result<()> { )), ); let response = server - .get_known_peers(Request::new(())) + .get_known_peers_v2(Request::new(())) .await .unwrap() .into_inner(); - assert_eq!(response.own_info, our_info); - assert_eq!(response.known_peers, vec![other_peer]); + assert_eq!(response.own_info.data(), &our_info); + assert_eq!( + response + .known_peers + .into_iter() + .map(|peer| peer.into_data()) + .collect::>(), + vec![other_peer] + ); Ok(()) } #[tokio::test] async fn make_connection_to_seed_peer() -> Result<()> { - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - ..DiscoveryConfig::default() - }); - let (builder, server) = Builder::new(create_test_channel().1).config(config).build(); + let mut config = P2pConfig::default(); + let (builder, server) = Builder::new(create_test_channel().1) + .config(config.clone()) + .build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (_event_loop_1, _handle_1) = builder.build(network_1.clone(), key_1); - let mut config = P2pConfig::default(); config.seed_peers.push(SeedPeer { peer_id: None, address: format!("/dns/localhost/udp/{}", network_1.local_addr().port()).parse()?, @@ -103,15 +111,13 @@ async fn make_connection_to_seed_peer() -> Result<()> { #[tokio::test] async fn make_connection_to_seed_peer_with_peer_id() -> Result<()> { - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - ..DiscoveryConfig::default() - }); - let (builder, server) = Builder::new(create_test_channel().1).config(config).build(); + let mut config = P2pConfig::default(); + let (builder, server) = Builder::new(create_test_channel().1) + .config(config.clone()) + .build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (_event_loop_1, _handle_1) = 
builder.build(network_1.clone(), key_1); - let mut config = P2pConfig::default(); config.seed_peers.push(SeedPeer { peer_id: Some(network_1.peer_id()), address: format!("/dns/localhost/udp/{}", network_1.local_addr().port()).parse()?, @@ -140,15 +146,13 @@ async fn make_connection_to_seed_peer_with_peer_id() -> Result<()> { #[tokio::test(flavor = "current_thread", start_paused = true)] async fn three_nodes_can_connect_via_discovery() -> Result<()> { // Setup the peer that will be the seed for the other two - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - ..DiscoveryConfig::default() - }); - let (builder, server) = Builder::new(create_test_channel().1).config(config).build(); + let mut config = P2pConfig::default(); + let (builder, server) = Builder::new(create_test_channel().1) + .config(config.clone()) + .build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (event_loop_1, _handle_1) = builder.build(network_1.clone(), key_1); - let mut config = P2pConfig::default(); config.seed_peers.push(SeedPeer { peer_id: Some(network_1.peer_id()), address: format!("/dns/localhost/udp/{}", network_1.local_addr().port()).parse()?, @@ -207,10 +211,7 @@ async fn three_nodes_can_connect_via_discovery() -> Result<()> { #[tokio::test(flavor = "current_thread", start_paused = true)] async fn peers_are_added_from_reconfig_channel() -> Result<()> { let (tx_1, rx_1) = create_test_channel(); - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - ..DiscoveryConfig::default() - }); + let config = P2pConfig::default(); let (builder, server) = Builder::new(rx_1).config(config.clone()).build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (event_loop_1, _handle_1) = builder.build(network_1.clone(), key_1); @@ -299,7 +300,6 @@ async fn test_access_types() { let 
default_discovery_config = DiscoveryConfig { target_concurrent_connections: Some(100), interval_period_ms: Some(1000), - enable_node_info_signatures: Some(true), ..Default::default() }; let default_p2p_config = P2pConfig { @@ -310,7 +310,6 @@ async fn test_access_types() { target_concurrent_connections: Some(100), interval_period_ms: Some(1000), access_type: Some(AccessType::Private), - enable_node_info_signatures: Some(true), ..Default::default() }; diff --git a/crates/sui-node/src/lib.rs b/crates/sui-node/src/lib.rs index e0fc567bffda6..9fd3db906fd60 100644 --- a/crates/sui-node/src/lib.rs +++ b/crates/sui-node/src/lib.rs @@ -13,6 +13,7 @@ use arc_swap::ArcSwap; use fastcrypto_zkp::bn254::zk_login::JwkId; use fastcrypto_zkp::bn254::zk_login::OIDCProvider; use futures::TryFutureExt; +use mysten_network::server::SUI_TLS_SERVER_NAME; use prometheus::Registry; use std::collections::{BTreeSet, HashMap, HashSet}; use std::fmt; @@ -45,10 +46,8 @@ use sui_types::messages_consensus::AuthorityCapabilitiesV2; use sui_types::sui_system_state::SuiSystemState; use tap::tap::TapFallible; use tokio::runtime::Handle; -use tokio::sync::broadcast; -use tokio::sync::mpsc; -use tokio::sync::{watch, Mutex}; -use tokio::task::JoinHandle; +use tokio::sync::{broadcast, mpsc, watch, Mutex}; +use tokio::task::{JoinHandle, JoinSet}; use tower::ServiceBuilder; use tracing::{debug, error, warn}; use tracing::{error_span, info, Instrument}; @@ -150,10 +149,8 @@ pub struct ValidatorComponents { consensus_manager: ConsensusManager, consensus_store_pruner: ConsensusStorePruner, consensus_adapter: Arc, - // dropping this will eventually stop checkpoint tasks. The receiver side of this channel - // is copied into each checkpoint service task, and they are listening to any change to this - // channel. When the sender is dropped, a change is triggered and those tasks will exit. 
- checkpoint_service_exit: watch::Sender<()>, + // Keeping the handle to the checkpoint service tasks to shut them down during reconfiguration. + checkpoint_service_tasks: JoinSet<()>, checkpoint_metrics: Arc, sui_tx_validator_metrics: Arc, } @@ -1289,7 +1286,7 @@ impl SuiNode { sui_node_metrics: Arc, sui_tx_validator_metrics: Arc, ) -> Result { - let (checkpoint_service, checkpoint_service_exit) = Self::start_checkpoint_service( + let (checkpoint_service, checkpoint_service_tasks) = Self::start_checkpoint_service( config, consensus_adapter.clone(), checkpoint_store, @@ -1375,7 +1372,7 @@ impl SuiNode { consensus_manager, consensus_store_pruner, consensus_adapter, - checkpoint_service_exit, + checkpoint_service_tasks, checkpoint_metrics, sui_tx_validator_metrics, }) @@ -1390,7 +1387,7 @@ impl SuiNode { state_sync_handle: state_sync::Handle, accumulator: Weak, checkpoint_metrics: Arc, - ) -> (Arc, watch::Sender<()>) { + ) -> (Arc, JoinSet<()>) { let epoch_start_timestamp_ms = epoch_store.epoch_start_state().epoch_start_timestamp_ms(); let epoch_duration_ms = epoch_store.epoch_start_state().epoch_duration_ms(); @@ -1477,8 +1474,13 @@ impl SuiNode { server_builder = server_builder.add_service(ValidatorServer::new(validator_service)); + let tls_config = sui_tls::create_rustls_server_config( + config.network_key_pair().copy().private(), + SUI_TLS_SERVER_NAME.to_string(), + sui_tls::AllowAll, + ); let server = server_builder - .bind(config.network_address()) + .bind(config.network_address(), Some(tls_config)) .await .map_err(|err| anyhow!(err.to_string()))?; let local_addr = server.local_addr(); @@ -1684,16 +1686,28 @@ impl SuiNode { consensus_manager, consensus_store_pruner, consensus_adapter, - checkpoint_service_exit, + mut checkpoint_service_tasks, checkpoint_metrics, sui_tx_validator_metrics, }) = self.validator_components.lock().await.take() { info!("Reconfiguring the validator."); - // Stop the old checkpoint service. 
- drop(checkpoint_service_exit); + // Cancel the old checkpoint service tasks. + // Waiting for checkpoint builder to finish gracefully is not possible, because it + // may wait on transactions while consensus on peers have already shut down. + checkpoint_service_tasks.abort_all(); + while let Some(result) = checkpoint_service_tasks.join_next().await { + if let Err(err) = result { + if err.is_panic() { + std::panic::resume_unwind(err.into_panic()); + } + warn!("Error in checkpoint service task: {:?}", err); + } + } + info!("Checkpoint service has shut down."); consensus_manager.shutdown().await; + info!("Consensus has shut down."); let new_epoch_store = self .reconfigure_state( @@ -1704,6 +1718,7 @@ impl SuiNode { accumulator.clone(), ) .await; + info!("Epoch store finished reconfiguration."); // No other components should be holding a strong reference to state accumulator // at this point. Confirm here before we swap in the new accumulator. diff --git a/crates/sui-node/src/main.rs b/crates/sui-node/src/main.rs index 167d08ddbf3b7..26db36f28d810 100644 --- a/crates/sui-node/src/main.rs +++ b/crates/sui-node/src/main.rs @@ -47,8 +47,8 @@ fn main() { // TODO: re-enable after we figure out how to eliminate crashes in prod because of this. // ProtocolConfig::poison_get_for_min_version(); - move_vm_profiler::gas_profiler_feature_enabled! { - panic!("Cannot run the sui-node binary with gas-profiler feature enabled"); + move_vm_profiler::tracing_feature_enabled! 
{ + panic!("Cannot run the sui-node binary with tracing feature enabled"); } let args = Args::parse(); diff --git a/crates/sui-open-rpc/Cargo.toml b/crates/sui-open-rpc/Cargo.toml index e6000fe18d0e2..0326794bec69e 100644 --- a/crates/sui-open-rpc/Cargo.toml +++ b/crates/sui-open-rpc/Cargo.toml @@ -21,7 +21,7 @@ anyhow.workspace = true clap.workspace = true pretty_assertions.workspace = true tokio = { workspace = true, features = ["full"] } -fastcrypto = { workspace = true } +fastcrypto.workspace = true sui-json-rpc.workspace = true sui-json-rpc-api.workspace = true sui-json-rpc-types.workspace = true diff --git a/crates/sui-open-rpc/spec/openrpc.json b/crates/sui-open-rpc/spec/openrpc.json index 251b8df3073d9..add1b298f6e2d 100644 --- a/crates/sui-open-rpc/spec/openrpc.json +++ b/crates/sui-open-rpc/spec/openrpc.json @@ -12,7 +12,7 @@ "name": "Apache-2.0", "url": "https://raw.githubusercontent.com/MystenLabs/sui/main/LICENSE" }, - "version": "1.36.1" + "version": "1.37.1" }, "methods": [ { @@ -1293,7 +1293,7 @@ "name": "Result", "value": { "minSupportedProtocolVersion": "1", - "maxSupportedProtocolVersion": "65", + "maxSupportedProtocolVersion": "68", "protocolVersion": "6", "featureFlags": { "accept_zklogin_in_multisig": false, @@ -1310,6 +1310,7 @@ "disable_invariant_violation_check_in_swap_loc": false, "disallow_adding_abilities_on_upgrade": false, "disallow_change_struct_type_params_on_upgrade": false, + "disallow_new_modules_in_deps_only_packages": false, "enable_coin_deny_list": false, "enable_coin_deny_list_v2": false, "enable_effects_v2": false, @@ -1353,6 +1354,7 @@ "soft_bundle": false, "throughput_aware_consensus_submission": false, "txn_base_cost_as_multiplier": false, + "uncompressed_g1_group_elements": false, "upgraded_multisig_supported": false, "validate_identifier_inputs": false, "verify_legacy_zklogin_address": false, @@ -1592,6 +1594,7 @@ "u64": "2" }, "execution_version": null, + "gas_budget_based_txn_cost_absolute_cap_commit_count": null, 
"gas_budget_based_txn_cost_cap_factor": null, "gas_model_version": { "u64": "5" @@ -1630,6 +1633,7 @@ "group_ops_bls12381_g1_msm_base_cost_per_input": null, "group_ops_bls12381_g1_mul_cost": null, "group_ops_bls12381_g1_sub_cost": null, + "group_ops_bls12381_g1_to_uncompressed_g1_cost": null, "group_ops_bls12381_g2_add_cost": null, "group_ops_bls12381_g2_div_cost": null, "group_ops_bls12381_g2_hash_to_base_cost": null, @@ -1648,6 +1652,10 @@ "group_ops_bls12381_scalar_div_cost": null, "group_ops_bls12381_scalar_mul_cost": null, "group_ops_bls12381_scalar_sub_cost": null, + "group_ops_bls12381_uncompressed_g1_sum_base_cost": null, + "group_ops_bls12381_uncompressed_g1_sum_cost_per_term": null, + "group_ops_bls12381_uncompressed_g1_sum_max_terms": null, + "group_ops_bls12381_uncompressed_g1_to_g1_cost": null, "hash_blake2b256_cost_base": { "u64": "52" }, @@ -1681,6 +1689,7 @@ "hmac_hmac_sha3_256_input_cost_per_byte": { "u64": "2" }, + "max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit": null, "max_accumulated_txn_cost_per_object_in_mysticeti_commit": null, "max_accumulated_txn_cost_per_object_in_narwhal_commit": null, "max_age_of_jwk_in_epochs": null, @@ -1811,6 +1820,7 @@ "max_tx_size_bytes": { "u64": "131072" }, + "max_txn_cost_overage_per_object_in_commit": null, "max_type_argument_depth": { "u32": "16" }, @@ -10714,7 +10724,7 @@ "TransactionFilter": { "oneOf": [ { - "description": "Query by checkpoint.", + "description": "CURRENTLY NOT SUPPORTED. Query by checkpoint.", "type": "object", "required": [ "Checkpoint" @@ -10850,7 +10860,7 @@ "additionalProperties": false }, { - "description": "Query txs that have a given address as sender or recipient.", + "description": "CURRENTLY NOT SUPPORTED. 
Query txs that have a given address as sender or recipient.", "type": "object", "required": [ "FromOrToAddress" diff --git a/crates/sui-protocol-config/src/lib.rs b/crates/sui-protocol-config/src/lib.rs index 835463599f7b8..caccb8d70d662 100644 --- a/crates/sui-protocol-config/src/lib.rs +++ b/crates/sui-protocol-config/src/lib.rs @@ -18,7 +18,7 @@ use tracing::{info, warn}; /// The minimum and maximum protocol versions supported by this build. const MIN_PROTOCOL_VERSION: u64 = 1; -const MAX_PROTOCOL_VERSION: u64 = 65; +const MAX_PROTOCOL_VERSION: u64 = 68; // Record history of protocol version allocations here: // @@ -185,7 +185,16 @@ const MAX_PROTOCOL_VERSION: u64 = 65; // Add feature flag for Mysticeti fastpath. // Version 62: Makes the event's sending module package upgrade-aware. // Version 63: Enable gas based congestion control in consensus commit. -// Version 64: Switch to distributed vote scoring in consensus in mainnet +// Version 64: Revert congestion control change. +// Version 65: Enable distributed vote scoring in mainnet. +// Version 66: Revert distributed vote scoring in mainnet. +// Framework fix for fungible staking book-keeping. +// Version 67: Re-enable distributed vote scoring in mainnet. +// Version 68: Add G1Uncompressed group to group ops. +// Update to Move stdlib. +// Enable gas based congestion control with overage. +// Further reduce minimum number of random beacon shares. +// Disallow adding new modules in `deps-only` packages. #[derive(Copy, Clone, Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct ProtocolVersion(u64); @@ -548,6 +557,13 @@ struct FeatureFlags { // Makes the event's sending module version-aware. 
#[serde(skip_serializing_if = "is_false")] relocate_event_module: bool, + + // Enable uncompressed group elements in BLS123-81 G1 + #[serde(skip_serializing_if = "is_false")] + uncompressed_g1_group_elements: bool, + + #[serde(skip_serializing_if = "is_false")] + disallow_new_modules_in_deps_only_packages: bool, } fn is_false(b: &bool) -> bool { @@ -1137,6 +1153,11 @@ pub struct ProtocolConfig { group_ops_bls12381_g2_msm_base_cost_per_input: Option, group_ops_bls12381_msm_max_len: Option, group_ops_bls12381_pairing_cost: Option, + group_ops_bls12381_g1_to_uncompressed_g1_cost: Option, + group_ops_bls12381_uncompressed_g1_to_g1_cost: Option, + group_ops_bls12381_uncompressed_g1_sum_base_cost: Option, + group_ops_bls12381_uncompressed_g1_sum_cost_per_term: Option, + group_ops_bls12381_uncompressed_g1_sum_max_terms: Option, // hmac::hmac_sha3_256 hmac_hmac_sha3_256_cost_base: Option, @@ -1235,16 +1256,17 @@ pub struct ProtocolConfig { /// The maximum number of transactions included in a consensus block. consensus_max_num_transactions_in_block: Option, - /// The max accumulated txn execution cost per object in a Narwhal commit. Transactions - /// in a checkpoint will be deferred once their touch shared objects hit this limit. - /// This config is meant to be used when consensus protocol is Narwhal, where each - /// consensus commit corresponding to 1 checkpoint (or 2 if randomness is enabled) + /// DEPRECATED. Do not use. max_accumulated_txn_cost_per_object_in_narwhal_commit: Option, /// The max number of consensus rounds a transaction can be deferred due to shared object congestion. /// Transactions will be cancelled after this many rounds. max_deferral_rounds_for_congestion_control: Option, + /// If >0, congestion control will allow up to one transaction per object to exceed + /// the configured maximum accumulated cost by the given amount. 
+ max_txn_cost_overage_per_object_in_commit: Option, + /// Minimum interval of commit timestamps between consecutive checkpoints. min_checkpoint_interval_ms: Option, @@ -1260,11 +1282,16 @@ pub struct ProtocolConfig { bridge_should_try_to_finalize_committee: Option, /// The max accumulated txn execution cost per object in a mysticeti. Transactions - /// in a commit will be deferred once their touch shared objects hit this limit. + /// in a commit will be deferred once their touch shared objects hit this limit, + /// unless the selected congestion control mode allows overage. /// This config plays the same role as `max_accumulated_txn_cost_per_object_in_narwhal_commit` /// but for mysticeti commits due to that mysticeti has higher commit rate. max_accumulated_txn_cost_per_object_in_mysticeti_commit: Option, + /// As above, but separate per-commit budget for transactions that use randomness. + /// If not configured, uses the setting for `max_accumulated_txn_cost_per_object_in_mysticeti_commit`. + max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: Option, + /// Configures the garbage collection depth for consensus. When is unset or `0` then the garbage collection /// is disabled. consensus_gc_depth: Option, @@ -1273,6 +1300,10 @@ pub struct ProtocolConfig { /// object congestion control strategy. Basically the max transaction cost is calculated as /// (num of input object + num of commands) * this factor. gas_budget_based_txn_cost_cap_factor: Option, + + /// Adds an absolute cap on the maximum transaction cost when using TotalGasBudgetWithCap at + /// the given multiple of the per-commit budget. 
+ gas_budget_based_txn_cost_absolute_cap_commit_count: Option, } // feature flags @@ -1628,6 +1659,15 @@ impl ProtocolConfig { pub fn relocate_event_module(&self) -> bool { self.feature_flags.relocate_event_module } + + pub fn uncompressed_g1_group_elements(&self) -> bool { + self.feature_flags.uncompressed_g1_group_elements + } + + pub fn disallow_new_modules_in_deps_only_packages(&self) -> bool { + self.feature_flags + .disallow_new_modules_in_deps_only_packages + } } #[cfg(not(msim))] @@ -2048,6 +2088,11 @@ impl ProtocolConfig { group_ops_bls12381_g2_msm_base_cost_per_input: None, group_ops_bls12381_msm_max_len: None, group_ops_bls12381_pairing_cost: None, + group_ops_bls12381_g1_to_uncompressed_g1_cost: None, + group_ops_bls12381_uncompressed_g1_to_g1_cost: None, + group_ops_bls12381_uncompressed_g1_sum_base_cost: None, + group_ops_bls12381_uncompressed_g1_sum_cost_per_term: None, + group_ops_bls12381_uncompressed_g1_sum_max_terms: None, // zklogin::check_zklogin_id check_zklogin_id_cost_base: None, @@ -2132,6 +2177,8 @@ impl ProtocolConfig { max_deferral_rounds_for_congestion_control: None, + max_txn_cost_overage_per_object_in_commit: None, + min_checkpoint_interval_ms: None, checkpoint_summary_version_specific_data: None, @@ -2142,9 +2189,13 @@ impl ProtocolConfig { max_accumulated_txn_cost_per_object_in_mysticeti_commit: None, + max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: None, + consensus_gc_depth: None, gas_budget_based_txn_cost_cap_factor: None, + + gas_budget_based_txn_cost_absolute_cap_commit_count: None, // When adding a new constant, set it to None in the earliest version, like this: // new_constant: None, }; @@ -2844,6 +2895,43 @@ impl ProtocolConfig { cfg.feature_flags .consensus_distributed_vote_scoring_strategy = true; } + 66 => { + if chain == Chain::Mainnet { + // Revert the distributed vote scoring for mainnet (for one protocol upgrade) + cfg.feature_flags + .consensus_distributed_vote_scoring_strategy = false; + } + } + 
67 => { + // Enable it once again. + cfg.feature_flags + .consensus_distributed_vote_scoring_strategy = true; + } + 68 => { + cfg.group_ops_bls12381_g1_to_uncompressed_g1_cost = Some(26); + cfg.group_ops_bls12381_uncompressed_g1_to_g1_cost = Some(52); + cfg.group_ops_bls12381_uncompressed_g1_sum_base_cost = Some(26); + cfg.group_ops_bls12381_uncompressed_g1_sum_cost_per_term = Some(13); + cfg.group_ops_bls12381_uncompressed_g1_sum_max_terms = Some(2000); + + if chain != Chain::Mainnet && chain != Chain::Testnet { + cfg.feature_flags.uncompressed_g1_group_elements = true; + } + + cfg.feature_flags.per_object_congestion_control_mode = + PerObjectCongestionControlMode::TotalGasBudgetWithCap; + cfg.gas_budget_based_txn_cost_cap_factor = Some(400_000); + cfg.max_accumulated_txn_cost_per_object_in_mysticeti_commit = Some(18_500_000); + cfg.max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit = + Some(3_700_000); // 20% of above + cfg.max_txn_cost_overage_per_object_in_commit = Some(u64::MAX); + cfg.gas_budget_based_txn_cost_absolute_cap_commit_count = Some(50); + + // Further reduce minimum number of random beacon shares. + cfg.random_beacon_reduction_lower_bound = Some(500); + + cfg.feature_flags.disallow_new_modules_in_deps_only_packages = true; + } // Use this template when making changes: // // // modify an existing constant. 
@@ -3009,6 +3097,11 @@ impl ProtocolConfig { pub fn set_gc_depth_for_testing(&mut self, val: u32) { self.consensus_gc_depth = Some(val); } + + pub fn set_disallow_new_modules_in_deps_only_packages_for_testing(&mut self, val: bool) { + self.feature_flags + .disallow_new_modules_in_deps_only_packages = val; + } } type OverrideFn = dyn Fn(ProtocolVersion, ProtocolConfig) -> ProtocolConfig + Send; diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_66.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_66.snap new file mode 100644 index 0000000000000..07a447b25d844 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_66.snap @@ -0,0 +1,329 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 66 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: 
true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 
+binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 
+max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 
+validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 
+groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 
+bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_67.snap 
b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_67.snap new file mode 100644 index 0000000000000..f050b4e16302d --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_67.snap @@ -0,0 +1,330 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 67 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + 
consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 
+max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 
+dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 
+ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 
+hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 
+string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_68.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_68.snap new file mode 100644 index 0000000000000..492aebef1a352 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_68.snap @@ -0,0 +1,339 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 68 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: 
true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalGasBudgetWithCap + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + 
rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true + disallow_new_modules_in_deps_only_packages: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 
+max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 
+dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 
+ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 
+group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +group_ops_bls12381_g1_to_uncompressed_g1_cost: 26 +group_ops_bls12381_uncompressed_g1_to_g1_cost: 52 +group_ops_bls12381_uncompressed_g1_sum_base_cost: 26 +group_ops_bls12381_uncompressed_g1_sum_cost_per_term: 13 +group_ops_bls12381_uncompressed_g1_sum_max_terms: 2000 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 
+vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 500 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +max_txn_cost_overage_per_object_in_commit: 18446744073709551615 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 18500000 +max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 +gas_budget_based_txn_cost_cap_factor: 400000 +gas_budget_based_txn_cost_absolute_cap_commit_count: 50 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_66.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_66.snap new file mode 100644 index 0000000000000..4d141e8b8de23 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_66.snap @@ -0,0 +1,330 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 66 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + 
loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + 
rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 
65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 
+dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 
+ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 
+group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 
+execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_67.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_67.snap new file mode 100644 index 0000000000000..f050b4e16302d --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_67.snap @@ -0,0 +1,330 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 67 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: 
true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 
32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 
+package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 
+object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 
+ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 
+group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 
+max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_68.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_68.snap new file mode 100644 index 0000000000000..492aebef1a352 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_68.snap @@ -0,0 +1,339 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 68 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + 
bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalGasBudgetWithCap + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true + disallow_new_modules_in_deps_only_packages: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 
+binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 
+storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 
+validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 
+groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +group_ops_bls12381_g1_to_uncompressed_g1_cost: 26 +group_ops_bls12381_uncompressed_g1_to_g1_cost: 52 
+group_ops_bls12381_uncompressed_g1_sum_base_cost: 26 +group_ops_bls12381_uncompressed_g1_sum_cost_per_term: 13 +group_ops_bls12381_uncompressed_g1_sum_max_terms: 2000 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 500 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +max_txn_cost_overage_per_object_in_commit: 18446744073709551615 
+min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 18500000 +max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 +gas_budget_based_txn_cost_cap_factor: 400000 +gas_budget_based_txn_cost_absolute_cap_commit_count: 50 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_66.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_66.snap new file mode 100644 index 0000000000000..747c437d6bbee --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_66.snap @@ -0,0 +1,340 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 66 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: 
true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_poseidon: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + enable_group_ops_native_function_msm: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + enable_vdf: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + passkey_auth: true + authority_capabilities_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + mysticeti_fastpath: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 
+binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 
+obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 
+types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 
+ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +poseidon_bn254_cost_base: 260 +poseidon_bn254_cost_per_block: 10 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 
+group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +vdf_verify_vdf_cost: 1500 +vdf_hash_to_input_cost: 100 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 
+bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_67.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_67.snap new file mode 100644 index 0000000000000..f7710f5f46b34 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_67.snap @@ -0,0 +1,340 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 67 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true 
+ hardened_otw_check: true + allow_receiving_object_id: true + enable_poseidon: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + enable_group_ops_native_function_msm: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + enable_vdf: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + passkey_auth: true + authority_capabilities_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + mysticeti_fastpath: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 
+binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 
31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 
+bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 
+groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +poseidon_bn254_cost_base: 260 +poseidon_bn254_cost_per_block: 10 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 
200 +vdf_verify_vdf_cost: 1500 +vdf_hash_to_input_cost: 100 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_68.snap 
b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_68.snap new file mode 100644 index 0000000000000..68e55955524b4 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_68.snap @@ -0,0 +1,350 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 68 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_poseidon: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + enable_group_ops_native_function_msm: true + reject_mutable_random_on_entry_functions: true 
+ per_object_congestion_control_mode: TotalGasBudgetWithCap + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + enable_vdf: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + passkey_auth: true + authority_capabilities_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + mysticeti_fastpath: true + relocate_event_module: true + uncompressed_g1_group_elements: true + disallow_new_modules_in_deps_only_packages: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 
+max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 
+address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 
+bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 
+groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +poseidon_bn254_cost_base: 260 +poseidon_bn254_cost_per_block: 10 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +group_ops_bls12381_g1_to_uncompressed_g1_cost: 26 +group_ops_bls12381_uncompressed_g1_to_g1_cost: 52 +group_ops_bls12381_uncompressed_g1_sum_base_cost: 26 +group_ops_bls12381_uncompressed_g1_sum_cost_per_term: 13 +group_ops_bls12381_uncompressed_g1_sum_max_terms: 2000 +hmac_hmac_sha3_256_cost_base: 52 
+hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +vdf_verify_vdf_cost: 1500 +vdf_hash_to_input_cost: 100 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 500 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +max_txn_cost_overage_per_object_in_commit: 18446744073709551615 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: 
true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 18500000 +max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 +gas_budget_based_txn_cost_cap_factor: 400000 +gas_budget_based_txn_cost_absolute_cap_commit_count: 50 + diff --git a/crates/sui-proxy/src/peers.rs b/crates/sui-proxy/src/peers.rs index b139865f3a4fa..aec70efa490df 100644 --- a/crates/sui-proxy/src/peers.rs +++ b/crates/sui-proxy/src/peers.rs @@ -404,14 +404,22 @@ async fn extract_bridge( } }; // Parse the URL - let mut bridge_url = match Url::parse(&url_str) { + let bridge_url = match Url::parse(&url_str) { Ok(url) => url, Err(_) => { warn!(url_str, "Unable to parse http_rest_url"); return None; } }; - bridge_url.set_path("/metrics_pub_key"); + + // Append "metrics_pub_key" to the path + let bridge_url = match append_path_segment(bridge_url, "metrics_pub_key") { + Some(url) => url, + None => { + warn!(url_str, "Unable to append path segment to URL"); + return None; + } + }; // Use the host portion of the http_rest_url as the "name" let bridge_host = match bridge_url.host_str() { @@ -524,6 +532,11 @@ fn fallback_to_cached_key( } } +fn append_path_segment(mut url: Url, segment: &str) -> Option { + url.path_segments_mut().ok()?.pop_if_empty().push(segment); + Some(url) +} + #[cfg(test)] mod tests { use super::*; @@ -647,4 +660,62 @@ mod tests { "Cache should still contain the original key" ); } + + #[test] + fn test_append_path_segment() { + let test_cases = vec![ + ( + "https://example.com", + "metrics_pub_key", + "https://example.com/metrics_pub_key", + ), + ( + "https://example.com/api", + "metrics_pub_key", + "https://example.com/api/metrics_pub_key", + ), + ( + "https://example.com/", + "metrics_pub_key", + "https://example.com/metrics_pub_key", + ), + ( + "https://example.com/api/", + "metrics_pub_key", + "https://example.com/api/metrics_pub_key", + ), + ( + "https://example.com:8080", + "metrics_pub_key", + "https://example.com:8080/metrics_pub_key", + ), + ( + 
"https://example.com?param=value", + "metrics_pub_key", + "https://example.com/metrics_pub_key?param=value", + ), + ( + "https://example.com:8080/api/v1?param=value", + "metrics_pub_key", + "https://example.com:8080/api/v1/metrics_pub_key?param=value", + ), + ]; + + for (input_url, segment, expected_output) in test_cases { + let url = Url::parse(input_url).unwrap(); + let result = append_path_segment(url, segment); + assert!( + result.is_some(), + "Failed to append segment for URL: {}", + input_url + ); + let result_url = result.unwrap(); + assert_eq!( + result_url.as_str(), + expected_output, + "Unexpected result for input URL: {}", + input_url + ); + } + } } diff --git a/crates/sui-replay/src/data_fetcher.rs b/crates/sui-replay/src/data_fetcher.rs index 897e9cb511a6f..f7d5fcfcb0cc5 100644 --- a/crates/sui-replay/src/data_fetcher.rs +++ b/crates/sui-replay/src/data_fetcher.rs @@ -6,7 +6,7 @@ use crate::types::EPOCH_CHANGE_STRUCT_TAG; use async_trait::async_trait; use futures::future::join_all; use lru::LruCache; -use move_core_types::parser::parse_struct_tag; +use move_core_types::language_storage::StructTag; use parking_lot::RwLock; use rand::Rng; use std::collections::BTreeMap; @@ -568,7 +568,7 @@ impl DataFetcher for RemoteFetcher { reverse: bool, ) -> Result, ReplayEngineError> { let struct_tag_str = EPOCH_CHANGE_STRUCT_TAG.to_string(); - let struct_tag = parse_struct_tag(&struct_tag_str)?; + let struct_tag = StructTag::from_str(&struct_tag_str)?; let mut epoch_change_events: Vec = vec![]; let mut has_next_page = true; diff --git a/crates/sui-replay/src/replay.rs b/crates/sui-replay/src/replay.rs index b41e0eddf3b0e..6f140a122c57d 100644 --- a/crates/sui-replay/src/replay.rs +++ b/crates/sui-replay/src/replay.rs @@ -473,8 +473,13 @@ impl LocalExec { objs: Vec, protocol_version: u64, ) -> Result, ReplayEngineError> { - let syst_packages = self.system_package_versions_for_protocol_version(protocol_version)?; - let syst_packages_objs = 
self.multi_download(&syst_packages).await?; + let syst_packages_objs = if self.protocol_version.is_some_and(|i| i < 0) { + BuiltInFramework::genesis_objects().collect() + } else { + let syst_packages = + self.system_package_versions_for_protocol_version(protocol_version)?; + self.multi_download(&syst_packages).await? + }; // Download latest version of all packages that are not system packages // This is okay since the versions can never change @@ -707,17 +712,6 @@ impl LocalExec { expensive_safety_check_config: ExpensiveSafetyCheckConfig, ) -> Result { let tx_digest = &tx_info.tx_digest; - // TODO: Support system transactions. - if tx_info.sender_signed_data.transaction_data().is_system_tx() { - warn!( - "System TX replay not supported: {}, skipping transaction", - tx_digest - ); - return Err(ReplayEngineError::TransactionNotSupported { - digest: *tx_digest, - reason: "System transaction".to_string(), - }); - } // Before protocol version 16, the generation of effects depends on the wrapped tombstones. // It is not possible to retrieve such data for replay. 
if tx_info.protocol_version.as_u64() < 16 { @@ -759,30 +753,32 @@ impl LocalExec { let expensive_checks = true; let transaction_kind = override_transaction_kind.unwrap_or(tx_info.kind.clone()); let certificate_deny_set = HashSet::new(); - let (inner_store, gas_status, effects, result) = if let Ok(gas_status) = SuiGasStatus::new( - tx_info.gas_budget, - tx_info.gas_price, - tx_info.reference_gas_price, - protocol_config, - ) { - executor.execute_transaction_to_effects( - &self, + let gas_status = if tx_info.kind.is_system_tx() { + SuiGasStatus::new_unmetered() + } else { + SuiGasStatus::new( + tx_info.gas_budget, + tx_info.gas_price, + tx_info.reference_gas_price, protocol_config, - metrics.clone(), - expensive_checks, - &certificate_deny_set, - &tx_info.executed_epoch, - tx_info.epoch_start_timestamp, - CheckedInputObjects::new_for_replay(input_objects.clone()), - tx_info.gas.clone(), - gas_status, - transaction_kind.clone(), - tx_info.sender, - *tx_digest, ) - } else { - unreachable!("Transaction was valid so gas status must be valid"); + .expect("Failed to create gas status") }; + let (inner_store, gas_status, effects, result) = executor.execute_transaction_to_effects( + &self, + protocol_config, + metrics.clone(), + expensive_checks, + &certificate_deny_set, + &tx_info.executed_epoch, + tx_info.epoch_start_timestamp, + CheckedInputObjects::new_for_replay(input_objects.clone()), + tx_info.gas.clone(), + gas_status, + transaction_kind.clone(), + tx_info.sender, + *tx_digest, + ); if let Err(err) = self.pretty_print_for_tracing( &gas_status, @@ -1798,7 +1794,11 @@ impl LocalExec { self.multi_download_and_store(&shared_refs).await?; // Download gas (although this should already be in cache from modified at versions?) 
- let gas_refs: Vec<_> = tx_info.gas.iter().map(|w| (w.0, w.1)).collect(); + let gas_refs: Vec<_> = tx_info + .gas + .iter() + .filter_map(|w| (w.0 != ObjectID::ZERO).then_some((w.0, w.1))) + .collect(); self.multi_download_and_store(&gas_refs).await?; // Fetch the input objects we know from the raw transaction diff --git a/crates/sui-rest-api/Cargo.toml b/crates/sui-rest-api/Cargo.toml index 3f6412fa11af6..249c5569b7e6b 100644 --- a/crates/sui-rest-api/Cargo.toml +++ b/crates/sui-rest-api/Cargo.toml @@ -27,6 +27,7 @@ prometheus.workspace = true openapiv3 = { git = "https://github.com/bmwill/openapiv3.git", rev = "ca4b4845b7c159a39f5c68ad8f7f76cb6f4d6963" } schemars.workspace = true documented = "0.6.0" +prost.workspace = true fastcrypto.workspace = true sui-types.workspace = true @@ -37,3 +38,4 @@ move-core-types.workspace = true [dev-dependencies] diffy = "0.3" +prost-build = "0.13.3" diff --git a/crates/sui-rest-api/openapi/openapi.json b/crates/sui-rest-api/openapi/openapi.json index 494555500fef9..3e264a26e6c3a 100644 --- a/crates/sui-rest-api/openapi/openapi.json +++ b/crates/sui-rest-api/openapi/openapi.json @@ -77,6 +77,150 @@ } } }, + "/checkpoints": { + "get": { + "tags": [ + "Checkpoint" + ], + "description": "[![stable](https://img.shields.io/badge/api-stable-53b576?style=for-the-badge)](#)\n\nList Checkpoints\n\nRequest a page of checkpoints, and optionally their contents, ordered by\n`CheckpointSequenceNumber`.\n\nIf the requested page is below the Node's `lowest_available_checkpoint`, a 410 will be\nreturned.", + "operationId": "List Checkpoints", + "parameters": [ + { + "in": "query", + "name": "contents", + "description": "Request `CheckpointContents` be included in the response", + "schema": { + "description": "Request `CheckpointContents` be included in the response", + "default": false, + "type": "boolean" + }, + "style": "form" + }, + { + "in": "query", + "name": "direction", + "description": "The direction to paginate in.\n\nDefaults to 
`descending` if not provided.", + "schema": { + "description": "The direction to paginate in.\n\nDefaults to `descending` if not provided.", + "allOf": [ + { + "$ref": "#/components/schemas/Direction" + } + ] + }, + "style": "form" + }, + { + "in": "query", + "name": "limit", + "description": "Page size limit for the response.\n\nDefaults to `50` if not provided with a maximum page size of `100`.", + "schema": { + "description": "Page size limit for the response.\n\nDefaults to `50` if not provided with a maximum page size of `100`.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "style": "form" + }, + { + "in": "query", + "name": "start", + "description": "The checkpoint to start listing from.\n\nDefaults to the latest checkpoint if not provided.", + "schema": { + "description": "The checkpoint to start listing from.\n\nDefaults to the latest checkpoint if not provided.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "", + "headers": { + "x-sui-cursor": { + "style": "simple", + "schema": { + "type": "string" + } + } + }, + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CheckpointResponse" + } + } + }, + "application/bcs": {}, + "application/x-protobuf": {} + } + }, + "410": { + "description": "" + }, + "500": { + "description": "" + } + } + } + }, + "/checkpoints/{checkpoint}": { + "get": { + "tags": [ + "Checkpoint" + ], + "description": "[![stable](https://img.shields.io/badge/api-stable-53b576?style=for-the-badge)](#)\n\nFetch a Checkpoint\n\nFetch a checkpoint either by `CheckpointSequenceNumber` (checkpoint height) or by\n`CheckpointDigest` and optionally request its contents.\n\nIf the checkpoint has been pruned and is not available, a 410 will be returned.", + "operationId": "Get Checkpoint", + "parameters": [ + { + "in": "path", + "name": "checkpoint", + "required": true, + 
"schema": { + "$ref": "#/components/schemas/CheckpointId" + }, + "style": "simple" + }, + { + "in": "query", + "name": "contents", + "description": "Request `CheckpointContents` be included in the response", + "schema": { + "description": "Request `CheckpointContents` be included in the response", + "default": false, + "type": "boolean" + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CheckpointResponse" + } + }, + "application/bcs": {} + } + }, + "404": { + "description": "" + }, + "410": { + "description": "" + }, + "500": { + "description": "" + } + } + } + }, "/accounts/{account}/objects": { "get": { "tags": [ @@ -162,9 +306,10 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Object" + "$ref": "#/components/schemas/ObjectResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -209,9 +354,10 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Object" + "$ref": "#/components/schemas/ObjectResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -282,90 +428,20 @@ } } }, - "/checkpoints": { - "get": { - "tags": [ - "Checkpoint" - ], - "description": "[![unstable](https://img.shields.io/badge/api-unstable-red?style=for-the-badge)](#) _Api subject to change; use at your own risk_\n\n", - "operationId": "ListCheckpoints", - "parameters": [ - { - "in": "query", - "name": "direction", - "schema": { - "$ref": "#/components/schemas/Direction" - }, - "style": "form" - }, - { - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "style": "form" - }, - { - "in": "query", - "name": "start", - "description": "The checkpoint to start listing from.\n\nDefaults to the latest checkpoint if not provided.", - "schema": { - "description": "The checkpoint to start listing from.\n\nDefaults to the latest 
checkpoint if not provided.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "style": "form" - } - ], - "responses": { - "200": { - "description": "", - "headers": { - "x-sui-cursor": { - "style": "simple", - "schema": { - "type": "string" - } - } - }, - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/SignedCheckpointSummary" - } - } - }, - "application/bcs": {} - } - }, - "410": { - "description": "" - } - } - } - }, - "/checkpoints/{checkpoint}": { + "/checkpoints/{checkpoint}/full": { "get": { "tags": [ "Checkpoint" ], - "description": "[![unstable](https://img.shields.io/badge/api-unstable-red?style=for-the-badge)](#) _Api subject to change; use at your own risk_\n\n", - "operationId": "GetCheckpoint", + "description": "[![unstable](https://img.shields.io/badge/api-unstable-red?style=for-the-badge)](#) _Api subject to change; use at your own risk_\n\nFetch a Full Checkpoint\n\nRequest a checkpoint and all data associated with it including:\n- CheckpointSummary\n- Validator Signature\n- CheckpointContents\n- Transactions, Effects, Events, as well as all input and output objects\n\nIf the requested checkpoint is below the Node's `lowest_available_checkpoint_objects`, a 410\nwill be returned.", + "operationId": "Get Full Checkpoint", "parameters": [ { "in": "path", "name": "checkpoint", "required": true, "schema": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 + "$ref": "#/components/schemas/CheckpointId" }, "style": "simple" } @@ -374,11 +450,6 @@ "200": { "description": "", "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SignedCheckpointSummary" - } - }, "application/bcs": {} } }, @@ -387,6 +458,9 @@ }, "410": { "description": "" + }, + "500": { + "description": "" } } } @@ -418,6 +492,7 @@ "$ref": "#/components/schemas/TransactionResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -482,6 +557,7 @@ } } 
}, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -556,6 +632,7 @@ "$ref": "#/components/schemas/TransactionExecutionResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -591,6 +668,7 @@ "$ref": "#/components/schemas/ValidatorCommittee" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -616,6 +694,7 @@ "$ref": "#/components/schemas/ValidatorCommittee" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -806,6 +885,7 @@ "$ref": "#/components/schemas/TransactionSimulationResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -883,6 +963,7 @@ "$ref": "#/components/schemas/ResolveTransactionResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -1224,12 +1305,65 @@ } ] }, + "CheckpointContents": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CheckpointTransactionInfo" + } + }, "CheckpointContentsDigest": { "$ref": "#/components/schemas/Digest" }, "CheckpointDigest": { "$ref": "#/components/schemas/Digest" }, + "CheckpointId": { + "anyOf": [ + { + "title": "SequenceNumber", + "description": "Sequence number or height of a Checkpoint", + "examples": [ + 0 + ], + "type": "string", + "format": "u64" + }, + { + "title": "Digest", + "description": "Base58 encoded 32-byte digest of a Checkpoint", + "examples": [ + "4btiuiMPvEENsttpZC7CZ53DruC3MAgfznDbASZ7DR6S" + ], + "allOf": [ + { + "$ref": "#/components/schemas/CheckpointDigest" + } + ] + } + ] + }, + "CheckpointResponse": { + "type": "object", + "required": [ + "digest", + "signature", + "summary" + ], + "properties": { + "contents": { + "$ref": "#/components/schemas/CheckpointContents" + }, + "digest": { + "$ref": "#/components/schemas/CheckpointDigest" + }, + "signature": { + "$ref": "#/components/schemas/ValidatorAggregatedSignature" + }, + "summary": { + "$ref": "#/components/schemas/CheckpointSummary" + } + } + }, "CheckpointSummary": { "type": "object", "required": [ @@ -1297,6 +1431,28 @@ 
} } }, + "CheckpointTransactionInfo": { + "type": "object", + "required": [ + "effects", + "signatures", + "transaction" + ], + "properties": { + "effects": { + "$ref": "#/components/schemas/TransactionEffectsDigest" + }, + "signatures": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UserSignature" + } + }, + "transaction": { + "$ref": "#/components/schemas/TransactionDigest" + } + } + }, "CircomG1": { "description": "A G1 point in BN254 serialized as a vector of three strings which is the canonical decimal representation of the projective coordinates in Fq.", "type": "array", @@ -3728,6 +3884,21 @@ } } }, + "ObjectResponse": { + "type": "object", + "required": [ + "digest", + "object" + ], + "properties": { + "digest": { + "$ref": "#/components/schemas/ObjectDigest" + }, + "object": { + "$ref": "#/components/schemas/Object" + } + } + }, "Owner": { "oneOf": [ { @@ -3996,21 +4167,6 @@ "type": "string", "format": "base64" }, - "SignedCheckpointSummary": { - "type": "object", - "required": [ - "checkpoint", - "signature" - ], - "properties": { - "checkpoint": { - "$ref": "#/components/schemas/CheckpointSummary" - }, - "signature": { - "$ref": "#/components/schemas/ValidatorAggregatedSignature" - } - } - }, "SimpleSignature": { "oneOf": [ { @@ -4659,6 +4815,9 @@ } ] }, + "TransactionEffectsDigest": { + "$ref": "#/components/schemas/Digest" + }, "TransactionEvents": { "type": "array", "items": { diff --git a/crates/sui-rest-api/proto/rest.proto b/crates/sui-rest-api/proto/rest.proto new file mode 100644 index 0000000000000..099efa57e81bc --- /dev/null +++ b/crates/sui-rest-api/proto/rest.proto @@ -0,0 +1,156 @@ +syntax = "proto3"; + +package sui.rest; + +// Sui `TransactionData` type serialized as Bcs +message Transaction { + bytes transaction = 1; +} + +// Sui `TransactionEffects` type serialized as Bcs +message TransactionEffects { + bytes effects = 1; +} + +// Sui `TransactionEvents` type serialized as Bcs +message TransactionEvents { + bytes 
events = 1; +} + +// Sui `Object` type serialized as Bcs +message Object { + bytes object = 1; +} + +// Sui `CheckpointSummary` type serialized as Bcs +message CheckpointSummary { + bytes summary = 1; +} + +// Sui `CheckpointContents` type serialized as Bcs +message CheckpointContents { + bytes contents = 1; +} + +// Sui `UserSignature` type serialized as bytes +message UserSignature { + bytes signature = 1; +} + +// Sui `ValidatorAggregatedSignature` type serialized as Bcs +message ValidatorAggregatedSignature { + bytes signature = 1; +} + +message GetTransactionResponse { + // The digest of this transaction + bytes digest = 1; + optional Transaction transaction = 2; + repeated UserSignature signatures = 3; + optional TransactionEffects effects = 4; + optional TransactionEvents events = 5; + optional uint64 checkpoint = 6; + optional uint64 timestamp_ms = 7; +} + +message GetObjectResponse { + // The digest of this object + bytes digest = 1; + optional Object object = 2; +} + +message GetCheckpointResponse { + // The digest of this CheckpointSummary + bytes digest = 1; + optional CheckpointSummary summary = 2; + optional ValidatorAggregatedSignature signature = 3; + optional CheckpointContents contents = 4; +} + +message FullCheckpoint { + optional CheckpointSummary summary = 1; + optional ValidatorAggregatedSignature signature = 2; + optional CheckpointContents contents = 3; + repeated CheckpointTransaction transactions = 4; +} + +message CheckpointTransaction { + optional Transaction transaction = 1; + repeated UserSignature signatures = 2; + optional TransactionEffects effects = 3; + optional TransactionEvents events = 4; + repeated Object input_objects = 5; + repeated Object output_objects = 6; +} + +message ListCheckpointResponse { + repeated GetCheckpointResponse checkpoints = 1; +} + +message ListTransactionsResponse { + repeated GetTransactionResponse transactions = 1; +} + +message Address { + bytes address = 1; +} + +message TypeTag { + string type_tag = 
1; +} + +message I128 { + bytes little_endian_bytes = 1; +} + +message BalanceChange { + Address address = 1; + TypeTag coin_type = 2; + I128 amount = 3; +} + +message EffectsFinality { + optional ValidatorAggregatedSignature signature = 1; + optional uint64 checkpoint = 2; +} + +message TransactionExecutionResponse { + optional TransactionEffects effects = 1; + optional EffectsFinality finality = 2; + optional TransactionEvents events = 3; + repeated BalanceChange balance_changes = 4; + repeated Object input_objects = 5; + repeated Object output_objects = 6; +} + +message TransactionSimulationResponse { + optional TransactionEffects effects = 1; + optional TransactionEvents events = 2; + repeated BalanceChange balance_changes = 3; + repeated Object input_objects = 4; + repeated Object output_objects = 5; +} + +message ResolveTransactionResponse { + optional Transaction transaction = 1; + optional TransactionSimulationResponse simulation = 2; +} + +message ExecuteTransactionRequest { + optional Transaction transaction = 1; + repeated UserSignature signatures = 2; +} + +message SimulateTransactionRequest { + optional Transaction transaction = 1; +} + +message ValidatorCommittee { + uint64 epoch = 1; + repeated ValidatorCommitteeMember members = 2; +} + +message ValidatorCommitteeMember { + bytes public_key = 1; + uint64 stake = 2; +} diff --git a/crates/sui-rest-api/src/accept.rs b/crates/sui-rest-api/src/accept.rs index 193158f1520c0..fbb93606c95c5 100644 --- a/crates/sui-rest-api/src/accept.rs +++ b/crates/sui-rest-api/src/accept.rs @@ -8,6 +8,7 @@ use mime::Mime; // include type information // "application/x.sui.+bcs" pub const APPLICATION_BCS: &str = "application/bcs"; +pub const APPLICATION_PROTOBUF: &str = "application/x-protobuf"; /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2) #[derive(Debug, Clone)] @@ -51,6 +52,7 @@ where pub enum AcceptFormat { Json, Bcs, + // Protobuf, } #[axum::async_trait] @@ -80,6 +82,42 @@ 
where } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum AcceptJsonProtobufBcs { + Json, + Protobuf, + Bcs, +} + +#[axum::async_trait] +impl axum::extract::FromRequestParts for AcceptJsonProtobufBcs +where + S: Send + Sync, +{ + type Rejection = std::convert::Infallible; + + async fn from_request_parts( + parts: &mut http::request::Parts, + s: &S, + ) -> Result { + let accept = Accept::from_request_parts(parts, s).await?; + + for mime in accept.0 { + let essence = mime.essence_str(); + + if essence == mime::APPLICATION_JSON.essence_str() { + return Ok(Self::Json); + } else if essence == APPLICATION_PROTOBUF { + return Ok(Self::Protobuf); + } else if essence == APPLICATION_BCS { + return Ok(Self::Bcs); + } + } + + Ok(Self::Json) + } +} + #[cfg(test)] mod tests { use std::str::FromStr; diff --git a/crates/sui-rest-api/src/checkpoints.rs b/crates/sui-rest-api/src/checkpoints.rs index 425828d398c6a..b3f887fd817c2 100644 --- a/crates/sui-rest-api/src/checkpoints.rs +++ b/crates/sui-rest-api/src/checkpoints.rs @@ -4,96 +4,38 @@ use axum::extract::Query; use axum::extract::{Path, State}; use sui_sdk_types::types::{ - CheckpointData, CheckpointDigest, CheckpointSequenceNumber, SignedCheckpointSummary, + CheckpointContents, CheckpointDigest, CheckpointSequenceNumber, CheckpointSummary, + SignedCheckpointSummary, ValidatorAggregatedSignature, }; use sui_types::storage::ReadStore; use tap::Pipe; +use crate::accept::AcceptJsonProtobufBcs; use crate::openapi::{ApiEndpoint, OperationBuilder, ResponseBuilder, RouteHandler}; +use crate::proto; +use crate::proto::ListCheckpointResponse; use crate::reader::StateReader; -use crate::Page; -use crate::{accept::AcceptFormat, response::ResponseContent, Result}; +use crate::response::{JsonProtobufBcs, ProtobufBcs}; +use crate::PageCursor; use crate::{Direction, RestService}; - -pub struct GetCheckpointFull; - -impl ApiEndpoint for GetCheckpointFull { - fn method(&self) -> axum::http::Method { - axum::http::Method::GET - } - 
- fn path(&self) -> &'static str { - "/checkpoints/{checkpoint}/full" - } - - fn hidden(&self) -> bool { - true - } - - fn operation( - &self, - generator: &mut schemars::gen::SchemaGenerator, - ) -> openapiv3::v3_1::Operation { - OperationBuilder::new() - .tag("Checkpoint") - .operation_id("GetCheckpointFull") - .path_parameter::("checkpoint", generator) - .response( - 200, - ResponseBuilder::new() - .json_content::(generator) - .bcs_content() - .build(), - ) - .response(404, ResponseBuilder::new().build()) - .response(410, ResponseBuilder::new().build()) - .build() - } - - fn handler(&self) -> RouteHandler { - RouteHandler::new(self.method(), get_checkpoint_full) - } -} - -async fn get_checkpoint_full( - Path(checkpoint_id): Path, - accept: AcceptFormat, - State(state): State, -) -> Result> { - let verified_summary = match checkpoint_id { - CheckpointId::SequenceNumber(s) => { - // Since we need object contents we need to check for the lowest available checkpoint - // with objects that hasn't been pruned - let oldest_checkpoint = state.inner().get_lowest_available_checkpoint_objects()?; - if s < oldest_checkpoint { - return Err(crate::RestError::new( - axum::http::StatusCode::GONE, - "Old checkpoints have been pruned", - )); - } - - state.inner().get_checkpoint_by_sequence_number(s) - } - CheckpointId::Digest(d) => state.inner().get_checkpoint_by_digest(&d.into()), - }? - .ok_or(CheckpointNotFoundError(checkpoint_id))?; - - let checkpoint_contents = state - .inner() - .get_checkpoint_contents_by_digest(&verified_summary.content_digest)? 
- .ok_or(CheckpointNotFoundError(checkpoint_id))?; - - let checkpoint_data = state - .inner() - .get_checkpoint_data(verified_summary, checkpoint_contents)?; - - match accept { - AcceptFormat::Json => ResponseContent::Json(checkpoint_data), - AcceptFormat::Bcs => ResponseContent::Bcs(checkpoint_data), - } - .pipe(Ok) +use crate::{RestError, Result}; +use documented::Documented; + +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +pub struct CheckpointResponse { + pub digest: CheckpointDigest, + pub summary: CheckpointSummary, + pub signature: ValidatorAggregatedSignature, + pub contents: Option, } +/// Fetch a Checkpoint +/// +/// Fetch a checkpoint either by `CheckpointSequenceNumber` (checkpoint height) or by +/// `CheckpointDigest` and optionally request its contents. +/// +/// If the checkpoint has been pruned and is not available, a 410 will be returned. +#[derive(Documented)] pub struct GetCheckpoint; impl ApiEndpoint for GetCheckpoint { @@ -105,23 +47,30 @@ impl ApiEndpoint for GetCheckpoint { "/checkpoints/{checkpoint}" } + fn stable(&self) -> bool { + true + } + fn operation( &self, generator: &mut schemars::gen::SchemaGenerator, ) -> openapiv3::v3_1::Operation { OperationBuilder::new() .tag("Checkpoint") - .operation_id("GetCheckpoint") - .path_parameter::("checkpoint", generator) + .operation_id("Get Checkpoint") + .description(Self::DOCS) + .path_parameter::("checkpoint", generator) + .query_parameters::(generator) .response( 200, ResponseBuilder::new() - .json_content::(generator) + .json_content::(generator) .bcs_content() .build(), ) .response(404, ResponseBuilder::new().build()) .response(410, ResponseBuilder::new().build()) + .response(500, ResponseBuilder::new().build()) .build() } @@ -132,10 +81,14 @@ impl ApiEndpoint for GetCheckpoint { async fn get_checkpoint( Path(checkpoint_id): Path, - accept: AcceptFormat, + Query(parameters): Query, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> 
Result> { - let summary = match checkpoint_id { +) -> Result> { + let SignedCheckpointSummary { + checkpoint, + signature, + } = match checkpoint_id { CheckpointId::SequenceNumber(s) => { let oldest_checkpoint = state.inner().get_lowest_available_checkpoint()?; if s < oldest_checkpoint { @@ -153,19 +106,53 @@ async fn get_checkpoint( .into_inner() .try_into()?; + let contents = if parameters.contents { + Some( + state + .inner() + .get_checkpoint_contents_by_sequence_number(checkpoint.sequence_number)? + .ok_or(CheckpointNotFoundError(checkpoint_id))? + .try_into()?, + ) + } else { + None + }; + + let response = CheckpointResponse { + digest: checkpoint.digest(), + summary: checkpoint, + signature, + contents, + }; + match accept { - AcceptFormat::Json => ResponseContent::Json(summary), - AcceptFormat::Bcs => ResponseContent::Bcs(summary), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } .pipe(Ok) } -#[derive(Debug, Copy, Clone, Eq, PartialEq)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, schemars::JsonSchema)] +#[schemars(untagged)] pub enum CheckpointId { - SequenceNumber(CheckpointSequenceNumber), + #[schemars( + title = "SequenceNumber", + example = "CheckpointSequenceNumber::default" + )] + /// Sequence number or height of a Checkpoint + SequenceNumber(#[schemars(with = "crate::_schemars::U64")] CheckpointSequenceNumber), + #[schemars(title = "Digest", example = "example_digest")] + /// Base58 encoded 32-byte digest of a Checkpoint Digest(CheckpointDigest), } +fn example_digest() -> CheckpointDigest { + "4btiuiMPvEENsttpZC7CZ53DruC3MAgfznDbASZ7DR6S" + .parse() + .unwrap() +} + impl<'de> serde::Deserialize<'de> for CheckpointId { fn deserialize(deserializer: D) -> std::result::Result where @@ -221,6 +208,22 @@ impl From for crate::RestError { } } +/// Query parameters for the GetCheckpoint 
endpoint +#[derive(Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +pub struct GetCheckpointQueryParameters { + /// Request `CheckpointContents` be included in the response + #[serde(default)] + pub contents: bool, +} + +/// List Checkpoints +/// +/// Request a page of checkpoints, and optionally their contents, ordered by +/// `CheckpointSequenceNumber`. +/// +/// If the requested page is below the Node's `lowest_available_checkpoint`, a 410 will be +/// returned. +#[derive(Documented)] pub struct ListCheckpoints; impl ApiEndpoint for ListCheckpoints { @@ -232,23 +235,30 @@ impl ApiEndpoint for ListCheckpoints { "/checkpoints" } + fn stable(&self) -> bool { + true + } + fn operation( &self, generator: &mut schemars::gen::SchemaGenerator, ) -> openapiv3::v3_1::Operation { OperationBuilder::new() .tag("Checkpoint") - .operation_id("ListCheckpoints") + .operation_id("List Checkpoints") + .description(Self::DOCS) .query_parameters::(generator) .response( 200, ResponseBuilder::new() - .json_content::>(generator) + .json_content::>(generator) .bcs_content() + .protobuf_content() .header::(crate::types::X_SUI_CURSOR, generator) .build(), ) .response(410, ResponseBuilder::new().build()) + .response(500, ResponseBuilder::new().build()) .build() } @@ -259,9 +269,12 @@ impl ApiEndpoint for ListCheckpoints { async fn list_checkpoints( Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result<( + PageCursor, + JsonProtobufBcs, ListCheckpointResponse, Vec>, +)> { let latest_checkpoint = state.inner().get_latest_checkpoint()?.sequence_number; let oldest_checkpoint = state.inner().get_lowest_available_checkpoint()?; let limit = parameters.limit(); @@ -281,33 +294,76 @@ async fn list_checkpoints( .map(|result| { result .map_err(Into::into) - .and_then(|(checkpoint, _contents)| { - SignedCheckpointSummary::try_from(checkpoint).map_err(Into::into) + .and_then(|(checkpoint, contents)| { + 
let SignedCheckpointSummary { + checkpoint, + signature, + } = checkpoint.try_into()?; + let contents = if parameters.contents { + Some(contents.try_into()?) + } else { + None + }; + Ok(CheckpointResponse { + digest: checkpoint.digest(), + summary: checkpoint, + signature, + contents, + }) }) }) .collect::>>()?; let cursor = checkpoints.last().and_then(|checkpoint| match direction { - Direction::Ascending => checkpoint.checkpoint.sequence_number.checked_add(1), - Direction::Descending => checkpoint.checkpoint.sequence_number.checked_sub(1), + Direction::Ascending => checkpoint.summary.sequence_number.checked_add(1), + Direction::Descending => { + let cursor = checkpoint.summary.sequence_number.checked_sub(1); + // If we've exhausted our available checkpoint range then there are no more pages left + if cursor < Some(oldest_checkpoint) { + None + } else { + cursor + } + } }); match accept { - AcceptFormat::Json => ResponseContent::Json(checkpoints), - AcceptFormat::Bcs => ResponseContent::Bcs(checkpoints), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(checkpoints), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(checkpoints.try_into()?), + // In order to work around compatibility issues with existing clients, keep the BCS form as + // the old format without contents + AcceptJsonProtobufBcs::Bcs => { + let checkpoints = checkpoints + .into_iter() + .map(|c| SignedCheckpointSummary { + checkpoint: c.summary, + signature: c.signature, + }) + .collect(); + JsonProtobufBcs::Bcs(checkpoints) + } } - .pipe(|entries| Page { entries, cursor }) + .pipe(|entries| (PageCursor(cursor), entries)) .pipe(Ok) } -#[derive(Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +#[derive(Debug, Default, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] pub struct ListCheckpointsQueryParameters { + /// Page size limit for the response. + /// + /// Defaults to `50` if not provided with a maximum page size of `100`. 
pub limit: Option, /// The checkpoint to start listing from. /// /// Defaults to the latest checkpoint if not provided. pub start: Option, + /// The direction to paginate in. + /// + /// Defaults to `descending` if not provided. pub direction: Option, + /// Request `CheckpointContents` be included in the response + #[serde(default)] + pub contents: bool, } impl ListCheckpointsQueryParameters { @@ -325,3 +381,110 @@ impl ListCheckpointsQueryParameters { self.direction.unwrap_or(Direction::Descending) } } + +/// Fetch a Full Checkpoint +/// +/// Request a checkpoint and all data associated with it including: +/// - CheckpointSummary +/// - Validator Signature +/// - CheckpointContents +/// - Transactions, Effects, Events, as well as all input and output objects +/// +/// If the requested checkpoint is below the Node's `lowest_available_checkpoint_objects`, a 410 +/// will be returned. +#[derive(Documented)] +pub struct GetFullCheckpoint; + +impl ApiEndpoint for GetFullCheckpoint { + fn method(&self) -> axum::http::Method { + axum::http::Method::GET + } + + fn path(&self) -> &'static str { + "/checkpoints/{checkpoint}/full" + } + + fn stable(&self) -> bool { + // TODO transactions are serialized with an intent message, do we want to change this + // format to remove it (and remove user signature duplication) prior to stabalizing the + // format? 
+ false + } + + fn operation( + &self, + generator: &mut schemars::gen::SchemaGenerator, + ) -> openapiv3::v3_1::Operation { + OperationBuilder::new() + .tag("Checkpoint") + .operation_id("Get Full Checkpoint") + .description(Self::DOCS) + .path_parameter::("checkpoint", generator) + .response(200, ResponseBuilder::new().bcs_content().build()) + .response(404, ResponseBuilder::new().build()) + .response(410, ResponseBuilder::new().build()) + .response(500, ResponseBuilder::new().build()) + .build() + } + + fn handler(&self) -> RouteHandler { + RouteHandler::new(self.method(), get_full_checkpoint) + } +} + +async fn get_full_checkpoint( + Path(checkpoint_id): Path, + accept: AcceptJsonProtobufBcs, + State(state): State, +) -> Result> +{ + match accept { + AcceptJsonProtobufBcs::Protobuf => {} + AcceptJsonProtobufBcs::Bcs => {} + _ => { + return Err(RestError::new( + axum::http::StatusCode::BAD_REQUEST, + "invalid accept type; only 'application/x-protobuf' is supported", + )) + } + } + + let verified_summary = match checkpoint_id { + CheckpointId::SequenceNumber(s) => { + // Since we need object contents we need to check for the lowest available checkpoint + // with objects that hasn't been pruned + let oldest_checkpoint = state.inner().get_lowest_available_checkpoint_objects()?; + if s < oldest_checkpoint { + return Err(crate::RestError::new( + axum::http::StatusCode::GONE, + "Old checkpoints have been pruned", + )); + } + + state.inner().get_checkpoint_by_sequence_number(s) + } + CheckpointId::Digest(d) => state.inner().get_checkpoint_by_digest(&d.into()), + }? + .ok_or(CheckpointNotFoundError(checkpoint_id))?; + + let checkpoint_contents = state + .inner() + .get_checkpoint_contents_by_digest(&verified_summary.content_digest)? 
+ .ok_or(CheckpointNotFoundError(checkpoint_id))?; + + let checkpoint_data = state + .inner() + .get_checkpoint_data(verified_summary, checkpoint_contents)?; + + match accept { + AcceptJsonProtobufBcs::Protobuf => ProtobufBcs::Protobuf(checkpoint_data.try_into()?), + AcceptJsonProtobufBcs::Bcs => ProtobufBcs::Bcs(checkpoint_data), + _ => { + return Err(RestError::new( + axum::http::StatusCode::BAD_REQUEST, + "invalid accept type; only 'application/x-protobuf' is supported", + )) + } + } + .pipe(Ok) +} diff --git a/crates/sui-rest-api/src/client/mod.rs b/crates/sui-rest-api/src/client/mod.rs index a5582a0928662..d9cbe0f5ca7f0 100644 --- a/crates/sui-rest-api/src/client/mod.rs +++ b/crates/sui-rest-api/src/client/mod.rs @@ -51,15 +51,15 @@ impl Client { .url() .join(&format!("checkpoints/{checkpoint_sequence_number}/full"))?; - let response = self - .inner - .client() - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; - - self.inner.bcs(response).await.map(Response::into_inner) + let request = self.inner.client().get(url); + + self.inner.bcs(request).await.map(Response::into_inner) + // let proto = self + // .inner + // .protobuf::(request) + // .await? 
+ // .into_inner(); + // proto.try_into().map_err(Into::into) } pub async fn get_checkpoint_summary( @@ -70,7 +70,14 @@ impl Client { .get_checkpoint(checkpoint_sequence_number) .await .map(Response::into_inner) - .and_then(|checkpoint| checkpoint.try_into().map_err(Into::into)) + .and_then(|checkpoint| { + sui_sdk_types::types::SignedCheckpointSummary { + checkpoint: checkpoint.summary, + signature: checkpoint.signature, + } + .try_into() + .map_err(Into::into) + }) } pub async fn get_object(&self, object_id: ObjectID) -> Result { @@ -110,18 +117,15 @@ impl Client { signatures: &transaction.inner().tx_signatures, })?; - let response = self + let request = self .inner .client() .post(url) .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) .header(reqwest::header::CONTENT_TYPE, crate::APPLICATION_BCS) - .body(body) - .send() - .await?; + .body(body); - self.inner.bcs(response).await.map(Response::into_inner) + self.inner.bcs(request).await.map(Response::into_inner) } } diff --git a/crates/sui-rest-api/src/client/sdk.rs b/crates/sui-rest-api/src/client/sdk.rs index 36702a316cc17..76d6227f0215f 100644 --- a/crates/sui-rest-api/src/client/sdk.rs +++ b/crates/sui-rest-api/src/client/sdk.rs @@ -23,6 +23,7 @@ use tap::Pipe; use crate::accounts::AccountOwnedObjectInfo; use crate::accounts::ListAccountOwnedObjectsQueryParameters; +use crate::checkpoints::CheckpointResponse; use crate::checkpoints::ListCheckpointsQueryParameters; use crate::coins::CoinInfo; use crate::health::Threshold; @@ -93,14 +94,9 @@ impl Client { pub async fn node_info(&self) -> Result> { let url = self.url().join("")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn health_check(&self, threshold_seconds: Option) -> Result> { @@ -115,14 +111,9 @@ impl Client { pub async fn get_coin_info(&self, 
coin_type: &StructTag) -> Result> { let url = self.url().join(&format!("coins/{coin_type}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn list_account_objects( @@ -132,28 +123,17 @@ impl Client { ) -> Result>> { let url = self.url().join(&format!("account/{account}/objects"))?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.json(response).await + self.json(request).await } pub async fn get_object(&self, object_id: ObjectId) -> Result> { let url = self.url().join(&format!("objects/{object_id}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn get_object_with_version( @@ -165,14 +145,9 @@ impl Client { .url() .join(&format!("objects/{object_id}/version/{version}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn list_dynamic_fields( @@ -182,28 +157,17 @@ impl Client { ) -> Result>> { let url = self.url().join(&format!("objects/{object_id}"))?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.json(response).await + self.json(request).await } pub async fn get_gas_info(&self) -> Result> { let url = self.url().join("system/gas")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, 
crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_reference_gas_price(&self) -> Result { @@ -216,14 +180,9 @@ impl Client { pub async fn get_current_protocol_config(&self) -> Result> { let url = self.url().join("system/protocol")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_protocol_config( @@ -232,71 +191,46 @@ impl Client { ) -> Result> { let url = self.url().join(&format!("system/protocol/{version}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_system_state_summary(&self) -> Result> { let url = self.url().join("system")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_current_committee(&self) -> Result> { let url = self.url().join("system/committee")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn get_committee(&self, epoch: EpochId) -> Result> { let url = self.url().join(&format!("system/committee/{epoch}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn get_checkpoint( &self, checkpoint_sequence_number: CheckpointSequenceNumber, - ) -> 
Result> { + ) -> Result> { let url = self .url() .join(&format!("checkpoints/{checkpoint_sequence_number}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn get_latest_checkpoint(&self) -> Result> { @@ -304,6 +238,7 @@ impl Client { limit: Some(1), start: None, direction: None, + contents: false, }; let (mut page, parts) = self.list_checkpoints(¶meters).await?.into_parts(); @@ -321,15 +256,17 @@ impl Client { ) -> Result>> { let url = self.url().join("checkpoints")?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.bcs(response).await + self.bcs(request).await + // self.protobuf::(request) + // .await? + // .try_map(|page| { + // page.checkpoints + // .into_iter() + // .map(TryInto::try_into) + // .collect() + // }) } pub async fn get_full_checkpoint( @@ -340,14 +277,16 @@ impl Client { .url() .join(&format!("checkpoints/{checkpoint_sequence_number}/full"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await + // self.protobuf::(request) + // .await? 
+ // // TODO make this more efficient and convert directly into the sui-sdk-types version + // .try_map(|proto| { + // sui_types::full_checkpoint_content::CheckpointData::try_from(proto) + // .and_then(TryInto::try_into) + // }) } pub async fn get_transaction( @@ -356,14 +295,9 @@ impl Client { ) -> Result> { let url = self.url().join(&format!("transactions/{transaction}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn list_transactions( @@ -372,15 +306,9 @@ impl Client { ) -> Result>> { let url = self.url().join("transactions")?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.bcs(response).await + self.bcs(request).await } pub async fn execute_transaction( @@ -392,17 +320,14 @@ impl Client { let body = bcs::to_bytes(transaction)?; - let response = self + let request = self .inner .post(url) .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) .header(reqwest::header::CONTENT_TYPE, crate::APPLICATION_BCS) - .body(body) - .send() - .await?; + .body(body); - self.bcs(response).await + self.bcs(request).await } pub async fn simulate_transaction( @@ -413,16 +338,13 @@ impl Client { let body = bcs::to_bytes(transaction)?; - let response = self + let request = self .inner .post(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) .header(reqwest::header::CONTENT_TYPE, crate::APPLICATION_BCS) - .body(body) - .send() - .await?; + .body(body); - self.bcs(response).await + self.bcs(request).await } pub async fn resolve_transaction( @@ -431,15 +353,9 @@ impl Client { ) -> Result> { let url = self.url.join("transactions/resolve")?; - let response = self - .inner - .post(url) - 
.header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .json(unresolved_transaction) - .send() - .await?; + let request = self.inner.post(url).json(unresolved_transaction); - self.bcs(response).await + self.bcs(request).await } pub async fn resolve_transaction_with_parameters( @@ -449,16 +365,13 @@ impl Client { ) -> Result> { let url = self.url.join("transactions/resolve")?; - let response = self + let request = self .inner .post(url) .query(¶meters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .json(unresolved_transaction) - .send() - .await?; + .json(unresolved_transaction); - self.bcs(response).await + self.bcs(request).await } async fn check_response( @@ -487,8 +400,13 @@ impl Client { async fn json( &self, - response: reqwest::Response, + request: reqwest::RequestBuilder, ) -> Result> { + let response = request + .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) + .send() + .await?; + let (response, parts) = self.check_response(response).await?; let json = response.json().await?; @@ -497,8 +415,13 @@ impl Client { pub(super) async fn bcs( &self, - response: reqwest::Response, + request: reqwest::RequestBuilder, ) -> Result> { + let response = request + .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) + .send() + .await?; + let (response, parts) = self.check_response(response).await?; let bytes = response.bytes().await?; @@ -507,6 +430,25 @@ impl Client { Err(e) => Err(Error::from_error(e).with_parts(parts)), } } + + #[allow(unused)] + pub(super) async fn protobuf( + &self, + request: reqwest::RequestBuilder, + ) -> Result> { + let response = request + .header(reqwest::header::ACCEPT, crate::APPLICATION_PROTOBUF) + .send() + .await?; + + let (response, parts) = self.check_response(response).await?; + + let bytes = response.bytes().await?; + match T::decode(bytes) { + Ok(v) => Ok(Response::new(v, parts)), + Err(e) => Err(Error::from_error(e).with_parts(parts)), + } + } } #[derive(Debug)] @@ -617,8 +559,20 @@ impl Response { 
where F: FnOnce(T) -> U, { - let (inner, state) = self.into_parts(); - Response::new(f(inner), state) + let (inner, parts) = self.into_parts(); + Response::new(f(inner), parts) + } + + pub fn try_map(self, f: F) -> Result> + where + F: FnOnce(T) -> Result, + E: Into, + { + let (inner, parts) = self.into_parts(); + match f(inner) { + Ok(out) => Ok(Response::new(out, parts)), + Err(e) => Err(Error::from_error(e).with_parts(parts)), + } } } diff --git a/crates/sui-rest-api/src/committee.rs b/crates/sui-rest-api/src/committee.rs index 8ec03dbb81688..25e653c9c1f7b 100644 --- a/crates/sui-rest-api/src/committee.rs +++ b/crates/sui-rest-api/src/committee.rs @@ -2,10 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - accept::AcceptFormat, + accept::AcceptJsonProtobufBcs, openapi::{ApiEndpoint, OperationBuilder, ResponseBuilder, RouteHandler}, + proto, reader::StateReader, - response::ResponseContent, + response::JsonProtobufBcs, RestService, Result, }; use axum::extract::{Path, State}; @@ -35,6 +36,7 @@ impl ApiEndpoint for GetLatestCommittee { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -47,17 +49,18 @@ impl ApiEndpoint for GetLatestCommittee { } async fn get_latest_committee( - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let current_epoch = state.inner().get_latest_checkpoint()?.epoch(); let committee = state .get_committee(current_epoch)? 
.ok_or_else(|| CommitteeNotFoundError::new(current_epoch))?; match accept { - AcceptFormat::Json => ResponseContent::Json(committee), - AcceptFormat::Bcs => ResponseContent::Bcs(committee), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(committee), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(committee.into()), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(committee), } .pipe(Ok) } @@ -85,6 +88,7 @@ impl ApiEndpoint for GetCommittee { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -99,16 +103,17 @@ impl ApiEndpoint for GetCommittee { async fn get_committee( Path(epoch): Path, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let committee = state .get_committee(epoch)? .ok_or_else(|| CommitteeNotFoundError::new(epoch))?; match accept { - AcceptFormat::Json => ResponseContent::Json(committee), - AcceptFormat::Bcs => ResponseContent::Bcs(committee), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(committee), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(committee.into()), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(committee), } .pipe(Ok) } diff --git a/crates/sui-rest-api/src/lib.rs b/crates/sui-rest-api/src/lib.rs index b0bd1696a4f3b..c2151b5f554a2 100644 --- a/crates/sui-rest-api/src/lib.rs +++ b/crates/sui-rest-api/src/lib.rs @@ -1,7 +1,11 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use axum::{response::Redirect, routing::get, Router}; +use axum::{ + response::{Redirect, ResponseParts}, + routing::get, + Router, +}; use mysten_network::callback::CallbackLayer; use openapi::ApiEndpoint; use reader::StateReader; @@ -23,21 +27,26 @@ mod info; mod metrics; mod objects; pub mod openapi; +pub mod proto; mod reader; mod response; mod system; pub mod transactions; pub mod types; +pub use checkpoints::CheckpointResponse; +pub use checkpoints::ListCheckpointsQueryParameters; pub use client::Client; pub use error::{RestError, Result}; pub use metrics::RestMetrics; +pub use objects::ObjectResponse; pub use sui_types::full_checkpoint_content::{CheckpointData, CheckpointTransaction}; pub use transactions::ExecuteTransactionQueryParameters; pub const TEXT_PLAIN_UTF_8: &str = "text/plain; charset=utf-8"; pub const APPLICATION_BCS: &str = "application/bcs"; pub const APPLICATION_JSON: &str = "application/json"; +pub const APPLICATION_PROTOBUF: &str = "application/x-protobuf"; #[derive(Debug, Copy, Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] #[serde(rename_all = "lowercase")] @@ -46,12 +55,40 @@ pub enum Direction { Descending, } +impl Direction { + pub fn is_descending(self) -> bool { + matches!(self, Self::Descending) + } +} + #[derive(Debug)] pub struct Page { pub entries: response::ResponseContent>, pub cursor: Option, } +pub struct PageCursor(pub Option); + +impl axum::response::IntoResponseParts for PageCursor { + type Error = (axum::http::StatusCode, String); + + fn into_response_parts( + self, + res: ResponseParts, + ) -> std::result::Result { + self.0 + .map(|cursor| [(crate::types::X_SUI_CURSOR, cursor.to_string())]) + .into_response_parts(res) + .map_err(|e| (axum::http::StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) + } +} + +impl axum::response::IntoResponse for PageCursor { + fn into_response(self) -> axum::response::Response { + (self, ()).into_response() + } +} + pub const 
DEFAULT_PAGE_SIZE: usize = 50; pub const MAX_PAGE_SIZE: usize = 100; @@ -69,14 +106,14 @@ const ENDPOINTS: &[&dyn ApiEndpoint] = &[ // stable APIs &info::GetNodeInfo, &health::HealthCheck, + &checkpoints::ListCheckpoints, + &checkpoints::GetCheckpoint, // unstable APIs &accounts::ListAccountObjects, &objects::GetObject, &objects::GetObjectWithVersion, &objects::ListDynamicFields, - &checkpoints::ListCheckpoints, - &checkpoints::GetCheckpoint, - &checkpoints::GetCheckpointFull, + &checkpoints::GetFullCheckpoint, &transactions::GetTransaction, &transactions::ListTransactions, &committee::GetCommittee, diff --git a/crates/sui-rest-api/src/objects.rs b/crates/sui-rest-api/src/objects.rs index 714f652f14a78..5bd04e0f8ad1a 100644 --- a/crates/sui-rest-api/src/objects.rs +++ b/crates/sui-rest-api/src/objects.rs @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - accept::AcceptFormat, + accept::{AcceptFormat, AcceptJsonProtobufBcs}, openapi::{ApiEndpoint, OperationBuilder, ResponseBuilder, RouteHandler}, + proto::GetObjectResponse, reader::StateReader, - response::ResponseContent, + response::{JsonProtobufBcs, ResponseContent}, Page, RestError, RestService, Result, }; use axum::extract::Query; use axum::extract::{Path, State}; use serde::{Deserialize, Serialize}; -use sui_sdk_types::types::{Object, ObjectId, TypeTag, Version}; +use sui_sdk_types::types::{Object, ObjectDigest, ObjectId, TypeTag, Version}; use sui_types::sui_sdk_types_conversions::type_tag_core_to_sdk; use sui_types::{ storage::{DynamicFieldIndexInfo, DynamicFieldKey}, @@ -19,6 +20,12 @@ use sui_types::{ }; use tap::Pipe; +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +pub struct ObjectResponse { + pub digest: ObjectDigest, + pub object: Object, +} + pub struct GetObject; impl ApiEndpoint for GetObject { @@ -41,7 +48,8 @@ impl ApiEndpoint for GetObject { .response( 200, ResponseBuilder::new() - .json_content::(generator) + 
.json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -56,16 +64,24 @@ impl ApiEndpoint for GetObject { pub async fn get_object( Path(object_id): Path, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let object = state .get_object(object_id)? .ok_or_else(|| ObjectNotFoundError::new(object_id))?; + let object = ObjectResponse { + digest: object.digest(), + object, + }; + match accept { - AcceptFormat::Json => ResponseContent::Json(object), - AcceptFormat::Bcs => ResponseContent::Bcs(object), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(object), + AcceptJsonProtobufBcs::Protobuf => { + JsonProtobufBcs::Protobuf(GetObjectResponse::try_from(object)?) + } + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(object.object), } .pipe(Ok) } @@ -93,7 +109,8 @@ impl ApiEndpoint for GetObjectWithVersion { .response( 200, ResponseBuilder::new() - .json_content::(generator) + .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -108,16 +125,24 @@ impl ApiEndpoint for GetObjectWithVersion { pub async fn get_object_with_version( Path((object_id, version)): Path<(ObjectId, Version)>, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let object = state .get_object_with_version(object_id, version)? .ok_or_else(|| ObjectNotFoundError::new_with_version(object_id, version))?; + let object = ObjectResponse { + digest: object.digest(), + object, + }; + match accept { - AcceptFormat::Json => ResponseContent::Json(object), - AcceptFormat::Bcs => ResponseContent::Bcs(object), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(object), + AcceptJsonProtobufBcs::Protobuf => { + JsonProtobufBcs::Protobuf(GetObjectResponse::try_from(object)?) 
+ } + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(object.object), } .pipe(Ok) } diff --git a/crates/sui-rest-api/src/openapi.rs b/crates/sui-rest-api/src/openapi.rs index 424be1d2f8048..cebcaf69a9e06 100644 --- a/crates/sui-rest-api/src/openapi.rs +++ b/crates/sui-rest-api/src/openapi.rs @@ -652,6 +652,10 @@ impl ResponseBuilder { self.content(crate::APPLICATION_BCS, MediaType::default()) } + pub fn protobuf_content(&mut self) -> &mut Self { + self.content(crate::APPLICATION_PROTOBUF, MediaType::default()) + } + pub fn text_content(&mut self) -> &mut Self { self.content(mime::TEXT_PLAIN_UTF_8.as_ref(), MediaType::default()) } diff --git a/crates/sui-rest-api/src/proto/generated/sui.rest.rs b/crates/sui-rest-api/src/proto/generated/sui.rest.rs new file mode 100644 index 0000000000000..6a0f29adb7181 --- /dev/null +++ b/crates/sui-rest-api/src/proto/generated/sui.rest.rs @@ -0,0 +1,215 @@ +// This file is @generated by prost-build. +/// Sui `TransactionData` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Transaction { + #[prost(bytes = "bytes", tag = "1")] + pub transaction: ::prost::bytes::Bytes, +} +/// Sui `TransactionEffects` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionEffects { + #[prost(bytes = "bytes", tag = "1")] + pub effects: ::prost::bytes::Bytes, +} +/// Sui `TransactionEvents` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionEvents { + #[prost(bytes = "bytes", tag = "1")] + pub events: ::prost::bytes::Bytes, +} +/// Sui `Object` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Object { + #[prost(bytes = "bytes", tag = "1")] + pub object: ::prost::bytes::Bytes, +} +/// Sui `CheckpointSummary` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckpointSummary { + #[prost(bytes = "bytes", tag = "1")] + pub summary: ::prost::bytes::Bytes, +} +/// Sui 
`CheckpointContents` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckpointContents { + #[prost(bytes = "bytes", tag = "1")] + pub contents: ::prost::bytes::Bytes, +} +/// Sui `UserSignature` type serialized as bytes +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UserSignature { + #[prost(bytes = "bytes", tag = "1")] + pub signature: ::prost::bytes::Bytes, +} +/// Sui `ValidatorAggregatedSignature` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorAggregatedSignature { + #[prost(bytes = "bytes", tag = "1")] + pub signature: ::prost::bytes::Bytes, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTransactionResponse { + /// The digest of this transaction + #[prost(bytes = "bytes", tag = "1")] + pub digest: ::prost::bytes::Bytes, + #[prost(message, optional, tag = "2")] + pub transaction: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub signatures: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub effects: ::core::option::Option, + #[prost(message, optional, tag = "5")] + pub events: ::core::option::Option, + #[prost(uint64, optional, tag = "6")] + pub checkpoint: ::core::option::Option, + #[prost(uint64, optional, tag = "7")] + pub timestamp_ms: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetObjectResponse { + /// The digest of this object + #[prost(bytes = "bytes", tag = "1")] + pub digest: ::prost::bytes::Bytes, + #[prost(message, optional, tag = "2")] + pub object: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetCheckpointResponse { + /// The digest of this CheckpointSummary + #[prost(bytes = "bytes", tag = "1")] + pub digest: ::prost::bytes::Bytes, + #[prost(message, optional, tag = "2")] + pub summary: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub signature: ::core::option::Option, + #[prost(message, 
optional, tag = "4")] + pub contents: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FullCheckpoint { + #[prost(message, optional, tag = "1")] + pub summary: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub signature: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub contents: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub transactions: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckpointTransaction { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub signatures: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub effects: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub events: ::core::option::Option, + #[prost(message, repeated, tag = "5")] + pub input_objects: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "6")] + pub output_objects: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListCheckpointResponse { + #[prost(message, repeated, tag = "1")] + pub checkpoints: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListTransactionsResponse { + #[prost(message, repeated, tag = "1")] + pub transactions: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Address { + #[prost(bytes = "bytes", tag = "1")] + pub address: ::prost::bytes::Bytes, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TypeTag { + #[prost(string, tag = "1")] + pub type_tag: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct I128 { + #[prost(bytes = "bytes", tag = "1")] + pub little_endian_bytes: ::prost::bytes::Bytes, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BalanceChange { + #[prost(message, optional, tag = "1")] + pub 
address: ::core::option::Option
, + #[prost(message, optional, tag = "2")] + pub coin_type: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub amount: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EffectsFinality { + #[prost(message, optional, tag = "1")] + pub signature: ::core::option::Option, + #[prost(uint64, optional, tag = "2")] + pub checkpoint: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionExecutionResponse { + #[prost(message, optional, tag = "1")] + pub effects: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub finality: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub events: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub balance_changes: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub input_objects: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "6")] + pub output_objects: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionSimulationResponse { + #[prost(message, optional, tag = "1")] + pub effects: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub events: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub balance_changes: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub input_objects: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub output_objects: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResolveTransactionResponse { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub simulation: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecuteTransactionRequest { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, + #[prost(message, repeated, tag = "2")] 
+ pub signatures: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SimulateTransactionRequest { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorCommittee { + #[prost(uint64, tag = "1")] + pub epoch: u64, + #[prost(message, repeated, tag = "2")] + pub members: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorCommitteeMember { + #[prost(bytes = "bytes", tag = "1")] + pub public_key: ::prost::bytes::Bytes, + #[prost(uint64, tag = "2")] + pub stake: u64, +} diff --git a/crates/sui-rest-api/src/proto/mod.rs b/crates/sui-rest-api/src/proto/mod.rs new file mode 100644 index 0000000000000..546cd7f0d01b0 --- /dev/null +++ b/crates/sui-rest-api/src/proto/mod.rs @@ -0,0 +1,1196 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[path = "generated/sui.rest.rs"] +mod generated; +pub use generated::*; +use tap::Pipe; + +// +// Transaction +// + +impl TryFrom<&sui_sdk_types::types::Transaction> for Transaction { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::Transaction) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + transaction: bytes.into(), + }) + } +} + +impl TryFrom<&Transaction> for sui_sdk_types::types::Transaction { + type Error = bcs::Error; + + fn try_from(value: &Transaction) -> Result { + bcs::from_bytes(&value.transaction) + } +} + +impl TryFrom<&sui_types::transaction::TransactionData> for Transaction { + type Error = bcs::Error; + + fn try_from(value: &sui_types::transaction::TransactionData) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + transaction: bytes.into(), + }) + } +} + +impl TryFrom<&Transaction> for sui_types::transaction::TransactionData { + type Error = bcs::Error; + + fn try_from(value: &Transaction) -> Result { + bcs::from_bytes(&value.transaction) + } +} + +// +// TransactionEffects 
+// + +impl TryFrom<&sui_sdk_types::types::TransactionEffects> for TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::TransactionEffects) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + effects: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEffects> for sui_sdk_types::types::TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &TransactionEffects) -> Result { + bcs::from_bytes(&value.effects) + } +} + +impl TryFrom<&sui_types::effects::TransactionEffects> for TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &sui_types::effects::TransactionEffects) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + effects: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEffects> for sui_types::effects::TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &TransactionEffects) -> Result { + bcs::from_bytes(&value.effects) + } +} + +// +// TransactionEvents +// + +impl TryFrom<&sui_sdk_types::types::TransactionEvents> for TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::TransactionEvents) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + events: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEvents> for sui_sdk_types::types::TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &TransactionEvents) -> Result { + bcs::from_bytes(&value.events) + } +} + +impl TryFrom<&sui_types::effects::TransactionEvents> for TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &sui_types::effects::TransactionEvents) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + events: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEvents> for sui_types::effects::TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &TransactionEvents) -> Result { + bcs::from_bytes(&value.events) + } +} + +// +// Object +// + +impl TryFrom<&sui_sdk_types::types::Object> for Object { + 
type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::Object) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + object: bytes.into(), + }) + } +} + +impl TryFrom<&Object> for sui_sdk_types::types::Object { + type Error = bcs::Error; + + fn try_from(value: &Object) -> Result { + bcs::from_bytes(&value.object) + } +} + +impl TryFrom<&sui_types::object::Object> for Object { + type Error = bcs::Error; + + fn try_from(value: &sui_types::object::Object) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + object: bytes.into(), + }) + } +} + +impl TryFrom<&Object> for sui_types::object::Object { + type Error = bcs::Error; + + fn try_from(value: &Object) -> Result { + bcs::from_bytes(&value.object) + } +} + +// +// CheckpointSummary +// + +impl TryFrom<&sui_sdk_types::types::CheckpointSummary> for CheckpointSummary { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::CheckpointSummary) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + summary: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointSummary> for sui_sdk_types::types::CheckpointSummary { + type Error = bcs::Error; + + fn try_from(value: &CheckpointSummary) -> Result { + bcs::from_bytes(&value.summary) + } +} + +impl TryFrom<&sui_types::messages_checkpoint::CheckpointSummary> for CheckpointSummary { + type Error = bcs::Error; + + fn try_from( + value: &sui_types::messages_checkpoint::CheckpointSummary, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + summary: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointSummary> for sui_types::messages_checkpoint::CheckpointSummary { + type Error = bcs::Error; + + fn try_from(value: &CheckpointSummary) -> Result { + bcs::from_bytes(&value.summary) + } +} + +// +// CheckpointContents +// + +impl TryFrom<&sui_sdk_types::types::CheckpointContents> for CheckpointContents { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::CheckpointContents) -> Result { + 
bcs::to_bytes(&value).map(|bytes| Self { + contents: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointContents> for sui_sdk_types::types::CheckpointContents { + type Error = bcs::Error; + + fn try_from(value: &CheckpointContents) -> Result { + bcs::from_bytes(&value.contents) + } +} + +impl TryFrom<&sui_types::messages_checkpoint::CheckpointContents> for CheckpointContents { + type Error = bcs::Error; + + fn try_from( + value: &sui_types::messages_checkpoint::CheckpointContents, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + contents: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointContents> for sui_types::messages_checkpoint::CheckpointContents { + type Error = bcs::Error; + + fn try_from(value: &CheckpointContents) -> Result { + bcs::from_bytes(&value.contents) + } +} + +// +// ValidatorAggregatedSignature +// + +impl TryFrom<&sui_sdk_types::types::ValidatorAggregatedSignature> for ValidatorAggregatedSignature { + type Error = bcs::Error; + + fn try_from( + value: &sui_sdk_types::types::ValidatorAggregatedSignature, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + signature: bytes.into(), + }) + } +} + +impl TryFrom<&ValidatorAggregatedSignature> for sui_sdk_types::types::ValidatorAggregatedSignature { + type Error = bcs::Error; + + fn try_from(value: &ValidatorAggregatedSignature) -> Result { + bcs::from_bytes(&value.signature) + } +} + +impl TryFrom<&sui_types::crypto::AuthorityStrongQuorumSignInfo> for ValidatorAggregatedSignature { + type Error = bcs::Error; + + fn try_from( + value: &sui_types::crypto::AuthorityStrongQuorumSignInfo, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + signature: bytes.into(), + }) + } +} + +impl TryFrom<&ValidatorAggregatedSignature> for sui_types::crypto::AuthorityStrongQuorumSignInfo { + type Error = bcs::Error; + + fn try_from(value: &ValidatorAggregatedSignature) -> Result { + bcs::from_bytes(&value.signature) + } +} + +// +// UserSignature +// + +impl 
TryFrom<&sui_sdk_types::types::UserSignature> for UserSignature { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::UserSignature) -> Result { + Ok(Self { + signature: value.to_bytes().into(), + }) + } +} + +impl TryFrom<&UserSignature> for sui_sdk_types::types::UserSignature { + type Error = bcs::Error; + + fn try_from(value: &UserSignature) -> Result { + Self::from_bytes(&value.signature).map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +impl TryFrom<&sui_types::signature::GenericSignature> for UserSignature { + type Error = bcs::Error; + + fn try_from(value: &sui_types::signature::GenericSignature) -> Result { + Ok(Self { + signature: sui_types::crypto::ToFromBytes::as_bytes(value) + .to_vec() + .into(), + }) + } +} + +impl TryFrom<&UserSignature> for sui_types::signature::GenericSignature { + type Error = bcs::Error; + + fn try_from(value: &UserSignature) -> Result { + sui_types::crypto::ToFromBytes::from_bytes(&value.signature) + .map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +// +// GetObjectResponse +// + +impl TryFrom for GetObjectResponse { + type Error = bcs::Error; + + fn try_from(value: crate::objects::ObjectResponse) -> Result { + Ok(Self { + digest: value.digest.as_bytes().to_vec().into(), + object: Some(Object::try_from(&value.object)?), + }) + } +} + +impl TryFrom for crate::objects::ObjectResponse { + type Error = bcs::Error; + + fn try_from(value: GetObjectResponse) -> Result { + Ok(Self { + digest: sui_sdk_types::types::ObjectDigest::from_bytes(&value.digest) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + object: value + .object + .ok_or_else(|| bcs::Error::Custom("missing object".into()))? 
+ .pipe_ref(TryInto::try_into)?, + }) + } +} + +// +// GetCheckpointResponse +// + +impl TryFrom for GetCheckpointResponse { + type Error = bcs::Error; + + fn try_from(c: crate::checkpoints::CheckpointResponse) -> Result { + Ok(Self { + digest: c.digest.as_bytes().to_vec().into(), + summary: Some(CheckpointSummary::try_from(&c.summary)?), + signature: Some(ValidatorAggregatedSignature::try_from(&c.signature)?), + contents: c + .contents + .as_ref() + .map(CheckpointContents::try_from) + .transpose()?, + }) + } +} + +impl TryFrom for crate::checkpoints::CheckpointResponse { + type Error = bcs::Error; + + fn try_from(value: GetCheckpointResponse) -> Result { + let summary = value + .summary + .ok_or_else(|| bcs::Error::Custom("missing summary".into()))? + .pipe_ref(TryInto::try_into)?; + let signature = value + .signature + .ok_or_else(|| bcs::Error::Custom("missing signature".into()))? + .pipe_ref(TryInto::try_into)?; + + let contents = value.contents.as_ref().map(TryInto::try_into).transpose()?; + + Ok(Self { + digest: sui_sdk_types::types::CheckpointDigest::from_bytes(&value.digest) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + summary, + signature, + contents, + }) + } +} + +impl TryFrom> for ListCheckpointResponse { + type Error = bcs::Error; + fn try_from(value: Vec) -> Result { + let checkpoints = value + .into_iter() + .map(TryInto::try_into) + .collect::>()?; + + Ok(Self { checkpoints }) + } +} + +// +// GetTransactionResponse +// + +impl TryFrom for GetTransactionResponse { + type Error = bcs::Error; + + fn try_from(value: crate::transactions::TransactionResponse) -> Result { + Ok(Self { + digest: value.digest.as_bytes().to_vec().into(), + transaction: Some(Transaction::try_from(&value.transaction)?), + signatures: value + .signatures + .iter() + .map(UserSignature::try_from) + .collect::>()?, + effects: Some(TransactionEffects::try_from(&value.effects)?), + events: value + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + 
checkpoint: value.checkpoint, + timestamp_ms: value.timestamp_ms, + }) + } +} + +impl TryFrom for crate::transactions::TransactionResponse { + type Error = bcs::Error; + + fn try_from(value: GetTransactionResponse) -> Result { + Ok(Self { + digest: sui_sdk_types::types::TransactionDigest::from_bytes(&value.digest) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + transaction: value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? + .pipe_ref(TryInto::try_into)?, + signatures: value + .signatures + .iter() + .map(TryInto::try_into) + .collect::>()?, + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing effects".into()))? + .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + checkpoint: value.checkpoint, + timestamp_ms: value.timestamp_ms, + }) + } +} + +// +// CheckpointTransaction +// + +impl TryFrom for CheckpointTransaction { + type Error = bcs::Error; + + fn try_from( + transaction: sui_types::full_checkpoint_content::CheckpointTransaction, + ) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from( + &transaction.transaction.intent_message().value, + )?), + signatures: transaction + .transaction + .tx_signatures() + .iter() + .map(UserSignature::try_from) + .collect::>()?, + effects: Some(TransactionEffects::try_from(&transaction.effects)?), + events: transaction + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + input_objects: transaction + .input_objects + .iter() + .map(Object::try_from) + .collect::>()?, + output_objects: transaction + .output_objects + .iter() + .map(Object::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for sui_types::full_checkpoint_content::CheckpointTransaction { + type Error = bcs::Error; + + fn try_from(transaction: CheckpointTransaction) -> Result { + let transaction_data = transaction + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? 
+ .pipe_ref(TryInto::try_into)?; + let user_signatures = transaction + .signatures + .iter() + .map(TryInto::try_into) + .collect::>()?; + + Ok(Self { + transaction: sui_types::transaction::Transaction::new( + sui_types::transaction::SenderSignedData::new(transaction_data, user_signatures), + ), + effects: transaction + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? + .pipe_ref(TryInto::try_into)?, + events: transaction + .events + .as_ref() + .map(TryInto::try_into) + .transpose()?, + input_objects: transaction + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + output_objects: transaction + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} + +// +// FullCheckpoint +// + +impl TryFrom for FullCheckpoint { + type Error = bcs::Error; + + fn try_from( + c: sui_types::full_checkpoint_content::CheckpointData, + ) -> Result { + Ok(Self { + summary: Some(CheckpointSummary::try_from(c.checkpoint_summary.data())?), + signature: Some(ValidatorAggregatedSignature::try_from( + c.checkpoint_summary.auth_sig(), + )?), + contents: Some(CheckpointContents::try_from(&c.checkpoint_contents)?), + transactions: c + .transactions + .into_iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} + +impl TryFrom for sui_types::full_checkpoint_content::CheckpointData { + type Error = bcs::Error; + + fn try_from(checkpoint: FullCheckpoint) -> Result { + let summary = checkpoint + .summary + .ok_or_else(|| bcs::Error::Custom("missing summary".into()))? + .pipe_ref(TryInto::try_into)?; + let signature = checkpoint + .signature + .ok_or_else(|| bcs::Error::Custom("missing signature".into()))? + .pipe_ref(TryInto::try_into)?; + let checkpoint_summary = + sui_types::messages_checkpoint::CertifiedCheckpointSummary::new_from_data_and_sig( + summary, signature, + ); + + let contents = checkpoint + .contents + .ok_or_else(|| bcs::Error::Custom("missing checkpoint contents".into()))? 
+ .pipe_ref(TryInto::try_into)?; + + let transactions = checkpoint + .transactions + .into_iter() + .map(TryInto::try_into) + .collect::>()?; + + Ok(Self { + checkpoint_summary, + checkpoint_contents: contents, + transactions, + }) + } +} + +// +// Address +// + +impl From<&sui_sdk_types::types::Address> for Address { + fn from(value: &sui_sdk_types::types::Address) -> Self { + Self { + address: value.as_bytes().to_vec().into(), + } + } +} + +impl TryFrom<&Address> for sui_sdk_types::types::Address { + type Error = bcs::Error; + + fn try_from(value: &Address) -> Result { + Self::from_bytes(&value.address).map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +impl TryFrom<&Address> for sui_types::base_types::SuiAddress { + type Error = bcs::Error; + + fn try_from(value: &Address) -> Result { + Self::from_bytes(&value.address).map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +// +// TypeTag +// + +impl From<&sui_sdk_types::types::TypeTag> for TypeTag { + fn from(value: &sui_sdk_types::types::TypeTag) -> Self { + Self { + type_tag: value.to_string(), + } + } +} + +impl TryFrom<&TypeTag> for sui_sdk_types::types::TypeTag { + type Error = sui_sdk_types::types::TypeParseError; + + fn try_from(value: &TypeTag) -> Result { + value.type_tag.parse() + } +} + +impl TryFrom<&TypeTag> for sui_types::TypeTag { + type Error = bcs::Error; + + fn try_from(value: &TypeTag) -> Result { + value + .type_tag + .parse::() + .map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +// +// I128 +// + +impl From for I128 { + fn from(value: i128) -> Self { + Self { + little_endian_bytes: value.to_le_bytes().to_vec().into(), + } + } +} + +impl TryFrom<&I128> for i128 { + type Error = std::array::TryFromSliceError; + + fn try_from(value: &I128) -> Result { + Ok(i128::from_le_bytes( + value.little_endian_bytes.as_ref().try_into()?, + )) + } +} + +// +// BalanceChange +// + +impl From<&sui_sdk_types::types::BalanceChange> for BalanceChange { + fn from(value: 
&sui_sdk_types::types::BalanceChange) -> Self { + Self { + address: Some(Address::from(&value.address)), + coin_type: Some(TypeTag::from(&value.coin_type)), + amount: Some(I128::from(value.amount)), + } + } +} + +impl TryFrom<&BalanceChange> for sui_sdk_types::types::BalanceChange { + type Error = bcs::Error; + + fn try_from(value: &BalanceChange) -> Result { + let address = value + .address + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing address".into()))? + .try_into()?; + + let coin_type = value + .coin_type + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing coin_type".into()))? + .pipe(sui_sdk_types::types::TypeTag::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + let amount = value + .amount + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing amount".into()))? + .pipe(i128::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + Ok(Self { + address, + coin_type, + amount, + }) + } +} + +impl TryFrom<&BalanceChange> for crate::client::BalanceChange { + type Error = bcs::Error; + + fn try_from(value: &BalanceChange) -> Result { + let address = value + .address + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing address".into()))? + .try_into()?; + + let coin_type = value + .coin_type + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing coin_type".into()))? + .pipe(sui_types::TypeTag::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + let amount = value + .amount + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing amount".into()))? 
+ .pipe(i128::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + Ok(Self { + address, + coin_type, + amount, + }) + } +} +// +// EffectsFinality +// + +impl TryFrom<&crate::transactions::EffectsFinality> for EffectsFinality { + type Error = bcs::Error; + + fn try_from(value: &crate::transactions::EffectsFinality) -> Result { + let (signature, checkpoint) = match value { + crate::transactions::EffectsFinality::Certified { signature } => { + (Some(signature.try_into()?), None) + } + crate::transactions::EffectsFinality::Checkpointed { checkpoint } => { + (None, Some(*checkpoint)) + } + }; + + Ok(Self { + signature, + checkpoint, + }) + } +} + +impl TryFrom<&EffectsFinality> for crate::transactions::EffectsFinality { + type Error = bcs::Error; + + fn try_from(value: &EffectsFinality) -> Result { + let signature = value + .signature + .as_ref() + .map(sui_sdk_types::types::ValidatorAggregatedSignature::try_from) + .transpose()?; + match (signature, value.checkpoint) { + (Some(signature), _) => crate::transactions::EffectsFinality::Certified { signature }, + (None, Some(checkpoint)) => { + crate::transactions::EffectsFinality::Checkpointed { checkpoint } + } + (None, None) => { + return Err(bcs::Error::Custom( + "missing signature or checkpoint field".into(), + )) + } + } + .pipe(Ok) + } +} + +impl TryFrom<&EffectsFinality> for crate::client::EffectsFinality { + type Error = bcs::Error; + + fn try_from(value: &EffectsFinality) -> Result { + let signature = value + .signature + .as_ref() + .map(sui_types::crypto::AuthorityStrongQuorumSignInfo::try_from) + .transpose()?; + match (signature, value.checkpoint) { + (Some(signature), _) => crate::client::EffectsFinality::Certified { signature }, + (None, Some(checkpoint)) => crate::client::EffectsFinality::Checkpointed { checkpoint }, + (None, None) => { + return Err(bcs::Error::Custom( + "missing signature or checkpoint field".into(), + )) + } + } + .pipe(Ok) + } +} + +// +// TransactionExecutionResponse +// + 
+impl TryFrom for TransactionExecutionResponse { + type Error = bcs::Error; + + fn try_from( + value: crate::transactions::TransactionExecutionResponse, + ) -> Result { + Ok(Self { + effects: Some(TransactionEffects::try_from(&value.effects)?), + finality: Some(EffectsFinality::try_from(&value.finality)?), + events: value + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + balance_changes: value + .balance_changes + .iter() + .flat_map(|balance_changes| balance_changes.iter()) + .map(BalanceChange::from) + .collect(), + input_objects: value + .input_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + output_objects: value + .output_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for crate::transactions::TransactionExecutionResponse { + type Error = bcs::Error; + + fn try_from(value: TransactionExecutionResponse) -> Result { + Ok(Self { + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? + .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + input_objects: Some( + value + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + output_objects: Some( + value + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + finality: value + .finality + .ok_or_else(|| bcs::Error::Custom("missing finality".into()))? + .pipe_ref(TryInto::try_into)?, + balance_changes: Some( + value + .balance_changes + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + }) + } +} + +impl TryFrom for crate::client::TransactionExecutionResponse { + type Error = bcs::Error; + + fn try_from(value: TransactionExecutionResponse) -> Result { + Ok(Self { + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? 
+ .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + input_objects: Some( + value + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + output_objects: Some( + value + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + finality: value + .finality + .ok_or_else(|| bcs::Error::Custom("missing finality".into()))? + .pipe_ref(TryInto::try_into)?, + balance_changes: Some( + value + .balance_changes + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + }) + } +} + +// +// TransactionSimulationResponse +// + +impl TryFrom for TransactionSimulationResponse { + type Error = bcs::Error; + + fn try_from( + value: crate::transactions::TransactionSimulationResponse, + ) -> Result { + Ok(Self { + effects: Some(TransactionEffects::try_from(&value.effects)?), + events: value + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + balance_changes: value + .balance_changes + .iter() + .flat_map(|balance_changes| balance_changes.iter()) + .map(BalanceChange::from) + .collect(), + input_objects: value + .input_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + output_objects: value + .output_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for crate::transactions::TransactionSimulationResponse { + type Error = bcs::Error; + + fn try_from(value: TransactionSimulationResponse) -> Result { + Ok(Self { + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? 
+ .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + input_objects: Some( + value + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + output_objects: Some( + value + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + balance_changes: Some( + value + .balance_changes + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + }) + } +} + +// +// ResolveTransactionResponse +// + +impl TryFrom for ResolveTransactionResponse { + type Error = bcs::Error; + + fn try_from( + value: crate::transactions::ResolveTransactionResponse, + ) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from(&value.transaction)?), + simulation: value + .simulation + .map(TransactionSimulationResponse::try_from) + .transpose()?, + }) + } +} + +impl TryFrom for crate::transactions::ResolveTransactionResponse { + type Error = bcs::Error; + + fn try_from(value: ResolveTransactionResponse) -> Result { + Ok(Self { + transaction: value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? + .pipe_ref(TryInto::try_into)?, + simulation: value.simulation.map(TryInto::try_into).transpose()?, + }) + } +} + +// +// ExecuteTransactionRequest +// + +impl TryFrom for ExecuteTransactionRequest { + type Error = bcs::Error; + + fn try_from(value: sui_sdk_types::types::SignedTransaction) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from(&value.transaction)?), + signatures: value + .signatures + .iter() + .map(UserSignature::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for sui_sdk_types::types::SignedTransaction { + type Error = bcs::Error; + + fn try_from(value: ExecuteTransactionRequest) -> Result { + Ok(Self { + transaction: value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? 
+ .pipe_ref(TryInto::try_into)?, + signatures: value + .signatures + .iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} + +// +// SimulateTransactionRequest +// + +impl TryFrom for SimulateTransactionRequest { + type Error = bcs::Error; + + fn try_from(value: sui_sdk_types::types::Transaction) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from(&value)?), + }) + } +} + +impl TryFrom for sui_sdk_types::types::Transaction { + type Error = bcs::Error; + + fn try_from(value: SimulateTransactionRequest) -> Result { + value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? + .pipe_ref(TryInto::try_into) + } +} + +// +// ValidatorCommitteeMember +// + +impl From<&sui_sdk_types::types::ValidatorCommitteeMember> for ValidatorCommitteeMember { + fn from(value: &sui_sdk_types::types::ValidatorCommitteeMember) -> Self { + Self { + public_key: value.public_key.as_bytes().to_vec().into(), + stake: value.stake, + } + } +} + +impl TryFrom for sui_sdk_types::types::ValidatorCommitteeMember { + type Error = bcs::Error; + + fn try_from(value: ValidatorCommitteeMember) -> Result { + Ok(Self { + public_key: sui_sdk_types::types::Bls12381PublicKey::from_bytes(&value.public_key) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + stake: value.stake, + }) + } +} + +// +// ValidatorCommittee +// + +impl From for ValidatorCommittee { + fn from(value: sui_sdk_types::types::ValidatorCommittee) -> Self { + Self { + epoch: value.epoch, + members: value + .members + .iter() + .map(ValidatorCommitteeMember::from) + .collect(), + } + } +} + +impl TryFrom for sui_sdk_types::types::ValidatorCommittee { + type Error = bcs::Error; + + fn try_from(value: ValidatorCommittee) -> Result { + Ok(Self { + epoch: value.epoch, + members: value + .members + .into_iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} diff --git a/crates/sui-rest-api/src/response.rs b/crates/sui-rest-api/src/response.rs index 1863bc6c45d54..0a6df29b0b00a 
100644 --- a/crates/sui-rest-api/src/response.rs +++ b/crates/sui-rest-api/src/response.rs @@ -6,8 +6,10 @@ use axum::{ http::{HeaderMap, StatusCode}, response::{IntoResponse, Response}, }; +use prost::bytes::BytesMut; use crate::{ + accept::APPLICATION_PROTOBUF, content_type::ContentType, types::{ X_SUI_CHAIN, X_SUI_CHAIN_ID, X_SUI_CHECKPOINT_HEIGHT, X_SUI_EPOCH, @@ -19,12 +21,6 @@ use crate::{ pub struct Bcs(pub T); -#[derive(Debug)] -pub enum ResponseContent { - Bcs(T), - Json(J), -} - impl axum::response::IntoResponse for Bcs where T: serde::Serialize, @@ -112,6 +108,12 @@ impl axum::response::IntoResponse for BcsRejection { } } +#[derive(Debug)] +pub enum ResponseContent { + Bcs(T), + Json(J), +} + impl axum::response::IntoResponse for ResponseContent where T: serde::Serialize, @@ -125,6 +127,47 @@ where } } +#[derive(Debug)] +pub enum JsonProtobufBcs { + Json(J), + Protobuf(P), + Bcs(T), +} + +impl axum::response::IntoResponse for JsonProtobufBcs +where + J: serde::Serialize, + P: prost::Message + std::default::Default, + T: serde::Serialize, +{ + fn into_response(self) -> axum::response::Response { + match self { + JsonProtobufBcs::Json(inner) => axum::Json(inner).into_response(), + JsonProtobufBcs::Protobuf(inner) => Protobuf(inner).into_response(), + JsonProtobufBcs::Bcs(inner) => Bcs(inner).into_response(), + } + } +} + +#[derive(Debug)] +pub enum ProtobufBcs { + Protobuf(P), + Bcs(T), +} + +impl axum::response::IntoResponse for ProtobufBcs +where + P: prost::Message + std::default::Default, + T: serde::Serialize, +{ + fn into_response(self) -> axum::response::Response { + match self { + Self::Protobuf(inner) => Protobuf(inner).into_response(), + Self::Bcs(inner) => Bcs(inner).into_response(), + } + } +} + pub async fn append_info_headers( State(state): State, response: Response, @@ -169,3 +212,90 @@ pub async fn append_info_headers( (headers, response) } + +pub struct Protobuf(pub T); + +impl axum::response::IntoResponse for Protobuf +where + T: 
prost::Message, +{ + fn into_response(self) -> axum::response::Response { + let mut buf = BytesMut::new(); + match self.0.encode(&mut buf) { + Ok(()) => ( + [( + axum::http::header::CONTENT_TYPE, + axum::http::HeaderValue::from_static(APPLICATION_PROTOBUF), + )], + buf, + ) + .into_response(), + Err(err) => ( + StatusCode::INTERNAL_SERVER_ERROR, + [( + axum::http::header::CONTENT_TYPE, + axum::http::HeaderValue::from_static(TEXT_PLAIN_UTF_8), + )], + err.to_string(), + ) + .into_response(), + } + } +} + +#[axum::async_trait] +impl axum::extract::FromRequest for Protobuf +where + T: prost::Message + std::default::Default, + S: Send + Sync, +{ + type Rejection = ProtobufRejection; + + async fn from_request( + req: axum::http::Request, + state: &S, + ) -> Result { + if protobuf_content_type(req.headers()) { + let bytes = axum::body::Bytes::from_request(req, state) + .await + .map_err(ProtobufRejection::BytesRejection)?; + T::decode(bytes) + .map(Self) + .map_err(ProtobufRejection::DeserializationError) + } else { + Err(ProtobufRejection::MissingProtobufContentType) + } + } +} + +fn protobuf_content_type(headers: &HeaderMap) -> bool { + let Some(ContentType(mime)) = ContentType::from_headers(headers) else { + return false; + }; + + mime.essence_str() == APPLICATION_PROTOBUF +} + +pub enum ProtobufRejection { + MissingProtobufContentType, + DeserializationError(prost::DecodeError), + BytesRejection(axum::extract::rejection::BytesRejection), +} + +impl axum::response::IntoResponse for ProtobufRejection { + fn into_response(self) -> axum::response::Response { + match self { + ProtobufRejection::MissingProtobufContentType => ( + StatusCode::UNSUPPORTED_MEDIA_TYPE, + "Expected request with `Content-Type: application/x-protobuf`", + ) + .into_response(), + ProtobufRejection::DeserializationError(e) => ( + StatusCode::UNPROCESSABLE_ENTITY, + format!("Failed to deserialize the protobuf body into the target type: {e}"), + ) + .into_response(), + 
ProtobufRejection::BytesRejection(bytes_rejection) => bytes_rejection.into_response(), + } + } +} diff --git a/crates/sui-rest-api/src/transactions/execution.rs b/crates/sui-rest-api/src/transactions/execution.rs index d1994b83e4d40..0e445f687cb6b 100644 --- a/crates/sui-rest-api/src/transactions/execution.rs +++ b/crates/sui-rest-api/src/transactions/execution.rs @@ -1,12 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use crate::accept::AcceptJsonProtobufBcs; use crate::openapi::{ ApiEndpoint, OperationBuilder, RequestBodyBuilder, ResponseBuilder, RouteHandler, }; -use crate::response::Bcs; -use crate::{accept::AcceptFormat, response::ResponseContent}; -use crate::{RestError, RestService, Result}; +use crate::response::{Bcs, JsonProtobufBcs}; +use crate::{proto, RestError, RestService, Result}; use axum::extract::{Query, State}; use schemars::JsonSchema; use std::net::SocketAddr; @@ -43,6 +43,7 @@ impl ApiEndpoint for ExecuteTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -65,9 +66,15 @@ async fn execute_transaction( State(state): State>>, Query(parameters): Query, client_address: Option>, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, Bcs(transaction): Bcs, -) -> Result> { +) -> Result< + JsonProtobufBcs< + TransactionExecutionResponse, + proto::TransactionExecutionResponse, + TransactionExecutionResponse, + >, +> { let executor = state.ok_or_else(|| anyhow::anyhow!("No Transaction Executor"))?; let request = sui_types::quorum_driver_types::ExecuteTransactionRequestV3 { transaction: transaction.try_into()?, @@ -161,8 +168,9 @@ async fn execute_transaction( }; match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + 
AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } .pipe(Ok) } @@ -191,13 +199,13 @@ pub struct ExecuteTransactionQueryParameters { /// Response type for the execute transaction endpoint #[derive(Debug, serde::Serialize, serde::Deserialize, JsonSchema)] pub struct TransactionExecutionResponse { - effects: TransactionEffects, + pub effects: TransactionEffects, - finality: EffectsFinality, - events: Option, - balance_changes: Option>, - input_objects: Option>, - output_objects: Option>, + pub finality: EffectsFinality, + pub events: Option, + pub balance_changes: Option>, + pub input_objects: Option>, + pub output_objects: Option>, } #[derive(Clone, Debug)] @@ -377,6 +385,7 @@ impl ApiEndpoint for SimulateTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -391,16 +400,26 @@ impl ApiEndpoint for SimulateTransaction { async fn simulate_transaction( State(state): State>>, Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, //TODO allow accepting JSON as well as BCS Bcs(transaction): Bcs, -) -> Result> { +) -> Result< + JsonProtobufBcs< + TransactionSimulationResponse, + proto::TransactionSimulationResponse, + TransactionSimulationResponse, + >, +> { let executor = state.ok_or_else(|| anyhow::anyhow!("No Transaction Executor"))?; - simulate_transaction_impl(&executor, ¶meters, transaction).map(|response| match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), - }) + let response = simulate_transaction_impl(&executor, ¶meters, transaction)?; + + match accept { + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), + } + .pipe(Ok) } pub(super) fn simulate_transaction_impl( diff --git a/crates/sui-rest-api/src/transactions/mod.rs 
b/crates/sui-rest-api/src/transactions/mod.rs index 362aa4c40ed2c..261790855353e 100644 --- a/crates/sui-rest-api/src/transactions/mod.rs +++ b/crates/sui-rest-api/src/transactions/mod.rs @@ -24,17 +24,20 @@ use sui_sdk_types::types::{ }; use tap::Pipe; +use crate::accept::AcceptJsonProtobufBcs; use crate::openapi::ApiEndpoint; use crate::openapi::OperationBuilder; use crate::openapi::ResponseBuilder; use crate::openapi::RouteHandler; +use crate::proto; +use crate::proto::ListTransactionsResponse; use crate::reader::StateReader; +use crate::response::JsonProtobufBcs; use crate::Direction; -use crate::Page; +use crate::PageCursor; use crate::RestError; use crate::RestService; use crate::Result; -use crate::{accept::AcceptFormat, response::ResponseContent}; pub struct GetTransaction; @@ -59,6 +62,7 @@ impl ApiEndpoint for GetTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -73,14 +77,16 @@ impl ApiEndpoint for GetTransaction { async fn get_transaction( Path(transaction_digest): Path, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> +{ let response = state.get_transaction_response(transaction_digest)?; match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } .pipe(Ok) } @@ -145,6 +151,7 @@ impl ApiEndpoint for ListTransactions { 200, ResponseBuilder::new() .json_content::>(generator) + .protobuf_content() .bcs_content() .header::(crate::types::X_SUI_CURSOR, generator) .build(), @@ -160,9 +167,12 @@ impl ApiEndpoint for ListTransactions { async fn list_transactions( Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, 
State(state): State, -) -> Result> { +) -> Result<( + PageCursor, + JsonProtobufBcs, ListTransactionsResponse, Vec>, +)> { let latest_checkpoint = state.inner().get_latest_checkpoint()?.sequence_number; let oldest_checkpoint = state.inner().get_lowest_available_checkpoint()?; let limit = parameters.limit(); @@ -195,12 +205,7 @@ async fn list_transactions( timestamp_ms: Some(cursor_info.timestamp_ms), }) }) - .collect::>()?; - - let entries = match accept { - AcceptFormat::Json => ResponseContent::Json(transactions), - AcceptFormat::Bcs => ResponseContent::Bcs(transactions), - }; + .collect::, _>>()?; let cursor = next_cursor.and_then(|(checkpoint, index)| { if checkpoint < oldest_checkpoint { @@ -210,7 +215,21 @@ async fn list_transactions( } }); - Ok(Page { entries, cursor }) + match accept { + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(transactions), + AcceptJsonProtobufBcs::Protobuf => { + let proto = ListTransactionsResponse { + transactions: transactions + .into_iter() + .map(TryInto::try_into) + .collect::>()?, + }; + JsonProtobufBcs::Protobuf(proto) + } + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(transactions), + } + .pipe(|entries| (PageCursor(cursor), entries)) + .pipe(Ok) } /// A Cursor that points at a specific transaction in history. 
@@ -278,7 +297,7 @@ impl serde::Serialize for TransactionCursor { } } -#[derive(Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +#[derive(Debug, Default, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] pub struct ListTransactionsQueryParameters { pub limit: Option, #[schemars(with = "Option")] diff --git a/crates/sui-rest-api/src/transactions/resolve/mod.rs b/crates/sui-rest-api/src/transactions/resolve/mod.rs index dcb905d8f9104..a8373a226875e 100644 --- a/crates/sui-rest-api/src/transactions/resolve/mod.rs +++ b/crates/sui-rest-api/src/transactions/resolve/mod.rs @@ -6,15 +6,16 @@ use std::collections::HashMap; use super::execution::SimulateTransactionQueryParameters; use super::TransactionSimulationResponse; -use crate::accept::AcceptFormat; +use crate::accept::AcceptJsonProtobufBcs; use crate::objects::ObjectNotFoundError; use crate::openapi::ApiEndpoint; use crate::openapi::OperationBuilder; use crate::openapi::RequestBodyBuilder; use crate::openapi::ResponseBuilder; use crate::openapi::RouteHandler; +use crate::proto; use crate::reader::StateReader; -use crate::response::ResponseContent; +use crate::response::JsonProtobufBcs; use crate::RestError; use crate::RestService; use crate::Result; @@ -79,6 +80,7 @@ impl ApiEndpoint for ResolveTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -93,9 +95,15 @@ impl ApiEndpoint for ResolveTransaction { async fn resolve_transaction( State(state): State, Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, Json(unresolved_transaction): Json, -) -> Result> { +) -> Result< + JsonProtobufBcs< + ResolveTransactionResponse, + proto::ResolveTransactionResponse, + ResolveTransactionResponse, + >, +> { let executor = state .executor .as_ref() @@ -183,14 +191,16 @@ async fn resolve_transaction( None }; - ResolveTransactionResponse { + let response = ResolveTransactionResponse { transaction: 
resolved_transaction.try_into()?, simulation, + }; + + match accept { + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } - .pipe(|response| match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), - }) .pipe(Ok) } diff --git a/crates/sui-rest-api/tests/bootstrap.rs b/crates/sui-rest-api/tests/bootstrap.rs new file mode 100644 index 0000000000000..d5e606dc0a446 --- /dev/null +++ b/crates/sui-rest-api/tests/bootstrap.rs @@ -0,0 +1,56 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::{fs, path::PathBuf, process::Command}; + +#[test] +fn bootstrap() { + let root_dir = PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); + let proto_dir = root_dir.join("proto"); + let proto_ext = std::ffi::OsStr::new("proto"); + let proto_files = fs::read_dir(&proto_dir).and_then(|dir| { + dir.filter_map(|entry| { + (|| { + let entry = entry?; + if entry.file_type()?.is_dir() { + return Ok(None); + } + + let path = entry.path(); + if path.extension() != Some(proto_ext) { + return Ok(None); + } + + Ok(Some(path)) + })() + .transpose() + }) + .collect::, _>>() + }); + let proto_files = match proto_files { + Ok(files) => files, + Err(error) => panic!("failed to list proto files: {}", error), + }; + + let out_dir = root_dir.join("src").join("proto").join("generated"); + + if let Err(error) = prost_build::Config::new() + .bytes(["."]) + .out_dir(&out_dir) + .compile_protos(&proto_files[..], &[proto_dir]) + { + panic!("failed to compile `rest` protobuf: {}", error); + } + + let status = Command::new("git") + .arg("diff") + .arg("--exit-code") + .arg("--") + .arg(out_dir) + .status(); + match status { + Ok(status) if !status.success() => panic!("You should commit the protobuf files"), + Err(error) => panic!("failed to run 
`git diff`: {}", error), + Ok(_) => {} + } +} diff --git a/crates/sui-single-node-benchmark/src/mock_storage.rs b/crates/sui-single-node-benchmark/src/mock_storage.rs index c6f03fe1bb8bd..78d9eedb0ec82 100644 --- a/crates/sui-single-node-benchmark/src/mock_storage.rs +++ b/crates/sui-single-node-benchmark/src/mock_storage.rs @@ -48,7 +48,7 @@ impl InMemoryObjectStore { tx_key: &TransactionKey, input_object_kinds: &[InputObjectKind], ) -> SuiResult { - let shared_locks_cell: OnceCell> = OnceCell::new(); + let shared_locks_cell: OnceCell>> = OnceCell::new(); let mut input_objects = Vec::new(); for kind in input_object_kinds { let obj: Option = match kind { @@ -58,11 +58,17 @@ impl InMemoryObjectStore { } InputObjectKind::SharedMoveObject { id, .. } => { - let shared_locks = shared_locks_cell.get_or_try_init(|| { - Ok::, SuiError>( - shared_locks.get_shared_locks(tx_key)?.into_iter().collect(), - ) - })?; + let shared_locks = shared_locks_cell + .get_or_init(|| { + shared_locks + .get_shared_locks(tx_key) + .expect("get_shared_locks should not fail") + .map(|l| l.into_iter().collect()) + }) + .as_ref() + .ok_or_else(|| SuiError::GenericAuthorityError { + error: "Shared object locks should have been set.".to_string(), + })?; let version = shared_locks.get(id).unwrap_or_else(|| { panic!("Shared object locks should have been set. 
key: {tx_key:?}, obj id: {id:?}") }); @@ -174,7 +180,7 @@ impl GetSharedLocks for InMemoryObjectStore { fn get_shared_locks( &self, _key: &TransactionKey, - ) -> Result, SuiError> { + ) -> SuiResult>> { unreachable!() } } diff --git a/crates/sui-storage/Cargo.toml b/crates/sui-storage/Cargo.toml index c56fd99370f45..4c388f65c428c 100644 --- a/crates/sui-storage/Cargo.toml +++ b/crates/sui-storage/Cargo.toml @@ -66,5 +66,5 @@ pretty_assertions.workspace = true once_cell.workspace = true sui-test-transaction-builder.workspace = true sui-types = { workspace = true, features = ["test-utils"] } -sui-macros = { workspace = true } -sui-simulator = { workspace = true } +sui-macros.workspace = true +sui-simulator.workspace = true diff --git a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap index 74ac933740527..2fcba9da75085 100644 --- a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap +++ b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap @@ -6,7 +6,7 @@ ssfn_config_info: ~ validator_config_info: ~ parameters: chain_start_timestamp_ms: 0 - protocol_version: 65 + protocol_version: 68 allow_insertion_of_extra_objects: true epoch_duration_ms: 86400000 stake_subsidy_start_epoch: 0 @@ -49,3 +49,4 @@ accounts: - 30000000000000000 - 30000000000000000 - 30000000000000000 + diff --git a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap index d36c1fad5d370..7e39140036125 100644 --- a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap +++ b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap @@ -3,7 +3,7 @@ source: 
crates/sui-swarm-config/tests/snapshot_tests.rs expression: genesis.sui_system_object().into_genesis_version_for_tooling() --- epoch: 0 -protocol_version: 65 +protocol_version: 68 system_state_version: 1 validators: total_stake: 20000000000000000 @@ -240,13 +240,13 @@ validators: next_epoch_worker_address: ~ extra_fields: id: - id: "0x834dab6f0617450d0dcfc6dd58e2a918d439fe50d2f914ac6e60dbbc38328ad3" + id: "0x2f1c192f30b36b0d0a47520ff814ace58ce8a73580c9bf86c0fc729781351bcc" size: 0 voting_power: 10000 - operation_cap_id: "0x72b130e2d852f3468d46e67179268cf2b1a000855a549b0dcab002426836a768" + operation_cap_id: "0x0e83ac0a1c9938e12a692c734f8f38dfe5858076b17611402d46afcd5887ba8e" gas_price: 1000 staking_pool: - id: "0xdc1b1962050243cbe5efd781c560b88d1c4d43da28ddc1b3f1b558210ca24009" + id: "0x222477b804c11404854c3c14cf29a2840472651c91d8870e07ae852a98c0a2e3" activation_epoch: 0 deactivation_epoch: ~ sui_balance: 20000000000000000 @@ -254,14 +254,14 @@ validators: value: 0 pool_token_balance: 20000000000000000 exchange_rates: - id: "0xb972b09a2f5914997289ba4ebbff641d7f0a3faae622ee29997c1f6713fe7e78" + id: "0xf532945be4e9eb7ef597867c6dee34dc1d89f55f711d084bc6aa01c7c99ea179" size: 1 pending_stake: 0 pending_total_sui_withdraw: 0 pending_pool_token_withdraw: 0 extra_fields: id: - id: "0xc6dec0733287765e9f8600169f776566db59a0f6cb1a854de1865db22cda913d" + id: "0x4b5abcdcefc7404834889f2890b2b626ab85c15a20b19130b56cbee9bbe2b0af" size: 0 commission_rate: 200 next_epoch_stake: 20000000000000000 @@ -269,27 +269,27 @@ validators: next_epoch_commission_rate: 200 extra_fields: id: - id: "0xe6c77a880c82d4f3e1b8a5d503f3a8e88881de8c7b722e66569ff35f8f505d29" + id: "0xeb9ab0c31391cb533e672f2aa1a919f474a988620c5eac625dab9e28e15a7661" size: 0 pending_active_validators: contents: - id: "0xb84831d86c7697202c857ede215fb5739e4c68e1aee6051efb99496833578d22" + id: "0x1e0beb565adb7f908bce1bb65d14b5da4c6e4e0ff281e91e4c79fd7a20947d35" size: 0 pending_removals: [] staking_pool_mappings: - id: 
"0xb892dd544e8efe2b3c9c89be5689a297ca4ef59804308a81b11c1d89b90f6e18" + id: "0xabce5d04c1673e4e317e5c0f78bc346c4960d25473e095c9fb668ac32f5e216d" size: 1 inactive_validators: - id: "0xe285cf22b5d3c56a32961398e8f64a9f4282eb94782aef9080d9a6954e85c7d5" + id: "0x9069998be467d392b0a8ce1f598a353c415729af75bb2ebafbe66d26114ad52f" size: 0 validator_candidates: - id: "0x207f4b15b8cd26b0af90e308b677c2589bd914280198b2e8e8528a37f7240c35" + id: "0x68667de51bea6086d3fd60059841df6da32a6fd475ad52ad10597797ec6a3ca9" size: 0 at_risk_validators: contents: [] extra_fields: id: - id: "0x41921a36773858d7ea5e092810acf3e1ecbd5927a34ec4f460a2988390a57969" + id: "0xfc98b9ca99540332ff24894fd810f83a85e726542c2119bc1325d350b0399434" size: 0 storage_fund: total_object_storage_rebates: @@ -306,7 +306,7 @@ parameters: validator_low_stake_grace_period: 7 extra_fields: id: - id: "0xe96139872f584b831f86b074cf24c6158f23dd472df821c8b75a5777463d1c3d" + id: "0x16212fe3db87d96453a048041166f3f491c06f00c45a4efe181bf7708c292d3f" size: 0 reference_gas_price: 1000 validator_report_records: @@ -320,7 +320,7 @@ stake_subsidy: stake_subsidy_decrease_rate: 1000 extra_fields: id: - id: "0xe1172cf766a6e4d4fb8d0a228d794e097462e114626bdedce942087b1c029965" + id: "0x3110ada5ccc4394928c0116629587c1ad110099430f19ea183d799689eb5a8df" size: 0 safe_mode: false safe_mode_storage_rewards: @@ -332,5 +332,6 @@ safe_mode_non_refundable_storage_fee: 0 epoch_start_timestamp_ms: 10 extra_fields: id: - id: "0x531d74b5c7080de67c235dd165095164784ab991a92932bc878c60eaf4fa2a3d" + id: "0x34587a89960874da16d01bb778a02f7603278b0da8ec9258668982948f9b9535" size: 0 + diff --git a/crates/sui-swarm/Cargo.toml b/crates/sui-swarm/Cargo.toml index 51caeadf3b797..6823246c0cd0a 100644 --- a/crates/sui-swarm/Cargo.toml +++ b/crates/sui-swarm/Cargo.toml @@ -25,6 +25,7 @@ sui-swarm-config.workspace = true sui-macros.workspace = true sui-node.workspace = true sui-protocol-config.workspace = true +sui-tls.workspace = true sui-types.workspace = true 
mysten-metrics.workspace = true mysten-network.workspace = true diff --git a/crates/sui-swarm/src/memory/node.rs b/crates/sui-swarm/src/memory/node.rs index 5cc10a6b1f7b5..541c2bc962850 100644 --- a/crates/sui-swarm/src/memory/node.rs +++ b/crates/sui-swarm/src/memory/node.rs @@ -9,6 +9,7 @@ use sui_config::NodeConfig; use sui_node::SuiNodeHandle; use sui_types::base_types::AuthorityName; use sui_types::base_types::ConciseableName; +use sui_types::crypto::KeypairTraits; use tap::TapFallible; use tracing::{error, info}; @@ -106,7 +107,12 @@ impl Node { if is_validator { let network_address = self.config().network_address().clone(); - let channel = mysten_network::client::connect(&network_address) + let tls_config = sui_tls::create_rustls_client_config( + self.config().network_key_pair().public().to_owned(), + sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + None, + ); + let channel = mysten_network::client::connect(&network_address, Some(tls_config)) .await .map_err(|err| anyhow!(err.to_string())) .map_err(HealthCheckError::Failure) diff --git a/crates/sui-synthetic-ingestion/Cargo.toml b/crates/sui-synthetic-ingestion/Cargo.toml new file mode 100644 index 0000000000000..07391de552efe --- /dev/null +++ b/crates/sui-synthetic-ingestion/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "sui-synthetic-ingestion" +version = "0.0.0" +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[dependencies] +async-trait.workspace = true +simulacrum.workspace = true +sui-test-transaction-builder.workspace = true +sui-types = { workspace = true, features = ["test-utils"] } +tokio.workspace = true +tracing.workspace = true + +[dev-dependencies] +tempfile.workspace = true diff --git a/crates/sui-synthetic-ingestion/src/benchmark.rs b/crates/sui-synthetic-ingestion/src/benchmark.rs new file mode 100644 index 0000000000000..5eafe1348de94 --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/benchmark.rs @@ -0,0 +1,160 @@ +// Copyright (c) Mysten 
Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::synthetic_ingestion::generate_ingestion; +use crate::tps_tracker::TpsTracker; +use crate::{IndexerProgress, SyntheticIngestionConfig}; +use std::time::Duration; +use tokio::sync::watch; +use tracing::{error, info}; + +/// A trait that can be implemented on top of any indexer to benchmark its throughput. +/// It will generate synthetic transactions and checkpoints as ingestion source. +#[async_trait::async_trait] +pub trait BenchmarkableIndexer { + /// Allows the benchmark to subscribe and monitor the committed checkpoints progress. + /// This is needed both in order to log periodic throughput, but also + /// to know when the benchmark can stop. + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver>; + /// Start the indexer. Note that we only start a timer before calling this function. + /// So the implementation should only start the indexer when this function is called. + async fn start(&mut self); + /// Stop the indexer. This would allow the benchmark to exit. 
+ async fn stop(mut self); +} + +pub async fn run_benchmark( + config: SyntheticIngestionConfig, + mut indexer: I, +) -> u64 { + assert!( + config.starting_checkpoint > 0, + "Checkpoint 0 is reserved for genesis checkpoint" + ); + let expected_last_checkpoint = config.starting_checkpoint + config.num_checkpoints - 1; + if dir_not_empty(&config.ingestion_dir) { + info!("Starting from an existing ingestion workload directory"); + } else { + generate_ingestion(config.clone()); + } + + let mut rx = indexer.subscribe_to_committed_checkpoints(); + let mut tps_tracker = TpsTracker::new(Duration::from_secs(1)); + info!("Starting benchmark..."); + indexer.start().await; + + loop { + if let Err(err) = rx.changed().await { + error!("Error polling from watch channel, exiting early: {:?}", err); + break; + } + let committed_checkpoint = rx.borrow_and_update().clone(); + if let Some(checkpoint) = committed_checkpoint { + tps_tracker.update(checkpoint.clone()); + if checkpoint.checkpoint == expected_last_checkpoint { + break; + } + } + } + let seq = tps_tracker.finish(); + indexer.stop().await; + seq +} + +fn dir_not_empty(dir: &std::path::Path) -> bool { + dir.read_dir() + .map(|mut it| it.next().is_some()) + .unwrap_or(false) +} + +#[cfg(test)] +mod test { + use crate::benchmark::{run_benchmark, BenchmarkableIndexer}; + use crate::{IndexerProgress, SyntheticIngestionConfig}; + use std::path::PathBuf; + use std::time::Duration; + use sui_types::messages_checkpoint::CheckpointSequenceNumber; + use tokio::sync::watch; + + struct MockIndexer { + starting_checkpoint: CheckpointSequenceNumber, + ingestion_dir: PathBuf, + committed_checkpoint_tx: Option>>, + committed_checkpoint_rx: watch::Receiver>, + } + + impl MockIndexer { + fn new(starting_checkpoint: CheckpointSequenceNumber, ingestion_dir: PathBuf) -> Self { + let (committed_checkpoint_tx, committed_checkpoint_rx) = watch::channel(None); + Self { + starting_checkpoint, + ingestion_dir, + committed_checkpoint_tx: 
Some(committed_checkpoint_tx), + committed_checkpoint_rx, + } + } + } + + #[async_trait::async_trait] + impl BenchmarkableIndexer for MockIndexer { + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver> { + self.committed_checkpoint_rx.clone() + } + + async fn start(&mut self) { + let tx = self.committed_checkpoint_tx.take().unwrap(); + let mut checkpoint = self.starting_checkpoint; + let dir = self.ingestion_dir.clone(); + tokio::task::spawn(async move { + loop { + tokio::time::sleep(Duration::from_millis(100)).await; + let path = dir.join(format!("{}.chk", checkpoint)); + if std::fs::metadata(&path).is_err() { + break; + } + tx.send(Some(IndexerProgress { + checkpoint, + network_total_transactions: 0, + })) + .unwrap(); + checkpoint += 1; + } + }); + } + + async fn stop(mut self) {} + } + + #[tokio::test] + async fn test_run_ingestion_benchmark() { + let tmp_dir = tempfile::tempdir().unwrap(); + let config = SyntheticIngestionConfig { + ingestion_dir: tmp_dir.path().to_path_buf(), + checkpoint_size: 10, + num_checkpoints: 10, + starting_checkpoint: 1, + }; + let indexer = MockIndexer::new(config.starting_checkpoint, tmp_dir.path().to_path_buf()); + let last_checkpoint = + tokio::time::timeout(Duration::from_secs(10), run_benchmark(config, indexer)) + .await + .unwrap(); + assert_eq!(last_checkpoint, 10); + } + #[tokio::test] + async fn test_run_ingestion_benchmark_custom_starting_checkpoint() { + let tmp_dir = tempfile::tempdir().unwrap(); + let config = SyntheticIngestionConfig { + ingestion_dir: tmp_dir.path().to_path_buf(), + checkpoint_size: 10, + num_checkpoints: 10, + starting_checkpoint: 1000, + }; + let indexer = MockIndexer::new(config.starting_checkpoint, tmp_dir.path().to_path_buf()); + let last_checkpoint = + tokio::time::timeout(Duration::from_secs(10), run_benchmark(config, indexer)) + .await + .unwrap(); + assert_eq!(last_checkpoint, 1009); + } +} diff --git a/crates/sui-synthetic-ingestion/src/lib.rs 
b/crates/sui-synthetic-ingestion/src/lib.rs new file mode 100644 index 0000000000000..d7f52b8d34b3a --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/lib.rs @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::path::PathBuf; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; + +pub mod benchmark; +mod synthetic_ingestion; +mod tps_tracker; + +#[derive(Clone, Debug)] +pub struct SyntheticIngestionConfig { + /// Directory to write the ingestion data to. + pub ingestion_dir: PathBuf, + /// Number of transactions in a checkpoint. + pub checkpoint_size: u64, + /// Total number of synthetic checkpoints to generate. + pub num_checkpoints: u64, + /// Customize the first checkpoint sequence number to be committed. + /// This is useful if we want to benchmark on a non-empty database. + /// Note that this must be > 0, because the genesis checkpoint is always 0. + pub starting_checkpoint: CheckpointSequenceNumber, +} + +#[derive(Clone, Debug)] +pub struct IndexerProgress { + pub checkpoint: CheckpointSequenceNumber, + pub network_total_transactions: u64, +} diff --git a/crates/sui-synthetic-ingestion/src/synthetic_ingestion.rs b/crates/sui-synthetic-ingestion/src/synthetic_ingestion.rs new file mode 100644 index 0000000000000..1ce4848506a84 --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/synthetic_ingestion.rs @@ -0,0 +1,56 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::SyntheticIngestionConfig; +use simulacrum::Simulacrum; +use sui_test_transaction_builder::TestTransactionBuilder; +use sui_types::crypto::get_account_key_pair; +use sui_types::effects::TransactionEffectsAPI; +use sui_types::gas_coin::MIST_PER_SUI; +use sui_types::utils::to_sender_signed_transaction; +use tracing::info; + +// TODO: Simulacrum does serial execution which could be slow if +// we need to generate a large number of transactions. 
+// We may want to make Simulacrum support parallel execution. + +pub(crate) fn generate_ingestion(config: SyntheticIngestionConfig) { + info!("Generating synthetic ingestion data. config: {:?}", config); + let timer = std::time::Instant::now(); + let mut sim = Simulacrum::new(); + let SyntheticIngestionConfig { + ingestion_dir, + checkpoint_size, + num_checkpoints, + starting_checkpoint, + } = config; + sim.set_data_ingestion_path(ingestion_dir); + sim.override_last_checkpoint_number(starting_checkpoint - 1); + + let gas_price = sim.reference_gas_price(); + let (sender, keypair) = get_account_key_pair(); + let effects = sim.request_gas(sender, MIST_PER_SUI * 1000000).unwrap(); + let mut gas_object = effects.created()[0].0; + let mut tx_count = 0; + for i in 0..num_checkpoints { + for _ in 0..checkpoint_size { + let tx_data = TestTransactionBuilder::new(sender, gas_object, gas_price) + .transfer_sui(Some(1), sender) + .build(); + let tx = to_sender_signed_transaction(tx_data, &keypair); + let (effects, _) = sim.execute_transaction(tx).unwrap(); + gas_object = effects.gas_object().0; + tx_count += 1; + } + sim.create_checkpoint(); + if (i + 1) % 100 == 0 { + info!("Generated {} checkpoints, {} transactions", i + 1, tx_count); + } + } + info!( + "Generated {} transactions in {} checkpoints. Total time: {:?}", + tx_count, + num_checkpoints, + timer.elapsed() + ); +} diff --git a/crates/sui-synthetic-ingestion/src/tps_tracker.rs b/crates/sui-synthetic-ingestion/src/tps_tracker.rs new file mode 100644 index 0000000000000..481e92e6fc8b9 --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/tps_tracker.rs @@ -0,0 +1,80 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::IndexerProgress; +use std::time::{Duration, Instant}; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use tracing::info; + +pub(crate) struct TpsTracker { + start_time: Instant, + starting_state: Option, + + prev_time: Instant, + prev_timed_state: Option, + + cur_state: Option, + + peak_tps: f64, + + /// Log time elapsed and TPS every log_frequency duration. + log_frequency: Duration, +} + +impl TpsTracker { + pub fn new(log_frequency: Duration) -> Self { + let start_time = Instant::now(); + Self { + start_time, + starting_state: None, + prev_time: start_time, + prev_timed_state: None, + cur_state: None, + peak_tps: 0.0, + log_frequency, + } + } + + pub fn update(&mut self, cur_state: IndexerProgress) { + self.cur_state = Some(cur_state.clone()); + let cur_time = Instant::now(); + let Some(prev_timed_state) = self.prev_timed_state.clone() else { + self.prev_time = cur_time; + self.prev_timed_state = Some(cur_state.clone()); + self.start_time = cur_time; + self.starting_state = Some(cur_state); + return; + }; + let elapsed = cur_time - self.prev_time; + if elapsed < self.log_frequency { + return; + } + let tps = (cur_state.network_total_transactions + - prev_timed_state.network_total_transactions) as f64 + / elapsed.as_secs_f64(); + let cps = + (cur_state.checkpoint - prev_timed_state.checkpoint) as f64 / elapsed.as_secs_f64(); + info!( + "Last processed checkpoint: {}, Current TPS: {:.2}, CPS: {:.2}", + cur_state.checkpoint, tps, cps + ); + self.peak_tps = self.peak_tps.max(tps); + self.prev_time = cur_time; + self.prev_timed_state = Some(cur_state); + } + + pub fn finish(&mut self) -> CheckpointSequenceNumber { + let elapsed = Instant::now() - self.start_time; + let cur_state = self.cur_state.clone().unwrap(); + let starting_state = self.starting_state.clone().unwrap(); + let tps = (cur_state.network_total_transactions - starting_state.network_total_transactions) + as f64 + / 
elapsed.as_secs_f64(); + let cps = (cur_state.checkpoint - starting_state.checkpoint) as f64 / elapsed.as_secs_f64(); + info!( + "Benchmark completed. Total time: {:?}, Average TPS: {:.2}, CPS: {:.2}. Peak TPS: {:.2}", + elapsed, tps, cps, self.peak_tps, + ); + cur_state.checkpoint + } +} diff --git a/crates/sui-tls/src/lib.rs b/crates/sui-tls/src/lib.rs index 7b3b9c23f5796..7f40317d43303 100644 --- a/crates/sui-tls/src/lib.rs +++ b/crates/sui-tls/src/lib.rs @@ -5,10 +5,9 @@ mod acceptor; mod certgen; mod verifier; -pub const SUI_VALIDATOR_SERVER_NAME: &str = "sui"; - pub use acceptor::{TlsAcceptor, TlsConnectionInfo}; pub use certgen::SelfSignedCertificate; +use rustls::ClientConfig; pub use verifier::{ public_key_from_certificate, AllowAll, AllowPublicKeys, Allower, ClientCertVerifier, ServerCertVerifier, @@ -16,6 +15,46 @@ pub use verifier::{ pub use rustls; +use fastcrypto::ed25519::{Ed25519PrivateKey, Ed25519PublicKey}; +use tokio_rustls::rustls::ServerConfig; + +pub const SUI_VALIDATOR_SERVER_NAME: &str = "sui"; + +pub fn create_rustls_server_config( + private_key: Ed25519PrivateKey, + server_name: String, + allower: A, +) -> ServerConfig { + let verifier = ClientCertVerifier::new(allower, server_name.clone()); + // TODO: refactor to use key bytes + let self_signed_cert = SelfSignedCertificate::new(private_key, server_name.as_str()); + let tls_cert = self_signed_cert.rustls_certificate(); + let tls_private_key = self_signed_cert.rustls_private_key(); + let mut tls_config = verifier + .rustls_server_config(vec![tls_cert], tls_private_key) + .unwrap_or_else(|e| panic!("Failed to create TLS server config: {:?}", e)); + tls_config.alpn_protocols = vec![b"h2".to_vec()]; + tls_config +} + +pub fn create_rustls_client_config( + target_public_key: Ed25519PublicKey, + server_name: String, + client_key: Option, // optional self-signed cert for client verification +) -> ClientConfig { + let tls_config = ServerCertVerifier::new(target_public_key, server_name.clone()); + 
let tls_config = if let Some(private_key) = client_key { + let self_signed_cert = SelfSignedCertificate::new(private_key, server_name.as_str()); + let tls_cert = self_signed_cert.rustls_certificate(); + let tls_private_key = self_signed_cert.rustls_private_key(); + tls_config.rustls_client_config_with_client_auth(vec![tls_cert], tls_private_key) + } else { + tls_config.rustls_client_config_with_no_client_auth() + } + .unwrap_or_else(|e| panic!("Failed to create TLS client config: {e:?}")); + tls_config +} + #[cfg(test)] mod tests { use std::collections::BTreeSet; diff --git a/crates/sui-tls/src/verifier.rs b/crates/sui-tls/src/verifier.rs index 562cc34d48973..b1e87fbf88823 100644 --- a/crates/sui-tls/src/verifier.rs +++ b/crates/sui-tls/src/verifier.rs @@ -178,20 +178,30 @@ impl ServerCertVerifier { Self { public_key, name } } - pub fn rustls_client_config( + pub fn rustls_client_config_with_client_auth( self, certificates: Vec>, private_key: PrivateKeyDer<'static>, ) -> Result { - let mut config = rustls::ClientConfig::builder_with_provider(Arc::new( + rustls::ClientConfig::builder_with_provider(Arc::new( rustls::crypto::ring::default_provider(), )) .with_safe_default_protocol_versions()? .dangerous() .with_custom_certificate_verifier(std::sync::Arc::new(self)) - .with_client_auth_cert(certificates, private_key)?; - config.alpn_protocols = vec![b"h2".to_vec()]; - Ok(config) + .with_client_auth_cert(certificates, private_key) + } + + pub fn rustls_client_config_with_no_client_auth( + self, + ) -> Result { + Ok(rustls::ClientConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_safe_default_protocol_versions()? 
+ .dangerous() + .with_custom_certificate_verifier(std::sync::Arc::new(self)) + .with_no_client_auth()) } } diff --git a/crates/sui-tool/Cargo.toml b/crates/sui-tool/Cargo.toml index cb9bff954df3e..e2d0ce18338df 100644 --- a/crates/sui-tool/Cargo.toml +++ b/crates/sui-tool/Cargo.toml @@ -47,4 +47,5 @@ sui-storage.workspace = true sui-types.workspace = true sui-archival.workspace = true sui-package-dump.workspace = true +sui-tls.workspace = true bin-version.workspace = true diff --git a/crates/sui-tool/src/lib.rs b/crates/sui-tool/src/lib.rs index 853445cf9f921..f73e73a9eef5a 100644 --- a/crates/sui-tool/src/lib.rs +++ b/crates/sui-tool/src/lib.rs @@ -106,8 +106,14 @@ async fn make_clients( for validator in active_validators { let net_addr = Multiaddr::try_from(validator.net_address).unwrap(); + // TODO: Enable TLS on this interface with below config, once support is rolled out to validators. + // let tls_config = sui_tls::create_rustls_client_config( + // sui_types::crypto::NetworkPublicKey::from_bytes(&validator.network_pubkey_bytes)?, + // sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + // None, + // ); let channel = net_config - .connect_lazy(&net_addr) + .connect_lazy(&net_addr, None) .map_err(|err| anyhow!(err.to_string()))?; let client = NetworkAuthorityClient::new(channel); let public_key_bytes = @@ -498,8 +504,8 @@ pub(crate) fn make_anemo_config() -> anemo_cli::Config { .add_service( "Discovery", anemo_cli::ServiceInfo::new().add_method( - "GetKnownPeers", - anemo_cli::ron_method!(DiscoveryClient, get_known_peers, ()), + "GetKnownPeersV2", + anemo_cli::ron_method!(DiscoveryClient, get_known_peers_v2, ()), ), ) // Sui state sync diff --git a/crates/sui-transactional-test-runner/src/args.rs b/crates/sui-transactional-test-runner/src/args.rs index 2d73a54ea99d9..5e0e12299942d 100644 --- a/crates/sui-transactional-test-runner/src/args.rs +++ b/crates/sui-transactional-test-runner/src/args.rs @@ -5,10 +5,13 @@ use crate::test_adapter::{FakeID, 
SuiTestAdapter}; use anyhow::{bail, ensure}; use clap; use clap::{Args, Parser}; -use move_command_line_common::parser::{parse_u256, parse_u64}; -use move_command_line_common::values::{ParsableValue, ParsedValue}; -use move_command_line_common::{parser::Parser as MoveCLParser, values::ValueToken}; use move_compiler::editions::Flavor; +use move_core_types::parsing::{ + parser::Parser as MoveCLParser, + parser::{parse_u256, parse_u64}, + values::ValueToken, + values::{ParsableValue, ParsedValue}, +}; use move_core_types::runtime_value::{MoveStruct, MoveValue}; use move_core_types::u256::U256; use move_symbol_pool::Symbol; diff --git a/crates/sui-transactional-test-runner/src/lib.rs b/crates/sui-transactional-test-runner/src/lib.rs index 854ab9d096d81..abf62845c2cb6 100644 --- a/crates/sui-transactional-test-runner/src/lib.rs +++ b/crates/sui-transactional-test-runner/src/lib.rs @@ -15,6 +15,7 @@ use simulacrum::SimulatorStore; use simulator_persisted_store::PersistedStore; use std::path::Path; use std::sync::Arc; +use sui_core::authority::authority_per_epoch_store::CertLockGuard; use sui_core::authority::authority_test_utils::send_and_confirm_transaction_with_execution_error; use sui_core::authority::AuthorityState; use sui_json_rpc::authority_state::StateRead; @@ -142,7 +143,11 @@ impl TransactionalAdapter for ValidatorWithFullnode { ); let epoch_store = self.validator.load_epoch_store_one_call_per_task().clone(); - self.validator.read_objects_for_execution(&tx, &epoch_store) + self.validator.read_objects_for_execution( + &CertLockGuard::dummy_for_tests(), + &tx, + &epoch_store, + ) } fn prepare_txn( diff --git a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs index 3dfd6ec45ce6e..eb9397e40e5ea 100644 --- a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs +++ 
b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs @@ -3,7 +3,7 @@ use std::{borrow::BorrowMut, marker::PhantomData, str::FromStr}; -use move_command_line_common::{ +use move_core_types::parsing::{ parser::{Parser, Token}, types::{ParsedType, TypeToken}, }; diff --git a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs index 145bc5347c499..d4d03475881ad 100644 --- a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs +++ b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs @@ -4,8 +4,8 @@ use std::fmt::{self, Display}; use anyhow::bail; -use move_command_line_common::parser::Token; use move_core_types::identifier; +use move_core_types::parsing::parser::Token; #[derive(Eq, PartialEq, Debug, Clone, Copy)] pub enum CommandToken { diff --git a/crates/sui-transactional-test-runner/src/test_adapter.rs b/crates/sui-transactional-test-runner/src/test_adapter.rs index 6ee27e331e740..f9113bb41b8da 100644 --- a/crates/sui-transactional-test-runner/src/test_adapter.rs +++ b/crates/sui-transactional-test-runner/src/test_adapter.rs @@ -15,15 +15,14 @@ use fastcrypto::encoding::{Base64, Encoding}; use fastcrypto::traits::ToFromBytes; use move_binary_format::CompiledModule; use move_bytecode_utils::module_cache::GetModule; -use move_command_line_common::{ - address::ParsedAddress, files::verify_and_create_named_address_mapping, -}; +use move_command_line_common::files::verify_and_create_named_address_mapping; use move_compiler::{ editions::{Edition, Flavor}, shared::{NumberFormat, NumericalAddress, PackageConfig, PackagePaths}, Flags, FullyCompiledProgram, }; use move_core_types::ident_str; +use move_core_types::parsing::address::ParsedAddress; use move_core_types::{ account_address::AccountAddress, identifier::IdentStr, diff --git 
a/crates/sui-types/Cargo.toml b/crates/sui-types/Cargo.toml index 3150c7c4238b7..fcb060ce2bd7c 100644 --- a/crates/sui-types/Cargo.toml +++ b/crates/sui-types/Cargo.toml @@ -101,8 +101,8 @@ harness = false [features] default = [] test-utils = [] -gas-profiler = [ - "move-vm-profiler/gas-profiler", - "move-vm-test-utils/gas-profiler", +tracing = [ + "move-vm-profiler/tracing", + "move-vm-test-utils/tracing", ] fuzzing = ["move-core-types/fuzzing"] diff --git a/crates/sui-types/src/coin.rs b/crates/sui-types/src/coin.rs index d1f19defc6cc3..6c2fc3abd6ece 100644 --- a/crates/sui-types/src/coin.rs +++ b/crates/sui-types/src/coin.rs @@ -67,18 +67,17 @@ impl Coin { /// If the given object is a Coin, deserialize its contents and extract the balance Ok(Some(u64)). /// If it's not a Coin, return Ok(None). /// The cost is 2 comparisons if not a coin, and deserialization if its a Coin. - pub fn extract_balance_if_coin(object: &Object) -> Result, bcs::Error> { - match &object.data { - Data::Move(move_obj) => { - if !move_obj.is_coin() { - return Ok(None); - } + pub fn extract_balance_if_coin(object: &Object) -> Result, bcs::Error> { + let Data::Move(obj) = &object.data else { + return Ok(None); + }; - let coin = Self::from_bcs_bytes(move_obj.contents())?; - Ok(Some(coin.value())) - } - _ => Ok(None), // package - } + let Some(type_) = obj.type_().coin_type_maybe() else { + return Ok(None); + }; + + let coin = Self::from_bcs_bytes(obj.contents())?; + Ok(Some((type_, coin.value()))) } pub fn id(&self) -> &ObjectID { diff --git a/crates/sui-types/src/committee.rs b/crates/sui-types/src/committee.rs index 906fd6ba1c94c..fea794ffd259c 100644 --- a/crates/sui-types/src/committee.rs +++ b/crates/sui-types/src/committee.rs @@ -3,7 +3,9 @@ // SPDX-License-Identifier: Apache-2.0 use super::base_types::*; -use crate::crypto::{random_committee_key_pairs_of_size, AuthorityKeyPair, AuthorityPublicKey}; +use crate::crypto::{ + random_committee_key_pairs_of_size, AuthorityKeyPair, 
AuthorityPublicKey, NetworkPublicKey, +}; use crate::error::{SuiError, SuiResult}; use crate::multiaddr::Multiaddr; use fastcrypto::traits::KeyPair; @@ -353,18 +355,17 @@ pub trait CommitteeTrait { fn weight(&self, author: &K) -> StakeUnit; } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub struct NetworkMetadata { pub network_address: Multiaddr, pub narwhal_primary_address: Multiaddr, + pub network_public_key: Option, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub struct CommitteeWithNetworkMetadata { epoch_id: EpochId, validators: BTreeMap, - - #[serde(skip)] committee: OnceCell, } diff --git a/crates/sui-types/src/error.rs b/crates/sui-types/src/error.rs index 7d97c8ffa7509..f89e948aef12a 100644 --- a/crates/sui-types/src/error.rs +++ b/crates/sui-types/src/error.rs @@ -412,8 +412,9 @@ pub enum SuiError { }, #[error("Signatures in a certificate must form a quorum")] CertificateRequiresQuorum, - #[error("Transaction certificate processing failed: {err}")] - ErrorWhileProcessingCertificate { err: String }, + #[allow(non_camel_case_types)] + #[error("DEPRECATED")] + DEPRECATED_ErrorWhileProcessingCertificate, #[error( "Failed to get a quorum of signed effects when processing transaction: {effects_map:?}" )] diff --git a/crates/sui-types/src/executable_transaction.rs b/crates/sui-types/src/executable_transaction.rs index 964bf9b235947..493204a8e918f 100644 --- a/crates/sui-types/src/executable_transaction.rs +++ b/crates/sui-types/src/executable_transaction.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 use crate::messages_checkpoint::CheckpointSequenceNumber; -use crate::messages_consensus::{AuthorityIndex, Round, TransactionIndex}; use crate::{committee::EpochId, crypto::AuthorityStrongQuorumSignInfo}; use crate::message_envelope::{Envelope, TrustedEnvelope, VerifiedEnvelope}; @@ -24,9 +23,8 @@ pub enum CertificateProof { QuorumExecuted(EpochId), /// Transaction generated by the system, for 
example Clock update transaction SystemTransaction(EpochId), - /// Validity was proven through consensus. Round, authority and transaction index indicate - /// the position of the transaction in the consensus DAG for debugging. - Consensus(EpochId, Round, AuthorityIndex, TransactionIndex), + /// Validity was proven through voting in consensus. + Consensus(EpochId), } impl CertificateProof { @@ -42,13 +40,8 @@ impl CertificateProof { Self::SystemTransaction(epoch) } - pub fn new_from_consensus( - epoch: EpochId, - round: Round, - authority: AuthorityIndex, - transaction_index: TransactionIndex, - ) -> Self { - Self::Consensus(epoch, round, authority, transaction_index) + pub fn new_from_consensus(epoch: EpochId) -> Self { + Self::Consensus(epoch) } pub fn epoch(&self) -> EpochId { @@ -56,7 +49,7 @@ impl CertificateProof { Self::Checkpoint(epoch, _) | Self::QuorumExecuted(epoch) | Self::SystemTransaction(epoch) - | Self::Consensus(epoch, _, _, _) => *epoch, + | Self::Consensus(epoch) => *epoch, Self::Certified(sig) => sig.epoch, } } diff --git a/crates/sui-types/src/lib.rs b/crates/sui-types/src/lib.rs index 4823253ab985f..0a3c4dc45fe90 100644 --- a/crates/sui-types/src/lib.rs +++ b/crates/sui-types/src/lib.rs @@ -153,7 +153,7 @@ pub fn sui_framework_address_concat_string(suffix: &str) -> String { /// Parsing succeeds if and only if `s` matches one of these formats exactly, with no remaining /// suffix. This function is intended for use within the authority codebases. pub fn parse_sui_address(s: &str) -> anyhow::Result { - use move_command_line_common::address::ParsedAddress; + use move_core_types::parsing::address::ParsedAddress; Ok(ParsedAddress::parse(s)? .into_account_address(&resolve_address)? .into()) @@ -163,7 +163,7 @@ pub fn parse_sui_address(s: &str) -> anyhow::Result { /// module name (an identifier). Parsing succeeds if and only if `s` matches this format exactly, /// with no remaining input. 
This function is intended for use within the authority codebases. pub fn parse_sui_module_id(s: &str) -> anyhow::Result { - use move_command_line_common::types::ParsedModuleId; + use move_core_types::parsing::types::ParsedModuleId; ParsedModuleId::parse(s)?.into_module_id(&resolve_address) } @@ -172,7 +172,7 @@ pub fn parse_sui_module_id(s: &str) -> anyhow::Result { /// format exactly, with no remaining input. This function is intended for use within the authority /// codebases. pub fn parse_sui_fq_name(s: &str) -> anyhow::Result<(ModuleId, String)> { - use move_command_line_common::types::ParsedFqName; + use move_core_types::parsing::types::ParsedFqName; ParsedFqName::parse(s)?.into_fq_name(&resolve_address) } @@ -181,7 +181,7 @@ pub fn parse_sui_fq_name(s: &str) -> anyhow::Result<(ModuleId, String)> { /// brackets). Parsing succeeds if and only if `s` matches this format exactly, with no remaining /// input. This function is intended for use within the authority codebase. pub fn parse_sui_struct_tag(s: &str) -> anyhow::Result { - use move_command_line_common::types::ParsedStructType; + use move_core_types::parsing::types::ParsedStructType; ParsedStructType::parse(s)?.into_struct_tag(&resolve_address) } @@ -189,7 +189,7 @@ pub fn parse_sui_struct_tag(s: &str) -> anyhow::Result { /// vector with a type parameter. Parsing succeeds if and only if `s` matches this format exactly, /// with no remaining input. This function is intended for use within the authority codebase. 
pub fn parse_sui_type_tag(s: &str) -> anyhow::Result { - use move_command_line_common::types::ParsedType; + use move_core_types::parsing::types::ParsedType; ParsedType::parse(s)?.into_type_tag(&resolve_address) } @@ -384,7 +384,7 @@ mod tests { #[test] fn test_parse_sui_struct_tag_long_account_addr() { let result = parse_sui_struct_tag( - "0x00000000000000000000000000000000000000000000000000000000000000002::sui::SUI", + "0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI", ) .expect("should not error"); diff --git a/crates/sui-types/src/message_envelope.rs b/crates/sui-types/src/message_envelope.rs index 4a15a39ccef97..37abb210c344f 100644 --- a/crates/sui-types/src/message_envelope.rs +++ b/crates/sui-types/src/message_envelope.rs @@ -10,7 +10,6 @@ use crate::crypto::{ use crate::error::SuiResult; use crate::executable_transaction::CertificateProof; use crate::messages_checkpoint::CheckpointSequenceNumber; -use crate::messages_consensus::{AuthorityIndex, Round, TransactionIndex}; use crate::transaction::SenderSignedData; use fastcrypto::traits::KeyPair; use once_cell::sync::OnceCell; @@ -456,9 +455,6 @@ impl VerifiedEnvelope { pub fn new_from_consensus( transaction: VerifiedEnvelope, epoch: EpochId, - round: Round, - authority: AuthorityIndex, - transaction_index: TransactionIndex, ) -> Self { let inner = transaction.into_inner(); let Envelope { @@ -469,12 +465,7 @@ impl VerifiedEnvelope { VerifiedEnvelope::new_unchecked(Envelope { digest, data, - auth_signature: CertificateProof::new_from_consensus( - epoch, - round, - authority, - transaction_index, - ), + auth_signature: CertificateProof::new_from_consensus(epoch), }) } diff --git a/crates/sui-types/src/mock_checkpoint_builder.rs b/crates/sui-types/src/mock_checkpoint_builder.rs index d97a186c46ce9..da086f770b53b 100644 --- a/crates/sui-types/src/mock_checkpoint_builder.rs +++ b/crates/sui-types/src/mock_checkpoint_builder.rs @@ -63,6 +63,17 @@ impl MockCheckpointBuilder { 
.push(VerifiedExecutionData::new(transaction, effects)) } + pub fn override_last_checkpoint_number( + &mut self, + checkpoint_number: u64, + validator_keys: &impl ValidatorKeypairProvider, + ) { + let mut summary = self.previous_checkpoint.data().clone(); + summary.sequence_number = checkpoint_number; + let checkpoint = Self::create_certified_checkpoint(validator_keys, summary); + self.previous_checkpoint = checkpoint; + } + /// Builds a checkpoint using internally buffered transactions. pub fn build( &mut self, diff --git a/crates/sui-types/src/move_package.rs b/crates/sui-types/src/move_package.rs index 787c83adc3da6..8b12643e4ec16 100644 --- a/crates/sui-types/src/move_package.rs +++ b/crates/sui-types/src/move_package.rs @@ -483,6 +483,10 @@ impl MovePackage { /// The ObjectID that this package's modules believe they are from, at runtime (can differ from /// `MovePackage::id()` in the case of package upgrades). pub fn original_package_id(&self) -> ObjectID { + if self.version == OBJECT_START_VERSION { + // for a non-upgraded package, original ID is just the package ID + return self.id; + } let bytes = self.module_map.values().next().expect("Empty module map"); let module = CompiledModule::deserialize_with_defaults(bytes) .expect("A Move package contains a module that cannot be deserialized"); diff --git a/crates/sui-types/src/passkey_authenticator.rs b/crates/sui-types/src/passkey_authenticator.rs index d67ac5a057741..1c7a5dcd5f38f 100644 --- a/crates/sui-types/src/passkey_authenticator.rs +++ b/crates/sui-types/src/passkey_authenticator.rs @@ -20,8 +20,7 @@ use once_cell::sync::OnceCell; use passkey_types::webauthn::{ClientDataType, CollectedClientData}; use schemars::JsonSchema; use serde::{Deserialize, Deserializer, Serialize}; -use shared_crypto::intent::Intent; -use shared_crypto::intent::{IntentMessage, INTENT_PREFIX_LENGTH}; +use shared_crypto::intent::IntentMessage; use std::hash::Hash; use std::hash::Hasher; use std::sync::Arc; @@ -55,13 +54,10 @@ pub 
struct PasskeyAuthenticator { #[serde(skip)] pk: Secp256r1PublicKey, - /// Valid intent parsed from the first 3 bytes of `client_data_json.challenge`. + /// Decoded `client_data_json.challenge` which is expected to be the signing message + /// `hash(Intent | bcs_message)` #[serde(skip)] - intent: Intent, - - /// Valid digest parsed from the last 32 bytes of `client_data_json.challenge`. - #[serde(skip)] - digest: [u8; DefaultHash::OUTPUT_SIZE], + challenge: [u8; DefaultHash::OUTPUT_SIZE], /// Initialization of bytes for passkey in serialized form. #[serde(skip)] @@ -94,22 +90,13 @@ impl TryFrom for PasskeyAuthenticator { }); }; - let parsed_challenge = Base64UrlUnpadded::decode_vec(&client_data_json_parsed.challenge) + let challenge = Base64UrlUnpadded::decode_vec(&client_data_json_parsed.challenge) .map_err(|_| SuiError::InvalidSignature { error: "Invalid encoded challenge".to_string(), - })?; - - let intent = - Intent::from_bytes(&parsed_challenge[..INTENT_PREFIX_LENGTH]).map_err(|_| { - SuiError::InvalidSignature { - error: "Invalid intent from challenge".to_string(), - } - })?; - - let digest = parsed_challenge[INTENT_PREFIX_LENGTH..] + })? 
.try_into() .map_err(|_| SuiError::InvalidSignature { - error: "Invalid digest from challenge".to_string(), + error: "Invalid encoded challenge".to_string(), })?; if raw.user_signature.scheme() != SignatureScheme::Secp256r1 { @@ -134,8 +121,7 @@ impl TryFrom for PasskeyAuthenticator { client_data_json: raw.client_data_json, signature, pk, - intent, - digest, + challenge, bytes: OnceCell::new(), }) } @@ -235,7 +221,7 @@ impl AuthenticatorTrait for PasskeyAuthenticator { T: Serialize, { // Check the intent and signing is consisted from what's parsed from client_data_json.challenge - if intent_msg.intent != self.intent || to_signing_digest(intent_msg) != self.digest { + if self.challenge != to_signing_message(intent_msg) { return Err(SuiError::InvalidSignature { error: "Invalid challenge".to_string(), }); @@ -289,26 +275,12 @@ impl AsRef<[u8]> for PasskeyAuthenticator { .expect("OnceCell invariant violated") } } -/// Compute the digest that the signature committed over as `intent || hash(tx_data)`, total -/// of 3 + 32 = 35 bytes. -pub fn to_signing_message( - intent_msg: &IntentMessage, -) -> [u8; INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE] { - let mut extended = [0; INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE]; - extended[..INTENT_PREFIX_LENGTH].copy_from_slice(&intent_msg.intent.to_bytes()); - extended[INTENT_PREFIX_LENGTH..].copy_from_slice(&to_signing_digest(intent_msg)); - extended -} -/// Compute the BCS hash of the value in intent message. In the case of transaction data, -/// this is the BCS hash of `struct TransactionData`, different from the transaction digest -/// itself that computes the BCS hash of the Rust type prefix and `struct TransactionData`. -/// (See `fn digest` in `impl Message for SenderSignedData`). 
-pub fn to_signing_digest( +/// Compute the signing digest that the signature committed over as `hash(intent || tx_data)` +pub fn to_signing_message( intent_msg: &IntentMessage, ) -> [u8; DefaultHash::OUTPUT_SIZE] { let mut hasher = DefaultHash::default(); - bcs::serialize_into(&mut hasher, &intent_msg.value) - .expect("Message serialization should not fail"); + bcs::serialize_into(&mut hasher, intent_msg).expect("Message serialization should not fail"); hasher.finalize().digest } diff --git a/crates/sui-types/src/storage/mod.rs b/crates/sui-types/src/storage/mod.rs index 93cb31330eb24..bdb9462b0fa3e 100644 --- a/crates/sui-types/src/storage/mod.rs +++ b/crates/sui-types/src/storage/mod.rs @@ -609,5 +609,5 @@ pub trait GetSharedLocks: Send + Sync { fn get_shared_locks( &self, key: &TransactionKey, - ) -> Result, SuiError>; + ) -> SuiResult>>; } diff --git a/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs b/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs index 9c7736ec470b9..118f15c89b420 100644 --- a/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs +++ b/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs @@ -159,6 +159,7 @@ impl EpochStartSystemStateTrait for EpochStartSystemStateV1 { NetworkMetadata { network_address: validator.sui_net_address.clone(), narwhal_primary_address: validator.narwhal_primary_address.clone(), + network_public_key: Some(validator.narwhal_network_pubkey.clone()), }, ), ) diff --git a/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs b/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs index 0a365fec9c972..380ce1708bda5 100644 --- a/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs +++ b/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs @@ -175,6 +175,7 @@ impl SuiSystemStateTrait for SimTestSuiSystemStateInnerV1 { NetworkMetadata { network_address: 
verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) @@ -291,6 +292,7 @@ impl SuiSystemStateTrait for SimTestSuiSystemStateInnerShallowV2 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) @@ -436,6 +438,7 @@ impl SuiSystemStateTrait for SimTestSuiSystemStateInnerDeepV2 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) diff --git a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs index 9eeeb145f9828..c759b9254490e 100644 --- a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs +++ b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs @@ -553,6 +553,7 @@ impl SuiSystemStateTrait for SuiSystemStateInnerV1 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) diff --git a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs index f0863c2119466..1b8ce5f75d6b9 100644 --- a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs +++ b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs @@ -132,6 +132,7 @@ impl SuiSystemStateTrait for SuiSystemStateInnerV2 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: 
Some(verified_metadata.network_pubkey.clone()), }, ), ) diff --git a/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs b/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs index 0b76e4c344b02..525650a730157 100644 --- a/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs +++ b/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs @@ -4,6 +4,7 @@ use super::{SuiSystemState, SuiSystemStateTrait}; use crate::base_types::{AuthorityName, ObjectID, SuiAddress}; use crate::committee::{CommitteeWithNetworkMetadata, NetworkMetadata}; +use crate::crypto::NetworkPublicKey; use crate::dynamic_field::get_dynamic_field_from_store; use crate::error::SuiError; use crate::id::ID; @@ -202,6 +203,10 @@ impl SuiSystemStateSummary { validator.primary_address.clone(), ) .unwrap(), + network_public_key: NetworkPublicKey::from_bytes( + &validator.network_pubkey_bytes, + ) + .ok(), }, ), ) diff --git a/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs b/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs index a42d73f7f9eda..20e8b0f7fad63 100644 --- a/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs +++ b/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::{str::FromStr, sync::Arc}; +use std::sync::Arc; use super::to_signing_message; use crate::crypto::DefaultHash; @@ -15,10 +15,9 @@ use crate::{ signature_verification::VerifiedDigestCache, transaction::{TransactionData, TEST_ONLY_GAS_UNIT_FOR_TRANSFER}, }; -use fastcrypto::encoding::{Encoding, Hex}; use fastcrypto::hash::HashFunction; use fastcrypto::rsa::{Base64UrlUnpadded, Encoding as _}; -use fastcrypto::{encoding::Base64, traits::ToFromBytes}; +use fastcrypto::traits::ToFromBytes; use p256::pkcs8::DecodePublicKey; use passkey_authenticator::{Authenticator, UserValidationMethod}; use passkey_client::Client; @@ -33,7 +32,7 @@ use passkey_types::{ }, Bytes, Passkey, }; -use shared_crypto::intent::{Intent, IntentMessage, INTENT_PREFIX_LENGTH}; +use shared_crypto::intent::{Intent, IntentMessage}; use url::Url; /// Helper struct to initialize passkey client. @@ -261,7 +260,7 @@ async fn test_passkey_fails_invalid_json() { error: "Invalid client data json".to_string() } ); - const CORRECT_LEN: usize = INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE; + const CORRECT_LEN: usize = DefaultHash::OUTPUT_SIZE; let client_data_json_too_short = format!( r#"{{"type":"webauthn.get", "challenge":"{}","origin":"http://localhost:5173","crossOrigin":false, "unknown": "unknown"}}"#, Base64UrlUnpadded::encode_string(&[0; CORRECT_LEN - 1]) @@ -341,56 +340,56 @@ async fn test_passkey_fails_wrong_client_data_type() { ); } -#[tokio::test] -async fn test_passkey_fails_not_normalized_signature() { - // crafts a particular not normalized signature, fails to verify. 
this is produced from typescript client https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example - let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAHaTZLc0GGZ6RNYAqPC8LWZV7xHO+54zf71arV1MwFUtAcDum6pkbPZZN/iYq0zJpOxiV2wrZAnVU0bnNpOjombGAgAAAAAAAAAgAIiQFrz1abd2rNdo76dQS026yMAS1noA7FiGsggyt9V2k2S3NBhmekTWAKjwvC1mVe8RzvueM3+9Wq1dTMBVLegDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); - let response = PasskeyResponse:: { - user_sig_bytes: Hex::decode("02bbd02ace0bad3b32eb3a891dc5c85e56274f52695d24db41b247ec694d1531d6fe1a5bec11a8063d1eb0512e7971bfd23395c2cb8862f73049d0f78fd204c6d602276d5f3a22f3e698cdd2272a63da8bfdd9344de73312c7f7f9eca21bfc304f2e").unwrap(), - authenticator_data: Hex::decode("49960de5880e8c687434170f6476605b8fe4aeb9a28632c7995cf3ba831d97631d00000000").unwrap(), - client_data_json: r#"{"type":"webauthn.get","challenge":"AAAAZgUD1inhS1l9qUfZePaivu6IbIo_SxCGmYcfTwrmcFU","origin":"http://localhost:5173","crossOrigin":false}"#.to_string(), - intent_msg: IntentMessage::new(Intent::sui_transaction(), tx_data), - sender: SuiAddress::from_str("0x769364b73418667a44d600a8f0bc2d6655ef11cefb9e337fbd5aad5d4cc0552d").unwrap() - }; - let sig = GenericSignature::PasskeyAuthenticator( - PasskeyAuthenticator::new_for_testing( - response.authenticator_data, - response.client_data_json, - Signature::from_bytes(&response.user_sig_bytes).unwrap(), - ) - .unwrap(), - ); - - let res = sig.verify_authenticator( - &response.intent_msg, - response.sender, - 0, - &Default::default(), - Arc::new(VerifiedDigestCache::new_empty()), - ); - let err = res.unwrap_err(); - assert_eq!( - err, - SuiError::InvalidSignature { - error: "Fails to verify".to_string() - } - ); -} - -#[tokio::test] -async fn test_real_passkey_output() { - // response from a real passkey authenticator created in iCloud, from typescript client: https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example - let address = - 
SuiAddress::from_str("0xac8564f638fbf673fc92eb85b5abe5f7c29bdaa60a4a10329868fbe6c551dda2") - .unwrap(); - let sig = GenericSignature::from_bytes(&Base64::decode("BiVJlg3liA6MaHQ0Fw9kdmBbj+SuuaKGMseZXPO6gx2XYx0AAAAAigF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCJjaGFsbGVuZ2UiOiJBQUFBdF9taklCMXZiVnBZTTZXVjZZX29peDZKOGFOXzlzYjhTS0ZidWtCZmlRdyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZX1iApjskL9Xyfopyg9Av7MSrcchSpfWqAYoJ+qfSId4gNmoQ1YNgj2alDpRIbq9kthmyGY25+k24FrW114PEoy5C+8DPRcOCTtACi3ZywtZ4UILhwV+Suh79rWtbKqDqhBQwxM=").unwrap()).unwrap(); - let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAKyFZPY4+/Zz/JLrhbWr5ffCm9qmCkoQMpho++bFUd2iAUwOMmeNHuxq2hS4PvO1uivs9exQGefW2wNQAt7tRkkdAgAAAAAAAAAgCsJHAaWbb8oUlZsGdsyW3Atf3d51wBEr9HLkrBF0/UushWT2OPv2c/yS64W1q+X3wpvapgpKEDKYaPvmxVHdougDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); - let res = sig.verify_authenticator( - &IntentMessage::new(Intent::sui_transaction(), tx_data), - address, - 0, - &Default::default(), - Arc::new(VerifiedDigestCache::new_empty()), - ); - assert!(res.is_ok()); -} +// #[tokio::test] +// async fn test_passkey_fails_not_normalized_signature() { +// // crafts a particular not normalized signature, fails to verify. 
this is produced from typescript client https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example +// let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAHaTZLc0GGZ6RNYAqPC8LWZV7xHO+54zf71arV1MwFUtAcDum6pkbPZZN/iYq0zJpOxiV2wrZAnVU0bnNpOjombGAgAAAAAAAAAgAIiQFrz1abd2rNdo76dQS026yMAS1noA7FiGsggyt9V2k2S3NBhmekTWAKjwvC1mVe8RzvueM3+9Wq1dTMBVLegDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); +// let response = PasskeyResponse:: { +// user_sig_bytes: Hex::decode("02bbd02ace0bad3b32eb3a891dc5c85e56274f52695d24db41b247ec694d1531d6fe1a5bec11a8063d1eb0512e7971bfd23395c2cb8862f73049d0f78fd204c6d602276d5f3a22f3e698cdd2272a63da8bfdd9344de73312c7f7f9eca21bfc304f2e").unwrap(), +// authenticator_data: Hex::decode("49960de5880e8c687434170f6476605b8fe4aeb9a28632c7995cf3ba831d97631d00000000").unwrap(), +// client_data_json: r#"{"type":"webauthn.get","challenge":"AAAAZgUD1inhS1l9qUfZePaivu6IbIo_SxCGmYcfTwrmcFU","origin":"http://localhost:5173","crossOrigin":false}"#.to_string(), +// intent_msg: IntentMessage::new(Intent::sui_transaction(), tx_data), +// sender: SuiAddress::from_str("0x769364b73418667a44d600a8f0bc2d6655ef11cefb9e337fbd5aad5d4cc0552d").unwrap() +// }; +// let sig = GenericSignature::PasskeyAuthenticator( +// PasskeyAuthenticator::new_for_testing( +// response.authenticator_data, +// response.client_data_json, +// Signature::from_bytes(&response.user_sig_bytes).unwrap(), +// ) +// .unwrap(), +// ); + +// let res = sig.verify_authenticator( +// &response.intent_msg, +// response.sender, +// 0, +// &Default::default(), +// Arc::new(VerifiedDigestCache::new_empty()), +// ); +// let err = res.unwrap_err(); +// assert_eq!( +// err, +// SuiError::InvalidSignature { +// error: "Fails to verify".to_string() +// } +// ); +// } + +// #[tokio::test] +// async fn test_real_passkey_output() { +// // response from a real passkey authenticator created in iCloud, from typescript client: https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example +// let address = +// 
SuiAddress::from_str("0xac8564f638fbf673fc92eb85b5abe5f7c29bdaa60a4a10329868fbe6c551dda2") +// .unwrap(); +// let sig = GenericSignature::from_bytes(&Base64::decode("BiVJlg3liA6MaHQ0Fw9kdmBbj+SuuaKGMseZXPO6gx2XYx0AAAAAigF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCJjaGFsbGVuZ2UiOiJBQUFBdF9taklCMXZiVnBZTTZXVjZZX29peDZKOGFOXzlzYjhTS0ZidWtCZmlRdyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZX1iApjskL9Xyfopyg9Av7MSrcchSpfWqAYoJ+qfSId4gNmoQ1YNgj2alDpRIbq9kthmyGY25+k24FrW114PEoy5C+8DPRcOCTtACi3ZywtZ4UILhwV+Suh79rWtbKqDqhBQwxM=").unwrap()).unwrap(); +// let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAKyFZPY4+/Zz/JLrhbWr5ffCm9qmCkoQMpho++bFUd2iAUwOMmeNHuxq2hS4PvO1uivs9exQGefW2wNQAt7tRkkdAgAAAAAAAAAgCsJHAaWbb8oUlZsGdsyW3Atf3d51wBEr9HLkrBF0/UushWT2OPv2c/yS64W1q+X3wpvapgpKEDKYaPvmxVHdougDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); +// let res = sig.verify_authenticator( +// &IntentMessage::new(Intent::sui_transaction(), tx_data), +// address, +// 0, +// &Default::default(), +// Arc::new(VerifiedDigestCache::new_empty()), +// ); +// assert!(res.is_ok()); +// } diff --git a/crates/sui/Cargo.toml b/crates/sui/Cargo.toml index 5d19ba411cc3c..9078ceac6b6dd 100644 --- a/crates/sui/Cargo.toml +++ b/crates/sui/Cargo.toml @@ -20,6 +20,7 @@ bin-version.workspace = true bip32.workspace = true camino.workspace = true clap.workspace = true +codespan-reporting.workspace = true datatest-stable.workspace = true futures.workspace = true http.workspace = true @@ -56,8 +57,8 @@ sui-cluster-test.workspace = true sui-execution = { path = "../../sui-execution" } sui-faucet.workspace = true sui-swarm-config.workspace = true -sui-graphql-rpc = {workspace = true } -sui-indexer = { workspace = true } +sui-graphql-rpc.workspace = true +sui-indexer.workspace = true sui-genesis-builder.workspace = true sui-types.workspace = true sui-json.workspace = true @@ -74,6 +75,7 @@ shared-crypto.workspace = true sui-replay.workspace = true sui-transaction-builder.workspace = true 
move-binary-format.workspace = true +move-bytecode-source-map.workspace = true test-cluster.workspace = true fastcrypto.workspace = true @@ -92,9 +94,11 @@ move-analyzer.workspace = true move-bytecode-verifier-meter.workspace = true move-core-types.workspace = true move-package.workspace = true +move-compiler.workspace = true csv.workspace = true move-vm-profiler.workspace = true move-vm-config.workspace = true +move-ir-types.workspace = true move-command-line-common.workspace = true [target.'cfg(not(target_env = "msvc"))'.dependencies] @@ -129,7 +133,7 @@ name = "ptb_files_tests" harness = false [features] -gas-profiler = [ - "sui-types/gas-profiler", - "sui-execution/gas-profiler", +tracing = [ + "sui-types/tracing", + "sui-execution/tracing", ] diff --git a/crates/sui/src/client_commands.rs b/crates/sui/src/client_commands.rs index 9f97b95c7b0f0..5d77f53690c8e 100644 --- a/crates/sui/src/client_commands.rs +++ b/crates/sui/src/client_commands.rs @@ -42,9 +42,9 @@ use sui_source_validation::{BytecodeSourceVerifier, ValidationMode}; use shared_crypto::intent::Intent; use sui_json::SuiJsonValue; use sui_json_rpc_types::{ - Coin, DryRunTransactionBlockResponse, DynamicFieldPage, SuiCoinMetadata, SuiData, - SuiExecutionStatus, SuiObjectData, SuiObjectDataOptions, SuiObjectResponse, - SuiObjectResponseQuery, SuiParsedData, SuiProtocolConfigValue, SuiRawData, + Coin, DevInspectArgs, DevInspectResults, DryRunTransactionBlockResponse, DynamicFieldPage, + SuiCoinMetadata, SuiData, SuiExecutionStatus, SuiObjectData, SuiObjectDataOptions, + SuiObjectResponse, SuiObjectResponseQuery, SuiParsedData, SuiProtocolConfigValue, SuiRawData, SuiTransactionBlockEffects, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; @@ -76,6 +76,7 @@ use sui_types::{ object::Owner, parse_sui_type_tag, signature::GenericSignature, + sui_serde, transaction::{ SenderSignedData, Transaction, TransactionData, TransactionDataAPI, TransactionKind, }, @@ 
-593,6 +594,9 @@ pub struct Opts { /// Perform a dry run of the transaction, without executing it. #[arg(long)] pub dry_run: bool, + /// Perform a dev inspect + #[arg(long)] + pub dev_inspect: bool, /// Instead of executing the transaction, serialize the bcs bytes of the unsigned transaction data /// (TransactionData) using base64 encoding, and print out the string . The string can /// be used to execute transaction with `sui client execute-signed-tx --tx-bytes `. @@ -623,6 +627,7 @@ impl Opts { Self { gas_budget: Some(gas_budget), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: false, } @@ -633,6 +638,7 @@ impl Opts { Self { gas_budget: Some(gas_budget), dry_run: true, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: false, } @@ -673,10 +679,10 @@ impl SuiClientCommands { tx_digest, profile_output, } => { - move_vm_profiler::gas_profiler_feature_disabled! { + move_vm_profiler::tracing_feature_disabled! 
{ bail!( - "gas-profiler feature is not enabled, rebuild or reinstall with \ - --features gas-profiler" + "tracing feature is not enabled, rebuild or reinstall with \ + --features tracing" ); }; @@ -908,7 +914,7 @@ impl SuiClientCommands { previous_id, )?; } - let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy) = + let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy, _) = upgrade_result?; let tx_kind = client @@ -1685,7 +1691,17 @@ pub(crate) async fn upgrade_package( with_unpublished_dependencies: bool, skip_dependency_verification: bool, env_alias: Option, -) -> Result<(ObjectID, Vec>, PackageDependencies, [u8; 32], u8), anyhow::Error> { +) -> Result< + ( + ObjectID, + Vec>, + PackageDependencies, + [u8; 32], + u8, + CompiledPackage, + ), + anyhow::Error, +> { let (dependencies, compiled_modules, compiled_package, package_id) = compile_package( read_api, build_config, @@ -1752,6 +1768,7 @@ pub(crate) async fn upgrade_package( dependencies, package_digest, upgrade_policy, + compiled_package, )) } @@ -2215,6 +2232,9 @@ impl Display for SuiClientCommandResult { SuiClientCommandResult::DryRun(response) => { writeln!(f, "{}", Pretty(response))?; } + SuiClientCommandResult::DevInspect(response) => { + writeln!(f, "{}", Pretty(response))?; + } } write!(f, "{}", writer.trim_end_matches('\n')) } @@ -2317,6 +2337,7 @@ impl SuiClientCommandResult { | SuiClientCommandResult::Balance(_, _) | SuiClientCommandResult::ChainIdentifier(_) | SuiClientCommandResult::DynamicFieldQuery(_) + | SuiClientCommandResult::DevInspect(_) | SuiClientCommandResult::Envs(_, _) | SuiClientCommandResult::Gas(_) | SuiClientCommandResult::NewAddress(_) @@ -2466,6 +2487,7 @@ pub enum SuiClientCommandResult { ChainIdentifier(String), DynamicFieldQuery(DynamicFieldPage), DryRun(DryRunTransactionBlockResponse), + DevInspect(DevInspectResults), Envs(Vec, Option), Gas(Vec), NewAddress(NewAddressOutput), @@ -2788,8 +2810,15 @@ pub(crate) async fn 
dry_run_or_execute_or_serialize( gas: Option, opts: Opts, ) -> Result { - let (dry_run, gas_budget, serialize_unsigned_transaction, serialize_signed_transaction) = ( + let ( + dry_run, + dev_inspect, + gas_budget, + serialize_unsigned_transaction, + serialize_signed_transaction, + ) = ( opts.dry_run, + opts.dev_inspect, opts.gas_budget, opts.serialize_unsigned_transaction, opts.serialize_signed_transaction, @@ -2804,12 +2833,27 @@ pub(crate) async fn dry_run_or_execute_or_serialize( context.get_reference_gas_price().await? }; + let client = context.get_client().await?; + + if dev_inspect { + return execute_dev_inspect( + context, + signer, + tx_kind, + gas_budget, + gas_price, + gas_payment, + None, + None, + ) + .await; + } + let gas = match gas_payment { Some(obj_ids) => Some(obj_ids), None => gas.map(|x| vec![x]), }; - let client = context.get_client().await?; if dry_run { return execute_dry_run( context, @@ -2894,6 +2938,49 @@ pub(crate) async fn dry_run_or_execute_or_serialize( } } +async fn execute_dev_inspect( + context: &mut WalletContext, + signer: SuiAddress, + tx_kind: TransactionKind, + gas_budget: Option, + gas_price: u64, + gas_payment: Option>, + gas_sponsor: Option, + skip_checks: Option, +) -> Result { + let client = context.get_client().await?; + let gas_budget = gas_budget.map(sui_serde::BigInt::from); + let mut gas_objs = vec![]; + let gas_objects = if let Some(gas_payment) = gas_payment { + for o in gas_payment.iter() { + let obj_ref = context.get_object_ref(*o).await?; + gas_objs.push(obj_ref); + } + Some(gas_objs) + } else { + None + }; + + let dev_inspect_args = DevInspectArgs { + gas_sponsor, + gas_budget, + gas_objects, + skip_checks, + show_raw_txn_data_and_effects: None, + }; + let dev_inspect_result = client + .read_api() + .dev_inspect_transaction_block( + signer, + tx_kind, + Some(sui_serde::BigInt::from(gas_price)), + None, + Some(dev_inspect_args), + ) + .await?; + Ok(SuiClientCommandResult::DevInspect(dev_inspect_result)) +} + 
pub(crate) async fn prerender_clever_errors( effects: &mut SuiTransactionBlockEffects, read_api: &ReadApi, diff --git a/crates/sui/src/client_ptb/ast.rs b/crates/sui/src/client_ptb/ast.rs index effa26e0a9603..94eb9c23a5dc9 100644 --- a/crates/sui/src/client_ptb/ast.rs +++ b/crates/sui/src/client_ptb/ast.rs @@ -3,7 +3,7 @@ use std::fmt; -use move_command_line_common::{ +use move_core_types::parsing::{ address::{NumericalAddress, ParsedAddress}, types::{ParsedFqName, ParsedModuleId, ParsedStructType, ParsedType}, }; @@ -35,6 +35,7 @@ pub const SUMMARY: &str = "summary"; pub const GAS_COIN: &str = "gas-coin"; pub const JSON: &str = "json"; pub const DRY_RUN: &str = "dry-run"; +pub const DEV_INSPECT: &str = "dev-inspect"; pub const SERIALIZE_UNSIGNED: &str = "serialize-unsigned-transaction"; pub const SERIALIZE_SIGNED: &str = "serialize-signed-transaction"; @@ -74,6 +75,7 @@ pub const COMMANDS: &[&str] = &[ GAS_COIN, JSON, DRY_RUN, + DEV_INSPECT, SERIALIZE_UNSIGNED, SERIALIZE_SIGNED, ]; @@ -111,6 +113,7 @@ pub struct ProgramMetadata { pub gas_object_id: Option>, pub json_set: bool, pub dry_run_set: bool, + pub dev_inspect_set: bool, pub gas_budget: Option>, } diff --git a/crates/sui/src/client_ptb/builder.rs b/crates/sui/src/client_ptb/builder.rs index d0bdf2efbb0fd..8df2dbf90dd7e 100644 --- a/crates/sui/src/client_ptb/builder.rs +++ b/crates/sui/src/client_ptb/builder.rs @@ -16,7 +16,7 @@ use miette::Severity; use move_binary_format::{ binary_config::BinaryConfig, file_format::SignatureToken, CompiledModule, }; -use move_command_line_common::{ +use move_core_types::parsing::{ address::{NumericalAddress, ParsedAddress}, parser::NumberFormat, }; @@ -1029,7 +1029,7 @@ impl<'a> PTBBuilder<'a> { ) .map_err(|e| err!(path_loc, "{e}"))?; } - let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy) = + let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy, _) = upgrade_result.map_err(|e| err!(path_loc, "{e}"))?; let upgrade_arg 
= self diff --git a/crates/sui/src/client_ptb/parser.rs b/crates/sui/src/client_ptb/parser.rs index 58766866848d7..e9e769a39cfa3 100644 --- a/crates/sui/src/client_ptb/parser.rs +++ b/crates/sui/src/client_ptb/parser.rs @@ -3,7 +3,7 @@ use std::iter::Peekable; -use move_command_line_common::{ +use move_core_types::parsing::{ address::{NumericalAddress, ParsedAddress}, parser::{parse_u128, parse_u16, parse_u256, parse_u32, parse_u64, parse_u8}, types::{ParsedFqName, ParsedModuleId, ParsedStructType, ParsedType}, @@ -41,6 +41,7 @@ struct ProgramParsingState { serialize_signed_set: bool, json_set: bool, dry_run_set: bool, + dev_inspect_set: bool, gas_object_id: Option>, gas_budget: Option>, } @@ -63,6 +64,7 @@ impl<'a, I: Iterator> ProgramParser<'a, I> { serialize_signed_set: false, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_object_id: None, gas_budget: None, }, @@ -110,6 +112,7 @@ impl<'a, I: Iterator> ProgramParser<'a, I> { L(T::Command, A::SUMMARY) => flag!(summary_set), L(T::Command, A::JSON) => flag!(json_set), L(T::Command, A::DRY_RUN) => flag!(dry_run_set), + L(T::Command, A::DEV_INSPECT) => flag!(dev_inspect_set), L(T::Command, A::PREVIEW) => flag!(preview_set), L(T::Command, A::WARN_SHADOWS) => flag!(warn_shadows_set), L(T::Command, A::GAS_COIN) => { @@ -207,6 +210,7 @@ impl<'a, I: Iterator> ProgramParser<'a, I> { gas_object_id: self.state.gas_object_id, json_set: self.state.json_set, dry_run_set: self.state.dry_run_set, + dev_inspect_set: self.state.dev_inspect_set, gas_budget: self.state.gas_budget, }, )) diff --git a/crates/sui/src/client_ptb/ptb.rs b/crates/sui/src/client_ptb/ptb.rs index 2796529de4e69..4a05195a11e97 100644 --- a/crates/sui/src/client_ptb/ptb.rs +++ b/crates/sui/src/client_ptb/ptb.rs @@ -150,6 +150,7 @@ impl PTB { gas: program_metadata.gas_object_id.map(|x| x.value), rest: Opts { dry_run: program_metadata.dry_run_set, + dev_inspect: program_metadata.dev_inspect_set, gas_budget: program_metadata.gas_budget.map(|x| 
x.value), serialize_unsigned_transaction: program_metadata.serialize_unsigned_set, serialize_signed_transaction: program_metadata.serialize_signed_set, @@ -295,6 +296,10 @@ pub fn ptb_description() -> clap::Command { --"dry-run" "Perform a dry run of the PTB instead of executing it." )) + .arg(arg!( + --"dev-inspect" + "Perform a dev-inspect of the PTB instead of executing it." + )) .arg(arg!( --"gas-coin" ... "The object ID of the gas coin to use. If not specified, it will try to use the first \ diff --git a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap index d90e362c4bae7..89e72eb85b680 100644 --- a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap +++ b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap @@ -32,6 +32,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -72,6 +73,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -121,6 +123,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -189,6 +192,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -248,6 +252,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -322,6 +327,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -390,6 +396,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, 
+ dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -449,6 +456,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -517,6 +525,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -576,6 +585,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -623,6 +633,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -689,6 +700,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -802,6 +814,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -892,6 +905,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -982,6 +996,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1023,6 +1038,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1089,6 +1105,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1140,6 +1157,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1191,6 +1209,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: 
false, gas_budget: Some( Spanned { span: Span { @@ -1276,6 +1295,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1333,6 +1353,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1396,6 +1417,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1428,6 +1450,7 @@ expression: parsed ), json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1452,6 +1475,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1476,6 +1500,7 @@ expression: parsed gas_object_id: None, json_set: true, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1500,6 +1525,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1524,6 +1550,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { diff --git a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap index fef41e7a46396..fe9c30cd71280 100644 --- a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap +++ b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap @@ -32,6 +32,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -72,6 +73,7 @@ expression: parsed gas_object_id: None, 
json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { diff --git a/crates/sui/src/displays/dev_inspect.rs b/crates/sui/src/displays/dev_inspect.rs new file mode 100644 index 0000000000000..a0a83fb4508e0 --- /dev/null +++ b/crates/sui/src/displays/dev_inspect.rs @@ -0,0 +1,46 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::displays::Pretty; +use std::fmt::{Display, Formatter}; +use sui_json_rpc_types::{DevInspectResults, SuiTransactionBlockEffectsAPI}; + +impl<'a> Display for Pretty<'a, DevInspectResults> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let Pretty(response) = self; + + if let Some(error) = &response.error { + writeln!(f, "Dev inspect failed: {}", error)?; + return Ok(()); + } + + writeln!( + f, + "Dev inspect completed, execution status: {}", + response.effects.status() + )?; + + writeln!(f, "{}", response.effects)?; + write!(f, "{}", response.events)?; + + if let Some(results) = &response.results { + for result in results { + writeln!(f, "Execution Result")?; + writeln!(f, " Mutable Reference Outputs")?; + for m in result.mutable_reference_outputs.iter() { + writeln!(f, " Sui Argument: {}", m.0)?; + writeln!(f, " Sui TypeTag: {:?}", m.2)?; + writeln!(f, " Bytes: {:?}", m.1)?; + } + + writeln!(f, " Return values")?; + for val in result.return_values.iter() { + writeln!(f, " Sui TypeTag: {:?}", val.1)?; + writeln!(f, " Bytes: {:?}", val.0)?; + } + } + } + + Ok(()) + } +} diff --git a/crates/sui/src/displays/mod.rs b/crates/sui/src/displays/mod.rs index ef12a0ed23e77..a70ba885b9ebe 100644 --- a/crates/sui/src/displays/mod.rs +++ b/crates/sui/src/displays/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +mod dev_inspect; mod dry_run_tx_block; mod gas_cost_summary; mod ptb_preview; diff --git a/crates/sui/src/keytool.rs b/crates/sui/src/keytool.rs index 08e4aabefee40..df1fc5b3087e0 100644 --- a/crates/sui/src/keytool.rs +++ b/crates/sui/src/keytool.rs @@ -629,8 +629,15 @@ impl KeyToolCommand { match SuiKeyPair::decode(&input_string) { Ok(skp) => { info!("Importing Bech32 encoded private key to keystore"); - let key = Key::from(&skp); - keystore.add_key(alias, skp)?; + let mut key = Key::from(&skp); + keystore.add_key(alias.clone(), skp)?; + + let alias = match alias { + Some(x) => x, + None => keystore.get_alias_by_address(&key.sui_address)?, + }; + + key.alias = Some(alias); CommandOutput::Import(key) } Err(_) => { @@ -639,10 +646,17 @@ impl KeyToolCommand { &input_string, key_scheme, derivation_path, - alias, + alias.clone(), )?; let skp = keystore.get_key(&sui_address)?; - let key = Key::from(skp); + let mut key = Key::from(skp); + + let alias = match alias { + Some(x) => x, + None => keystore.get_alias_by_address(&key.sui_address)?, + }; + + key.alias = Some(alias); CommandOutput::Import(key) } } diff --git a/crates/sui/src/sui_commands.rs b/crates/sui/src/sui_commands.rs index 392ba66fa7549..ea23510d24c2e 100644 --- a/crates/sui/src/sui_commands.rs +++ b/crates/sui/src/sui_commands.rs @@ -720,6 +720,8 @@ async fn start( None, Some(data_ingestion_path.clone()), None, + None, /* start_checkpoint */ + None, /* end_checkpoint */ ) .await; info!("Indexer started in writer mode"); diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/Move.toml b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/Move.toml new file mode 100644 index 0000000000000..39cc2752ab46d --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "upgrades" +edition = "2024.beta" # edition = "legacy" to use legacy 
(pre-2024) Move + +[addresses] +upgrades = "0x0" diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/enum.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/enum.move new file mode 100644 index 0000000000000..d0c55d3ba99a4 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/enum.move @@ -0,0 +1,10 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::enum_ { + public enum EnumToBeRemoved { + A, + B + } +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/func.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/func.move new file mode 100644 index 0000000000000..bb83aedef2f57 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/func.move @@ -0,0 +1,9 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::func_ { + public fun fun_to_be_removed(): u64 { + 0 + } +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/struct.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/struct.move new file mode 100644 index 0000000000000..f75e460159e18 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/struct.move @@ -0,0 +1,10 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +#[allow(unused_field)] +module upgrades::struct_ { + public struct StructToBeRemoved { + b: u64 + } +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/Move.toml b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/Move.toml new file mode 100644 index 0000000000000..70f14c105d35c --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "upgrades" +edition = "2024.beta" # edition = "legacy" to use legacy (pre-2024) Move + +[addresses] +upgrades = "0x0" \ No newline at end of file diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move new file mode 100644 index 0000000000000..8ba135d6bb144 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move @@ -0,0 +1,7 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::enum_ { + //public enum EnumToBeRemoved {} +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/func.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/func.move new file mode 100644 index 0000000000000..ab40afde72c86 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/func.move @@ -0,0 +1,7 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +module upgrades::func_ { + // public fun fun_to_be_removed(): u64 {} +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move new file mode 100644 index 0000000000000..6d69ddc71b227 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move @@ -0,0 +1,7 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::struct_ { + // public struct StructToBeRemoved {} +} + diff --git a/crates/sui/src/unit_tests/profiler_tests.rs b/crates/sui/src/unit_tests/profiler_tests.rs index c4fcbaa094d94..5d2596e8c5453 100644 --- a/crates/sui/src/unit_tests/profiler_tests.rs +++ b/crates/sui/src/unit_tests/profiler_tests.rs @@ -1,14 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -/// This test exists to make sure that the feature gating for all the code under `gas-profiler` -/// remains fully connected such that if and only if we enable the feature here, the `gas-profiler` +/// This test exists to make sure that the feature gating for all the code under `tracing` +/// remains fully connected such that if and only if we enable the feature here, the `tracing` /// feature gets enabled anywhere. /// /// If this test fails, check for the following. /// -/// Any crate that has code decorated with #[cfg(feature = "gas-profiler")] needs to have -/// a feature declared in its Cargo.toml named `gas-profiler`. If moving / refactoring code with +/// Any crate that has code decorated with #[cfg(feature = "tracing")] needs to have +/// a feature declared in its Cargo.toml named `tracing`. If moving / refactoring code with /// this decorator from a crate to a different crate, it is likely needed to copy over some of the /// feature declaration defined in the original crate. 
Also ensure we do not include the feature in /// any dependency of the dependencies section so that the feature won't get partially enabled as @@ -21,18 +21,18 @@ /// defined in all the other crates that the decorated code in the current crate depends on. /// /// Note this crate will always have the feature enabled in testing due to the addition of -/// `sui = { path = ".", features = ["gas-profiler"] }` to our dev-dependencies. +/// `sui = { path = ".", features = ["tracing"] }` to our dev-dependencies. -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[test] fn test_macro_shows_feature_enabled() { - move_vm_profiler::gas_profiler_feature_disabled! { + move_vm_profiler::tracing_feature_disabled! { panic!("gas profile feature graph became disconnected"); } } #[ignore] -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[tokio::test(flavor = "multi_thread")] async fn test_profiler() { use std::fs; diff --git a/crates/sui/src/unit_tests/snapshots/sui__upgrade_compatibility__upgrade_compatibility_tests__declarations_missing.snap b/crates/sui/src/unit_tests/snapshots/sui__upgrade_compatibility__upgrade_compatibility_tests__declarations_missing.snap new file mode 100644 index 0000000000000..e47d7459c9250 --- /dev/null +++ b/crates/sui/src/unit_tests/snapshots/sui__upgrade_compatibility__upgrade_compatibility_tests__declarations_missing.snap @@ -0,0 +1,36 @@ +--- +source: crates/sui/src/unit_tests/upgrade_compatibility_tests.rs +expression: normalize_path(err.to_string()) +--- +error[Compatibility E01001]: missing public declaration + ┌─ /fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move:4:18 + │ +4 │ module upgrades::enum_ { + │ ^^^^^ enum 'EnumToBeRemoved' is missing + │ + = enum is missing expected enum 'EnumToBeRemoved', but found none + = enums are part of a module's public interface and cannot be removed or changed during an upgrade + = add missing enum 'EnumToBeRemoved' back to the module 'enum_'. 
+ +error[Compatibility E01001]: missing public declaration + ┌─ /fixtures/upgrade_errors/declarations_missing_v2/sources/func.move:4:18 + │ +4 │ module upgrades::func_ { + │ ^^^^^ public function 'fun_to_be_removed' is missing + │ + = public function is missing expected public function 'fun_to_be_removed', but found none + = public functions are part of a module's public interface and cannot be removed or changed during an upgrade + = add missing public function 'fun_to_be_removed' back to the module 'func_'. + +error[Compatibility E01001]: missing public declaration + ┌─ /fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move:4:18 + │ +4 │ module upgrades::struct_ { + │ ^^^^^^^ struct 'StructToBeRemoved' is missing + │ + = struct is missing expected struct 'StructToBeRemoved', but found none + = structs are part of a module's public interface and cannot be removed or changed during an upgrade + = add missing struct 'StructToBeRemoved' back to the module 'struct_'. + + +Upgrade failed, this package requires changes to be compatible with the existing package. Its upgrade policy is set to 'Compatible'. 
diff --git a/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs b/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs index 1f0e38eae9995..8e6d002c712e0 100644 --- a/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs +++ b/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs @@ -6,26 +6,25 @@ use insta::assert_snapshot; use move_binary_format::CompiledModule; use std::path::PathBuf; use sui_move_build::BuildConfig; +use sui_move_build::CompiledPackage; #[test] +#[should_panic] fn test_all_fail() { - let (pkg_v1, pkg_v2) = get_packages("all"); + let (mods_v1, pkg_v2) = get_packages("all"); - let result = compare_packages(pkg_v1, pkg_v2); - assert!(result.is_err()); - let err = result.unwrap_err(); - - assert_snapshot!(err.to_string()); + // panics: Not all errors are implemented yet + compare_packages(mods_v1, pkg_v2).unwrap(); } #[test] -fn test_struct_missing() { - let (pkg_v1, pkg_v2) = get_packages("struct_missing"); +fn test_declarations_missing() { + let (pkg_v1, pkg_v2) = get_packages("declarations_missing"); let result = compare_packages(pkg_v1, pkg_v2); assert!(result.is_err()); let err = result.unwrap_err(); - assert_snapshot!(err.to_string()); + assert_snapshot!(normalize_path(err.to_string())); } #[test] @@ -42,12 +41,12 @@ fn test_entry_linking_ok() { assert!(compare_packages(pkg_v1, pkg_v2).is_ok()); } -fn get_packages(name: &str) -> (Vec, Vec) { +fn get_packages(name: &str) -> (Vec, CompiledPackage) { let mut path: PathBuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); path.push("src/unit_tests/fixtures/upgrade_errors/"); path.push(format!("{}_v1", name)); - let pkg_v1 = BuildConfig::new_for_testing() + let mods_v1 = BuildConfig::new_for_testing() .build(&path) .unwrap() .into_modules(); @@ -56,10 +55,18 @@ fn get_packages(name: &str) -> (Vec, Vec) { path.push("src/unit_tests/fixtures/upgrade_errors/"); path.push(format!("{}_v2", name)); - let pkg_v2 = BuildConfig::new_for_testing() - .build(&path) - .unwrap() - .into_modules(); + 
let pkg_v2 = BuildConfig::new_for_testing().build(&path).unwrap(); + + (mods_v1, pkg_v2) +} - (pkg_v1, pkg_v2) +/// Snapshots will differ on each machine, normalize to prevent test failures +fn normalize_path(err_string: String) -> String { + //test + let re = regex::Regex::new(r"^ ┌─ .*(\/fixtures\/.*\.move:\d+:\d+)$").unwrap(); + err_string + .lines() + .map(|line| re.replace(line, " ┌─ $1").into_owned()) + .collect::>() + .join("\n") } diff --git a/crates/sui/src/upgrade_compatibility.rs b/crates/sui/src/upgrade_compatibility.rs index bad3fca2c1a5d..96d5a64d01947 100644 --- a/crates/sui/src/upgrade_compatibility.rs +++ b/crates/sui/src/upgrade_compatibility.rs @@ -5,8 +5,12 @@ #[cfg(test)] mod upgrade_compatibility_tests; +use std::collections::{HashMap, HashSet}; +use std::fs; +use std::io::{stdout, IsTerminal}; +use std::sync::Arc; + use anyhow::{anyhow, Context, Error}; -use std::collections::HashMap; use move_binary_format::{ compatibility::Compatibility, @@ -15,19 +19,35 @@ use move_binary_format::{ normalized::{Enum, Function, Module, Struct}, CompiledModule, }; +use move_command_line_common::files::FileHash; +use move_compiler::diagnostics::codes::DiagnosticInfo; +use move_compiler::{ + diag, + diagnostics::{ + codes::{custom, Severity}, + report_diagnostics_to_buffer, Diagnostic, Diagnostics, + }, + shared::files::{FileName, FilesSourceText}, +}; use move_core_types::{ account_address::AccountAddress, identifier::{IdentStr, Identifier}, }; +use move_ir_types::location::Loc; +use move_package::compilation::compiled_package::CompiledUnitWithSource; use sui_json_rpc_types::{SuiObjectDataOptions, SuiRawData}; +use sui_move_build::CompiledPackage; use sui_protocol_config::ProtocolConfig; use sui_sdk::SuiClient; use sui_types::{base_types::ObjectID, execution_config_utils::to_binary_config}; /// Errors that can occur during upgrade compatibility checks. 
/// one-to-one related to the underlying trait functions see: [`CompatibilityMode`] -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum UpgradeCompatibilityModeError { + ModuleMissing { + name: Identifier, + }, StructMissing { name: Identifier, old_struct: Struct, @@ -102,8 +122,11 @@ pub(crate) enum UpgradeCompatibilityModeError { } impl UpgradeCompatibilityModeError { + /// check if the error breaks compatibility for a given [`Compatibility`] fn breaks_compatibility(&self, compatability: &Compatibility) -> bool { match self { + UpgradeCompatibilityModeError::ModuleMissing { .. } => true, + UpgradeCompatibilityModeError::StructAbilityMismatch { .. } | UpgradeCompatibilityModeError::StructTypeParamMismatch { .. } | UpgradeCompatibilityModeError::EnumAbilityMismatch { .. } @@ -149,7 +172,7 @@ pub(crate) struct CliCompatibilityMode { } impl CompatibilityMode for CliCompatibilityMode { - type Error = anyhow::Error; + type Error = Vec; // ignored, address is not populated pre-tx fn module_id_mismatch( &mut self, @@ -323,37 +346,78 @@ impl CompatibilityMode for CliCompatibilityMode { }); } - fn finish(&self, compatability: &Compatibility) -> Result<(), Self::Error> { - let errors: Vec = self + fn finish(self, compatability: &Compatibility) -> Result<(), Self::Error> { + let errors: Vec = self .errors - .iter() + .into_iter() .filter(|e| e.breaks_compatibility(compatability)) - .map(|e| format!("- {:?}", e)) .collect(); if !errors.is_empty() { - return Err(anyhow!( - "Upgrade compatibility check failed with the following errors:\n{}", - errors.join("\n") - )); + return Err(errors); } Ok(()) } } +const COMPATIBILITY_PREFIX: &str = "Compatibility "; +/// Generates an enum Category along with individual enum for each individual category +/// and impls into diagnostic info for each category. +macro_rules! upgrade_codes { + ($($cat:ident: [ + $($code:ident: { msg: $code_msg:literal }),* $(,)? + ]),* $(,)?) 
=> { + #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, PartialOrd, Ord)] + #[repr(u8)] + pub enum Category { + #[allow(dead_code)] + ZeroPlaceholder, + $($cat,)* + } + + $( + #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] + #[repr(u8)] + pub enum $cat { + #[allow(dead_code)] + ZeroPlaceholder, + $($code,)* + } + + // impl into diagnostic info + impl Into for $cat { + fn into(self) -> DiagnosticInfo { + match self { + Self::ZeroPlaceholder => + panic!("do not use placeholder error code"), + $(Self::$code => custom( + COMPATIBILITY_PREFIX, + Severity::NonblockingError, + Category::$cat as u8, + self as u8, + $code_msg, + ),)* + } + } + } + )* + }; +} + +// Used to generate diagnostics primary labels for upgrade compatibility errors. +upgrade_codes!( + Declarations: [ + PublicMissing: { msg: "missing public declaration" }, + ], +); + /// Check the upgrade compatibility of a new package with an existing on-chain package. pub(crate) async fn check_compatibility( client: &SuiClient, package_id: ObjectID, - compiled_modules: &[Vec], + new_package: CompiledPackage, protocol_config: ProtocolConfig, ) -> Result<(), Error> { - let new_modules = compiled_modules - .iter() - .map(|b| CompiledModule::deserialize_with_config(b, &to_binary_config(&protocol_config))) - .collect::, _>>() - .context("Unable to to deserialize compiled module")?; - let existing_obj_read = client .read_api() .get_object_with_options(package_id, SuiObjectDataOptions::new().with_bcs()) @@ -378,36 +442,156 @@ pub(crate) async fn check_compatibility( .collect::, _>>() .context("Unable to get existing package")?; - compare_packages(existing_modules, new_modules) + compare_packages(existing_modules, new_package) } +/// Collect all the errors into a single error message. 
fn compare_packages( existing_modules: Vec, - new_modules: Vec, + new_package: CompiledPackage, ) -> Result<(), Error> { // create a map from the new modules - let new_modules_map: HashMap = new_modules - .iter() + let new_modules_map: HashMap = new_package + .get_modules() .map(|m| (m.self_id().name().to_owned(), m.clone())) .collect(); - // for each existing find the new one run compatibility check - for existing_module in existing_modules { - let name = existing_module.self_id().name().to_owned(); - - // find the new module with the same name - match new_modules_map.get(&name) { - Some(new_module) => { - Compatibility::upgrade_check().check_with_mode::( - &Module::new(&existing_module), - &Module::new(new_module), - )?; - } - None => { - Err(anyhow!("Module {} is missing from the package", name))?; + let errors: Vec<(Identifier, UpgradeCompatibilityModeError)> = existing_modules + .iter() + .flat_map(|existing_module| { + let name = existing_module.self_id().name().to_owned(); + + // find the new module with the same name + match new_modules_map.get(&name) { + Some(new_module) => { + let compatible = Compatibility::upgrade_check() + .check_with_mode::( + &Module::new(existing_module), + &Module::new(new_module), + ); + if let Err(errors) = compatible { + errors.into_iter().map(|e| (name.to_owned(), e)).collect() + } else { + vec![] + } + } + None => vec![( + name.clone(), + UpgradeCompatibilityModeError::ModuleMissing { name }, + )], } + }) + .collect(); + + if errors.is_empty() { + return Ok(()); + } + + let mut files: FilesSourceText = HashMap::new(); + let mut file_set = HashSet::new(); + + let mut diags = Diagnostics::new(); + + for (name, err) in errors { + let compiled_unit_with_source = new_package + .package + .get_module_by_name_from_root(name.as_str()) + .context("Unable to get module")?; + + if !file_set.contains(&compiled_unit_with_source.source_path) { + let file_contents: Arc = + fs::read_to_string(&compiled_unit_with_source.source_path) + 
.context("Unable to read source file")? + .into(); + let file_hash = FileHash::new(&file_contents); + + files.insert( + file_hash, + ( + FileName::from(compiled_unit_with_source.source_path.to_string_lossy()), + file_contents, + ), + ); + + file_set.insert(compiled_unit_with_source.source_path.clone()); } + + diags.add(diag_from_error(&err, compiled_unit_with_source)) + } + + Err(anyhow!( + "{}\nUpgrade failed, this package requires changes to be compatible with the existing package. Its upgrade policy is set to 'Compatible'.", + String::from_utf8(report_diagnostics_to_buffer(&files.into(), diags, stdout().is_terminal())).context("Unable to convert buffer to string")? + )) +} + +/// Convert an error to a diagnostic using the specific error type's function. +fn diag_from_error( + error: &UpgradeCompatibilityModeError, + compiled_unit_with_source: &CompiledUnitWithSource, +) -> Diagnostic { + match error { + UpgradeCompatibilityModeError::StructMissing { name, .. } => missing_definition_diag( + Declarations::PublicMissing, + "struct", + &name, + compiled_unit_with_source, + ), + UpgradeCompatibilityModeError::EnumMissing { name, .. } => missing_definition_diag( + Declarations::PublicMissing, + "enum", + &name, + compiled_unit_with_source, + ), + UpgradeCompatibilityModeError::FunctionMissingPublic { name, .. } => { + missing_definition_diag( + Declarations::PublicMissing, + "public function", + &name, + compiled_unit_with_source, + ) + } + UpgradeCompatibilityModeError::FunctionMissingEntry { name, .. } => { + missing_definition_diag( + Declarations::PublicMissing, + "entry function", + &name, + compiled_unit_with_source, + ) + } + _ => todo!("Implement diag_from_error for {:?}", error), } +} - Ok(()) +/// Return a diagnostic for a missing definition. 
+fn missing_definition_diag( + error: impl Into, + declaration_kind: &str, + identifier_name: &Identifier, + compiled_unit_with_source: &CompiledUnitWithSource, +) -> Diagnostic { + let module_name = compiled_unit_with_source.unit.name; + let loc = compiled_unit_with_source + .unit + .source_map + .definition_location; + + Diagnostic::new( + error, + (loc, format!( + "{declaration_kind} '{identifier_name}' is missing", + declaration_kind = declaration_kind, + identifier_name = identifier_name, + )), + std::iter::empty::<(Loc, String)>(), + vec![format!( + "{declaration_kind} is missing expected {declaration_kind} '{identifier_name}', but found none", + ), + format!( + "{declaration_kind}s are part of a module's public interface and cannot be removed or changed during an upgrade", + ), + format!( + "add missing {declaration_kind} '{identifier_name}' back to the module '{module_name}'.", + )] + ) } diff --git a/crates/sui/tests/cli_tests.rs b/crates/sui/tests/cli_tests.rs index 7073cd7d76ed2..0d822a93c856c 100644 --- a/crates/sui/tests/cli_tests.rs +++ b/crates/sui/tests/cli_tests.rs @@ -2897,6 +2897,7 @@ async fn test_serialize_tx() -> Result<(), anyhow::Error> { opts: Opts { gas_budget: Some(rgp * TEST_ONLY_GAS_UNIT_FOR_TRANSFER), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: true, serialize_signed_transaction: false, }, @@ -2911,6 +2912,7 @@ async fn test_serialize_tx() -> Result<(), anyhow::Error> { opts: Opts { gas_budget: Some(rgp * TEST_ONLY_GAS_UNIT_FOR_TRANSFER), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: true, }, @@ -2926,6 +2928,7 @@ async fn test_serialize_tx() -> Result<(), anyhow::Error> { opts: Opts { gas_budget: Some(rgp * TEST_ONLY_GAS_UNIT_FOR_TRANSFER), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: true, }, @@ -3772,6 +3775,7 @@ async fn test_gas_estimation() -> Result<(), anyhow::Error> { opts: Opts { 
gas_budget: None, dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: false, }, diff --git a/crates/suiop-cli/Cargo.toml b/crates/suiop-cli/Cargo.toml index 0c39aa9b6e3a3..2f2ba91dea176 100644 --- a/crates/suiop-cli/Cargo.toml +++ b/crates/suiop-cli/Cargo.toml @@ -56,6 +56,7 @@ futures.workspace = true thiserror.workspace = true strsim = "0.11.1" futures-timer = "3.0.3" +tempfile.workspace = true [dev-dependencies] diff --git a/crates/suiop-cli/src/cli/env/mod.rs b/crates/suiop-cli/src/cli/env/mod.rs new file mode 100644 index 0000000000000..51d5b8c92ede8 --- /dev/null +++ b/crates/suiop-cli/src/cli/env/mod.rs @@ -0,0 +1,72 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::run_cmd; +use anyhow::Result; +use clap::Parser; +use inquire::Select; +use std::io::Write; +use tracing::{debug, info}; + +/// Load an environment from pulumi +/// +/// if no environment name is provided, the user will be prompted to select one from the list +#[derive(Parser, Debug)] +pub struct LoadEnvironmentArgs { + /// the optional name of the environment to load + environment_name: Option, +} + +pub fn load_environment_cmd(args: &LoadEnvironmentArgs) -> Result<()> { + setup_pulumi_environment(&args.environment_name.clone().unwrap_or_else(|| { + let output = run_cmd(vec!["pulumi", "env", "ls"], None).expect("Running pulumi env ls"); + let output_str = String::from_utf8_lossy(&output.stdout); + let options: Vec<&str> = output_str.lines().collect(); + + if options.is_empty() { + panic!("No environments found. 
Make sure you are logged into the correct pulumi org."); + } + + Select::new("Select an environment:", options) + .prompt() + .expect("Failed to select environment") + .to_owned() + })) +} + +pub fn setup_pulumi_environment(environment_name: &str) -> Result<()> { + let output = run_cmd(vec!["pulumi", "env", "open", environment_name], None)?; + let output_str = String::from_utf8_lossy(&output.stdout); + let output_json: serde_json::Value = serde_json::from_str(&output_str)?; + let env_vars = &output_json["environmentVariables"]; + // Open a file to write environment variables + let home_dir = std::env::var("HOME").expect("HOME environment variable not set"); + let suiop_dir = format!("{}/.suiop", home_dir); + std::fs::create_dir_all(&suiop_dir).expect("Failed to create .suiop directory"); + let env_file_path = format!("{}/env_vars", suiop_dir); + let mut env_file = + std::fs::File::create(&env_file_path).expect("Failed to create env_vars file"); + + if let serde_json::Value::Object(env_vars) = env_vars { + for (key, value) in env_vars { + if let Some(value_str) = value.as_str() { + writeln!(env_file, "{}={}", key, value_str)?; + info!("writing environment variable {}", key); + debug!("={}", value_str); + } else { + info!( + "Failed to set environment variable: {}. Value is not a string.", + key + ); + } + } + } else { + info!("Environment variables are not in the expected format."); + debug!("env: {:?}", output_json); + } + info!( + "finished loading environment. 
use `source {}` to load them into your shell", + env_file_path + ); + Ok(()) +} diff --git a/crates/suiop-cli/src/cli/incidents/mod.rs b/crates/suiop-cli/src/cli/incidents/mod.rs index b1d29afc2730a..c659d14ef5811 100644 --- a/crates/suiop-cli/src/cli/incidents/mod.rs +++ b/crates/suiop-cli/src/cli/incidents/mod.rs @@ -17,7 +17,7 @@ use jira::generate_follow_up_tasks; use pd::print_recent_incidents; use selection::review_recent_incidents; use std::path::PathBuf; -use tracing::debug; +use tracing::{debug, info}; #[derive(Parser, Debug, Clone)] pub struct IncidentsArgs { @@ -59,6 +59,7 @@ pub enum IncidentsAction { /// - Return the combined incident list. async fn get_incidents(limit: &usize, days: &usize) -> Result> { let current_time = Local::now(); + info!("going back {} days", days); let start_time = current_time - Duration::days(*days as i64); let slack = Slack::new().await; Ok(pd::fetch_incidents(*limit, start_time, current_time) diff --git a/crates/suiop-cli/src/cli/incidents/notion.rs b/crates/suiop-cli/src/cli/incidents/notion.rs index 96721bbe00250..0d2a83beb62fb 100644 --- a/crates/suiop-cli/src/cli/incidents/notion.rs +++ b/crates/suiop-cli/src/cli/incidents/notion.rs @@ -116,6 +116,7 @@ impl Notion { pub fn new() -> Self { let token = env::var("NOTION_API_TOKEN") .expect("Please set the NOTION_API_TOKEN environment variable"); + debug!("using notion token {}", token); let client = NotionApi::new(token.clone()).expect("Failed to create Notion API client"); Self { client, token } } @@ -145,8 +146,12 @@ impl Notion { if !response.status().is_success() { return Err(anyhow::anyhow!( - "Request failed with status: {}", - response.status() + "Request failed with status: {}, response: {}", + response.status(), + response + .text() + .await + .unwrap_or("no response text".to_owned()) )); } diff --git a/crates/suiop-cli/src/cli/incidents/pd/mod.rs b/crates/suiop-cli/src/cli/incidents/pd/mod.rs index ff96fcd4ece71..b1039f3e533ef 100644 --- 
a/crates/suiop-cli/src/cli/incidents/pd/mod.rs +++ b/crates/suiop-cli/src/cli/incidents/pd/mod.rs @@ -11,6 +11,7 @@ use reqwest::header::AUTHORIZATION; use serde::{Deserialize, Serialize}; use serde_json::Value as JsonValue; use std::env; +use tracing::debug; use super::incident::Incident; @@ -49,15 +50,18 @@ pub async fn fetch_incidents( ) -> Result> { let url = "https://api.pagerduty.com/incidents"; + let api_key = env::var("PD_API_KEY").expect("please set the PD_API_KEY env var"); + if api_key.is_empty() { + panic!("PD_API_KEY is not set"); + } + + debug!("fetching incidents from pagerduty with {}", api_key); let mut headers = HeaderMap::new(); headers.insert( AUTHORIZATION, - format!( - "Token token={}", - env::var("PD_API_KEY").expect("please set the PD_API_KEY env var") - ) - .parse() - .expect("header parsing"), + format!("Token token={}", api_key) + .parse() + .expect("header parsing"), ); headers.insert( ACCEPT, diff --git a/crates/suiop-cli/src/cli/mod.rs b/crates/suiop-cli/src/cli/mod.rs index 2111eb17153c3..fffcbf3b08387 100644 --- a/crates/suiop-cli/src/cli/mod.rs +++ b/crates/suiop-cli/src/cli/mod.rs @@ -3,6 +3,7 @@ mod ci; pub mod docker; +mod env; mod iam; mod incidents; pub mod lib; @@ -13,6 +14,7 @@ mod slack; pub use ci::{ci_cmd, CIArgs}; pub use docker::{docker_cmd, DockerArgs}; +pub use env::{load_environment_cmd, LoadEnvironmentArgs}; pub use iam::{iam_cmd, IAMArgs}; pub use incidents::{incidents_cmd, IncidentsArgs}; pub use pulumi::{pulumi_cmd, PulumiArgs}; diff --git a/crates/suiop-cli/src/cli/pulumi/init.rs b/crates/suiop-cli/src/cli/pulumi/init.rs index 7fa9551722fc7..90967a0af4e8c 100644 --- a/crates/suiop-cli/src/cli/pulumi/init.rs +++ b/crates/suiop-cli/src/cli/pulumi/init.rs @@ -12,6 +12,8 @@ use std::fs; use std::path::{Path, PathBuf}; use tracing::{debug, error, info, warn}; +use super::PulumiProjectRuntime; + #[derive(clap::Subcommand, Clone, Debug)] pub enum ProjectType { App, @@ -23,7 +25,12 @@ pub enum ProjectType { const 
KEYRING: &str = "pulumi-kms-automation-f22939d"; impl ProjectType { - pub fn create_project(&self, use_kms: &bool, project_name: Option) -> Result<()> { + pub fn create_project( + &self, + use_kms: &bool, + project_name: Option, + runtime: &PulumiProjectRuntime, + ) -> Result<()> { // make sure we're in suiops let suiops_path = ensure_in_suiops_repo()?; info!("suipop path: {}", suiops_path); @@ -87,11 +94,12 @@ impl ProjectType { &project_dir, Self::App, &project_opts, + runtime, )?; } Self::Basic => { info!("creating basic pulumi project"); - create_basic_project(&project_name, &project_dir, &project_opts)?; + create_basic_project(&project_name, &project_dir, &project_opts, runtime)?; } Self::CronJob => { info!("creating k8s cronjob project"); @@ -100,6 +108,7 @@ impl ProjectType { &project_dir, Self::CronJob, &project_opts, + runtime, )?; } } @@ -145,6 +154,7 @@ fn run_pulumi_new( project_name: &str, project_dir_str: &str, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { info!( "creating new pulumi project in {}", @@ -152,12 +162,16 @@ fn run_pulumi_new( ); let opts = project_opts.join(" "); info!("extra pulumi options added: {}", &opts.bright_purple()); + let runtime_arg = match runtime { + PulumiProjectRuntime::Go => "go", + PulumiProjectRuntime::Typescript => "ts", + }; run_cmd( vec![ "bash", "-c", &format!( - r#"pulumi new go --dir {0} -d "pulumi project for {1}" --name "{1}" --stack mysten/dev --yes {2}"#, + r#"pulumi new {runtime_arg} --dir {0} -d "pulumi project for {1}" --name "{1}" --stack mysten/dev --yes {2}"#, project_dir_str, project_name, opts ), ], @@ -171,15 +185,17 @@ fn run_pulumi_new_from_template( project_dir_str: &str, project_type: ProjectType, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { info!( "creating new pulumi project in {}", project_dir_str.bright_purple() ); - let template_dir = match project_type { - ProjectType::App | ProjectType::Service => "app-go", - 
ProjectType::CronJob => "cronjob-go", - _ => "app-go", + let template_dir = match (project_type, runtime) { + (ProjectType::App | ProjectType::Service, PulumiProjectRuntime::Go) => "app-go", + (ProjectType::CronJob, PulumiProjectRuntime::Go) => "cronjob-go", + (ProjectType::App | ProjectType::Service, PulumiProjectRuntime::Typescript) => "app-ts", + _ => panic!("unsupported runtime for this project type"), }; let opts = project_opts.join(" "); info!("extra pulumi options added: {}", &opts.bright_purple()); @@ -288,6 +304,7 @@ fn create_basic_project( project_name: &str, project_dir: &PathBuf, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { let project_dir_str = project_dir.to_str().expect("project dir to str"); info!( @@ -296,13 +313,16 @@ fn create_basic_project( ); fs::create_dir_all(project_dir).context("failed to create project directory")?; // initialize pulumi project - run_pulumi_new(project_name, project_dir_str, project_opts).inspect_err(|_| { + run_pulumi_new(project_name, project_dir_str, project_opts, runtime).inspect_err(|_| { remove_project_dir(project_dir).unwrap(); let backend = get_current_backend().unwrap(); remove_stack(&backend, project_name, "mysten/dev").unwrap(); })?; // run go mod tidy to make sure all dependencies are installed - run_go_mod_tidy(project_dir_str)?; + if runtime == &PulumiProjectRuntime::Go { + debug!("running go mod tidy"); + run_go_mod_tidy(project_dir_str)?; + } // set pulumi env set_pulumi_env(project_dir_str)?; // try a pulumi preview to make sure it's good @@ -314,6 +334,7 @@ fn create_mysten_k8s_project( project_dir: &PathBuf, project_type: ProjectType, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { let project_dir_str = project_dir.to_str().expect("project dir to str"); info!( @@ -322,14 +343,23 @@ fn create_mysten_k8s_project( ); fs::create_dir_all(project_dir).context("failed to create project directory")?; // initialize pulumi project - 
run_pulumi_new_from_template(project_name, project_dir_str, project_type, project_opts) - .inspect_err(|_| { - remove_project_dir(project_dir).unwrap(); - let backend = get_current_backend().unwrap(); - remove_stack(&backend, project_name, "mysten/dev").unwrap(); - })?; + run_pulumi_new_from_template( + project_name, + project_dir_str, + project_type, + project_opts, + runtime, + ) + .inspect_err(|_| { + remove_project_dir(project_dir).unwrap(); + let backend = get_current_backend().unwrap(); + remove_stack(&backend, project_name, "mysten/dev").unwrap(); + })?; // run go mod tidy to make sure all dependencies are installed - run_go_mod_tidy(project_dir_str)?; + if runtime == &PulumiProjectRuntime::Go { + debug!("running go mod tidy"); + run_go_mod_tidy(project_dir_str)?; + } // we don't run preview for templated apps because the user // has to give the repo dir (improvements to this coming soon) diff --git a/crates/suiop-cli/src/cli/pulumi/mod.rs b/crates/suiop-cli/src/cli/pulumi/mod.rs index 68b0d6145e999..2d9df6d16f078 100644 --- a/crates/suiop-cli/src/cli/pulumi/mod.rs +++ b/crates/suiop-cli/src/cli/pulumi/mod.rs @@ -5,11 +5,21 @@ mod init; mod setup; use anyhow::Result; +use clap::arg; use clap::Parser; +use clap::ValueEnum; use init::ProjectType; use setup::ensure_gcloud; use setup::ensure_pulumi_setup; +#[derive(ValueEnum, PartialEq, Clone, Debug)] +pub enum PulumiProjectRuntime { + #[clap(alias = "golang")] + Go, + #[clap(alias = "ts")] + Typescript, +} + #[derive(Parser, Debug, Clone)] pub struct PulumiArgs { #[command(subcommand)] @@ -32,6 +42,10 @@ pub enum PulumiAction { /// the name of the project to be created #[arg(long, aliases = ["name"])] project_name: Option, + + /// the runtime to use for the project + #[arg(long, default_value = "go")] + runtime: PulumiProjectRuntime, }, } @@ -42,11 +56,12 @@ pub fn pulumi_cmd(args: &PulumiArgs) -> Result<()> { project_type, kms, project_name, + runtime, } => { if *kms { ensure_gcloud()?; } - 
project_type.create_project(kms, project_name.clone()) + project_type.create_project(kms, project_name.clone(), runtime) } } } diff --git a/crates/suiop-cli/src/cli/slack/mod.rs b/crates/suiop-cli/src/cli/slack/mod.rs index 95d77d601ee53..b2ab0ae5d2f6d 100644 --- a/crates/suiop-cli/src/cli/slack/mod.rs +++ b/crates/suiop-cli/src/cli/slack/mod.rs @@ -32,7 +32,12 @@ fn get_serialize_filepath(subname: &str) -> PathBuf { /// Serialize the obj into ~/.suiop/{subname} so we can cache it across /// executions pub fn serialize_to_file(subname: &str, obj: &Vec) -> Result<()> { - let file = File::create(get_serialize_filepath(subname).as_path())?; + let filepath = get_serialize_filepath(subname); + // Ensure the parent directory exists + if let Some(parent) = filepath.parent() { + std::fs::create_dir_all(parent)?; + } + let file = File::create(filepath.as_path())?; serde_json::to_writer(file, obj)?; Ok(()) } @@ -70,6 +75,7 @@ impl Slack { let token = std::env::var("SLACK_BOT_TOKEN").expect( "Please set SLACK_BOT_TOKEN env var ('slack bot token (incidentbot)' in 1password)", ); + debug!("using slack token {}", token); let mut headers = header::HeaderMap::new(); headers.insert( header::AUTHORIZATION, diff --git a/crates/suiop-cli/src/main.rs b/crates/suiop-cli/src/main.rs index 3d3c7c99c83ea..f80de84e677a7 100644 --- a/crates/suiop-cli/src/main.rs +++ b/crates/suiop-cli/src/main.rs @@ -5,12 +5,12 @@ use anyhow::Result; use clap::Parser; use suioplib::{ cli::{ - ci_cmd, docker_cmd, iam_cmd, incidents_cmd, pulumi_cmd, service_cmd, CIArgs, DockerArgs, - IAMArgs, IncidentsArgs, PulumiArgs, ServiceArgs, + ci_cmd, docker_cmd, iam_cmd, incidents_cmd, load_environment_cmd, pulumi_cmd, service_cmd, + CIArgs, DockerArgs, IAMArgs, IncidentsArgs, LoadEnvironmentArgs, PulumiArgs, ServiceArgs, }, DEBUG_MODE, }; -use tracing::info; +use tracing::{debug, info, warn}; use tracing_subscriber::{ filter::{EnvFilter, LevelFilter}, FmtSubscriber, @@ -38,6 +38,8 @@ pub(crate) enum Resource { 
Service(ServiceArgs), #[clap()] CI(CIArgs), + #[clap(name="load-env", aliases = ["e", "env"])] + LoadEnvironment(LoadEnvironmentArgs), } #[tokio::main(flavor = "current_thread")] @@ -52,6 +54,24 @@ async fn main() -> Result<()> { tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); + // Load environment variables from ~/.suiop/env_vars + debug!("loading environment variables"); + let home_dir = std::env::var("HOME").expect("HOME environment variable not set"); + let env_file_path = std::path::Path::new(&home_dir) + .join(".suiop") + .join("env_vars"); + + if let Ok(env_contents) = std::fs::read_to_string(env_file_path) { + for line in env_contents.lines() { + if let Some((key, value)) = line.split_once('=') { + debug!("setting environment variable {}={}", key, value); + std::env::set_var(key.trim(), value.trim()); + } + } + } else { + warn!("Warning: Could not read ~/.suiop/env_vars file. Environment variables not loaded."); + } + if *DEBUG_MODE { info!("Debug mode enabled"); } @@ -76,6 +96,9 @@ async fn main() -> Result<()> { Resource::CI(args) => { ci_cmd(&args).await?; } + Resource::LoadEnvironment(args) => { + load_environment_cmd(&args)?; + } } Ok(()) diff --git a/crates/test-cluster/Cargo.toml b/crates/test-cluster/Cargo.toml index 58ca4d37edd93..70bb3d0be5023 100644 --- a/crates/test-cluster/Cargo.toml +++ b/crates/test-cluster/Cargo.toml @@ -17,11 +17,14 @@ futures.workspace = true tracing.workspace = true jsonrpsee.workspace = true tokio = { workspace = true, features = ["full", "tracing", "test-util"] } +tokio-util.workspace = true rand.workspace = true +tempfile.workspace = true sui-config.workspace = true sui-core = { workspace = true, features = ["test-utils"] } sui-framework.workspace = true sui-swarm-config.workspace = true +sui-indexer.workspace = true sui-json-rpc.workspace = true sui-json-rpc-types.workspace = true sui-json-rpc-api.workspace = true diff --git a/crates/test-cluster/src/indexer_util.rs 
b/crates/test-cluster/src/indexer_util.rs new file mode 100644 index 0000000000000..5d3b8b5605dd7 --- /dev/null +++ b/crates/test-cluster/src/indexer_util.rs @@ -0,0 +1,84 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; +use std::path::PathBuf; +use std::time::Duration; +use sui_config::local_ip_utils::get_available_port; +use sui_indexer::tempdb::TempDb; +use sui_indexer::test_utils::{ + start_indexer_jsonrpc_for_testing, start_indexer_writer_for_testing, +}; +use sui_json_rpc_api::ReadApiClient; +use sui_sdk::{SuiClient, SuiClientBuilder}; +use tempfile::TempDir; +use tokio::time::sleep; + +pub(crate) struct IndexerHandle { + pub(crate) rpc_client: HttpClient, + pub(crate) sui_client: SuiClient, + pub(crate) rpc_url: String, + #[allow(unused)] + cancellation_tokens: Vec, + #[allow(unused)] + data_ingestion_dir: Option, + #[allow(unused)] + database: TempDb, +} + +// TODO: this only starts indexer writer and reader (jsonrpc server) today. +// Consider adding graphql server here as well. 
+pub(crate) async fn setup_indexer_backed_rpc( + fullnode_rpc_url: String, + temp_data_ingestion_dir: Option, + data_ingestion_path: PathBuf, +) -> IndexerHandle { + let mut cancellation_tokens = vec![]; + let database = TempDb::new().unwrap(); + let pg_address = database.database().url().as_str().to_owned(); + let indexer_jsonrpc_address = format!("127.0.0.1:{}", get_available_port("127.0.0.1")); + + // Start indexer writer + let (_, _, writer_token) = start_indexer_writer_for_testing( + pg_address.clone(), + None, + None, + Some(data_ingestion_path.clone()), + None, + ) + .await; + cancellation_tokens.push(writer_token.drop_guard()); + + // Start indexer jsonrpc service + let (_, reader_token) = start_indexer_jsonrpc_for_testing( + pg_address.clone(), + fullnode_rpc_url, + indexer_jsonrpc_address.clone(), + None, + ) + .await; + cancellation_tokens.push(reader_token.drop_guard()); + + let rpc_address = format!("http://{}", indexer_jsonrpc_address); + + let rpc_client = HttpClientBuilder::default().build(&rpc_address).unwrap(); + + // Wait for the rpc client to be ready + while rpc_client.get_chain_identifier().await.is_err() { + sleep(Duration::from_millis(100)).await; + } + + let sui_client = SuiClientBuilder::default() + .build(&rpc_address) + .await + .unwrap(); + + IndexerHandle { + rpc_client, + sui_client, + rpc_url: rpc_address.clone(), + database, + data_ingestion_dir: temp_data_ingestion_dir, + cancellation_tokens, + } +} diff --git a/crates/test-cluster/src/lib.rs b/crates/test-cluster/src/lib.rs index 05643d8716b01..5cfaf7d2b2ad8 100644 --- a/crates/test-cluster/src/lib.rs +++ b/crates/test-cluster/src/lib.rs @@ -61,6 +61,8 @@ use tokio::time::{timeout, Instant}; use tokio::{task::JoinHandle, time::sleep}; use tracing::{error, info}; +mod test_indexer_handle; + const NUM_VALIDATOR: usize = 4; pub struct FullNodeHandle { @@ -90,23 +92,33 @@ pub struct TestCluster { pub swarm: Swarm, pub wallet: WalletContext, pub fullnode_handle: FullNodeHandle, + 
indexer_handle: Option, } impl TestCluster { pub fn rpc_client(&self) -> &HttpClient { - &self.fullnode_handle.rpc_client + self.indexer_handle + .as_ref() + .map(|h| &h.rpc_client) + .unwrap_or(&self.fullnode_handle.rpc_client) } pub fn sui_client(&self) -> &SuiClient { - &self.fullnode_handle.sui_client + self.indexer_handle + .as_ref() + .map(|h| &h.sui_client) + .unwrap_or(&self.fullnode_handle.sui_client) } - pub fn quorum_driver_api(&self) -> &QuorumDriverApi { - self.sui_client().quorum_driver_api() + pub fn rpc_url(&self) -> &str { + self.indexer_handle + .as_ref() + .map(|h| h.rpc_url.as_str()) + .unwrap_or(&self.fullnode_handle.rpc_url) } - pub fn rpc_url(&self) -> &str { - &self.fullnode_handle.rpc_url + pub fn quorum_driver_api(&self) -> &QuorumDriverApi { + self.sui_client().quorum_driver_api() } pub fn wallet(&mut self) -> &WalletContext { @@ -829,6 +841,8 @@ pub struct TestClusterBuilder { max_submit_position: Option, submit_delay_step_override_millis: Option, validator_state_accumulator_v2_enabled_config: StateAccumulatorV2EnabledConfig, + + indexer_backed_rpc: bool, } impl TestClusterBuilder { @@ -859,6 +873,7 @@ impl TestClusterBuilder { validator_state_accumulator_v2_enabled_config: StateAccumulatorV2EnabledConfig::Global( true, ), + indexer_backed_rpc: false, } } @@ -1057,6 +1072,11 @@ impl TestClusterBuilder { self } + pub fn with_indexer_backed_rpc(mut self) -> Self { + self.indexer_backed_rpc = true; + self + } + pub async fn build(mut self) -> TestCluster { // All test clusters receive a continuous stream of random JWKs. 
// If we later use zklogin authenticated transactions in tests we will need to supply @@ -1087,20 +1107,50 @@ impl TestClusterBuilder { })); } + let mut temp_data_ingestion_dir = None; + let mut data_ingestion_path = None; + + if self.indexer_backed_rpc { + if self.data_ingestion_dir.is_none() { + temp_data_ingestion_dir = Some(tempfile::tempdir().unwrap()); + self.data_ingestion_dir = Some( + temp_data_ingestion_dir + .as_ref() + .unwrap() + .path() + .to_path_buf(), + ); + assert!(self.data_ingestion_dir.is_some()); + } + assert!(self.data_ingestion_dir.is_some()); + data_ingestion_path = Some(self.data_ingestion_dir.as_ref().unwrap().to_path_buf()); + } + let swarm = self.start_swarm().await.unwrap(); let working_dir = swarm.dir(); - let mut wallet_conf: SuiClientConfig = - PersistedConfig::read(&working_dir.join(SUI_CLIENT_CONFIG)).unwrap(); - let fullnode = swarm.fullnodes().next().unwrap(); let json_rpc_address = fullnode.config().json_rpc_address; let fullnode_handle = FullNodeHandle::new(fullnode.get_node_handle().unwrap(), json_rpc_address).await; + let (rpc_url, indexer_handle) = if self.indexer_backed_rpc { + let handle = test_indexer_handle::IndexerHandle::new( + fullnode_handle.rpc_url.clone(), + temp_data_ingestion_dir, + data_ingestion_path.unwrap(), + ) + .await; + (handle.rpc_url.clone(), Some(handle)) + } else { + (fullnode_handle.rpc_url.clone(), None) + }; + + let mut wallet_conf: SuiClientConfig = + PersistedConfig::read(&working_dir.join(SUI_CLIENT_CONFIG)).unwrap(); wallet_conf.envs.push(SuiEnv { alias: "localnet".to_string(), - rpc: fullnode_handle.rpc_url.clone(), + rpc: rpc_url, ws: None, basic_auth: None, }); @@ -1118,6 +1168,7 @@ impl TestClusterBuilder { swarm, wallet, fullnode_handle, + indexer_handle, } } diff --git a/crates/test-cluster/src/test_indexer_handle.rs b/crates/test-cluster/src/test_indexer_handle.rs new file mode 100644 index 0000000000000..ec399de40f0d3 --- /dev/null +++ b/crates/test-cluster/src/test_indexer_handle.rs 
@@ -0,0 +1,88 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; +use std::path::PathBuf; +use std::time::Duration; +use sui_config::local_ip_utils::new_local_tcp_socket_for_testing_string; +use sui_indexer::tempdb::TempDb; +use sui_indexer::test_utils::{ + start_indexer_jsonrpc_for_testing, start_indexer_writer_for_testing, +}; +use sui_json_rpc_api::ReadApiClient; +use sui_sdk::{SuiClient, SuiClientBuilder}; +use tempfile::TempDir; +use tokio::time::sleep; + +pub(crate) struct IndexerHandle { + pub(crate) rpc_client: HttpClient, + pub(crate) sui_client: SuiClient, + pub(crate) rpc_url: String, + #[allow(unused)] + cancellation_tokens: Vec, + #[allow(unused)] + data_ingestion_dir: Option, + #[allow(unused)] + database: TempDb, +} + +impl IndexerHandle { + // TODO: this only starts indexer writer and reader (jsonrpc server) today. + // Consider adding graphql server here as well. + pub async fn new( + fullnode_rpc_url: String, + data_ingestion_dir: Option, + data_ingestion_path: PathBuf, + ) -> IndexerHandle { + let mut cancellation_tokens = vec![]; + let database = TempDb::new().unwrap(); + let pg_address = database.database().url().as_str().to_owned(); + let indexer_jsonrpc_address = new_local_tcp_socket_for_testing_string(); + + // Start indexer writer + let (_, _, writer_token) = start_indexer_writer_for_testing( + pg_address.clone(), + None, + None, + Some(data_ingestion_path.clone()), + None, + None, + None, + ) + .await; + cancellation_tokens.push(writer_token.drop_guard()); + + // Start indexer jsonrpc service + let (_, reader_token) = start_indexer_jsonrpc_for_testing( + pg_address.clone(), + fullnode_rpc_url, + indexer_jsonrpc_address.clone(), + None, + ) + .await; + cancellation_tokens.push(reader_token.drop_guard()); + + let rpc_address = format!("http://{}", indexer_jsonrpc_address); + + let rpc_client = HttpClientBuilder::default().build(&rpc_address).unwrap(); 
+ + // Wait for the rpc client to be ready + while rpc_client.get_chain_identifier().await.is_err() { + sleep(Duration::from_millis(100)).await; + } + + let sui_client = SuiClientBuilder::default() + .build(&rpc_address) + .await + .unwrap(); + + IndexerHandle { + rpc_client, + sui_client, + rpc_url: rpc_address.clone(), + database, + data_ingestion_dir, + cancellation_tokens, + } + } +} diff --git a/crates/x/src/lint.rs b/crates/x/src/lint.rs index 55116070aa5b4..d84c9c3ca581f 100644 --- a/crates/x/src/lint.rs +++ b/crates/x/src/lint.rs @@ -143,7 +143,8 @@ pub fn handle_lint_results_exclude_external_crate_checks( |source: &LintSource, path: &Utf8Path| -> bool { (path.starts_with(EXTERNAL_CRATE_DIR) || path.starts_with(CREATE_DAPP_TEMPLATE_DIR) - || path.to_string().contains("/generated/")) + || path.to_string().contains("/generated/") + || path.to_string().contains("/proto/")) && source.name() == "license-header" }, // ignore check to skip buck related code paths, meta (fb) derived starlark, etc. 
diff --git a/dapps/multisig-toolkit/package.json b/dapps/multisig-toolkit/package.json index 33363e105e1f3..2bd735f3411c1 100644 --- a/dapps/multisig-toolkit/package.json +++ b/dapps/multisig-toolkit/package.json @@ -21,6 +21,7 @@ "@hookform/resolvers": "^3.9.0", "@mysten/dapp-kit": "workspace:*", "@mysten/sui": "workspace:*", + "@noble/hashes": "^1.4.0", "@radix-ui/react-dialog": "^1.1.1", "@radix-ui/react-label": "^2.1.0", "@radix-ui/react-navigation-menu": "^1.2.0", @@ -43,6 +44,7 @@ "devDependencies": { "@tailwindcss/forms": "^0.5.7", "@tsconfig/docusaurus": "^2.0.3", + "@types/node": "^20.14.10", "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@vitejs/plugin-react": "^4.3.1", diff --git a/dapps/multisig-toolkit/src/routes/offline-signer.tsx b/dapps/multisig-toolkit/src/routes/offline-signer.tsx index 9136e630ab4f7..5430b1b17c297 100644 --- a/dapps/multisig-toolkit/src/routes/offline-signer.tsx +++ b/dapps/multisig-toolkit/src/routes/offline-signer.tsx @@ -3,10 +3,13 @@ import { useCurrentAccount, useSignTransaction, useSuiClientContext } from '@mysten/dapp-kit'; import { getFullnodeUrl, SuiClient } from '@mysten/sui/client'; +import { messageWithIntent } from '@mysten/sui/cryptography'; import { Transaction } from '@mysten/sui/transactions'; +import { fromBase64, toHex } from '@mysten/sui/utils'; +import { blake2b } from '@noble/hashes/blake2b'; import { useMutation } from '@tanstack/react-query'; import { AlertCircle, Terminal } from 'lucide-react'; -import { useEffect, useState } from 'react'; +import { useEffect, useMemo, useState } from 'react'; import { ConnectWallet } from '@/components/connect'; import { DryRunProvider, type Network } from '@/components/preview-effects/DryRunContext'; @@ -70,6 +73,21 @@ export default function OfflineSigner() { }, }); + // Step 3: compute the blake2b hash + const ledgerTransactionHash = useMemo(() => { + if (!bytes) return null; + try { + // Decode the base64-encoded transaction bytes + const decodedBytes = 
fromBase64(bytes); + const intentMessage = messageWithIntent('TransactionData', decodedBytes); + const intentMessageDigest = blake2b(intentMessage, { dkLen: 32 }); + const intentMessageDigestHex = toHex(intentMessageDigest); + return `0x${intentMessageDigestHex}`; + } catch (error) { + return 'Error computing hash'; + } + }, [bytes]); + return (

@@ -145,6 +163,15 @@ export default function OfflineSigner() { )} + + {ledgerTransactionHash && ( +
+

Ledger Transaction Hash

+
+ {ledgerTransactionHash} +
+
+ )}

diff --git a/docker/sui-graphql-rpc-staging/build.sh b/docker/sui-graphql-rpc-staging/build.sh new file mode 100755 index 0000000000000..84038b6538c15 --- /dev/null +++ b/docker/sui-graphql-rpc-staging/build.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# fast fail. +set -e + +DIR="$( cd "$( dirname "$0" )" && pwd )" +REPO_ROOT="$(git rev-parse --show-toplevel)" +DOCKERFILE="$DIR/Dockerfile" +GIT_REVISION="$(git describe --always --abbrev=12 --dirty --exclude '*')" +BUILD_DATE="$(date -u +'%Y-%m-%d')" + +# option to build using debug symbols +if [ "$1" = "--debug-symbols" ]; then + PROFILE="bench" + echo "Building with full debug info enabled ... WARNING: binary size might significantly increase" + shift +else + PROFILE="release" +fi + +echo +echo "Building sui-graphql-rpc docker image" +echo "Dockerfile: \t$DOCKERFILE" +echo "docker context: $REPO_ROOT" +echo "build date: \t$BUILD_DATE" +echo "git revision: \t$GIT_REVISION" +echo + +docker build -f "$DOCKERFILE" "$REPO_ROOT" \ + --build-arg GIT_REVISION="$GIT_REVISION" \ + --build-arg BUILD_DATE="$BUILD_DATE" \ + --build-arg PROFILE="$PROFILE" \ + --features staging \ + "$@" diff --git a/docker/sui-indexer-alt/Dockerfile b/docker/sui-indexer-alt/Dockerfile new file mode 100644 index 0000000000000..088295fa97bc5 --- /dev/null +++ b/docker/sui-indexer-alt/Dockerfile @@ -0,0 +1,37 @@ +# Build application +# +# Copy in all crates, Cargo.toml and Cargo.lock unmodified, +# and build the application. 
+FROM rust:1.81-bullseye AS builder +ARG PROFILE=release +ARG GIT_REVISION +ENV GIT_REVISION=$GIT_REVISION +WORKDIR "$WORKDIR/sui" + +# sui-indexer need ca-certificates +RUN apt update && apt install -y ca-certificates postgresql + +RUN apt-get update && apt-get install -y cmake clang + +COPY Cargo.toml Cargo.lock ./ +COPY consensus consensus +COPY crates crates +COPY sui-execution sui-execution +COPY narwhal narwhal +COPY external-crates external-crates + +RUN cargo build --profile ${PROFILE} --bin sui-indexer-alt + +# Production Image +FROM debian:bullseye-slim AS runtime +# Use jemalloc as memory allocator +RUN apt-get update && apt-get install -y libjemalloc-dev ca-certificates curl +ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so +WORKDIR "$WORKDIR/sui" +COPY --from=builder /sui/target/release/sui-indexer-alt /usr/local/bin +RUN apt update && apt install -y ca-certificates postgresql + +ARG BUILD_DATE +ARG GIT_REVISION +LABEL build-date=$BUILD_DATE +LABEL git-revision=$GIT_REVISION diff --git a/docker/sui-indexer-alt/build.sh b/docker/sui-indexer-alt/build.sh new file mode 100644 index 0000000000000..f11ac4fa88ef7 --- /dev/null +++ b/docker/sui-indexer-alt/build.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# fast fail. 
+set -e + +DIR="$( cd "$( dirname "$0" )" && pwd )" +REPO_ROOT="$(git rev-parse --show-toplevel)" +DOCKERFILE="$DIR/Dockerfile" +GIT_REVISION="$(git describe --always --abbrev=12 --dirty --exclude '*')" +BUILD_DATE="$(date -u +'%Y-%m-%d')" + +echo +echo "Building sui-indexer-alt docker image" +echo "Dockerfile: \t$DOCKERFILE" +echo "docker context: $REPO_ROOT" +echo "build date: \t$BUILD_DATE" +echo "git revision: \t$GIT_REVISION" +echo + +docker build -f "$DOCKERFILE" "$REPO_ROOT" \ + --build-arg GIT_REVISION="$GIT_REVISION" \ + --build-arg BUILD_DATE="$BUILD_DATE" \ + "$@" \ No newline at end of file diff --git a/docker/sui-mvr-indexer/Dockerfile b/docker/sui-mvr-indexer/Dockerfile new file mode 100644 index 0000000000000..bf0f2be29e2e7 --- /dev/null +++ b/docker/sui-mvr-indexer/Dockerfile @@ -0,0 +1,37 @@ +# Build application +# +# Copy in all crates, Cargo.toml and Cargo.lock unmodified, +# and build the application. +FROM rust:1.81-bullseye AS builder +ARG PROFILE=release +ARG GIT_REVISION +ENV GIT_REVISION=$GIT_REVISION +WORKDIR "$WORKDIR/sui" + +# sui-mvr-indexer needs postgres libpq5 and ca-certificates +RUN apt update && apt install -y libpq5 ca-certificates libpq-dev postgresql + +RUN apt-get update && apt-get install -y cmake clang + +COPY Cargo.toml Cargo.lock ./ +COPY consensus consensus +COPY crates crates +COPY sui-execution sui-execution +COPY narwhal narwhal +COPY external-crates external-crates + +RUN cargo build --profile ${PROFILE} --bin sui-mvr-indexer + +# Production Image +FROM debian:bullseye-slim AS runtime +# Use jemalloc as memory allocator +RUN apt-get update && apt-get install -y libjemalloc-dev ca-certificates curl +ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so +WORKDIR "$WORKDIR/sui" +COPY --from=builder /sui/target/release/sui-mvr-indexer /usr/local/bin +RUN apt update && apt install -y libpq5 ca-certificates libpq-dev postgresql + +ARG BUILD_DATE +ARG GIT_REVISION +LABEL build-date=$BUILD_DATE +LABEL 
git-revision=$GIT_REVISION diff --git a/docker/sui-mvr-indexer/build.sh b/docker/sui-mvr-indexer/build.sh new file mode 100755 index 0000000000000..5e1c8c1623fe7 --- /dev/null +++ b/docker/sui-mvr-indexer/build.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# fast fail. +set -e + +DIR="$( cd "$( dirname "$0" )" && pwd )" +REPO_ROOT="$(git rev-parse --show-toplevel)" +DOCKERFILE="$DIR/Dockerfile" +GIT_REVISION="$(git describe --always --abbrev=12 --dirty --exclude '*')" +BUILD_DATE="$(date -u +'%Y-%m-%d')" + +echo +echo "Building sui-mvr-indexer docker image" +echo "Dockerfile: \t$DOCKERFILE" +echo "docker context: $REPO_ROOT" +echo "build date: \t$BUILD_DATE" +echo "git revision: \t$GIT_REVISION" +echo + +docker build -f "$DOCKERFILE" "$REPO_ROOT" \ + --build-arg GIT_REVISION="$GIT_REVISION" \ + --build-arg BUILD_DATE="$BUILD_DATE" \ + "$@" diff --git a/docs/content/concepts/sui-bridge.mdx b/docs/content/concepts/sui-bridge.mdx new file mode 100644 index 0000000000000..508f716c15dc8 --- /dev/null +++ b/docs/content/concepts/sui-bridge.mdx @@ -0,0 +1,87 @@ +--- +title: Sui Bridge +draft: true +--- + +[Sui Bridge](https://bridge.sui.io/) is the native bridge for the Sui network. Unlike third-party solutions, a native bridge is typically built into the core architecture of a blockchain, ensuring optimal integration and operation. The bridge allows users to move digital assets from one chain to another, preserving security and maintaining interoperability between diverse ecosystems. Sui Bridge is the native bridge on Sui, designed to enhance interoperability between Sui and Ethereum. Sui Bridge enables users to securely and efficiently transfer native and wrapped ETH to and from Sui. The bridge leverages the unique capabilities of the Sui network to offer fast transaction speeds, lower transaction costs, and a decentralized architecture. 
+ +You can bridge tokens in the official Sui Bridge website: https://bridge.sui.io/. + +## Operation and governance + +Sui Bridge is operated and governed by Sui network validators, the same set of validators that secure the Sui network. Bridge transfers and other actions require validator signatures with a minimal threshold of voting power. + +Similar to the Sui network, all governance related to the Sui Bridge is done via validator voting. + +## Architecture + +WIP + +## Supported chains and tokens + +Sui Bridge supports token bridging between Sui and Ethereum Mainnet with the following supported assets: + +| Asset | Description | +| --- | --- | +| Ethereum (ETH) | The native cryptocurrency of the Ethereum network, widely used for transactions and smart contract interactions. | +| Wrapped Ethereum (WETH) | Tokenized representation of native ETH. | + +You can transfer these assets both to and from the Sui network, utilizing the bridge’s capabilities to provide fast, secure, and efficient cross-chain transactions. + +## Package IDs and contract addresses + +The following package IDs and addresses are reserved for the Sui Bridge. 
+ +| Asset | Address/ID | +| --- | --- | +| Sui Bridge Package on Sui | [`0xb`](https://suiscan.xyz/mainnet/object/0x000000000000000000000000000000000000000000000000000000000000000b/txs) | +| Sui Bridge Object on Sui | [`0x9`](https://suiscan.xyz/mainnet/object/0x0000000000000000000000000000000000000000000000000000000000000009) | +| Sui Bridge Proxy on Etheruem Mainnet | [`0xda3bD1fE1973470312db04551B65f401Bc8a92fD`](https://etherscan.io/address/0xda3bd1fe1973470312db04551b65f401bc8a92fd) | +| ETH on Sui | [`0xd0e89b2af5e4910726fbcd8b8dd37bb79b29e5f83f7491bca830e94f7f226d29::eth::ETH`](https://suiscan.xyz/mainnet/coin/0xd0e89b2af5e4910726fbcd8b8dd37bb79b29e5f83f7491bca830e94f7f226d29::eth::ETH/txs) | +| ETH on Ethereum | Native Ether | +| WETH on Ethereum | [`0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2`](https://etherscan.io/address/0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2) | + +## Source code + +The source code for Sui Bridge is open-source and found in the following locations: + +- Move: https://github.com/MystenLabs/sui/tree/main/crates/sui-framework/packages/bridge +- Solidity: https://github.com/MystenLabs/sui/tree/main/bridge/evm +- Bridged ETH (Move): https://github.com/MystenLabs/sui/tree/main/bridge/move/tokens/eth +- Bridge Node: https://github.com/MystenLabs/sui/tree/main/crates/sui-bridge +- Bridge Indexer: https://github.com/MystenLabs/sui/tree/main/crates/sui-bridge-indexer + +## Audits + +There are two audit reports available for Sui Bridge: +- [OtterSec](https://github.com/sui-foundation/security-audits/blob/main/docs/Sui_bridge_v1_OtterSec.pdf) +- [Zellic](https://github.com/sui-foundation/security-audits/blob/main/docs/Sui_Bridge_v1_Zellic.pdf) + +## Global limiter {#global-limiter} + +A limiter protects the user's funds by constraining the total value of assets leaving Sui Bridge in any 24-hour window. It tracks total value hourly and aggregates values from the previous 24 hours. Therefore, when the limiter cools down, it refreshes every hour. 
+ +The limit applies globally and varies per direction. For example, the amount might be different between Ethereum to Sui and Sui to Ethereum. + +The limit also impacts the maximal amount of single transfer. Simply put, in one bridge transfer, you cannot move assets worth more than the limit. The bridge frontend might apply stricter restrictions to protect user assets. + +The limit per route is governed by the validator committee through voting. + +The global limit is currently $8 million from Ethereum to Sui and $5 million from Sui to Etheruem every 24 hours. + +## Asset price + +Sui Bridge v1 uses static pricing to calculate limits. The price for ETH is configured at $2,600.00. Namely, bridging one ETH consumes $2,600 USD in limit calculation. + +The validator committee governs the pricing through voting. It works together with the global limiter to protect user funds. + +## Transfer limit + +There is no minimal limit for transfer but a tiny fraction might be rounded down. Particularly for native Ethereum (ETH) and wrapped Ethereum (WETH) because of reduced precision of eight decimal places, the value of 10.0000000000000001 (W)ETH is rounded down to 10 (W)ETH. + +| Token | Minimal value | +| --- | --- | +| ETH | 0.00000001 ETH (eight decimal places of precision) | +| WETH | 0.00000001 ETH (eight decimal places of precision) | + +The maximum limit per transfer is the global limit in USD value. Namely a user cannot claim assets on the destination chain if the USD value is higher than the global limit. See the [Global limiter section](#global-limiter) for details. 
\ No newline at end of file diff --git a/docs/content/guides/developer/getting-started/local-network.mdx b/docs/content/guides/developer/getting-started/local-network.mdx index afbf5f6cacaf5..1b2beadf903db 100644 --- a/docs/content/guides/developer/getting-started/local-network.mdx +++ b/docs/content/guides/developer/getting-started/local-network.mdx @@ -21,7 +21,7 @@ This command: * Instructs Rust to set specific logging through the `RUST_LOG`=`off,sui_node=info` flags, which turns off logging for all components except `sui-node`. If you want to see more detailed logs, you can remove `RUST_LOG` from the command. :::info -Each time you start the network by passing `--force-regenesis`, the local network starts from a random genesis with no previous data, and the local network is not persisted. If you'd like to persist data, skip passing the `--force-regenesis` flag. For more details, see the [Persist local network state](#persist-local-network) section. +Each time you start the network by passing `--force-regenesis`, the local network starts from a random genesis with no previous data, and the local network is not persisted. If you'd like to persist data, skip passing the `--force-regenesis` flag. For more details, see the [Persist local network state](#persist-local-network-state) section. ::: To customize your local Sui network, such as starting other services or changing default ports and hosts, include additional flags or options in the `sui start` command. diff --git a/docs/content/references/cli.mdx b/docs/content/references/cli.mdx index 48fe196107026..e55cbeb6abc75 100644 --- a/docs/content/references/cli.mdx +++ b/docs/content/references/cli.mdx @@ -12,12 +12,12 @@ Sui provides a command line interface (CLI) tool to interact with the Sui networ To get the latest version of the CLI, you can run the following command from a terminal or console. Be sure to replace `` with `main`, `devnet`, `testnet`, or `mainnet` to get the desired version. 
For more information on the branches available, see [Sui Environment Setup](./contribute/sui-environment.mdx). ```shell -cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features gas-profiler sui +cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features tracing sui ``` :::info -The `--features gas-profiler` flag is necessary only if you want to run gas profiles for transactions. +The `--features tracing` flag is necessary only if you want to run gas profiles for transactions. ::: diff --git a/docs/content/references/cli/client.mdx b/docs/content/references/cli/client.mdx index 8bedd5c602943..3057f535e8bf2 100644 --- a/docs/content/references/cli/client.mdx +++ b/docs/content/references/cli/client.mdx @@ -473,10 +473,10 @@ and produce a gas profile. Similar to the `replay` command, this command fetches Full node specified in the client environment that are needed to execute the transaction. During the local execution of the transaction, this command records all the Move function invocations and the gas cost breakdown for each invocation. -To enable the profiler, you must either install or build the Sui Client binary locally with the `--features gas-profiler` flag. +To enable the profiler, you must either install or build the Sui Client binary locally with the `--features tracing` flag. ```shell -cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features gas-profiler sui +cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features tracing sui ``` The command outputs a profile to the current working directory in the format `gas_profile_{tx_digest}_{unix_timestamp}.json`. 
diff --git a/docs/content/sidebars/concepts.js b/docs/content/sidebars/concepts.js index 490d4978e25cb..ffe5f546f0e9c 100644 --- a/docs/content/sidebars/concepts.js +++ b/docs/content/sidebars/concepts.js @@ -178,6 +178,7 @@ const concepts = [ 'concepts/tokenomics/gas-in-sui', ], }, + 'concepts/sui-bridge', 'concepts/research-papers', ]; module.exports = concepts; diff --git a/docs/content/standards/deepbook.mdx b/docs/content/standards/deepbook.mdx index e2ae5555ce796..e58df3b5e0100 100644 --- a/docs/content/standards/deepbook.mdx +++ b/docs/content/standards/deepbook.mdx @@ -9,9 +9,7 @@ DeepBook is a next-generation decentralized central limit order book (CLOB) buil ## Versioning -The latest development iteration of DeepBook is version 3 (DeepBookV3). [DeebBookV3](./deepbookv3.mdx) is currently available in both Devnet and Testnet. [DeepBookV2](./deepbookv2.mdx) is the current Mainnet iteration of DeepBook. - -DeepBookV3 is scheduled to replace DeepBookV2 on Mainnet in the second half of 2024. +The latest development iteration of DeepBook is version 3 (DeepBookV3). [DeepBookV3](./deepbookv3.mdx) is currently available across all Sui networks. [DeepBookV2](./deepbookv2.mdx) is currently still available to provide time for legacy contracts to migrate to DeepBookV3. @@ -19,10 +17,8 @@ DeepBookV3 is scheduled to replace DeepBookV2 on Mainnet in the second half of 2 DeepBook is open for community development. You can use the [Sui Improvement Proposals](https://github.com/sui-foundation/sips?ref=blog.sui.io) (SIPs) process to suggest changes to make DeepBook better. -- [DeepBookV2 packages](https://github.com/MystenLabs/sui/tree/main/crates/sui-framework/packages/deepbook) (part of Sui framework) -- [DeepBookV3 repository on GitHub](https://github.com/MystenLabs/deepbookv3) - - ## Related links +- [DeepBookV3 repository on GitHub](https://github.com/MystenLabs/deepbookv3) - DeepBook framework docs: Autogenerated framework documentation for DeepBookV2. 
+ diff --git a/docs/content/standards/deepbookv3/query-the-pool.mdx b/docs/content/standards/deepbookv3/query-the-pool.mdx index 24ad2495c0591..94a831c9a9483 100644 --- a/docs/content/standards/deepbookv3/query-the-pool.mdx +++ b/docs/content/standards/deepbookv3/query-the-pool.mdx @@ -6,7 +6,7 @@ title: Query the Pool The `Pool` shared object represents a market, such as a SUI/USDC market. That `Pool` is the only one representing that unique pairing (SUI/USDC) and the pairing is the only member of that particular `Pool`. See [DeepBook Design](./design.mdx#pool) to learn more about the structure of pools. -To perform trades, you pass a `BalanceManager` and `TradeProof` into the relvant `Pool`. Unlike `Pool`s, `BalanceManager` shared objects can contain any type of token, such that the same `BalanceManager` can access multiple `Pool`s to interact with many different trade pairings. See [BalanceManager](./balance-manager.mdx) to learn more. +To perform trades, you pass a `BalanceManager` and `TradeProof` into the relevant `Pool`. Unlike `Pool`s, `BalanceManager` shared objects can contain any type of token, such that the same `BalanceManager` can access multiple `Pool`s to interact with many different trade pairings. See [BalanceManager](./balance-manager.mdx) to learn more. ## API @@ -32,7 +32,7 @@ public fun get_quote_quantity_out( ): (u64, u64, u64) ``` -### Check quote quantity against quote +### Check base quantity against quote Dry run to determine the base quantity out for a given quote quantity. 
@@ -195,4 +195,4 @@ Returns the `OrderDeepPrice` struct for the pool, which determines the conversio public fun get_order_deep_price( self: &Pool, ): OrderDeepPrice -``` \ No newline at end of file +``` diff --git a/docs/site/src/components/API/api-ref/compnav.js b/docs/site/src/components/API/api-ref/compnav.js new file mode 100644 index 0000000000000..03009d56a5984 --- /dev/null +++ b/docs/site/src/components/API/api-ref/compnav.js @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import React from "react"; +import Link from "@docusaurus/Link"; + +const CompNav = (props) => { + const { json, apis } = props; + + return ( +
+
+

Component schemas

+ {Object.keys(json["components"]["schemas"]).map((component) => { + return ( +
+ + {component} + +
+ )})} +
+
+ ); +}; + +export default CompNav; diff --git a/docs/site/src/components/API/api-ref/components.js b/docs/site/src/components/API/api-ref/components.js new file mode 100644 index 0000000000000..c95234a23d486 --- /dev/null +++ b/docs/site/src/components/API/api-ref/components.js @@ -0,0 +1,326 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import React, { useRef } from "react"; +import Link from "@docusaurus/Link"; +import Markdown from "markdown-to-jsx"; +import { Light as SyntaxHighlighter } from "react-syntax-highlighter"; +import js from "react-syntax-highlighter/dist/esm/languages/hljs/json"; +import docco from "react-syntax-highlighter/dist/esm/styles/hljs/docco"; +import dark from "react-syntax-highlighter/dist/esm/styles/hljs/dracula"; +import ScrollSpy from "react-ui-scrollspy"; + +SyntaxHighlighter.registerLanguage("json", js); + +const pillStyle = + "p-2 border border-solid border-sui-blue-dark rounded-lg max-w-max bg-sui-ghost-white dark:bg-sui-gray-90"; + +const RefLink = (props) => { + const { refer } = props; + const link = refer.substring(refer.lastIndexOf("/") + 1); + return {link}; +}; + +const Of = (props) => { + const { of, type } = props; + return ( + <> + {of.map((o) => { + if (o["$ref"]) { + return ( +
+

+ +

+ {o.description && ( +

+ {o.description} +

+ )} +
+ ); + } else if (o.type && o.type === "object") { + return ( +
+

+ Object +

+ {o.description && ( +

+ {o.description} +

+ )} + {o.properties && ( + + )} +
+ ); + } else if (o.type && o.type === "string") { + return ( +
+

+ String{" "} + {o.enum && o.enum.length > 0 && ( + + enum: [ {o.enum.map((e) => `"${e}"`).join(" | ")} ] + + )} +

+ {o.description && ( +

+ {o.description} +

+ )} +
+ ); + } else if (o.type && o.type === "integer") { + return ( +
+

+ {o.type[0].toUpperCase()} + {o.type.substring(1)}<{o.format}> Minimum: {o.minimum} +

+ {o.description && {o.description}} +
+ ); + } else if (o.type && o.type === "boolean") { + return ( +
+

+ Boolean +

+ {o.description && {o.description}} +
+ ); + } else if (o.type && o.type === "array") { + return ( +
+

+ [ + {o.items && + Object.keys(o.items).map((k) => { + if (k === "$ref") { + return ; + } + })} + ] +

+ {o.description && ( +

+ {o.description} +

+ )} +
+ ); + } else if (o.anyOf) { + return ; + } else if (o.type) { + return

{o.type}

; + } + })} + + ); +}; + +const AllOf = (props) => { + const { allof } = props; + return ( +
+ +
+ ); +}; + +const AnyOf = (props) => { + const { anyof } = props; + return ( +
+

+ Any of +

+
+ +
+
+ ); +}; + +const AnyOfInline = (props) => { + const { anyof, pill } = props; + return ( +
+ {anyof.map((a, i) => { + if (a["$ref"]) { + return ( + <> + + {i % 2 === 0 ? " | " : ""} + + ); + } + if (a.type) { + return ( + <> + {a.type} + {i % 2 === 0 ? " | " : ""} + + ); + } + })} +
+ ); +}; + +const OneOf = (props) => { + const { oneof } = props; + return ( +
+

+ One of +

+
+ +
+
+ ); +}; + +const PropertiesTable = (props) => { + const { properties, schema } = props; + if (!properties) { + return; + } + return ( +
+ + + + + + + + + + {properties.map(([k, v]) => ( + <> + + + + + + + {v.type === "object" ? ( + + + + + ) : ( + "" + )} + + ))} + +
PropertyTypeReq?Description
{k} + {Array.isArray(v.type) ? v.type.join(" | ") : v.type} + {v.enum && + ` enum [ ${v.enum.map((e) => `"${e}"`).join(" | ")} ]`} + {v["$ref"] && } + {v.anyOf && } + {v.allOf && } + {v.oneOf && "ONEOFCELL"} + {v === true && "true"} + + {schema.required && schema.required.includes(k) ? "Yes" : "No"} + {v.description && v.description}
+ {v.additionalProperties && "Additional properties"} + + {v.additionalProperties && v.additionalProperties["$ref"] && ( + + )} + {!v.additionalProperties && v.properties && ( + + )} + {v.additionalProperties && + v.additionalProperties.type && + v.additionalProperties.type} + {v.additionalProperties && v.additionalProperties.anyOf && ( + + )} + {v.additionalProperties && + v.additionalProperties === true && + "true"} +
+ ); +}; + +const Components = (props) => { + const { schemas } = props; + const names = Object.keys(schemas); + const parentScrollContainerRef = () => { + (useRef < React.HTMLDivElement) | (null > null); + }; + return ( +

+

Component schemas

+ + {names && + names.map((name) => { + return ( +
+

{name}

+ + {schemas[name].description && ( +

+ {schemas[name].description} +

+ )} + {schemas[name].type && ( +

+ {schemas[name].type[0].toUpperCase()} + {schemas[name].type.substring(1)} + {schemas[name].enum && + ` enum [ ${schemas[name].enum.map((e) => `"${e}"`).join(" | ")} ]`} +

+ )} + + {schemas[name].properties && ( + + )} + {schemas[name].allOf && } + {schemas[name].oneOf && } + {schemas[name].anyOf && } + {schemas[name]["$ref"] && ( + + )} +
+ + Toggle raw JSON + +
+                    {`"${name}":  ${JSON.stringify(schemas[name], null, 2)}`}
+                  
+
+
+ ); + })} +
+
+ ); +}; + +export default Components; diff --git a/docs/site/src/components/API/api-ref/refnav.js b/docs/site/src/components/API/api-ref/refnav.js index d7398e6b35903..003903cf8023f 100644 --- a/docs/site/src/components/API/api-ref/refnav.js +++ b/docs/site/src/components/API/api-ref/refnav.js @@ -9,7 +9,7 @@ const RefNav = (props) => { const { json, apis } = props; return ( -
+
@@ -40,6 +40,7 @@ const RefNav = (props) => { ); })} +
); })} diff --git a/docs/site/src/components/API/api-ref/result.js b/docs/site/src/components/API/api-ref/result.js index 542536775dd5c..45eb1f021d34b 100644 --- a/docs/site/src/components/API/api-ref/result.js +++ b/docs/site/src/components/API/api-ref/result.js @@ -41,7 +41,6 @@ const Property = (props) => { const Result = (props) => { const { json, result } = props; - //console.log(result) const hasRef = typeof result.schema["$ref"] !== "undefined"; let refObj = {}; diff --git a/docs/site/src/components/API/index.js b/docs/site/src/components/API/index.js index ccc5fbd053362..f2fe2f1aa21b7 100644 --- a/docs/site/src/components/API/index.js +++ b/docs/site/src/components/API/index.js @@ -4,7 +4,9 @@ import React, { useState, useEffect } from "react"; import ExecutionEnvironment from "@docusaurus/ExecutionEnvironment"; import RefNav from "./api-ref/refnav"; +import CompNav from "./api-ref/compnav"; import Methods from "./api-ref/method"; +import Components from "./api-ref/components"; import ScrollSpy from "react-ui-scrollspy"; @@ -81,6 +83,7 @@ const Rpc = () => {
+
@@ -89,12 +92,15 @@ const Rpc = () => {

Sui JSON-RPC Reference - Version: {openrpc.info.version}

- +

{openrpc.info.description}

+ + +
-
+
diff --git a/docs/site/src/pages/index.js b/docs/site/src/pages/index.js index 35e4c66203ce0..081e10cfc486c 100644 --- a/docs/site/src/pages/index.js +++ b/docs/site/src/pages/index.js @@ -88,17 +88,6 @@ export default function Home() { Standards - - - Tokenomics - - - Cryptography - - - Standards - - Result<()> { + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> { // custom processing logic println!("Processing Local checkpoint: {}", checkpoint.checkpoint_summary.to_string()); Ok(()) diff --git a/examples/custom-indexer/rust/remote_reader.rs b/examples/custom-indexer/rust/remote_reader.rs index ed91f1523a5de..65cd99f5c32ff 100644 --- a/examples/custom-indexer/rust/remote_reader.rs +++ b/examples/custom-indexer/rust/remote_reader.rs @@ -11,7 +11,7 @@ struct CustomWorker; #[async_trait] impl Worker for CustomWorker { type Result = (); - async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()> { + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> { // custom processing logic // print out the checkpoint number println!("Processing checkpoint: {}", checkpoint.checkpoint_summary.to_string()); diff --git a/external-crates/move/Cargo.lock b/external-crates/move/Cargo.lock index cdb0abbe485ff..107c1c64e07b9 100644 --- a/external-crates/move/Cargo.lock +++ b/external-crates/move/Cargo.lock @@ -1804,7 +1804,6 @@ dependencies = [ "hex", "move-binary-format", "move-core-types", - "num-bigint", "once_cell", "proptest", "serde", diff --git a/external-crates/move/Cargo.toml b/external-crates/move/Cargo.toml index 3231118dec233..d7f9e31040a67 100644 --- a/external-crates/move/Cargo.toml +++ b/external-crates/move/Cargo.toml @@ -76,7 +76,6 @@ memory-stats = "1.0.0" mirai-annotations = "1.10.1" named-lock = "0.2.0" num = "0.4.0" -num-bigint = "0.4.0" num_cpus = "1.13.0" once_cell = "1.7.2" ouroboros = "0.17.2" diff --git a/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs 
b/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs index ff8c342ccf196..c8d68aeb97743 100644 --- a/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs +++ b/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs @@ -424,7 +424,11 @@ impl<'a> ParsingAnalysisContext<'a> { self.exp_symbols(e1); self.exp_symbols(e2); } - E::Abort(e) => self.exp_symbols(e), + E::Abort(oe) => { + if let Some(e) = oe.as_ref() { + self.exp_symbols(e) + } + } E::Return(_, oe) => { if let Some(e) = oe.as_ref() { self.exp_symbols(e) diff --git a/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs b/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs index e22321ddf20ac..1e0a02d11266d 100644 --- a/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs +++ b/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs @@ -11,7 +11,7 @@ use crate::{ }; use move_compiler::{ - diagnostics as diag, + diagnostics::warning_filters::WarningFilters, expansion::ast::{self as E, ModuleIdent}, naming::ast as N, parser::ast::{self as P, ConstantName}, @@ -661,7 +661,7 @@ impl TypingAnalysisContext<'_> { impl<'a> TypingVisitorContext for TypingAnalysisContext<'a> { // Nothing to do -- we're not producing errors. - fn add_warning_filter_scope(&mut self, _filter: diag::WarningFilters) {} + fn push_warning_filter_scope(&mut self, _filter: WarningFilters) {} // Nothing to do -- we're not producing errors. 
fn pop_warning_filter_scope(&mut self) {} @@ -907,7 +907,7 @@ impl<'a> TypingVisitorContext for TypingAnalysisContext<'a> { match &fdef.body.value { T::FunctionBody_::Defined(seq) => { - self.visit_seq(seq); + self.visit_seq(fdef.body.loc, seq); } T::FunctionBody_::Macro | T::FunctionBody_::Native => (), } @@ -985,7 +985,7 @@ impl<'a> TypingVisitorContext for TypingAnalysisContext<'a> { } } - fn visit_seq(&mut self, (use_funs, seq): &T::Sequence) { + fn visit_seq(&mut self, _loc: Loc, (use_funs, seq): &T::Sequence) { let old_traverse_mode = self.traverse_only; // start adding new use-defs etc. when processing arguments if use_funs.color == 0 { diff --git a/external-crates/move/crates/move-analyzer/src/analyzer.rs b/external-crates/move/crates/move-analyzer/src/analyzer.rs index bb8c067e97b6c..62f406e9215c5 100644 --- a/external-crates/move/crates/move-analyzer/src/analyzer.rs +++ b/external-crates/move/crates/move-analyzer/src/analyzer.rs @@ -45,9 +45,10 @@ pub fn run() { let (connection, io_threads) = Connection::stdio(); let symbols_map = Arc::new(Mutex::new(BTreeMap::new())); - let pkg_deps = Arc::new(Mutex::new( - BTreeMap::::new(), - )); + let pkg_deps = Arc::new(Mutex::new(BTreeMap::< + PathBuf, + symbols::PrecomputedPkgDepsInfo, + >::new())); let ide_files_root: VfsPath = MemoryFS::new().into(); let (id, client_response) = connection @@ -147,7 +148,8 @@ pub fn run() { // main reason for this is to enable unit tests that rely on the symbolication information // to be available right after the client is initialized. 
if let Some(uri) = initialize_params.root_uri { - if let Some(p) = symbols::SymbolicatorRunner::root_dir(&uri.to_file_path().unwrap()) { + let build_path = uri.to_file_path().unwrap(); + if let Some(p) = symbols::SymbolicatorRunner::root_dir(&build_path) { if let Ok((Some(new_symbols), _)) = symbols::get_symbols( Arc::new(Mutex::new(BTreeMap::new())), ide_files_root.clone(), @@ -277,7 +279,7 @@ fn on_request( context: &Context, request: &Request, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, shutdown_request_received: bool, ) -> bool { if shutdown_request_received { diff --git a/external-crates/move/crates/move-analyzer/src/completions/mod.rs b/external-crates/move/crates/move-analyzer/src/completions/mod.rs index 362fca386d22b..470c8e0c6923b 100644 --- a/external-crates/move/crates/move-analyzer/src/completions/mod.rs +++ b/external-crates/move/crates/move-analyzer/src/completions/mod.rs @@ -10,7 +10,7 @@ use crate::{ utils::{completion_item, PRIMITIVE_TYPE_COMPLETIONS}, }, context::Context, - symbols::{self, CursorContext, PrecompiledPkgDeps, SymbolicatorRunner, Symbols}, + symbols::{self, CursorContext, PrecomputedPkgDepsInfo, SymbolicatorRunner, Symbols}, }; use lsp_server::Request; use lsp_types::{CompletionItem, CompletionItemKind, CompletionParams, Position}; @@ -78,7 +78,7 @@ pub fn on_completion_request( context: &Context, request: &Request, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ) { eprintln!("handling completion request"); let parameters = serde_json::from_value::(request.params.clone()) @@ -119,7 +119,7 @@ pub fn on_completion_request( fn completions( context: &Context, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, path: &Path, pos: Position, ) -> Option> { @@ -143,7 +143,7 @@ fn completions( pub fn compute_completions( current_symbols: &Symbols, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, path: &Path, pos: 
Position, ) -> Vec { @@ -156,7 +156,7 @@ pub fn compute_completions( /// view of the code (returns `None` if the symbols could not be re-computed). fn compute_completions_new_symbols( ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, path: &Path, cursor_position: Position, ) -> Option> { diff --git a/external-crates/move/crates/move-analyzer/src/symbols.rs b/external-crates/move/crates/move-analyzer/src/symbols.rs index 4bec62fce555b..fd9fa72c781dd 100644 --- a/external-crates/move/crates/move-analyzer/src/symbols.rs +++ b/external-crates/move/crates/move-analyzer/src/symbols.rs @@ -78,6 +78,7 @@ use std::{ path::{Path, PathBuf}, sync::{Arc, Condvar, Mutex}, thread, + time::Instant, }; use tempfile::tempdir; use url::Url; @@ -127,27 +128,63 @@ const MANIFEST_FILE_NAME: &str = "Move.toml"; const STD_LIB_PKG_ADDRESS: &str = "0x1"; type SourceFiles = BTreeMap; +/// Information about compiled program (ASTs at different levels) +#[derive(Clone)] +struct CompiledProgram { + parsed: P::Program, + typed: T::Program, +} + +/// Information about cached dependencies used during compilation and analysis +#[derive(Clone)] +struct CachedDeps { + /// Cached fully compiled program representing dependencies + compiled_program: Arc, + /// Cached symbols computation data for dependencies + symbols_data: Option>, +} + /// Information about the compiled package and data structures -/// computed during compilation +/// computed during compilation and analysis #[derive(Clone)] pub struct CompiledPkgInfo { - parsed_program: P::Program, - typed_program: T::Program, - libs: Option>, + /// Package path + path: PathBuf, + /// Manifest hash + manifest_hash: Option, + /// A combined hash for manifest files of the dependencies + deps_hash: String, + /// Information about cached dependencies + cached_deps: Option, + /// Compiled user program + program: CompiledProgram, + /// Source files source_files: SourceFiles, + /// Maped files mapped_files: MappedFiles, + /// 
Edition of the compiler edition: Option, + /// Compiler info compiler_info: Option, + /// Comments for both user code and the dependencies all_comments: CommentMap, } /// Data used during symbols computation #[derive(Clone)] pub struct SymbolsComputationData { + /// Outermost definitions in a module (structs, consts, functions), keyed on a ModuleIdent + /// string mod_outer_defs: BTreeMap, + /// A UseDefMap for a given module (needs to be appropriately set before the module + /// processing starts) keyed on a ModuleIdent string mod_use_defs: BTreeMap, + /// Uses (references) for a definition at a given location references: BTreeMap>, + /// Additional information about a definitions at a given location def_info: BTreeMap, + /// Module name lengths in access paths for a given module (needs to be appropriately + /// set before the module processing starts) keyed on a ModuleIdent string mod_to_alias_lengths: BTreeMap>, } @@ -163,15 +200,17 @@ impl SymbolsComputationData { } } -/// Information about precompiled package dependencies +/// Precomputed information about package dependencies. 
#[derive(Clone)] -pub struct PrecompiledPkgDeps { +pub struct PrecomputedPkgDepsInfo { /// Hash of the manifest file for a given package manifest_hash: Option, /// Hash of dependency source files deps_hash: String, /// Precompiled deps deps: Arc, + /// Symbols computation data + deps_symbols_data: Arc, } /// Location of a use's identifier @@ -404,7 +443,8 @@ pub type StructFieldOrderInfo = BTreeMap>; /// Map from enum name to variant name to field order information pub type VariantFieldOrderInfo = BTreeMap>>; -/// Information about field order in structs and enums +/// Information about field order in structs and enums needed for auto-completion +/// to be consistent with field order in the source code #[derive(Debug, Clone, Ord, PartialOrd, PartialEq, Eq)] pub struct FieldOrderInfo { structs: BTreeMap, @@ -1305,7 +1345,7 @@ impl SymbolicatorRunner { pub fn new( ide_files_root: VfsPath, symbols_map: Arc>>, - pkg_deps: Arc>>, + pkg_deps: Arc>>, sender: Sender>>>, lint: LintLevel, ) -> Self { @@ -1643,17 +1683,11 @@ impl UseDefMap { self.0.len() } - pub fn extend_inner(&mut self, use_defs: BTreeMap>) { + pub fn extend(&mut self, use_defs: BTreeMap>) { for (k, v) in use_defs { self.0.entry(k).or_default().extend(v); } } - - pub fn extend(&mut self, use_defs: Self) { - for (k, v) in use_defs.0 { - self.0.entry(k).or_default().extend(v); - } - } } impl Symbols { @@ -1688,7 +1722,7 @@ impl Symbols { fn has_precompiled_deps( pkg_path: &Path, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ) -> bool { let pkg_deps = pkg_dependencies.lock().unwrap(); pkg_deps.contains_key(pkg_path) @@ -1697,7 +1731,7 @@ fn has_precompiled_deps( /// Builds a package at a given path and, if successful, returns parsed AST /// and typed AST as well as (regardless of success) diagnostics. 
pub fn get_compiled_pkg( - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ide_files_root: VfsPath, pkg_path: &Path, lint: LintLevel, @@ -1758,7 +1792,7 @@ pub fn get_compiled_pkg( let mut diagnostics = None; let mut dependencies = build_plan.compute_dependencies(); - let compiled_libs = if let Ok(deps_package_paths) = dependencies.make_deps_for_compiler() { + let cached_deps = if let Ok(deps_package_paths) = dependencies.make_deps_for_compiler() { // Partition deps_package according whether src is available let src_deps = deps_package_paths .iter() @@ -1776,16 +1810,19 @@ pub fn get_compiled_pkg( .filter_map(|p| p.name.as_ref().map(|(n, _)| *n)) .collect::>(); - let mut pkg_deps = pkg_dependencies.lock().unwrap(); - let compiled_deps = match pkg_deps.get(pkg_path) { + let pkg_deps = pkg_dependencies.lock().unwrap(); + let pkg_cached_deps = match pkg_deps.get(pkg_path) { Some(d) if manifest_hash.is_some() && manifest_hash == d.manifest_hash && deps_hash == d.deps_hash => { - eprintln!("found pre-compiled libs for {:?}", pkg_path); + eprintln!("found cached deps for {:?}", pkg_path); mapped_files.extend_with_duplicates(d.deps.files.clone()); - Some(d.deps.clone()) + Some(CachedDeps { + compiled_program: d.deps.clone(), + symbols_data: Some(d.deps_symbols_data.clone()), + }) } _ => construct_pre_compiled_lib( src_deps, @@ -1798,30 +1835,27 @@ pub fn get_compiled_pkg( .map(|libs| { eprintln!("created pre-compiled libs for {:?}", pkg_path); mapped_files.extend_with_duplicates(libs.files.clone()); - let deps = Arc::new(libs); - pkg_deps.insert( - pkg_path.to_path_buf(), - PrecompiledPkgDeps { - manifest_hash, - deps_hash, - deps: deps.clone(), - }, - ); - deps + CachedDeps { + compiled_program: Arc::new(libs), + symbols_data: None, + } }), }; - if compiled_deps.is_some() { + if pkg_cached_deps.is_some() { // if successful, remove only source deps but keep bytecode deps as they // were not used to construct pre-compiled lib in the first place 
dependencies.remove_deps(src_names); } - compiled_deps + pkg_cached_deps } else { None }; let mut edition = None; let mut comments = None; + let compiled_libs = cached_deps + .clone() + .map(|deps| deps.compiled_program.clone()); build_plan.compile_with_driver_and_deps(dependencies, &mut std::io::sink(), |compiler| { let compiler = compiler.set_ide_mode(); // extract expansion AST @@ -1841,7 +1875,7 @@ pub fn get_compiled_pkg( eprintln!("compiled to parsed AST"); let (compiler, parsed_program) = compiler.into_ast(); parsed_ast = Some(parsed_program.clone()); - mapped_files.extend_with_duplicates(compiler.compilation_env_ref().mapped_files().clone()); + mapped_files.extend_with_duplicates(compiler.compilation_env().mapped_files().clone()); // extract typed AST let compilation_result = compiler.at_parser(parsed_program).run::(); @@ -1856,17 +1890,17 @@ pub fn get_compiled_pkg( } }; eprintln!("compiled to typed AST"); - let (mut compiler, typed_program) = compiler.into_ast(); + let (compiler, typed_program) = compiler.into_ast(); typed_ast = Some(typed_program.clone()); compiler_info = Some(CompilerInfo::from( - compiler.compilation_env().ide_information.clone(), + compiler.compilation_env().ide_information().clone(), )); edition = Some(compiler.compilation_env().edition(Some(root_pkg_name))); // compile to CFGIR for accurate diags eprintln!("compiling to CFGIR"); let compilation_result = compiler.at_typing(typed_program).run::(); - let mut compiler = match compilation_result { + let compiler = match compilation_result { Ok(v) => v, Err((_pass, diags)) => { let failure = false; @@ -1906,9 +1940,14 @@ pub fn get_compiled_pkg( all_comments.extend(libs.comments.clone()); } let compiled_pkg_info = CompiledPkgInfo { - parsed_program, - typed_program, - libs: compiled_libs, + path: pkg_path.into(), + manifest_hash, + deps_hash, + cached_deps, + program: CompiledProgram { + parsed: parsed_program, + typed: typed_program, + }, source_files, mapped_files, edition, @@ -1921,17 
+1960,18 @@ pub fn get_compiled_pkg( /// Preprocess parsed and typed programs prior to actual symbols computation. pub fn compute_symbols_pre_process( computation_data: &mut SymbolsComputationData, - compiled_pkg_info: &CompiledPkgInfo, + computation_data_deps: &mut SymbolsComputationData, + compiled_pkg_info: &mut CompiledPkgInfo, cursor_info: Option<(&PathBuf, Position)>, ) -> Option { let mut fields_order_info = FieldOrderInfo::new(); - - pre_process_parsed_program(&compiled_pkg_info.parsed_program, &mut fields_order_info); + let parsed_program = &compiled_pkg_info.program.parsed; + let typed_program = &compiled_pkg_info.program.typed; + pre_process_parsed_program(parsed_program, &mut fields_order_info); let mut cursor_context = compute_cursor_context(&compiled_pkg_info.mapped_files, cursor_info); - pre_process_typed_modules( - &compiled_pkg_info.typed_program.modules, + &typed_program.modules, &fields_order_info, &compiled_pkg_info.mapped_files, &mut computation_data.mod_outer_defs, @@ -1943,29 +1983,51 @@ pub fn compute_symbols_pre_process( &compiled_pkg_info.all_comments, ); - if let Some(libs) = compiled_pkg_info.libs.clone() { - pre_process_typed_modules( - &libs.typing.modules, - &fields_order_info, - &compiled_pkg_info.mapped_files, - &mut computation_data.mod_outer_defs, - &mut computation_data.mod_use_defs, - &mut computation_data.references, - &mut computation_data.def_info, - &compiled_pkg_info.edition, - None, // Cursor can never be in a compiled library(?) - &compiled_pkg_info.all_comments, - ); + if let Some(cached_deps) = compiled_pkg_info.cached_deps.clone() { + // we have at least compiled program available + let (deps_mod_outer_defs, deps_def_info) = + if let Some(cached_symbols_data) = cached_deps.symbols_data { + // We have cached results of the dependency symbols computation from the previous run. 
+ ( + cached_symbols_data.mod_outer_defs.clone(), + cached_symbols_data.def_info.clone(), + ) + } else { + // No cached dependency symbols data but we still have cached compilation results. + // Fill out dependency symbols from compiled package info to cache them at the end of analysis + pre_process_typed_modules( + &cached_deps.compiled_program.typing.modules, + &FieldOrderInfo::new(), + &compiled_pkg_info.mapped_files, + &mut computation_data_deps.mod_outer_defs, + &mut computation_data_deps.mod_use_defs, + &mut computation_data_deps.references, + &mut computation_data_deps.def_info, + &compiled_pkg_info.edition, + None, // Cursor can never be in a compiled library(?) + &compiled_pkg_info.all_comments, + ); + ( + computation_data_deps.mod_outer_defs.clone(), + computation_data_deps.def_info.clone(), + ) + }; + // We need to update definitions for the code being currently processed + // so that these definitions are available when ASTs for this code are visited + computation_data.mod_outer_defs.extend(deps_mod_outer_defs); + computation_data.def_info.extend(deps_def_info); } + cursor_context } -/// Process parsed program for symbols computation. 
-pub fn compute_symbols_parsed_program( +/// Run parsing analysis for either main program or dependencies +fn run_parsing_analysis( computation_data: &mut SymbolsComputationData, compiled_pkg_info: &CompiledPkgInfo, - mut cursor_context: Option, -) -> Option { + cursor_context: Option<&mut CursorContext>, + parsed_program: &P::Program, +) { let mut parsing_symbolicator = parsing_analysis::ParsingAnalysisContext { mod_outer_defs: &mut computation_data.mod_outer_defs, files: &compiled_pkg_info.mapped_files, @@ -1975,66 +2037,156 @@ pub fn compute_symbols_parsed_program( current_mod_ident_str: None, alias_lengths: BTreeMap::new(), pkg_addresses: &NamedAddressMap::new(), - cursor: cursor_context.as_mut(), + cursor: cursor_context, }; parsing_symbolicator.prog_symbols( - &compiled_pkg_info.parsed_program, + parsed_program, &mut computation_data.mod_use_defs, &mut computation_data.mod_to_alias_lengths, ); - if let Some(libs) = compiled_pkg_info.libs.clone() { - parsing_symbolicator.cursor = None; - parsing_symbolicator.prog_symbols( - &libs.parser, - &mut computation_data.mod_use_defs, - &mut computation_data.mod_to_alias_lengths, - ); +} + +/// Process parsed program for symbols computation. 
+pub fn compute_symbols_parsed_program( + computation_data: &mut SymbolsComputationData, + computation_data_deps: &mut SymbolsComputationData, + compiled_pkg_info: &CompiledPkgInfo, + mut cursor_context: Option, +) -> Option { + run_parsing_analysis( + computation_data, + compiled_pkg_info, + cursor_context.as_mut(), + &compiled_pkg_info.program.parsed, + ); + if let Some(cached_deps) = &compiled_pkg_info.cached_deps { + // run parsing analysis only if cached symbols computation data + // is not available to fill out dependency symbols from compiled package info + // to cache them at the end of analysis + if cached_deps.symbols_data.is_none() { + run_parsing_analysis( + computation_data_deps, + compiled_pkg_info, + None, + &cached_deps.compiled_program.parser, + ); + } } cursor_context } -/// Process typed program for symbols computation. -pub fn compute_symbols_typed_program( +/// Run typing analysis for either main program or dependencies +fn run_typing_analysis( mut computation_data: SymbolsComputationData, - mut compiled_pkg_info: CompiledPkgInfo, - cursor_context: Option, -) -> Symbols { - let mut file_use_defs = BTreeMap::new(); - let mut compiler_info = compiled_pkg_info.compiler_info.unwrap(); + mapped_files: &MappedFiles, + compiler_info: &mut CompilerInfo, + typed_program: &T::Program, +) -> SymbolsComputationData { let mut typing_symbolicator = typing_analysis::TypingAnalysisContext { mod_outer_defs: &mut computation_data.mod_outer_defs, - files: &compiled_pkg_info.mapped_files, + files: mapped_files, references: &mut computation_data.references, def_info: &mut computation_data.def_info, use_defs: UseDefMap::new(), current_mod_ident_str: None, alias_lengths: &BTreeMap::new(), traverse_only: false, - compiler_info: &mut compiler_info, + compiler_info, type_params: BTreeMap::new(), expression_scope: OrdMap::new(), }; process_typed_modules( - &mut compiled_pkg_info.typed_program.modules, - &compiled_pkg_info.source_files, + &typed_program.modules, 
&computation_data.mod_to_alias_lengths, &mut typing_symbolicator, - &mut file_use_defs, &mut computation_data.mod_use_defs, ); + computation_data +} - if let Some(libs) = compiled_pkg_info.libs { - process_typed_modules( - &mut libs.typing.modules.clone(), - &compiled_pkg_info.source_files, - &computation_data.mod_to_alias_lengths, - &mut typing_symbolicator, - &mut file_use_defs, - &mut computation_data.mod_use_defs, - ); +// Given use-defs for a the main program or dependencies, update the per-file +// use-def map +fn update_file_use_defs( + computation_data: &SymbolsComputationData, + source_files: &SourceFiles, + file_use_defs: &mut FileUseDefs, +) { + for (module_ident_str, use_defs) in &computation_data.mod_use_defs { + // unwrap here is safe as all modules in a given program have the module_defs entry + // in the map + let module_defs = computation_data + .mod_outer_defs + .get(module_ident_str) + .unwrap(); + let fpath = match source_files.get(&module_defs.fhash) { + Some((p, _, _)) => p, + None => return, + }; + + let fpath_buffer = + dunce::canonicalize(fpath.as_str()).unwrap_or_else(|_| PathBuf::from(fpath.as_str())); + + file_use_defs + .entry(fpath_buffer) + .or_default() + .extend(use_defs.clone().elements()); } +} + +/// Process typed program for symbols computation. 
+pub fn compute_symbols_typed_program( + computation_data: SymbolsComputationData, + computation_data_deps: SymbolsComputationData, + mut compiled_pkg_info: CompiledPkgInfo, + cursor_context: Option, +) -> (Symbols, Option>) { + // run typing analysis for the main user program + let compiler_info = &mut compiled_pkg_info.compiler_info.as_mut().unwrap(); + let mapped_files = &compiled_pkg_info.mapped_files; + let source_files = &compiled_pkg_info.source_files; + let mut computation_data = run_typing_analysis( + computation_data, + mapped_files, + compiler_info, + &compiled_pkg_info.program.typed, + ); + let mut file_use_defs = BTreeMap::new(); + update_file_use_defs(&computation_data, source_files, &mut file_use_defs); + + let cacheable_symbols_data_opt = + if let Some(cached_deps) = compiled_pkg_info.cached_deps.clone() { + // we have at least compiled program available + let deps_symbols_data = if let Some(cached_symbols_data) = cached_deps.symbols_data { + // We have cached results of the dependency symbols computation from the previous run. + cached_symbols_data + } else { + // No cached dependency symbols data but we still have cached compilation results. 
+ // Fill out dependency symbols from compiled package info to cache them at the end of analysis + let computation_data_deps = run_typing_analysis( + computation_data_deps, + mapped_files, + compiler_info, + &cached_deps.compiled_program.typing, + ); + Arc::new(computation_data_deps) + }; + // create `file_use_defs` map and merge references to produce complete symbols data + // (mod_outer_defs and def_info have already been merged to facilitate user program + // analysis) + update_file_use_defs(&deps_symbols_data, source_files, &mut file_use_defs); + for (def_loc, uses) in &deps_symbols_data.references { + computation_data + .references + .entry(*def_loc) + .or_default() + .extend(uses); + } + Some(deps_symbols_data) + } else { + None + }; let mut file_mods: FileModules = BTreeMap::new(); for d in computation_data.mod_outer_defs.into_values() { @@ -2042,36 +2194,77 @@ pub fn compute_symbols_typed_program( file_mods.entry(path.to_path_buf()).or_default().insert(d); } - Symbols { - references: computation_data.references, - file_use_defs, - file_mods, - def_info: computation_data.def_info, - files: compiled_pkg_info.mapped_files, - compiler_info, - cursor_context, - } + ( + Symbols { + references: computation_data.references, + file_use_defs, + file_mods, + def_info: computation_data.def_info, + files: compiled_pkg_info.mapped_files, + compiler_info: compiled_pkg_info.compiler_info.unwrap(), + cursor_context, + }, + cacheable_symbols_data_opt, + ) } /// Compute symbols for a given package from the parsed and typed ASTs, /// as well as other auxiliary data provided in `compiled_pkg_info`. 
pub fn compute_symbols( - compiled_pkg_info: CompiledPkgInfo, + pkg_dependencies: Arc>>, + mut compiled_pkg_info: CompiledPkgInfo, cursor_info: Option<(&PathBuf, Position)>, ) -> Symbols { + let pkg_path = compiled_pkg_info.path.clone(); + let manifest_hash = compiled_pkg_info.manifest_hash; + let cached_dep_opt = compiled_pkg_info.cached_deps.clone(); + let deps_hash = compiled_pkg_info.deps_hash.clone(); let mut symbols_computation_data = SymbolsComputationData::new(); + let mut symbols_computation_data_deps = SymbolsComputationData::new(); let cursor_context = compute_symbols_pre_process( &mut symbols_computation_data, - &compiled_pkg_info, + &mut symbols_computation_data_deps, + &mut compiled_pkg_info, cursor_info, ); let cursor_context = compute_symbols_parsed_program( &mut symbols_computation_data, + &mut symbols_computation_data_deps, &compiled_pkg_info, cursor_context, ); - compute_symbols_typed_program(symbols_computation_data, compiled_pkg_info, cursor_context) + let (symbols, cacheable_symbols_data_opt) = compute_symbols_typed_program( + symbols_computation_data, + symbols_computation_data_deps, + compiled_pkg_info, + cursor_context, + ); + + let mut pkg_deps = pkg_dependencies.lock().unwrap(); + + if let Some(cached_deps) = cached_dep_opt { + // we have at least compiled program available, either already cached + // or created for the purpose of this analysis + if cached_deps.symbols_data.is_none() { + // if no symbols computation data was cached, it means that + // compiled program was created for the purpose of this analysis + // and we need to cache both + if let Some(deps_symbols_data) = cacheable_symbols_data_opt { + eprintln!("caching pre-compiled program and pre-computed symbols"); + pkg_deps.insert( + pkg_path, + PrecomputedPkgDepsInfo { + manifest_hash, + deps_hash, + deps: cached_deps.compiled_program.clone(), + deps_symbols_data, + }, + ); + } + } + } + symbols } /// Main driver to get symbols for the whole package. 
Returned symbols is an option as only the @@ -2079,19 +2272,22 @@ pub fn compute_symbols( /// actually (re)computed and the diagnostics are returned, the old symbolic information should /// be retained even if it's getting out-of-date. pub fn get_symbols( - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ide_files_root: VfsPath, pkg_path: &Path, lint: LintLevel, cursor_info: Option<(&PathBuf, Position)>, ) -> Result<(Option, BTreeMap>)> { + let compilation_start = Instant::now(); let (compiled_pkg_info_opt, ide_diagnostics) = - get_compiled_pkg(pkg_dependencies, ide_files_root, pkg_path, lint)?; + get_compiled_pkg(pkg_dependencies.clone(), ide_files_root, pkg_path, lint)?; + eprintln!("compilation complete in: {:?}", compilation_start.elapsed()); let Some(compiled_pkg_info) = compiled_pkg_info_opt else { return Ok((None, ide_diagnostics)); }; - let symbols = compute_symbols(compiled_pkg_info, cursor_info); - + let analysis_start = Instant::now(); + let symbols = compute_symbols(pkg_dependencies, compiled_pkg_info, cursor_info); + eprintln!("analysis complete in {:?}", analysis_start.elapsed()); eprintln!("get_symbols load complete"); Ok((Some(symbols), ide_diagnostics)) @@ -2209,32 +2405,19 @@ fn pre_process_typed_modules( } fn process_typed_modules<'a>( - typed_modules: &mut UniqueMap, - source_files: &SourceFiles, + typed_modules: &UniqueMap, mod_to_alias_lengths: &'a BTreeMap>, typing_symbolicator: &mut typing_analysis::TypingAnalysisContext<'a>, - file_use_defs: &mut FileUseDefs, mod_use_defs: &mut BTreeMap, ) { - for (module_ident, module_def) in typed_modules.key_cloned_iter_mut() { + for (module_ident, module_def) in typed_modules.key_cloned_iter() { let mod_ident_str = expansion_mod_ident_to_map_key(&module_ident.value); typing_symbolicator.use_defs = mod_use_defs.remove(&mod_ident_str).unwrap(); typing_symbolicator.alias_lengths = mod_to_alias_lengths.get(&mod_ident_str).unwrap(); typing_symbolicator.visit_module(module_ident, module_def); - let fpath 
= match source_files.get(&module_ident.loc.file_hash()) { - Some((p, _, _)) => p, - None => continue, - }; - - let fpath_buffer = - dunce::canonicalize(fpath.as_str()).unwrap_or_else(|_| PathBuf::from(fpath.as_str())); - let use_defs = std::mem::replace(&mut typing_symbolicator.use_defs, UseDefMap::new()); - file_use_defs - .entry(fpath_buffer) - .or_default() - .extend_inner(use_defs.elements()); + mod_use_defs.insert(mod_ident_str, use_defs); } } diff --git a/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs b/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs index bfb9b0ae2c9c1..1111d7e20f3c6 100644 --- a/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs +++ b/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs @@ -178,7 +178,7 @@ impl CompletionTest { fn test( &self, test_idx: usize, - compiled_pkg_info: CompiledPkgInfo, + mut compiled_pkg_info: CompiledPkgInfo, symbols: &mut Symbols, output: &mut dyn std::io::Write, use_file_path: &Path, @@ -195,15 +195,18 @@ impl CompletionTest { let cursor_path = use_file_path.to_path_buf(); let cursor_info = Some((&cursor_path, use_pos)); let mut symbols_computation_data = SymbolsComputationData::new(); + let mut symbols_computation_data_deps = SymbolsComputationData::new(); // we only compute cursor context and tag it on the existing symbols to avoid spending time // recomputing all symbols (saves quite a bit of time when running the test suite) let mut cursor_context = compute_symbols_pre_process( &mut symbols_computation_data, - &compiled_pkg_info, + &mut symbols_computation_data_deps, + &mut compiled_pkg_info, cursor_info, ); cursor_context = compute_symbols_parsed_program( &mut symbols_computation_data, + &mut symbols_computation_data_deps, &compiled_pkg_info, cursor_context, ); @@ -239,7 +242,7 @@ impl CursorTest { fn test( &self, test_ndx: usize, - compiled_pkg_info: CompiledPkgInfo, + mut compiled_pkg_info: CompiledPkgInfo, symbols: &mut Symbols, output: &mut 
dyn std::io::Write, path: &Path, @@ -257,13 +260,16 @@ impl CursorTest { let cursor_path = path.to_path_buf(); let cursor_info = Some((&cursor_path, Position { line, character })); let mut symbols_computation_data = SymbolsComputationData::new(); + let mut symbols_computation_data_deps = SymbolsComputationData::new(); let mut cursor_context = compute_symbols_pre_process( &mut symbols_computation_data, - &compiled_pkg_info, + &mut symbols_computation_data_deps, + &mut compiled_pkg_info, cursor_info, ); cursor_context = compute_symbols_parsed_program( &mut symbols_computation_data, + &mut symbols_computation_data_deps, &compiled_pkg_info, cursor_context, ); @@ -379,7 +385,11 @@ fn initial_symbols( )?; let compiled_pkg_info = compiled_pkg_info_opt.ok_or("PACKAGE COMPILATION FAILED")?; - let symbols = compute_symbols(compiled_pkg_info.clone(), None); + let symbols = compute_symbols( + Arc::new(Mutex::new(BTreeMap::new())), + compiled_pkg_info.clone(), + None, + ); Ok((project_path, compiled_pkg_info, symbols)) } diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/.mocharc.yaml b/external-crates/move/crates/move-analyzer/trace-adapter/.mocharc.yaml new file mode 100644 index 0000000000000..106233efe8557 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/.mocharc.yaml @@ -0,0 +1,3 @@ +require: './tests/run_spec.js' +spec: + - 'tests/**/*.spec.js' diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json b/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json index 4f6a65b91fa49..d5d8cc6862162 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json +++ b/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json @@ -15,6 +15,8 @@ "@vscode/debugadapter-testsupport": "^1.56.0", "@vscode/debugprotocol": "1.66.0", "eslint": "^8.57.0", + "line-diff": "^2.1.1", + "mocha": "10.2.0", "toml": "^3.0.0", "typescript": "^5.4.5" } @@ -456,6 +458,15 
@@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -480,6 +491,19 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -501,6 +525,18 @@ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/brace-expansion": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", @@ -522,6 +558,12 @@ "node": ">=8" } }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": 
"sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -531,6 +573,18 @@ "node": ">=6" } }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -559,6 +613,44 @@ "node": ">=8" } }, + "node_modules/chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -614,12 +706,33 @@ } } }, + "node_modules/decamelize": { 
+ "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true }, + "node_modules/diff": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, "node_modules/dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ -644,6 +757,21 @@ "node": ">=6.0.0" } }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -915,6 +1043,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": 
"sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "bin": { + "flat": "cli.js" + } + }, "node_modules/flat-cache": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", @@ -941,6 +1078,50 @@ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/glob-parent": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", @@ -953,6 +1134,28 @@ "node": ">= 6" } }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + 
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -1003,6 +1206,15 @@ "node": ">=8" } }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "bin": { + "he": "bin/he" + } + }, "node_modules/ignore": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", @@ -1054,6 +1266,18 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -1063,6 +1287,15 @@ "node": ">=0.10.0" } }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -1093,6 +1326,27 @@ "node": ">=8" } }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -1138,6 +1392,12 @@ "json-buffer": "3.0.1" } }, + "node_modules/levdist": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/levdist/-/levdist-1.0.0.tgz", + "integrity": "sha512-YguwC2spb0pqpJM3a5OsBhih/GG2ZHoaSHnmBqhEI7997a36buhqcRTegEjozHxyxByIwLpZHZTVYMThq+Zd3g==", + "dev": true + }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -1151,6 +1411,15 @@ "node": ">= 0.8.0" } }, + "node_modules/line-diff": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/line-diff/-/line-diff-2.1.1.tgz", + "integrity": "sha512-vswdynAI5AMPJacOo2o+JJ4caDJbnY2NEqms4MhMW0NJbjh3skP/brpVTAgBxrg55NRZ2Vtw88ef18hnagIpYQ==", + "dev": true, + "dependencies": { + "levdist": "^1.0.0" + } 
+ }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -1172,6 +1441,22 @@ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", @@ -1209,18 +1494,120 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/mocha": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-10.2.0.tgz", + "integrity": "sha512-IDY7fl/BecMwFHzoqF2sg/SHHANeBoMMXFlS9r0OXKDssYE1M5O43wUY/9BVPeIvfH2zmEbBfseqN9gBQZzXkg==", + "dev": true, + "dependencies": { + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.3", + "debug": "4.3.4", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.2.0", + "he": "1.2.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", + "minimatch": "5.0.1", + "ms": "2.1.3", + "nanoid": "3.3.3", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "workerpool": "6.2.1", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": ">= 14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" + } + }, + "node_modules/mocha/node_modules/debug": { + "version": "4.3.4", + 
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/mocha/node_modules/debug/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/mocha/node_modules/minimatch": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.0.1.tgz", + "integrity": "sha512-nLDxIFRyhDblz3qMuq+SoRZED4+miJ/G+tdDrjkkkRnjAsBexeGpgjLEQ0blJy7rHhR2b93rhQY4SvyWu9v03g==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, + "node_modules/nanoid": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.3.tgz", + "integrity": "sha512-p1sjXuopFs0xg+fPASzQ28agW1oHD7xDsd9Xkf3T15H3c/cifrFHVwrh74PdoklAPi+i7MdRsE47vm2r6JoB+w==", + "dev": true, + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": 
"https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -1375,6 +1762,36 @@ } ] }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -1476,6 +1893,26 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": 
"sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/semver": { "version": "7.6.3", "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", @@ -1488,6 +1925,15 @@ "node": ">=10" } }, + "node_modules/serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -1518,6 +1964,20 @@ "node": ">=8" } }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -1542,6 +2002,21 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + 
"node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -1654,12 +2129,86 @@ "node": ">=0.10.0" } }, + "node_modules/workerpool": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.2.1.tgz", + "integrity": "sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "dev": true }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + 
}, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/package.json b/external-crates/move/crates/move-analyzer/trace-adapter/package.json index d61e8914330fc..f80b31d4e6793 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/package.json +++ b/external-crates/move/crates/move-analyzer/trace-adapter/package.json @@ -6,17 +6,20 @@ "main": "./out/server.js", "scripts": { "compile": "tsc -p ./", - "lint": "eslint src --ext ts" + "lint": "eslint src --ext ts", + "test": "npm run compile && mocha --config ./.mocharc.yaml" }, "devDependencies": { "@types/node": "20.x", "@typescript-eslint/eslint-plugin": "^7.14.1", "@typescript-eslint/parser": "^7.11.0", "eslint": "^8.57.0", + "line-diff": "^2.1.1", + "mocha": "10.2.0", "typescript": "^5.4.5", "@vscode/debugadapter": "^1.56.0", "@vscode/debugadapter-testsupport": "^1.56.0", "@vscode/debugprotocol": "1.66.0", "toml": "^3.0.0" } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts 
b/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts index 311155f5e2874..38f5550f1875d 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts @@ -1,4 +1,5 @@ import { + Breakpoint, Handles, Logger, logger, @@ -18,9 +19,10 @@ import { RuntimeEvents, RuntimeValueType, IRuntimeVariableScope, - CompoundType + CompoundType, + IRuntimeRefValue } from './runtime'; -import { run } from 'node:test'; + const enum LogLevel { Log = 'log', @@ -103,6 +105,9 @@ export class MoveDebugSession extends LoggingDebugSession { this.runtime.on(RuntimeEvents.stopOnStep, () => { this.sendEvent(new StoppedEvent('step', MoveDebugSession.THREAD_ID)); }); + this.runtime.on(RuntimeEvents.stopOnLineBreakpoint, () => { + this.sendEvent(new StoppedEvent('breakpoint', MoveDebugSession.THREAD_ID)); + }); this.runtime.on(RuntimeEvents.end, () => { this.sendEvent(new TerminatedEvent()); }); @@ -117,6 +122,12 @@ export class MoveDebugSession extends LoggingDebugSession { // the adapter implements the configurationDone request response.body.supportsConfigurationDoneRequest = false; + // the adapter supports conditional breakpoints + response.body.supportsConditionalBreakpoints = false; + + // the adapter supports breakpoints that break execution after a specified number of hits + response.body.supportsHitConditionalBreakpoints = false; + // make VS Code use 'evaluate' when hovering over source response.body.supportsEvaluateForHovers = false; @@ -176,6 +187,7 @@ export class MoveDebugSession extends LoggingDebugSession { ): Promise { logger.setup(convertLoggerLogLevel(args.logLevel ?? 
LogLevel.None), false); logger.log(`Launching trace viewer for file: ${args.source} and trace: ${args.traceInfo}`); + try { await this.runtime.start(args.source, args.traceInfo, args.stopOnEntry || false); } catch (err) { @@ -186,13 +198,6 @@ export class MoveDebugSession extends LoggingDebugSession { this.sendEvent(new StoppedEvent('entry', MoveDebugSession.THREAD_ID)); } - protected configurationDoneRequest( - response: DebugProtocol.ConfigurationDoneResponse, - _args: DebugProtocol.ConfigurationDoneArguments - ): void { - this.sendResponse(response); - } - protected threadsRequest(response: DebugProtocol.ThreadsResponse): void { response.body = { threads: [ @@ -216,7 +221,7 @@ export class MoveDebugSession extends LoggingDebugSession { }).reverse(), totalFrames: stack_height, optimized_lines: stack_height > 0 - ? runtimeStack.frames[stack_height - 1].sourceMap.optimizedLines + ? runtimeStack.frames[stack_height - 1].optimizedLines : [] }; } catch (err) { @@ -229,14 +234,15 @@ export class MoveDebugSession extends LoggingDebugSession { /** * Gets the scopes for a given frame. * - * @param frameId identifier of the frame scopes are requested for. + * @param frameID identifier of the frame scopes are requested for. * @returns an array of scopes. + * @throws Error with a descriptive error message if scopes cannot be retrieved. 
*/ - private getScopes(frameId: number): DebugProtocol.Scope[] { + private getScopes(frameID: number): DebugProtocol.Scope[] { const runtimeStack = this.runtime.stack(); - const frame = runtimeStack.frames.find(frame => frame.id === frameId); + const frame = runtimeStack.frames.find(frame => frame.id === frameID); if (!frame) { - throw new Error(`No frame found for id: ${frameId}`); + throw new Error(`No frame found for id: ${frameID} when getting scopes`); } const scopes: DebugProtocol.Scope[] = []; if (frame.locals.length > 0) { @@ -272,6 +278,48 @@ export class MoveDebugSession extends LoggingDebugSession { this.sendResponse(response); } + /** + * Converts a runtime reference value to a DAP variable. + * + * @param value reference value. + * @param name name of variable containing the reference value. + * @param type optional type of the variable containing the reference value. + * @returns a DAP variable. + * @throws Error with a descriptive error message if conversion fails. + */ + private convertRefValue( + value: IRuntimeRefValue, + name: string, + type?: string + ): DebugProtocol.Variable { + const frameID = value.loc.frameID; + const localIndex = value.loc.localIndex; + const runtimeStack = this.runtime.stack(); + const frame = runtimeStack.frames.find(frame => frame.id === frameID); + if (!frame) { + throw new Error('No frame found for id ' + + frameID + + ' when converting ref value for local index ' + + localIndex); + } + // a local will be in one of the scopes at a position corresponding to its local index + let local = undefined; + for (const scope of frame.locals) { + local = scope[localIndex]; + if (local) { + break; + } + } + if (!local) { + throw new Error('No local found for index ' + + localIndex + + ' when converting ref value for frame id ' + + frameID); + } + + return this.convertRuntimeValue(local.value, name, type); + } + /** * Converts a runtime value to a DAP variable. 
* @@ -300,21 +348,28 @@ export class MoveDebugSession extends LoggingDebugSession { value: '(' + value.length + ')[...]', variablesReference: compoundValueReference }; - } else { + } else if ('fields' in value) { const compoundValueReference = this.variableHandles.create(value); - const accessChainParts = value.type.split('::'); + // use type if available as it will have information about whether + // it's a reference or not (e.g., `&mut 0x42::mod::SomeStruct`), + // as opposed to the type that come with the value + // (e.g., `0x42::mod::SomeStruct`) + const actualType = type ? type : value.type; + const accessChainParts = actualType.split('::'); const datatypeName = accessChainParts[accessChainParts.length - 1]; return { name, type: value.variantName - ? value.type + '::' + value.variantName - : value.type, + ? actualType + '::' + value.variantName + : actualType, value: (value.variantName ? datatypeName + '::' + value.variantName : datatypeName ) + '{...}', variablesReference: compoundValueReference }; + } else { + return this.convertRefValue(value, name, type); } } @@ -440,6 +495,27 @@ export class MoveDebugSession extends LoggingDebugSession { this.sendResponse(response); } + protected setBreakPointsRequest(response: DebugProtocol.SetBreakpointsResponse, args: DebugProtocol.SetBreakpointsArguments): void { + try { + const finalBreakpoints = []; + if (args.breakpoints && args.source.path) { + const breakpointLines = args.breakpoints.map(bp => bp.line); + const validatedBreakpoints = this.runtime.setLineBreakpoints(args.source.path, breakpointLines); + for (let i = 0; i < breakpointLines.length; i++) { + finalBreakpoints.push(new Breakpoint(validatedBreakpoints[i], breakpointLines[i])); + } + } + response.body = { + breakpoints: finalBreakpoints + }; + } catch (err) { + response.success = false; + response.message = err instanceof Error ? 
err.message : String(err); + } + this.sendResponse(response); + } + + protected disconnectRequest( response: DebugProtocol.DisconnectResponse, _args: DebugProtocol.DisconnectArguments diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts b/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts index 233e561c3fa41..32fbbac44a6a0 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts @@ -6,9 +6,14 @@ import * as crypto from 'crypto'; import * as fs from 'fs'; import * as path from 'path'; import toml from 'toml'; -import { ISourceMap, IFileInfo, readAllSourceMaps } from './source_map_utils'; -import { TraceEffectKind, TraceEvent, TraceEventKind, TraceInstructionKind, TraceLocKind, TraceValKind, TraceValue, readTrace } from './trace_utils'; -import { ModuleInfo } from './utils'; +import { IFileInfo, readAllSourceMaps } from './source_map_utils'; +import { + TraceEffectKind, + TraceEvent, + TraceEventKind, + TraceInstructionKind, + readTrace +} from './trace_utils'; /** * Describes the runtime variable scope (e.g., local variables @@ -30,8 +35,27 @@ export type CompoundType = RuntimeValueType[] | IRuntimeCompundValue; * - boolean, number, string (converted to string) * - compound type (vector, struct, enum) */ -export type RuntimeValueType = string | CompoundType; +export type RuntimeValueType = string | CompoundType | IRuntimeRefValue; +/** + * Locaction of a local variable in the runtime. + */ +export interface IRuntimeVariableLoc { + frameID: number; + localIndex: number; +} + +/** + * Value of a reference in the runtime. + */ +export interface IRuntimeRefValue { + mutable: boolean; + loc: IRuntimeVariableLoc +} + +/** + * Information about a runtime compound value (struct/enum). 
+ */ export interface IRuntimeCompundValue { fields: [string, RuntimeValueType][]; type: string; @@ -53,26 +77,44 @@ interface IRuntimeVariable { * during trace viewing session. */ interface IRuntimeStackFrame { - // Source map for the frame. - sourceMap: ISourceMap; - // Frame identifier. + /** + * Frame identifier. + */ id: number; - // Name of the function in this frame. + /** + * Name of the function in this frame. + */ name: string; - // Path to the file containing the function. + /** + * Path to the file containing the function. + */ file: string; - // Current line in the file correponding to currently viewed instruction. + /** + * Current line in the file correponding to currently viewed instruction. + */ line: number; // 1-based - // Local variable types by variable frame index. + /** + * Local variable types by variable frame index. + */ localsTypes: string[]; - // Local variables per scope (local scope at 0 and then following block scopes), - // indexed by variable frame index. + /** + * Local variable names by variable frame index. + */ + localsNames: string[]; + /** + * Local variables per scope (local scope at 0 and then following block scopes), + * indexed by variable frame index. + */ locals: (IRuntimeVariable | undefined)[][]; /** * Line of the last call instruction that was processed in this frame. * It's needed to make sure that step/next into/over call works correctly. */ lastCallInstructionLine: number | undefined; + /** + * Lines that are not present in the source map. + */ + optimizedLines: number[] } /** @@ -87,9 +129,19 @@ export interface IRuntimeStack { * Events emitted by the runtime during trace viewing session. */ export enum RuntimeEvents { - // Stop after step/next action is performed. + /** + * Stop after step/next action is performed. + */ stopOnStep = 'stopOnStep', - // Finish trace viewing session. + + /** + * Stop after a line breakpoint is hit. 
+ */ + stopOnLineBreakpoint = 'stopOnLineBreakpoint', + + /** + * Finish trace viewing session. + */ end = 'end', } @@ -101,7 +153,11 @@ export class Runtime extends EventEmitter { /** * Trace being viewed. */ - private trace = { events: [] as TraceEvent[], localLifetimeEnds: new Map() }; + private trace = { + events: [] as TraceEvent[], + localLifetimeEnds: new Map(), + tracedLines: new Map>() + }; /** * Index of the current trace event being processed. @@ -119,15 +175,16 @@ export class Runtime extends EventEmitter { private filesMap = new Map(); /** - * Map of stringified module info to source maps. + * Map of line breakpoints, keyed on a file path. */ - private sourceMapsMap = new Map(); + private lineBreakpoints = new Map>(); /** * Start a trace viewing session and set up the initial state of the runtime. * * @param source path to the Move source file whose traces are to be viewed. * @param traceInfo trace selected for viewing. + * @throws Error with a descriptive error message if starting runtime has failed. 
* */ public async start(source: string, traceInfo: string, stopOnEntry: boolean): Promise { @@ -151,11 +208,11 @@ export class Runtime extends EventEmitter { hashToFileMap(path.join(pkgRoot, 'sources'), this.filesMap); // create source maps for all modules in the `build` directory - this.sourceMapsMap = readAllSourceMaps(path.join(pkgRoot, 'build', pkg_name, 'source_maps'), this.filesMap); + const sourceMapsMap = readAllSourceMaps(path.join(pkgRoot, 'build', pkg_name, 'source_maps'), this.filesMap); // reconstruct trace file path from trace info const traceFilePath = path.join(pkgRoot, 'traces', traceInfo.replace(/:/g, '_') + '.json'); - this.trace = readTrace(traceFilePath); + this.trace = readTrace(traceFilePath, sourceMapsMap, this.filesMap); // start trace viewing session with the first trace event this.eventIndex = 0; @@ -169,8 +226,10 @@ export class Runtime extends EventEmitter { this.newStackFrame( currentEvent.id, currentEvent.name, - currentEvent.modInfo, - currentEvent.localsTypes + currentEvent.fileHash, + currentEvent.localsTypes, + currentEvent.localsNames, + currentEvent.optimizedLines ); this.frameStack = { frames: [newFrame] @@ -206,8 +265,10 @@ export class Runtime extends EventEmitter { if (currentEvent.type === TraceEventKind.Instruction) { const stackHeight = this.frameStack.frames.length; if (stackHeight <= 0) { - throw new Error('No frame on the stack when processing Instruction event at PC: ' - + currentEvent.pc); + throw new Error('No frame on the stack when processing Instruction event on line: ' + + currentEvent.loc.line + + ' in column: ' + + currentEvent.loc.column); } const currentFrame = this.frameStack.frames[stackHeight - 1]; // remember last call instruction line before it (potentially) changes @@ -287,13 +348,26 @@ export class Runtime extends EventEmitter { this.sendEvent(RuntimeEvents.stopOnStep); return false; } else if (currentEvent.type === TraceEventKind.OpenFrame) { + // if function is native then the next event will be 
CloseFrame + if (currentEvent.isNative) { + if (this.trace.events.length <= this.eventIndex + 1 || + this.trace.events[this.eventIndex + 1].type !== TraceEventKind.CloseFrame) { + throw new Error('Expected an CloseFrame event after native OpenFrame event'); + } + // skip over CloseFrame as there is no frame to pop + this.eventIndex++; + return this.step(next, stopAtCloseFrame); + } + // create a new frame and push it onto the stack const newFrame = this.newStackFrame( currentEvent.id, currentEvent.name, - currentEvent.modInfo, - currentEvent.localsTypes + currentEvent.fileHash, + currentEvent.localsTypes, + currentEvent.localsNames, + currentEvent.optimizedLines ); // set values of parameters in the new frame this.frameStack.frames.push(newFrame); @@ -325,16 +399,18 @@ export class Runtime extends EventEmitter { } else if (currentEvent.type === TraceEventKind.Effect) { const effect = currentEvent.effect; if (effect.type === TraceEffectKind.Write) { - const stackHeight = this.frameStack.frames.length; - if (stackHeight <= 0) { - throw new Error('No frame on the stack when processing a write'); - } - const currentFrame = this.frameStack.frames[stackHeight - 1]; - const traceLocation = effect.location; + const traceLocation = effect.loc; const traceValue = effect.value; - if (traceLocation.type === TraceLocKind.Local) { - localWrite(currentFrame, traceLocation.localIndex, traceValue); + const frame = this.frameStack.frames.find( + frame => frame.id === traceLocation.frameID + ); + if (!frame) { + throw new Error('Cannot find frame with ID: ' + + traceLocation.frameID + + ' when processing Write effect for local variable at index: ' + + traceLocation.localIndex); } + localWrite(frame, traceLocation.localIndex, traceValue); } return this.step(next, stopAtCloseFrame); } else { @@ -396,7 +472,54 @@ export class Runtime extends EventEmitter { if (this.step(/* next */ false, /* stopAtCloseFrame */ false)) { return true; } + let currentEvent = 
this.trace.events[this.eventIndex]; + if (currentEvent.type === TraceEventKind.Instruction) { + const stackHeight = this.frameStack.frames.length; + if (stackHeight <= 0) { + throw new Error('No frame on the stack when processing Instruction event on line: ' + + currentEvent.loc.line + + ' in column: ' + + currentEvent.loc.column); + } + const currentFrame = this.frameStack.frames[stackHeight - 1]; + const breakpoints = this.lineBreakpoints.get(currentFrame.file); + if (!breakpoints) { + continue; + } + if (breakpoints.has(currentEvent.loc.line)) { + this.sendEvent(RuntimeEvents.stopOnLineBreakpoint); + return false; + } + } + } + } + + /** + * Sets line breakpoints for a file (resetting any existing ones). + * + * @param path file path. + * @param lines breakpoints lines. + * @returns array of booleans indicating if a breakpoint was set on a line. + * @throws Error with a descriptive error message if breakpoints cannot be set. + */ + public setLineBreakpoints(path: string, lines: number[]): boolean[] { + const breakpoints = new Set(); + const tracedLines = this.trace.tracedLines.get(path); + // Set all breakpoints to invalid and validate the correct ones in the loop, + // otherwise let them all be invalid if there are no traced lines. + // Valid breakpoints are those that are on lines that have at least + // one instruction in the trace on them. 
+ const validated = lines.map(() => false); + if (tracedLines) { + for (let i = 0; i < lines.length; i++) { + if (tracedLines.has(lines[i])) { + validated[i] = true; + breakpoints.add(lines[i]); + } + } } + this.lineBreakpoints.set(path, breakpoints); + return validated; } /** @@ -411,24 +534,6 @@ export class Runtime extends EventEmitter { currentFrame: IRuntimeStackFrame, instructionEvent: Extract ): [boolean, number] { - const currentFun = currentFrame.sourceMap.functions.get(currentFrame.name); - if (!currentFun) { - throw new Error(`Cannot find function: ${currentFrame.name} in source map`); - } - - // if map does not contain an entry for a PC that can be found in the trace file, - // it means that the position of the last PC in the source map should be used - let currentPCLoc = instructionEvent.pc >= currentFun.pcLocs.length - ? currentFun.pcLocs[currentFun.pcLocs.length - 1] - : currentFun.pcLocs[instructionEvent.pc]; - - if (!currentPCLoc) { - throw new Error('Cannot find location for PC: ' - + instructionEvent.pc - + ' in function: ' - + currentFrame.name); - } - // if current instruction ends lifetime of a local variable, mark this in the // local variable array const frameLocalLifetimeEnds = this.trace.localLifetimeEnds.get(currentFrame.id); @@ -451,18 +556,18 @@ export class Runtime extends EventEmitter { } } } - + const loc = instructionEvent.loc; if (instructionEvent.kind === TraceInstructionKind.CALL || instructionEvent.kind === TraceInstructionKind.CALL_GENERIC) { - currentFrame.lastCallInstructionLine = currentPCLoc.line; + currentFrame.lastCallInstructionLine = loc.line; } - if (currentPCLoc.line === currentFrame.line) { + if (loc.line === currentFrame.line) { // so that instructions on the same line can be bypassed - return [true, currentPCLoc.line]; + return [true, loc.line]; } else { - currentFrame.line = currentPCLoc.line; - return [false, currentPCLoc.line]; + currentFrame.line = loc.line; + return [false, loc.line]; } } @@ -474,41 +579,38 @@ 
export class Runtime extends EventEmitter { * @param funName function name. * @param modInfo information about module containing the function. * @param localsTypes types of local variables in the frame. + * @param localsNames names of local variables in the frame. + * @param optimizedLines lines that are not present in the source map. * @returns new frame. * @throws Error with a descriptive error message if frame cannot be constructed. */ private newStackFrame( frameID: number, funName: string, - modInfo: ModuleInfo, - localsTypes: string[] + fileHash: string, + localsTypes: string[], + localsNames: string[], + optimizedLines: number[] ): IRuntimeStackFrame { - const sourceMap = this.sourceMapsMap.get(JSON.stringify(modInfo)); - - if (!sourceMap) { - throw new Error('Cannot find source map for module: ' - + modInfo.name - + ' in package: ' - + modInfo.addr); - } - const currentFile = this.filesMap.get(sourceMap.fileHash); + const currentFile = this.filesMap.get(fileHash); if (!currentFile) { - throw new Error(`Cannot find file with hash: ${sourceMap.fileHash}`); + throw new Error(`Cannot find file with hash: ${fileHash}`); } let locals = []; // create first scope for local variables locals[0] = []; const stackFrame: IRuntimeStackFrame = { - sourceMap, id: frameID, name: funName, file: currentFile.path, line: 0, // line will be updated when next event (Instruction) is processed localsTypes, + localsNames, locals, lastCallInstructionLine: undefined, + optimizedLines }; if (this.trace.events.length <= this.eventIndex + 1 || @@ -529,53 +631,181 @@ export class Runtime extends EventEmitter { this.emit(event, ...args); }, 0); } + + // + // Utility functions for testing and debugging. + // + + /** + * Whitespace used for indentation in the string representation of the runtime. + */ + private singleTab = ' '; + + /** + * Returns a string representig the current state of the runtime. + * + * @returns string representation of the runtime. 
+ */ + public toString(): string { + let res = 'current frame stack:\n'; + for (const frame of this.frameStack.frames) { + res += this.singleTab + 'function: ' + frame.name + ' (line ' + frame.line + ')\n'; + for (let i = 0; i < frame.locals.length; i++) { + res += this.singleTab + this.singleTab + 'scope ' + i + ' :\n'; + for (let j = 0; j < frame.locals[i].length; j++) { + const local = frame.locals[i][j]; + if (local) { + res += this.varToString(this.singleTab + + this.singleTab + + this.singleTab, local) + '\n'; + } + } + } + } + if (this.lineBreakpoints && this.lineBreakpoints.size > 0) { + res += 'line breakpoints\n'; + for (const [file, breakpoints] of this.lineBreakpoints) { + res += this.singleTab + path.basename(file) + '\n'; + for (const line of breakpoints) { + res += this.singleTab + this.singleTab + line + '\n'; + } + } + } + return res; + } + /** + * Returns a string representation of a runtime variable. + * + * @param variable runtime variable. + * @returns string representation of the variable. + */ + private varToString(tabs: string, variable: IRuntimeVariable): string { + return this.valueToString(tabs, variable.value, variable.name, variable.type); + } + + /** + * Returns a string representation of a runtime compound value. + * + * @param compoundValue runtime compound value. + * @returns string representation of the compound value. + */ + private compoundValueToString(tabs: string, compoundValue: IRuntimeCompundValue): string { + const type = compoundValue.variantName + ? compoundValue.type + '::' + compoundValue.variantName + : compoundValue.type; + let res = '(' + type + ') {\n'; + for (const [name, value] of compoundValue.fields) { + res += this.valueToString(tabs + this.singleTab, value, name); + } + res += tabs + '}\n'; + return res; + } + + /** + * Returns a string representation of a runtime reference value. + * + * @param refValue runtime reference value. + * @param name name of the variable containing reference value. 
+ * @param type optional type of the variable containing reference value. + * @returns string representation of the reference value. + */ + private refValueToString( + tabs: string, + refValue: IRuntimeRefValue, + name: string, + type?: string + ): string { + let res = ''; + const frame = this.frameStack.frames.find(frame => frame.id === refValue.loc.frameID); + let local = undefined; + if (!frame) { + return res; + } + for (const scope of frame.locals) { + local = scope[refValue.loc.localIndex]; + if (local) { + break; + } + } + if (!local) { + return res; + } + return this.valueToString(tabs, local.value, name, type); + } + + /** + * Returns a string representation of a runtime value. + * + * @param value runtime value. + * @param name name of the variable containing the value. + * @param type optional type of the variable containing the value. + * @returns string representation of the value. + */ + private valueToString( + tabs: string, + value: RuntimeValueType, + name: string, + type?: string + ): string { + let res = ''; + if (typeof value === 'string') { + res += tabs + name + ' : ' + value + '\n'; + if (type) { + res += tabs + 'type: ' + type + '\n'; + } + } else if (Array.isArray(value)) { + res += tabs + name + ' : [\n'; + for (let i = 0; i < value.length; i++) { + res += this.valueToString(tabs + this.singleTab, value[i], String(i)); + } + res += tabs + ']\n'; + if (type) { + res += tabs + 'type: ' + type + '\n'; + } + return res; + } else if ('fields' in value) { + res += tabs + name + ' : ' + this.compoundValueToString(tabs, value); + if (type) { + res += tabs + 'type: ' + type + '\n'; + } + } else { + res += this.refValueToString(tabs, value, name, type); + } + return res; + } } /** - * Handles a write to a local variable in the current frame. + * Handles a write to a local variable in a stack frame. * - * @param currentFrame current frame. + * @param frame stack frame frame. * @param localIndex variable index in the frame. 
* @param runtimeValue variable value. */ function localWrite( - currentFrame: IRuntimeStackFrame, + frame: IRuntimeStackFrame, localIndex: number, - traceValue: TraceValue + value: RuntimeValueType ): void { - if (traceValue.type !== TraceValKind.Runtime) { - throw new Error('Expected a RuntimeValue when writing local variable at index: ' - + localIndex - + ' in function: ' - + currentFrame.name - + ' but got: ' - + traceValue.type); - } - const type = currentFrame.localsTypes[localIndex]; + const type = frame.localsTypes[localIndex]; if (!type) { throw new Error('Cannot find type for local variable at index: ' + localIndex + ' in function: ' - + currentFrame.name); - } - const value = traceValue.value; - const funEntry = currentFrame.sourceMap.functions.get(currentFrame.name); - if (!funEntry) { - throw new Error('Cannot find function entry in source map for function: ' - + currentFrame.name); + + frame.name); } - const name = funEntry.localsNames[localIndex]; + const name = frame.localsNames[localIndex]; if (!name) { throw new Error('Cannot find local variable at index: ' + localIndex + ' in function: ' - + currentFrame.name); + + frame.name); } - const scopesCount = currentFrame.locals.length; + const scopesCount = frame.locals.length; if (scopesCount <= 0) { throw new Error("There should be at least one variable scope in functon" - + currentFrame.name); + + frame.name); } // If a variable has the same name but a different index (it is shadowed) // it has to be put in a different scope (e.g., locals[1], locals[2], etc.). 
@@ -583,7 +813,7 @@ function localWrite( // the outermost one let existingVarScope = -1; for (let i = scopesCount - 1; i >= 0; i--) { - const existingVarIndex = currentFrame.locals[i].findIndex(runtimeVar => { + const existingVarIndex = frame.locals[i].findIndex(runtimeVar => { return runtimeVar && runtimeVar.name === name; }); if (existingVarIndex !== -1 && existingVarIndex !== localIndex) { @@ -592,14 +822,14 @@ function localWrite( } } if (existingVarScope >= 0) { - const shadowedScope = currentFrame.locals[existingVarScope + 1]; + const shadowedScope = frame.locals[existingVarScope + 1]; if (!shadowedScope) { - currentFrame.locals.push([]); + frame.locals.push([]); } - currentFrame.locals[existingVarScope + 1][localIndex] = { name, value, type }; + frame.locals[existingVarScope + 1][localIndex] = { name, value, type }; } else { // put variable in the "main" locals scope - currentFrame.locals[0][localIndex] = { name, value, type }; + frame.locals[0][localIndex] = { name, value, type }; } } @@ -680,3 +910,4 @@ function fileHash(fileContents: string): Uint8Array { const hash = crypto.createHash('sha256').update(fileContents).digest(); return new Uint8Array(hash); } + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts b/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts index 23edbd8424175..9fdc0193224dc 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts @@ -49,7 +49,7 @@ interface JSONSrcRootObject { /** * Describes a location in the source file. */ -interface ILoc { +export interface ILoc { line: number; column: number; } @@ -88,7 +88,9 @@ export interface ISourceMap { fileHash: string modInfo: ModuleInfo, functions: Map, - // Lines that are not present in the source map. + /** + * Lines that are not present in the source map. 
+ */ optimizedLines: number[] } diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts b/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts index b4f48e9d9b25a..76b7230b86539 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts @@ -3,7 +3,13 @@ import * as fs from 'fs'; import { FRAME_LIFETIME, ModuleInfo } from './utils'; -import { IRuntimeCompundValue, RuntimeValueType } from './runtime'; +import { + IRuntimeCompundValue, + RuntimeValueType, + IRuntimeVariableLoc, + IRuntimeRefValue +} from './runtime'; +import { ISourceMap, ILoc, IFileInfo } from './source_map_utils'; // Data types corresponding to trace file JSON schema. @@ -30,15 +36,20 @@ interface JSONVectorType { type JSONBaseType = string | JSONStructType | JSONVectorType; +enum JSONTraceRefType { + Mut = 'Mut', + Imm = 'Imm' +} + interface JSONTraceType { - ref_type: string | null; type_: JSONBaseType; + ref_type?: JSONTraceRefType } -type JSONTraceValueType = boolean | number | string | JSONTraceValueType[] | JSONTraceCompound; +type JSONTraceRuntimeValueType = boolean | number | string | JSONTraceRuntimeValueType[] | JSONTraceCompound; interface JSONTraceFields { - [key: string]: JSONTraceValueType; + [key: string]: JSONTraceRuntimeValueType; } interface JSONTraceCompound { @@ -48,14 +59,31 @@ interface JSONTraceCompound { variant_tag?: number; } -interface JSONTraceRuntimeValue { - value: JSONTraceValueType; +interface JSONTraceRefValueContent { + location: JSONTraceLocation; + snapshot: JSONTraceRuntimeValueType; } -interface JSONTraceValue { - RuntimeValue: JSONTraceRuntimeValue; +interface JSONTraceMutRefValue { + MutRef: JSONTraceRefValueContent; } +interface JSONTraceImmRefValue { + ImmRef: JSONTraceRefValueContent; +} + +interface JSONTraceRuntimeValueContent { + value: JSONTraceRuntimeValueType; +} + +interface 
JSONTraceRuntimeValue { + RuntimeValue: JSONTraceRuntimeValueContent; +} + +export type JSONTraceRefValue = JSONTraceMutRefValue | JSONTraceImmRefValue; + +export type JSONTraceValue = JSONTraceRuntimeValue | JSONTraceRefValue; + interface JSONTraceFrame { binary_member_index: number; frame_id: number; @@ -92,17 +120,17 @@ type JSONTraceLocation = JSONTraceLocalLocation | JSONTraceIndexedLocation; interface JSONTraceWriteEffect { location: JSONTraceLocation; - root_value_after_write: JSONTraceValue; + root_value_after_write: JSONTraceRuntimeValue; } interface JSONTraceReadEffect { location: JSONTraceLocation; moved: boolean; - root_value_read: JSONTraceValue; + root_value_read: JSONTraceRuntimeValue; } interface JSONTracePushEffect { - RuntimeValue?: JSONTraceRuntimeValue; + RuntimeValue?: JSONTraceRuntimeValueContent; MutRef?: { location: JSONTraceLocation; snapshot: any[]; @@ -110,7 +138,7 @@ interface JSONTracePushEffect { } interface JSONTracePopEffect { - RuntimeValue?: JSONTraceRuntimeValue; + RuntimeValue?: JSONTraceRuntimeValueContent; MutRef?: { location: JSONTraceLocation; snapshot: any[]; @@ -127,7 +155,7 @@ interface JSONTraceEffect { interface JSONTraceCloseFrame { frame_id: number; gas_left: number; - return_: JSONTraceRuntimeValue[]; + return_: JSONTraceRuntimeValueContent[]; } interface JSONTraceEvent { @@ -179,42 +207,17 @@ export type TraceEvent = type: TraceEventKind.OpenFrame, id: number, name: string, - modInfo: ModuleInfo, + fileHash: string + isNative: boolean, localsTypes: string[], - paramValues: TraceValue[] + localsNames: string[], + paramValues: RuntimeValueType[] + optimizedLines: number[] } | { type: TraceEventKind.CloseFrame, id: number } - | { type: TraceEventKind.Instruction, pc: number, kind: TraceInstructionKind } + | { type: TraceEventKind.Instruction, pc: number, loc: ILoc, kind: TraceInstructionKind } | { type: TraceEventKind.Effect, effect: EventEffect }; -/** - * Kind of a location in the trace. 
- */ -export enum TraceLocKind { - Local = 'Local' - // TODO: other location types -} - -/** - * Location in the trace. - */ -export type TraceLocation = - | { type: TraceLocKind.Local, frameId: number, localIndex: number }; - -/** - * Kind of a value in the trace. - */ -export enum TraceValKind { - Runtime = 'RuntimeValue' - // TODO: other value types -} - -/** - * Value in the trace. - */ -export type TraceValue = - | { type: TraceValKind.Runtime, value: RuntimeValueType }; - /** * Kind of an effect of an instruction. */ @@ -227,7 +230,7 @@ export enum TraceEffectKind { * Effect of an instruction. */ export type EventEffect = - | { type: TraceEffectKind.Write, location: TraceLocation, value: TraceValue }; + | { type: TraceEffectKind.Write, loc: IRuntimeVariableLoc, value: RuntimeValueType }; /** * Execution trace consisting of a sequence of trace events. @@ -241,6 +244,30 @@ interface ITrace { * the last variable access). */ localLifetimeEnds: Map; + + /** + * Maps file path to the lines of code present in the trace instructions + * in functions defined in the file. + */ + tracedLines: Map>; +} + +/** + * Information about the frame being currently processsed used during trace generation. + */ +interface ITraceGenFrameInfo { + /** + * Frame ID. + */ + ID: number; + /** + * PC locations traced in the frame + */ + pcLocs: ILoc[]; + /** + * Path to a file containing function represented by the frame. + */ + filePath: string; } /** @@ -248,8 +275,13 @@ interface ITrace { * * @param traceFilePath path to the trace JSON file. * @returns execution trace. + * @throws Error with a descriptive error message if reading trace has failed. 
*/ -export function readTrace(traceFilePath: string): ITrace { +export function readTrace( + traceFilePath: string, + sourceMapsMap: Map, + filesMap: Map +): ITrace { const traceJSON: JSONTraceRootObject = JSON.parse(fs.readFileSync(traceFilePath, 'utf8')); const events: TraceEvent[] = []; // We compute the end of lifetime for a local variable as follows. @@ -272,13 +304,15 @@ export function readTrace(traceFilePath: string): ITrace { // the loop const localLifetimeEnds = new Map(); const locaLifetimeEndsMax = new Map(); - let frameIDs = []; + const tracedLines = new Map>(); + // stack of frame infos OpenFrame and popped on CloseFrame + const frameInfoStack: ITraceGenFrameInfo[] = []; for (const event of traceJSON.events) { if (event.OpenFrame) { const localsTypes = []; const frame = event.OpenFrame.frame; for (const type of frame.locals_types) { - localsTypes.push(JSONTraceTypeToString(type.type_)); + localsTypes.push(JSONTraceTypeToString(type.type_, type.ref_type)); } // process parameters - store their values in trace and set their // initial lifetimes @@ -287,39 +321,85 @@ export function readTrace(traceFilePath: string): ITrace { for (let i = 0; i < frame.parameters.length; i++) { const value = frame.parameters[i]; if (value) { - const runtimeValue: TraceValue = - { - type: TraceValKind.Runtime, - value: traceValueFromJSON(value.RuntimeValue.value) - }; + const runtimeValue: RuntimeValueType = 'RuntimeValue' in value + ? 
traceRuntimeValueFromJSON(value.RuntimeValue.value) + : traceRefValueFromJSON(value); + paramValues.push(runtimeValue); lifetimeEnds[i] = FRAME_LIFETIME; } } localLifetimeEnds.set(frame.frame_id, lifetimeEnds); + const modInfo = { + addr: frame.module.address, + name: frame.module.name + }; + const sourceMap = sourceMapsMap.get(JSON.stringify(modInfo)); + if (!sourceMap) { + throw new Error('Source map for module ' + + modInfo.name + + ' in package ' + + modInfo.addr + + ' not found'); + } + const funEntry = sourceMap.functions.get(frame.function_name); + if (!funEntry) { + throw new Error('Cannot find function entry in source map for function: ' + + frame.function_name); + } events.push({ type: TraceEventKind.OpenFrame, id: frame.frame_id, name: frame.function_name, - modInfo: { - addr: frame.module.address, - name: frame.module.name - }, + fileHash: sourceMap.fileHash, + isNative: frame.is_native, localsTypes, + localsNames: funEntry.localsNames, paramValues, + optimizedLines: sourceMap.optimizedLines + }); + const currentFile = filesMap.get(sourceMap.fileHash); + + if (!currentFile) { + throw new Error(`Cannot find file with hash: ${sourceMap.fileHash}`); + } + frameInfoStack.push({ + ID: frame.frame_id, + pcLocs: funEntry.pcLocs, + filePath: currentFile.path }); - frameIDs.push(frame.frame_id); } else if (event.CloseFrame) { events.push({ type: TraceEventKind.CloseFrame, id: event.CloseFrame.frame_id }); - frameIDs.pop(); + frameInfoStack.pop(); } else if (event.Instruction) { const name = event.Instruction.instruction; + const frameInfo = frameInfoStack[frameInfoStack.length - 1]; + const fid = frameInfo.ID; + const pcLocs = frameInfo.pcLocs; + // if map does not contain an entry for a PC that can be found in the trace file, + // it means that the position of the last PC in the source map should be used + let loc = event.Instruction.pc >= pcLocs.length + ? 
pcLocs[pcLocs.length - 1] + : pcLocs[event.Instruction.pc]; + + if (!loc) { + throw new Error('Cannot find location for PC: ' + + event.Instruction.pc + + ' in frame: ' + + fid); + } + + const filePath = frameInfo.filePath; + const lines = tracedLines.get(filePath) || new Set(); + lines.add(loc.line); + tracedLines.set(filePath, lines); events.push({ type: TraceEventKind.Instruction, pc: event.Instruction.pc, + loc, kind: name in TraceInstructionKind ? TraceInstructionKind[name as keyof typeof TraceInstructionKind] : TraceInstructionKind.UNKNOWN @@ -327,7 +407,7 @@ export function readTrace(traceFilePath: string): ITrace { // Set end of lifetime for all locals to the max instruction PC ever seen // for a given local (if they are live after this instructions, they will // be reset to INFINITE_LIFETIME when processing subsequent effects). - const currentFrameID = frameIDs[frameIDs.length - 1]; + const currentFrameID = frameInfoStack[frameInfoStack.length - 1].ID; const lifetimeEnds = localLifetimeEnds.get(currentFrameID) || []; const lifetimeEndsMax = locaLifetimeEndsMax.get(currentFrameID) || []; for (let i = 0; i < lifetimeEnds.length; i++) { @@ -349,53 +429,50 @@ export function readTrace(traceFilePath: string): ITrace { // if a local is read or written, set its end of lifetime // to infinite (end of frame) const location = effect.Write ? 
effect.Write.location : effect.Read!.location; - // there must be at least one frame on the stack when processing a write effect - // so we can safely access the last frame ID - const currentFrameID = frameIDs[frameIDs.length - 1]; - const localIndex = processJSONLocation(location, localLifetimeEnds, currentFrameID); - if (localIndex === undefined) { - continue; - } + const loc = processJSONLocalLocation(location, localLifetimeEnds); if (effect.Write) { - const value = traceValueFromJSON(effect.Write.root_value_after_write.RuntimeValue.value); - const traceValue: TraceValue = { - type: TraceValKind.Runtime, - value - }; - const traceLocation: TraceLocation = { - type: TraceLocKind.Local, - frameId: currentFrameID, - localIndex - }; + if (!loc) { + throw new Error('Unsupported location type in Write effect'); + } + // process a write only if the location is supported + const value = 'RuntimeValue' in effect.Write.root_value_after_write + ? traceRuntimeValueFromJSON(effect.Write.root_value_after_write.RuntimeValue.value) + : traceRefValueFromJSON(effect.Write.root_value_after_write); events.push({ type: TraceEventKind.Effect, effect: { type: TraceEffectKind.Write, - location: traceLocation, - value: traceValue + loc, + value } }); } } } } - return { events, localLifetimeEnds }; + return { events, localLifetimeEnds, tracedLines }; } /** * Converts a JSON trace type to a string representation. */ -function JSONTraceTypeToString(type: JSONBaseType): string { - if (typeof type === 'string') { - return type; - } else if ('vector' in type) { - return `vector<${JSONTraceTypeToString(type.vector)}>`; +function JSONTraceTypeToString(baseType: JSONBaseType, refType?: JSONTraceRefType): string { + const refPrefix = refType === JSONTraceRefType.Mut + ? '&mut ' + : (refType === JSONTraceRefType.Imm + ? 
'&' + : ''); + if (typeof baseType === 'string') { + return refPrefix + baseType; + } else if ('vector' in baseType) { + return refPrefix + `vector<${JSONTraceTypeToString(baseType.vector)}>`; } else { - return JSONTraceAddressToHexString(type.struct.address) + return refPrefix + + JSONTraceAddressToHexString(baseType.struct.address) + "::" - + type.struct.module + + baseType.struct.module + "::" - + type.struct.name; + + baseType.struct.name; } } @@ -415,45 +492,83 @@ function JSONTraceAddressToHexString(address: string): string { } } -/// Processes a location in a JSON trace (sets the end of lifetime for a local variable) -/// and returns the local index if the location is a local variable in the current frame. -function processJSONLocation( - location: JSONTraceLocation, - localLifetimeEnds: Map, - currentFrameID: number -): number | undefined { - // TODO: handle Global and Indexed for other frames - if ('Local' in location) { - const frameId = location.Local[0]; - const localIndex = location.Local[1]; - const lifetimeEnds = localLifetimeEnds.get(frameId) || []; - lifetimeEnds[localIndex] = FRAME_LIFETIME; - localLifetimeEnds.set(frameId, lifetimeEnds); - return localIndex; - } else if ('Indexed' in location) { - const frameId = location.Indexed[0].Local[0]; - if (frameId === currentFrameID) { - const localIndex = location.Indexed[0].Local[1]; - const lifetimeEnds = localLifetimeEnds.get(frameId) || []; +/** + * Processes a location of a local variable in a JSON trace: sets the end of its lifetime + * when requested and returns its location + * @param traceLocation location in the trace. + * @param localLifetimeEnds map of local variable lifetimes (defined if local variable + * lifetime should happen). + * @returns variable location. 
+ */ +function processJSONLocalLocation( + traceLocation: JSONTraceLocation, + localLifetimeEnds?: Map, +): IRuntimeVariableLoc | undefined { + if ('Local' in traceLocation) { + const frameID = traceLocation.Local[0]; + const localIndex = traceLocation.Local[1]; + if (localLifetimeEnds) { + const lifetimeEnds = localLifetimeEnds.get(frameID) || []; lifetimeEnds[localIndex] = FRAME_LIFETIME; - localLifetimeEnds.set(frameId, lifetimeEnds); - return localIndex; + localLifetimeEnds.set(frameID, lifetimeEnds); + } + return { frameID, localIndex }; + } else if ('Indexed' in traceLocation) { + return processJSONLocalLocation(traceLocation.Indexed[0], localLifetimeEnds); + } else { + // Currently, there is nothing that needs to be done for 'Global' locations, + // neither with respect to lifetime nor with respect to location itself. + // This is because `Global` locations currently only represent read-only + // refererence values returned from native functions. If there ever was + // a native functino that would return a mutable reference, we should + // consider how to handle value changes via such reference, but it's unlikely + // that such a function would ever be added to either Move stdlib or + // the Sui framework. + return undefined; + } +} + +/** + * Converts a JSON trace reference value to a runtime value. + * + * @param value JSON trace reference value. + * @returns runtime value. + * @throws Error with a descriptive error message if conversion has failed. 
+ */ +function traceRefValueFromJSON(value: JSONTraceRefValue): RuntimeValueType { + if ('MutRef' in value) { + const loc = processJSONLocalLocation(value.MutRef.location); + if (!loc) { + throw new Error('Unsupported location type in MutRef'); + } + const ret: IRuntimeRefValue = { mutable: true, loc }; + return ret; + } else { + const loc = processJSONLocalLocation(value.ImmRef.location); + if (!loc) { + throw new Error('Unsupported location type in ImmRef'); } + const ret: IRuntimeRefValue = { mutable: false, loc }; + return ret; } - return undefined; } -/// Converts a JSON trace value to a runtime trace value. -function traceValueFromJSON(value: JSONTraceValueType): RuntimeValueType { +/** + * Converts a JSON trace runtime value to a runtime trace value. + * + * @param value JSON trace runtime value. + * @returns runtime trace value. + */ +function traceRuntimeValueFromJSON(value: JSONTraceRuntimeValueType): RuntimeValueType { if (typeof value === 'boolean' || typeof value === 'number' || typeof value === 'string') { return String(value); } else if (Array.isArray(value)) { - return value.map(item => traceValueFromJSON(item)); + return value.map(item => traceRuntimeValueFromJSON(item)); } else { const fields: [string, RuntimeValueType][] = - Object.entries(value.fields).map(([key, value]) => [key, traceValueFromJSON(value)]); + Object.entries(value.fields).map(([key, value]) => [key, traceRuntimeValueFromJSON(value)]); const compoundValue: IRuntimeCompundValue = { fields, type: value.type, diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/.gitignore b/external-crates/move/crates/move-analyzer/trace-adapter/tests/.gitignore new file mode 100644 index 0000000000000..4946de71472b6 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/.gitignore @@ -0,0 +1,6 @@ +**/dependency/* +**/dependencies/* +*.mvsm +*.yaml +*~ +!**/build/ diff --git 
a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/Move.toml new file mode 100644 index 0000000000000..2402f9f6a7a6a --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "breakpoints_line" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +breakpoints_line = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..85130144db6d7d18126d1b0596f8b939287abe81 GIT binary patch literal 369 zcmbtQT@JxO5T2RcE!|x|O+*kL@E~r%NgAZ254zIUfw+NlxrEt@jd=Av%N9UlmU5|Fzfkey<~P5=rZAyC`Nq0Dx5mR9+x$jT}`x-zY^ zs?Lj(I68-C>9hD+U@ { + const filePath = path.join(__dirname, 'sources', `m.move`); + let res = ''; + runtime.setLineBreakpoints(filePath, [ + 10, // invalid (in if branch not traced) + 12, // valid (in traced if branch) + 14, // invalid (empty line) + 18, // valid (past loop) + 20 // valid (in loop) + ]); + res += runtime.toString(); + // advance to the caller + runtime.continue(); + res += runtime.toString(); + // advance beyond the loop + runtime.continue(); + res += runtime.toString(); + // advance into the loop + runtime.continue(); + res += runtime.toString(); + // advance into the loop again + runtime.continue(); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/traces/breakpoints_line__m__test.json 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/traces/breakpoints_line__m__test.json new file mode 100644 index 0000000000000..7527c40759090 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/traces/breakpoints_line__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999997,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":1}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999997}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999978,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999975,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999972,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction
":{"type_parameters":[],"pc":3,"gas_left":999999971,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999953,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999950,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999947,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999946,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Write":{"location":{"Local":[4,1]},"root_value_after_write":{"RuntimeValue":{"value":2}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999928,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,1]},"root_value_read":{"RuntimeValue":{"value":2}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999927,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":2}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999909,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":2}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999906,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999903,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeVa
lue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999902,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999901,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999883,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":2}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999880,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999877,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Push":{"RuntimeValue":{"value":3}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999876,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":3}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":3}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999875,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999857,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":3}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":3}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999854,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999851,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":3}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":9
99999850,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999849,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999831,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":3}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":3}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999828,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999825,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":3}}}},{"Effect":{"Push":{"RuntimeValue":{"value":4}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999824,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":4}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":4}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999823,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999805,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":4}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":4}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999802,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999799,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":4}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999798,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999797,"instruction":"BRANCH"}},
{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999779,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":4}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":4}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999776,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999773,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":4}}}},{"Effect":{"Push":{"RuntimeValue":{"value":5}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999772,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":5}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":5}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999771,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999753,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":5}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":5}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999750,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999747,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":5}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999746,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999745,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999727,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"val
ue":5}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":5}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999724,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999721,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":5}}}},{"Effect":{"Push":{"RuntimeValue":{"value":6}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999720,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":6}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":6}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999719,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999701,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":6}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":6}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999698,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999695,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":6}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999694,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999693,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999675,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":6}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":6}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999672,"instruction":"LD_U64"}},{"Effect":{"Pu
sh":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999669,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":6}}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999668,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":7}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999667,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999649,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999646,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999643,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999642,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999641,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999623,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999620,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999617,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effec
t":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"RuntimeValue":{"value":8}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999616,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":8}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":8}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999615,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999597,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":8}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":8}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999594,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999591,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":8}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999590,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999589,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999571,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":8}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":8}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999568,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999565,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":8}}}},{"Effect":{"Push":{"RuntimeValue":{"value":9}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999564,"instruction":"ST_LOC"
}},{"Effect":{"Pop":{"RuntimeValue":{"value":9}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":9}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999563,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999545,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":9}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":9}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999542,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999539,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":9}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999538,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999537,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999519,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":9}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":9}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999516,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999513,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":9}}}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999512,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":10}}}}},{"Instruction":{"typ
e_parameters":[],"pc":24,"gas_left":999999511,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999493,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":10}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999490,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999487,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999486,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999468,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":10}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999450,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999447,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Push":{"RuntimeValue":{"value":11}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999446,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":11}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":11}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999428,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeV
alue":{"value":11}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":11}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999425,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999422,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":11}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999421,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999420,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999402,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":11}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":11}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999399,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999396,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":11}}}},{"Effect":{"Push":{"RuntimeValue":{"value":12}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999395,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":12}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":12}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999394,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999376,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":12}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":12}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999373,"instruction":"L
D_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999370,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":12}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999369,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999368,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999350,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":12}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":12}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999347,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999344,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":12}}}},{"Effect":{"Push":{"RuntimeValue":{"value":13}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999343,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":13}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":13}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999342,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999324,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":13}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":13}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999321,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999318,"instruction":"LT"}},{"Effect":{"Pop":{"Runt
imeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":13}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999317,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999316,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999298,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":13}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":13}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999295,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999292,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":13}}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999291,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":14}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999290,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999272,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999269,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999266,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":
32,"gas_left":999999265,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999264,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999246,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999243,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999240,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Push":{"RuntimeValue":{"value":15}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999239,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":15}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":15}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999238,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999220,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":15}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":15}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999217,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999214,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":15}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999213,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999212,"
instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999194,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":15}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":15}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999191,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999188,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":15}}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999187,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":16}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999186,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999168,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999165,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999162,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999161,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":39,"gas_left":999999143,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":true}}},{"Effect":{"Push":{"RuntimeValu
e":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":40,"gas_left":999999142,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":16}}],"gas_left":999999142}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999141,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":16}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999123,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999105,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999105,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":409,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":16}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999105}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999086,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999083,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999080,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeV
alue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999079,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999061,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999058,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999055,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999054,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Write":{"location":{"Local":[409,1]},"root_value_after_write":{"RuntimeValue":{"value":17}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999036,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,1]},"root_value_read":{"RuntimeValue":{"value":17}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999035,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Write":{"location":{"Local":[409,2]},"root_value_after_write":{"RuntimeValue":{"value":17}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999017,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":17}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999014,"instruction":"LD_U64"}},{"Effec
t":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999011,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999010,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999998992,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":17}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999998974,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999998971,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Push":{"RuntimeValue":{"value":33}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999998970,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":33}}}},{"Effect":{"Write":{"location":{"Local":[409,2]},"root_value_after_write":{"RuntimeValue":{"value":33}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999998952,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":33}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":33}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999998949,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999998946,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":33}}}}
,{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999998945,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":39,"gas_left":999998927,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":33}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":33}}}},{"Instruction":{"type_parameters":[],"pc":40,"gas_left":999998926,"instruction":"RET"}},{"CloseFrame":{"frame_id":409,"return_":[{"RuntimeValue":{"value":33}}],"gas_left":999998926}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999998923,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":33}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999998922,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":49}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998904,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998886,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998886,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":493,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":49}}],"return_types":[{"type_":"u64","ref_ty
pe":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999998886}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998867,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998864,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998861,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998860,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998842,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998839,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998836,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998835,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Write":{"location":{"Local":[493,1]},"root_value_after_write":{"RuntimeValue":{"value":50}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998817,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,1]},"root_value_read":{"RuntimeValue":{"value"
:50}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999998816,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Write":{"location":{"Local":[493,2]},"root_value_after_write":{"RuntimeValue":{"value":50}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998798,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":50}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999998795,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998792,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999998791,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999998773,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":50}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999998755,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999998752,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Push":{"RuntimeValue":{"value":99}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999998751,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":99}}}},{"Effect":{"Wri
te":{"location":{"Local":[493,2]},"root_value_after_write":{"RuntimeValue":{"value":99}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999998733,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":99}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":99}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999998730,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999998727,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":99}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999998726,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":39,"gas_left":999998708,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":99}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":99}}}},{"Instruction":{"type_parameters":[],"pc":40,"gas_left":999998707,"instruction":"RET"}},{"CloseFrame":{"frame_id":493,"return_":[{"RuntimeValue":{"value":99}}],"gas_left":999998707}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998704,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":99}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Push":{"RuntimeValue":{"value":148}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998703,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":148}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998702,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999998702}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/Move.toml 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/Move.toml new file mode 100644 index 0000000000000..3ae5f158e61ac --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "compound" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +compound = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/dependencies/MoveStdlib/vector.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/dependencies/MoveStdlib/vector.mv new file mode 100644 index 0000000000000000000000000000000000000000..79c6d2eb99157a351717e0a6c0cc80cc20a46ce2 GIT binary patch literal 1123 zcmaJ=%}(4f5T3CeCmCm#-35WvB9-mZ_SQ4UQbGF|(XwkGDkM=7{vN0IK1vTg_0(%m zeTmLE5J4qs$)BD1`DQ#b`MUeZSpY~7j5f9DOmw@Bf8h^IzUe*tT~qc`|2D7%Gq8XN zKuR;Bi5N2$0jQY0!KK8=QpSlB+#s)MxVdr~z` zbwmA;UtV>|sw%r>QMNBujbC;?Z;P|iH*J1eHF@XTt|-q&Yd)LmF87ypcNYarigN95 z^Xk;HuW!1fXnVE2@a0)|p43&HpDb5j()y}B4>#NR%j(+4jlbpqK5d!$!gp0^+M8wV z1oGY_y()_?M4t?|ysnD2D($sjbydUl``<#HaLalNR8k|Zj^e^9FuWt)mUqls+KTZ9 zdZ5d2gbt3#p-QD+ZwTHWQl?4E2!%B_j(e|$?@T#u**2`53`r3#%xmH?BK%BF4a#jc z#o>!hqp2{-c))ao2QbD1+%s7e6O8bfjwn)W;PBIs9}}=~cT7tDE&b}DKm64F2TlTqN7TQ5H}paO q+lOd_!>fr<>@bCt-A2@>6eu5M#np7^6jMmpDH;GXJZ(DTDf$cNL2%3f literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..ed59f3dde6851c23188a484d9da7a69839727947 GIT binary patch literal 721 zcmbtS!D`$v5SAZ90IvSpn0R`@tYZqe!ad;CjiPKB$QZ@zmZF+KE0h>{2+ZppUt&6 zv%ke3oc)z1`>y7vKa~WFr+5aE00eIm04X3qMr?9o>IEB2qy}231VKqPAq@gGfYhkb 
z2%`xFP5}*X_Kmgcs&SVWRaaMS%+7tNW9{2&f5+HO-5mCA6T9O!>Wy#Qm-g70-Wj>P zYJL3by3#RU?rOKce6{Ud6&=%F|<8$05h!-YXBj4!&J{b6c*YZ(@`giQ(UoNH`+; z4xYCVNX^AWqtaLy@se+v2sICh$Mw#j!8KShAX+lG0xJ#XxI(K^oWUH5R4`zX5(5>f zWDrHF7-W$GkCvtAB>7+GV`mLda;t~WecqfU`O$FQ87A-VR5L*07bAh>y;dffXysW5$m2>$?8)^z6p literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/dependencies/MoveStdlib/vector.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/dependencies/MoveStdlib/vector.json new file mode 100644 index 0000000000000..126416a07eb7e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/dependencies/MoveStdlib/vector.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":261,"end":267},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","vector"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1202,"end":1207},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1208,"end":1215}]],"parameters":[],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1220,"end":1235}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1329,"end":1335},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,
152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1336,"end":1343}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1345,"end":1346}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1367,"end":1370}],"locals":[],"nops":{},"code_map":{},"is_native":true},"2":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1563,"end":1569},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1570,"end":1577}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1579,"end":1580}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1600,"end":1601}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1609,"end":1617}],"locals":[],"nops":{},"code_map":{},"is_native":true},"3":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1724,"end":1733},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1734,"end":1741}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1743,"end":1744}],["e#0#0",{"file_has
h":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1768,"end":1769}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"4":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1968,"end":1978},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1979,"end":1986}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1988,"end":1989}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2013,"end":2014}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2022,"end":2034}],"locals":[],"nops":{},"code_map":{},"is_native":true},"5":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2170,"end":2178},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2179,"end":2186}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2188,"end":2189}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2214,"end":2221}],"locals":[],"nops":{},"code_map":{},"is_native":true},"6":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,17
0,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2342,"end":2355},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2356,"end":2363}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2365,"end":2366}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2561,"end":2565},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2566,"end":2573}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2575,"end":2576}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2600,"end":2601}],["j#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2608,"end":2609}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"8":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2694,"end":2703},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2704,"end":2711}]],"parameters":[["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,7
9],"start":2713,"end":2714}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2726,"end":2741}],"locals":[["v#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2760,"end":2761}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2764,"end":2771},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2756,"end":2761},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2782},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2793,"end":2794},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2795},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2805,"end":2806}},"is_native":false},"9":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2900,"end":2907},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2908,"end":2915}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2917,"end":2918}]],"returns":[],"locals":[["back_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,
57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3057,"end":3067}],["front_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3024,"end":3035}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2963},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2972},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2986,"end":2989},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2993,"end":2994},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2990,"end":2992},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2982,"end":3005},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2996,"end":3005},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3038,"end":3039},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,
120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3020,"end":3035},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3070,"end":3073},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3076,"end":3077},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3074,"end":3075},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3053,"end":3067},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3094,"end":3105},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3108,"end":3118},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3106,"end":3107},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3135},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3141,"end":3152},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3154,"end":3164},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3165},"25":{"f
ile_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3193,"end":3204},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3207,"end":3208},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3205,"end":3206},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3179,"end":3190},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3235,"end":3245},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3248,"end":3249},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3246,"end":3247},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3222,"end":3232},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260}},"is_native":false},"10":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3363,"end":3369},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3370,"end":3377}]],"parameters":[["lhs#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3379,"end":3382
}],["other#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3410,"end":3415}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3449},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3459},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3482},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3493},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3476,"end":3477},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3498},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3514},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3525},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3526},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"12":{"file_hash":[42,57,43,6
0,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3541},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3557,"end":3558}},"is_native":false},"11":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3660,"end":3668},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3669,"end":3676}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3678,"end":3679}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3700,"end":3704}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3716},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3725},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3729,"end":3730},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start"
:3726,"end":3728},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3730}},"is_native":false},"12":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3836,"end":3844},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3845,"end":3852}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3854,"end":3855}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3875,"end":3876}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3889,"end":3893}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3912,"end":3913}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3916,"end":3917},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3908,"end":3913},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3938},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,
144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3947},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3964,"end":3965},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3968,"end":3971},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3966,"end":3967},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3992,"end":3993},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3994,"end":3995},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3991,"end":3996},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4000,"end":4001},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3997,"end":3999},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3987,"end":4014},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"20":
{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4010,"end":4014},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4032,"end":4033},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4036,"end":4037},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4034,"end":4035},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4028,"end":4029},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4058,"end":4063}},"is_native":false},"13":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4196,"end":4204},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4205,"end":4212}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4214,"end":4215}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4235,"end
":4236}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4250,"end":4254},{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4256,"end":4259}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4279,"end":4280}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4283,"end":4284},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4275,"end":4280},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4305},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4314},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4331,"end":4332},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4335,"end":4338},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4333,"end":4334},"8":{"file_hash":[42,57,43,60,1
90,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4359,"end":4360},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4361,"end":4362},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4358,"end":4363},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4367,"end":4368},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4364,"end":4366},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4354,"end":4386},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4378,"end":4382},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4384,"end":4385},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4404,"end":4405},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168
,248,67,118,176,80,79],"start":4408,"end":4409},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4406,"end":4407},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4400,"end":4401},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4431,"end":4436},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4438,"end":4439},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440}},"is_native":false},"14":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4660,"end":4666},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4667,"end":4674}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4676,"end":4677}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4705,"end":4706}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120
,5,88,201,108,231,168,248,67,118,176,80,79],"start":4714,"end":4721}],"locals":[["%#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4740,"end":4743}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4747},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4756},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4736,"end":4743},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4804,"end":4805},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4809,"end":4812},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4806,"end":4808},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4800,"end":4840},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,7
9],"start":4820,"end":4840},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4857,"end":4860},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4863,"end":4864},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4861,"end":4862},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4851,"end":4854},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4881,"end":4882},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4885,"end":4888},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4883,"end":4884},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4906,"end":4907},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126
,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4910,"end":4911},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4908,"end":4909},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4902,"end":4903},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4913,"end":4914},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4917},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4928},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4939}},"is_native":false},"15":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5290,"end":5296},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5297,"end":5304}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,19
4,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5306,"end":5307}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5331,"end":5332}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5347,"end":5348}]],"returns":[],"locals":[["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5376},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5385},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5426,"end":5427},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5430,"end":5433},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5428,"end":5429},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5422,"end":5461},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,15
2,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5441,"end":5461},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5473},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5484,"end":5485},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5486},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5503,"end":5504},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5507,"end":5510},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5505,"end":5506},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5527},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5533,"end":5534},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5536,"end":5539},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526
,"end":5540},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5558,"end":5559},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5562,"end":5563},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5560,"end":5561},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5554,"end":5555},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573}},"is_native":false},"16":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5812,"end":5823},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5824,"end":5831}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5833,"end":5834}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5858,"end":5859}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5867,"end":5874}],"locals":[["last_idx#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,1
44,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5895},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5906},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5893,"end":5894},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5908,"end":5928},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5955},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5964},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5967,"end":5968},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5965,"end":5966},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5979},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5985,"end":5986},"18":{
"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5988,"end":5996},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5997},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6008},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6019}},"is_native":false},"17":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981}},"is_native":false}},"constant_map":{"EINDEX_OUT_OF_BOUNDS":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/m.json new file mode 100644 index 0000000000000..2b19308a79b3b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/m.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":98,"end":99},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":231,"end":241},"type_parameters":[],"fields":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":257,"end":269},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":280,"end":290},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":306,"end":324},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":340,"end":356},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":375,"end":391}]},"1":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":432,"end":444},"type_parameters":[],"fields":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":466,"end":471}]}},"enum_map":{"0":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":114,"end":122},"type_parameters":[],"variants":[[["PositionalVariant",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":138,"end":165}],[{"file_hash":[123,47,2,242,89,17,168,119,38
,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":156,"end":159},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":161,"end":164}]],[["NamedVariant",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":171,"end":212}],[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":186,"end":192},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":199,"end":205}]]]}},"function_map":{"0":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":485,"end":488},"type_parameters":[],"parameters":[["some_struct#0#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":493,"end":504}],["p#0#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":518,"end":519}]],"returns":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":527,"end":537}],"locals":[["named_variant#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":605,"end":618}],["pos_variant#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":548,"end":559}],["v#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":699,"end":700}],["v_struct#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,
62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":733,"end":741}]],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":590,"end":591},"1":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":593,"end":594},"2":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":562,"end":595},"3":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":548,"end":559},"4":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":662,"end":663},"5":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":681,"end":682},"6":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":621,"end":689},"7":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":605,"end":618},"8":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":721,"end":722},"9":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":703,"end":723},"10":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":699,"end":700},"11":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":784,"end":785},"12":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,
165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":762,"end":787},"13":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":744,"end":788},"14":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":733,"end":741},"15":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":822,"end":823},"16":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":795,"end":819},"18":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":795,"end":823},"19":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":854,"end":865},"20":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":829,"end":851},"22":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":829,"end":865},"23":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":904,"end":917},"24":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":871,"end":901},"26":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":871,"end":917},"27":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":954,"end":955},"28":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,1
49,65,204,94,41,244,153,66,177,132],"start":923,"end":951},"30":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":923,"end":955},"31":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":992,"end":1000},"32":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":961,"end":989},"34":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":961,"end":1000},"35":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1007,"end":1018}},"is_native":false},"1":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1026,"end":1037},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1041,"end":1051}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1093,"end":1094},"1":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1144,"end":1145},"2":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1147,"end":1148},"3":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1116,"end":1149},"4":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1
207,"end":1208},"5":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1210,"end":1211},"6":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1179,"end":1212},"7":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1258,"end":1259},"8":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1240,"end":1260},"9":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1328,"end":1329},"10":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1306,"end":1331},"11":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1288,"end":1332},"12":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1058,"end":1339}},"is_native":false},"2":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1355,"end":1359},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1386,"end":1399},"1":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1422,"end":1424},"2":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1405,"end":1425},"4":{"file_hash":[
123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1425,"end":1426}},"is_native":false},"3":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":81,"end":1428},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":81,"end":1428}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/dependencies/MoveStdlib/vector.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. + /// This will return `None` if the vector is not valid utf8. + public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. 
+ public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty(): vector; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length(v: &vector): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow(v: &vector, i: u64): ∈ + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. + public native fun push_back(v: &mut vector, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut(v: &mut vector, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back(v: &mut vector): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty. + public native fun destroy_empty(v: vector); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap(v: &mut vector, i: u64, j: u64); + + /// Return an vector of size one containing element `e`. + public fun singleton(e: Element): vector { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. 
+ public fun reverse(v: &mut vector) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append(lhs: &mut vector, mut other: vector) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty(v: &vector): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. + public fun contains(v: &vector, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of(v: &vector, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun remove(v: &mut vector, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. + /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. 
+ /// Aborts if `i > v.length()` + public fun insert(v: &mut vector, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove(v: &mut vector, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. + public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector). + public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. + /// The function `f` takes a mutable reference to the element. 
+ public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. + /// Return a new vector containing only the elements for which `f` returns `true`. + public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. 
+ public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`. + public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. 
+ public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. 
+ /// The order of elements in the vectors is preserved. + public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/m.move new file mode 100644 index 0000000000000..193501e8267d0 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/m.move @@ -0,0 +1,54 @@ +// Test tracking values of compound type variables +// (structs, enums, vectors). +module compound::m; + +public enum SomeEnum has drop { + PositionalVariant(u64, u64), + NamedVariant { field1: u64, field2: u64 }, +} + +public struct SomeStruct has drop { + simple_field: u64, + enum_field: SomeEnum, + another_enum_field: SomeEnum, + vec_simple_field: vector, + vec_struct_field: vector, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo(mut some_struct: SomeStruct, p: u64): SomeStruct { + let pos_variant = SomeEnum::PositionalVariant(p, p); + let named_variant = SomeEnum::NamedVariant { + field1: p, + field2: p, + }; + let v = vector::singleton(p); + let v_struct = vector::singleton(SimpleStruct { field: p }); + + some_struct.simple_field = p; + some_struct.enum_field = pos_variant; + some_struct.another_enum_field = named_variant; + some_struct.vec_simple_field = v; + some_struct.vec_struct_field = v_struct; + + some_struct +} + +fun some_struct(): SomeStruct { + SomeStruct { + simple_field: 0, + enum_field: SomeEnum::PositionalVariant(0, 0), + another_enum_field: SomeEnum::PositionalVariant(0, 0), + vec_simple_field: vector::singleton(0), + vec_struct_field: vector::singleton(SimpleStruct { field: 0 }), + } +} + +#[test] +fun test() { + let some_struct 
= some_struct(); + foo(some_struct, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/sources/m.move new file mode 100644 index 0000000000000..193501e8267d0 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/sources/m.move @@ -0,0 +1,54 @@ +// Test tracking values of compound type variables +// (structs, enums, vectors). +module compound::m; + +public enum SomeEnum has drop { + PositionalVariant(u64, u64), + NamedVariant { field1: u64, field2: u64 }, +} + +public struct SomeStruct has drop { + simple_field: u64, + enum_field: SomeEnum, + another_enum_field: SomeEnum, + vec_simple_field: vector, + vec_struct_field: vector, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo(mut some_struct: SomeStruct, p: u64): SomeStruct { + let pos_variant = SomeEnum::PositionalVariant(p, p); + let named_variant = SomeEnum::NamedVariant { + field1: p, + field2: p, + }; + let v = vector::singleton(p); + let v_struct = vector::singleton(SimpleStruct { field: p }); + + some_struct.simple_field = p; + some_struct.enum_field = pos_variant; + some_struct.another_enum_field = named_variant; + some_struct.vec_simple_field = v; + some_struct.vec_struct_field = v_struct; + + some_struct +} + +fun some_struct(): SomeStruct { + SomeStruct { + simple_field: 0, + enum_field: SomeEnum::PositionalVariant(0, 0), + another_enum_field: SomeEnum::PositionalVariant(0, 0), + vec_simple_field: vector::singleton(0), + vec_struct_field: vector::singleton(SimpleStruct { field: 0 }), + } +} + +#[test] +fun test() { + let some_struct = some_struct(); + foo(some_struct, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/test.exp new file mode 100644 index 0000000000000..f04773e0e1867 --- 
/dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/test.exp @@ -0,0 +1,55 @@ +current frame stack: + function: test (line 53) + scope 0 : + function: foo (line 23) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + simple_field : 0 + enum_field : (0x0::m::SomeEnum::PositionalVariant) { + pos0 : 0 + pos1 : 0 + } + another_enum_field : (0x0::m::SomeEnum::PositionalVariant) { + pos0 : 0 + pos1 : 0 + } + vec_simple_field : [ + 0 : 0 + ] + vec_struct_field : [ + 0 : (0x0::m::SimpleStruct) { + field : 0 + } + ] + } + type: 0x0::m::SomeStruct + + p : 42 + type: u64 + +current frame stack: + function: test (line 53) + scope 0 : + function: foo (line 37) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + simple_field : 42 + enum_field : (0x0::m::SomeEnum::PositionalVariant) { + pos0 : 42 + pos1 : 42 + } + another_enum_field : (0x0::m::SomeEnum::NamedVariant) { + field1 : 42 + field2 : 42 + } + vec_simple_field : [ + 0 : 42 + ] + vec_struct_field : [ + 0 : (0x0::m::SimpleStruct) { + field : 42 + } + ] + } + type: 0x0::m::SomeStruct + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/trace.spec.js new file mode 100644 index 0000000000000..9837778eaf084 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/trace.spec.js @@ -0,0 +1,23 @@ +let action = (runtime) => { + let res = ''; + // step over a function creating a complex struct + runtime.step(true); + // step into a function + runtime.step(false); + res += runtime.toString(); + // advance until all struct fields are updated + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, 
action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/traces/compound__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/traces/compound__m__test.json new file mode 100644 index 0000000000000..1b4e077a6155d --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/traces/compound__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"some_struct","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999996,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999993,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999990,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999986,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"Pos
itionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999983,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999980,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999976,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999973,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999973,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":24,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999973}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999962,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999961,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[24,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999951,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[24,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push"
:{"MutRef":{"location":{"Local":[24,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999933,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[24,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999932,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[24,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[24,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999922,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[24,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999921,"instruction":"RET"}},{"CloseFrame":{"frame_id":24,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_left":999999921}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999918,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999914,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999914,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":51,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}],"ret
urn_types":[{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}},"ref_type":null},{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"is_native":false},"gas_left":999999914}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999903,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999902,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[51,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999892,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[51,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[51,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999872,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[51,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999871,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[51,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[51,1]},"root_value_after_write":{"RuntimeValue":{"value":[{"type":"0x0::
m::SimpleStruct","fields":{"field":0}}]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999849,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[51,1]},"root_value_read":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999848,"instruction":"RET"}},{"CloseFrame":{"frame_id":51,"return_":[{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}],"gas_left":999999848}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999844,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999843,"instruction":"RET"}},{"CloseFrame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0
,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}],"gas_left":999999843}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999840,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999840,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":84,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},{"RuntimeValue":{"value":42}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeEnum","type_args":[]}},"ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeEnum","type_args":[]}},"ref_type":null},{"type_":{"vector":"u64"},"ref_type":null},{"type_":{"vector":{"struct":{"address":"0000000000000000000000
000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"is_native":false},"gas_left":999999840}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999821,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999803,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999799,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999798,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Effect":{"Write":{"location":{"Local":[84,3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999780,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999762,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"
type_parameters":[],"pc":6,"gas_left":999999758,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999757,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Effect":{"Write":{"location":{"Local":[84,2]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999739,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999739,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":115,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999739}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999728,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999727,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[115,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruct
ion":{"type_parameters":[],"pc":2,"gas_left":999999717,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[115,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[115,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999699,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[115,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999698,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[115,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[115,1]},"root_value_after_write":{"RuntimeValue":{"value":[42]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999688,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[115,1]},"root_value_read":{"RuntimeValue":{"value":[42]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[42]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999687,"instruction":"RET"}},{"CloseFrame":{"frame_id":115,"return_":[{"RuntimeValue":{"value":[42]}}],"gas_left":999999687}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999686,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[42]}}}},{"Effect":{"Write":{"location":{"Local":[84,4]},"root_value_after_write":{"RuntimeValue":{"value":[42]}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999668,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999664,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x
0::m::SimpleStruct","fields":{"field":42}}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999664,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":146,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}}],"return_types":[{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}},"ref_type":null},{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"is_native":false},"gas_left":999999664}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999653,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999652,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[146,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999642,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[146,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[146,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999622,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[
146,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999621,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[146,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[146,1]},"root_value_after_write":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999599,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[146,1]},"root_value_read":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999598,"instruction":"RET"}},{"CloseFrame":{"frame_id":146,"return_":[{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}],"gas_left":999999598}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999597,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Effect":{"Write":{"location":{"Local":[84,5]},"root_value_after_write":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999579,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999569,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0
]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999559,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"f
ield":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999541,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999505,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,3]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999495,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0
,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999485,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999449,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},1]},"snapshot":{"type":"0x0::
m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},1]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999413,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,2]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999403,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","va
riant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999393,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999357,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_
name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},2]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999347,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,4]},"root_value_read":{"RuntimeValue":{"value":[42]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[42]}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999337,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field
":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999327,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},3]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999317,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},3]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field
":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[42]}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999295,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,5]},"root_value_read":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999285,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type"
:"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999275,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},4]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999253,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},4]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},4]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"en
um_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999137,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999136,"instruction":"RET"}},{"CloseFrame":{"frame_id":84,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}],"gas_le
ft":999999136}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999135,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999134,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999134}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/Move.toml new file mode 100644 index 0000000000000..f8a896e2210ea --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "global_loc" +edition = "2024.beta" + +[dependencies] +Sui = { git = "https://github.com/MystenLabs/sui.git", subdir = "crates/sui-framework/packages/sui-framework", rev = "framework/mainnet" } + +[addresses] +global_loc = "0x0" +Sui = "0x2" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..d24eb56fae5a632609168a8edaac128572263ac4 GIT binary patch literal 520 zcmbu6u};G<5Qgv0PU6^ZXaH3e;t4ubLh8T_6`~UZFbCSWRS>aJ<3jrg%sc^)hnRQ+ zoFY|XVd2-E_1ijs!+-C`pDG4`6hV;avi^`?P;@y=p5IWs(w+Pup})w2#4dGc4^RL> z0>n5lA_x`%vexC{K#m=tND`2UVic4nVP0toEG7jJr*K6ET%aVP0K+&9OvE^}Bn(fX 
z7$%{mrpZ`dyV_h$AI!|BH|uj}y;*y9I~gC%mge4@qNx_vESqBPmW4Nsuk0c&&7<)q zDpz&AVRPrGDpge$-WAhLn8BJ^^TO<^j@_2b+mF((i`fr9UD?WSY2BSGo?O*98?yS3 zLRfz)1V=)U4tWBweg>zYDWg2rEMx7WwmM4>HCJ!|I?B1?BNdH$3P(L^@hZ=Sh7RP( W$zUH&2Kg`F!w@?M;yYN*kl-8jKtwVC literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/MoveStdlib/bcs.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/MoveStdlib/bcs.json new file mode 100644 index 0000000000000..569ac0491d84c --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/MoveStdlib/bcs.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":395,"end":398},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","bcs"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":510,"end":518},"type_parameters":[["MoveValue",{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":519,"end":528}]],"parameters":[["v#0#0",{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":530,"end":531}]],"returns":[{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":546,"end":556}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,2
01,47,24],"start":383,"end":557},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":383,"end":557}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/object.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/object.json new file mode 100644 index 0000000000000..34fa8e3bae45c --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/object.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":114,"end":120},"module_name":["0000000000000000000000000000000000000000000000000000000000000002","object"],"struct_map":{"0":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":1986,"end":1988},"type_parameters":[],"fields":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2345,"end":2350}]},"1":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2882,"end":2885},"type_parameters":[],"fields":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2902,"end":2904}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,1
61,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2971,"end":2982},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2983,"end":2985}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2993,"end":3003}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3025,"end":3027},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3024,"end":3033},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3010,"end":3034}},"is_native":false},"1":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3096,"end":3109},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3110,"end":3112}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3120,"end":3127}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3134,"end":3136},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3134,"end":3142}},"is_native":false},"2":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139
,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3190,"end":3203},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3204,"end":3209}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3224,"end":3226}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3253,"end":3258},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3233,"end":3259},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3233,"end":3267}},"is_native":false},"3":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3316,"end":3331},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3332,"end":3337}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3349,"end":3351}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3363,"end":3368},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3358,"end":3370}},"is_native":false},"4":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119
,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3539,"end":3555},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3556,"end":3559}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3574,"end":3577}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3592,"end":3595},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3592,"end":3604},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3608,"end":3612},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3605,"end":3607},"4":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3584,"end":3632},"6":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3614,"end":3631},"7":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3584,"end":3632},"8":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3668,"end":3694},"9":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3656,"end":3696},"10":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,
52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3638,"end":3703}},"is_native":false},"5":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3832,"end":3837},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3841,"end":3844}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3881,"end":3900},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3869,"end":3902},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3851,"end":3909}},"is_native":false},"6":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4065,"end":4084},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4088,"end":4091}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4128,"end":4154},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4116,"end":4156},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4098,"end":4163}},"is_native":false},"7":{"definition_location":{"file_hash":[103,169,24,233,3
1,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4294,"end":4310},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4314,"end":4317}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4354,"end":4367},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4342,"end":4369},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4324,"end":4376}},"is_native":false},"8":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4512,"end":4535},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4539,"end":4542}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4579,"end":4602},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4567,"end":4604},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4549,"end":4611}},"is_native":false},"9":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4752,"end":4758},"type_parameters":[],"parameters":[],"ret
urns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4762,"end":4765}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4802,"end":4815},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4790,"end":4817},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4772,"end":4824}},"is_native":false},"10":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4871,"end":4883},"type_parameters":[],"parameters":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4884,"end":4887}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4896,"end":4899}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4907,"end":4910},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4906,"end":4913}},"is_native":false},"11":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4974,"end":4986},"type_parameters":[],"parameters":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4987,"end":499
0}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4999,"end":5001}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5008,"end":5011},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5008,"end":5014}},"is_native":false},"12":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5062,"end":5074},"type_parameters":[],"parameters":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5075,"end":5078}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5087,"end":5097}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5119,"end":5122},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5119,"end":5131},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5118,"end":5131},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5104,"end":5132}},"is_native":false},"13":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5194,"end":5208},"type_parameters":[],"paramete
rs":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5209,"end":5212}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5221,"end":5228}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5235,"end":5238},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5235,"end":5247}},"is_native":false},"14":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5408,"end":5411},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5412,"end":5415}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5434,"end":5437}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5474,"end":5477},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5474,"end":5500},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5462,"end":5502},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5444,"end":5509}},"is_native":false},"15":{"definition_location":{"file_hash":[103
,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5860,"end":5866},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5867,"end":5869}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5913,"end":5915},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5886,"end":5910},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5896,"end":5908},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5921,"end":5939}},"is_native":false},"16":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5991,"end":5993},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5994,"end":5995}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6002,"end":6005}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6012,"end":6014}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6032,"end":6035},"1":{"file_hash":[103,169,24,233,31,232,113,209,174
,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6021,"end":6036},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6021,"end":6039}},"is_native":false},"17":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6094,"end":6103},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6104,"end":6105}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6112,"end":6115}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6122,"end":6125}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6144,"end":6147},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6133,"end":6148},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6132,"end":6151}},"is_native":false},"18":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6221,"end":6229},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6230,"end":6231}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,
201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6238,"end":6241}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6248,"end":6258}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6291,"end":6294},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6280,"end":6295},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6279,"end":6298},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6265,"end":6299}},"is_native":false},"19":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6371,"end":6381},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6382,"end":6383}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6390,"end":6393}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6400,"end":6407}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6425,"end":6428},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6414,"end":6429},"
2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6414,"end":6438}},"is_native":false},"20":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6761,"end":6771},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6772,"end":6773}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6780,"end":6783}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6790,"end":6794}],"locals":[],"nops":{},"code_map":{},"is_native":true},"21":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6889,"end":6906},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6907,"end":6912}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6924,"end":6927}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6949,"end":6954},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6934,"end":6955},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6976,"
end":6981},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6971,"end":6983},"4":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6961,"end":6985}},"is_native":false},"22":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7052,"end":7063},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7064,"end":7066}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"23":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7128,"end":7142},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7143,"end":7145}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"24":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7228,"end":7240},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7241,"end":7244}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7259,"end":7261}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7280,"end":7283},"
1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7280,"end":7308},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7268,"end":7310}},"is_native":false},"25":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":102,"end":7312},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":102,"end":7312}},"is_native":false}},"constant_map":{"ENotSystemAddress":6,"SUI_AUTHENTICATOR_STATE_ID":2,"SUI_BRIDGE_ID":5,"SUI_CLOCK_OBJECT_ID":1,"SUI_DENY_LIST_OBJECT_ID":4,"SUI_RANDOM_ID":3,"SUI_SYSTEM_STATE_OBJECT_ID":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/tx_context.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/tx_context.json new file mode 100644 index 0000000000000..a85ccb124ed83 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/tx_context.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":87,"end":97},"module_name":["0000000000000000000000000000000000000000000000000000000000000002","tx_context"],"struct_map":{"0":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":705,"end":714},"type_parameters":[],"fields":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":798,"end":804},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":859,"end":866},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":917,"end":922},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":977,"end":995},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1146,"end":1157}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1253,"end":1259},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1260,"end":1264}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1279,"end":1286}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1293,"en
d":1297},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1293,"end":1304}},"is_native":false},"1":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1432,"end":1438},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1439,"end":1443}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1458,"end":1469}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1477,"end":1481},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1476,"end":1489}},"is_native":false},"2":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1533,"end":1538},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1539,"end":1543}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1558,"end":1561}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1568,"end":1572},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1568,"end":1578}},"is_native":fa
lse},"3":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1662,"end":1680},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1681,"end":1685}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1700,"end":1703}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1710,"end":1714},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1710,"end":1733}},"is_native":false},"4":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1949,"end":1969},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1970,"end":1973}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1992,"end":1999}],"locals":[["id#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2049,"end":2051}],["ids_created#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2010,"end":2021}]],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2024,"end":2027},"1":{"file_hash":[
222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2024,"end":2039},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2010,"end":2021},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2066,"end":2069},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2065,"end":2077},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2064,"end":2077},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2079,"end":2090},"8":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2054,"end":2091},"9":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2049,"end":2051},"10":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2115,"end":2126},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2129,"end":2130},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2127,"end":2128},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2097,"end":2100},"14":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":20
97,"end":2112},"15":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2097,"end":2130},"16":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2136,"end":2138}},"is_native":false},"5":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2279,"end":2290},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2291,"end":2295}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2310,"end":2313}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2320,"end":2324},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2320,"end":2336}},"is_native":false},"6":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2423,"end":2432},"type_parameters":[],"parameters":[["tx_hash#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2433,"end":2440}],["ids_created#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2454,"end":2465}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2473,"end":2480}],"locals":[],"nops":{},
"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2578,"end":2581},"type_parameters":[],"parameters":[["sender#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2587,"end":2593}],["tx_hash#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2608,"end":2615}],["epoch#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2633,"end":2638}],["epoch_timestamp_ms#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2649,"end":2667}],["ids_created#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2678,"end":2689}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2699,"end":2708}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2723,"end":2730},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2723,"end":2739},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2743,"end":2757},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2740,"end":2742},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,
75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2715,"end":2776},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2759,"end":2775},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2715,"end":2776},"8":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2794,"end":2800},"9":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2802,"end":2809},"10":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2811,"end":2816},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2818,"end":2836},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2838,"end":2849},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2782,"end":2851}},"is_native":false},"8":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2959,"end":2972},"type_parameters":[],"parameters":[["addr#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2978,"end":2982}],["hint#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2997,"end":3001}],["epoch#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,
133,208,202,6,116,27,117,173,96],"start":3012,"end":3017}],["epoch_timestamp_ms#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3028,"end":3046}],["ids_created#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3057,"end":3068}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3078,"end":3087}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3098,"end":3102},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3128,"end":3132},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3104,"end":3133},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3135,"end":3140},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3142,"end":3160},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3162,"end":3173},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3094,"end":3174}},"is_native":false},"9":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3245,"end":3250},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237
,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3254,"end":3263}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3361,"end":3365},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3284,"end":3351},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3376,"end":3377},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3379,"end":3380},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3382,"end":3383},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3357,"end":3384}},"is_native":false},"10":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3527,"end":3550},"type_parameters":[],"parameters":[["hint#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3551,"end":3555}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3563,"end":3573}],"locals":[["tx_hash#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3588,"end":3595}]],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3617,"end":3622},"1":{"f
ile_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3598,"end":3623},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3584,"end":3595},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3636,"end":3643},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3636,"end":3652},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3655,"end":3669},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3653,"end":3654},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3629,"end":3691},"9":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3671,"end":3678},"10":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3689,"end":3690},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3671,"end":3691},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3629,"end":3691},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3697,"end":3704}},"is_native":false},"11":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,4
6,15,2,202,133,208,202,6,116,27,117,173,96],"start":3732,"end":3747},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3748,"end":3752}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3767,"end":3770}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3789,"end":3793},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3777,"end":3794}},"is_native":false},"12":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3868,"end":3890},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3891,"end":3895}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3910,"end":3917}],"locals":[["ids_created#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3928,"end":3939}]],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3942,"end":3946},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3942,"end":3958},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start"
:3928,"end":3939},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3972,"end":3983},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3986,"end":3987},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3984,"end":3985},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3964,"end":4003},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3989,"end":4002},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3964,"end":4003},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4021,"end":4025},"14":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4020,"end":4033},"15":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4019,"end":4033},"16":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4035,"end":4046},"17":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4049,"end":4050},"18":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4047,"end":4048},"19":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133
,208,202,6,116,27,117,173,96],"start":4009,"end":4051}},"is_native":false},"13":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4079,"end":4101},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4102,"end":4106}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4143,"end":4147},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4143,"end":4153},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4156,"end":4157},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4154,"end":4155},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4130,"end":4134},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4130,"end":4140},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4130,"end":4157}},"is_native":false},"14":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4185,"end":4210},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4211,"end":4215}],["del
ta_ms#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4233,"end":4241}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4280,"end":4284},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4280,"end":4303},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4306,"end":4314},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4304,"end":4305},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4254,"end":4258},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4254,"end":4277},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4254,"end":4314}},"is_native":false},"15":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":75,"end":4316},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":75,"end":4316}},"is_native":false}},"constant_map":{"EBadTxHashLength":1,"ENoIDsCreated":2,"TX_HASH_LENGTH":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/m.json 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/m.json new file mode 100644 index 0000000000000..e82c0419837d9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":70,"end":71},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":88,"end":98},"type_parameters":[],"fields":[{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":113,"end":115},{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":126,"end":129}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":142,"end":145},"type_parameters":[],"parameters":[["o#0#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":146,"end":147}],["p#0#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":161,"end":162}]],"returns":[{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":169,"end":172}],"locals":[["%#1",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":201}],["%#2",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,2
0,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":212}],["n#1#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":183,"end":184}],["num#1#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":242,"end":245}]],"nops":{},"code_map":{"0":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":198,"end":200},"1":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":201},"4":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":212},"6":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":215},"7":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":213,"end":214},"8":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":215},"10":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":183,"end":184},"11":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":250,"end":251},"12":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":225,"end":247},"13":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":242,"end":245},"14":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1
,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":257,"end":275},"15":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":282,"end":283},"16":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":282,"end":290},"17":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":295,"end":298},"18":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":295,"end":305},"19":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":292,"end":293},"20":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":310,"end":311},"21":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":310,"end":318},"22":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":307,"end":308},"23":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":281,"end":319}},"is_native":false},"1":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":335,"end":339},"type_parameters":[],"parameters":[],"returns":[],"locals":[["ctx#1#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":356,"end":359}]],"nops":{},"code_map":{"0":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":362,"end":38
1},"1":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":352,"end":359},"2":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":435,"end":443},"3":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":423,"end":444},"4":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":451,"end":453},"5":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":406,"end":455},"6":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":457,"end":459},"7":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":402,"end":460},"8":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":590,"end":598},"9":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":578,"end":599},"10":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":606,"end":608},"11":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":561,"end":610},"12":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":612,"end":614},"13":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":557,"end":615},"14":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61
,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":555,"end":556},"15":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":543,"end":547},"16":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":615,"end":616}},"is_native":false},"2":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":51,"end":618},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":51,"end":618}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/MoveStdlib/bcs.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/MoveStdlib/bcs.move new file mode 100644 index 0000000000000..7e0cec97d2a6d --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/MoveStdlib/bcs.move @@ -0,0 +1,11 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Utility for converting a Move value to its binary representation in BCS (Binary Canonical +/// Serialization). BCS is the binary encoding for Move resources and other non-module values +/// published on-chain. See https://github.com/diem/bcs#binary-canonical-serialization-bcs for more +/// details on BCS. 
+module std::bcs; + +/// Return the binary representation of `v` in BCS (Binary Canonical Serialization) format +public native fun to_bytes(v: &MoveValue): vector; diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/object.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/object.move new file mode 100644 index 0000000000000..8bc0c67c38fc8 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/object.move @@ -0,0 +1,233 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Sui object identifiers +module sui::object; + +use std::bcs; +use sui::address; + +/// Allows calling `.to_address` on an `ID` to get an `address`. +public use fun id_to_address as ID.to_address; + +/// Allows calling `.to_bytes` on an `ID` to get a `vector`. +public use fun id_to_bytes as ID.to_bytes; + +/// Allows calling `.as_inner` on a `UID` to get an `&ID`. +public use fun uid_as_inner as UID.as_inner; + +/// Allows calling `.to_inner` on a `UID` to get an `ID`. +public use fun uid_to_inner as UID.to_inner; + +/// Allows calling `.to_address` on a `UID` to get an `address`. +public use fun uid_to_address as UID.to_address; + +/// Allows calling `.to_bytes` on a `UID` to get a `vector`. +public use fun uid_to_bytes as UID.to_bytes; + +/// The hardcoded ID for the singleton Sui System State Object. +const SUI_SYSTEM_STATE_OBJECT_ID: address = @0x5; + +/// The hardcoded ID for the singleton Clock Object. +const SUI_CLOCK_OBJECT_ID: address = @0x6; + +/// The hardcoded ID for the singleton AuthenticatorState Object. +const SUI_AUTHENTICATOR_STATE_ID: address = @0x7; + +/// The hardcoded ID for the singleton Random Object. +const SUI_RANDOM_ID: address = @0x8; + +/// The hardcoded ID for the singleton DenyList. 
+const SUI_DENY_LIST_OBJECT_ID: address = @0x403; + +/// The hardcoded ID for the Bridge Object. +const SUI_BRIDGE_ID: address = @0x9; + +/// Sender is not @0x0 the system address. +const ENotSystemAddress: u64 = 0; + +/// An object ID. This is used to reference Sui Objects. +/// This is *not* guaranteed to be globally unique--anyone can create an `ID` from a `UID` or +/// from an object, and ID's can be freely copied and dropped. +/// Here, the values are not globally unique because there can be multiple values of type `ID` +/// with the same underlying bytes. For example, `object::id(&obj)` can be called as many times +/// as you want for a given `obj`, and each `ID` value will be identical. +public struct ID has copy, drop, store { + // We use `address` instead of `vector` here because `address` has a more + // compact serialization. `address` is serialized as a BCS fixed-length sequence, + // which saves us the length prefix we would pay for if this were `vector`. + // See https://github.com/diem/bcs#fixed-and-variable-length-sequences. + bytes: address, +} + +/// Globally unique IDs that define an object's ID in storage. Any Sui Object, that is a struct +/// with the `key` ability, must have `id: UID` as its first field. +/// These are globally unique in the sense that no two values of type `UID` are ever equal, in +/// other words for any two values `id1: UID` and `id2: UID`, `id1` != `id2`. +/// This is a privileged type that can only be derived from a `TxContext`. +/// `UID` doesn't have the `drop` ability, so deleting a `UID` requires a call to `delete`. +public struct UID has store { + id: ID, +} + +// === id === + +/// Get the raw bytes of a `ID` +public fun id_to_bytes(id: &ID): vector { + bcs::to_bytes(&id.bytes) +} + +/// Get the inner bytes of `id` as an address. +public fun id_to_address(id: &ID): address { + id.bytes +} + +/// Make an `ID` from raw bytes. 
+public fun id_from_bytes(bytes: vector): ID { + address::from_bytes(bytes).to_id() +} + +/// Make an `ID` from an address. +public fun id_from_address(bytes: address): ID { + ID { bytes } +} + +// === uid === + +#[allow(unused_function)] +/// Create the `UID` for the singleton `SuiSystemState` object. +/// This should only be called once from `sui_system`. +fun sui_system_state(ctx: &TxContext): UID { + assert!(ctx.sender() == @0x0, ENotSystemAddress); + UID { + id: ID { bytes: SUI_SYSTEM_STATE_OBJECT_ID }, + } +} + +/// Create the `UID` for the singleton `Clock` object. +/// This should only be called once from `clock`. +public(package) fun clock(): UID { + UID { + id: ID { bytes: SUI_CLOCK_OBJECT_ID }, + } +} + +/// Create the `UID` for the singleton `AuthenticatorState` object. +/// This should only be called once from `authenticator_state`. +public(package) fun authenticator_state(): UID { + UID { + id: ID { bytes: SUI_AUTHENTICATOR_STATE_ID }, + } +} + +/// Create the `UID` for the singleton `Random` object. +/// This should only be called once from `random`. +public(package) fun randomness_state(): UID { + UID { + id: ID { bytes: SUI_RANDOM_ID }, + } +} + +/// Create the `UID` for the singleton `DenyList` object. +/// This should only be called once from `deny_list`. +public(package) fun sui_deny_list_object_id(): UID { + UID { + id: ID { bytes: SUI_DENY_LIST_OBJECT_ID }, + } +} + +#[allow(unused_function)] +/// Create the `UID` for the singleton `Bridge` object. +/// This should only be called once from `bridge`. +fun bridge(): UID { + UID { + id: ID { bytes: SUI_BRIDGE_ID }, + } +} + +/// Get the inner `ID` of `uid` +public fun uid_as_inner(uid: &UID): &ID { + &uid.id +} + +/// Get the raw bytes of a `uid`'s inner `ID` +public fun uid_to_inner(uid: &UID): ID { + uid.id +} + +/// Get the raw bytes of a `UID` +public fun uid_to_bytes(uid: &UID): vector { + bcs::to_bytes(&uid.id.bytes) +} + +/// Get the inner bytes of `id` as an address. 
+public fun uid_to_address(uid: &UID): address { + uid.id.bytes +} + +// === any object === + +/// Create a new object. Returns the `UID` that must be stored in a Sui object. +/// This is the only way to create `UID`s. +public fun new(ctx: &mut TxContext): UID { + UID { + id: ID { bytes: ctx.fresh_object_address() }, + } +} + +/// Delete the object and it's `UID`. This is the only way to eliminate a `UID`. +// This exists to inform Sui of object deletions. When an object +// gets unpacked, the programmer will have to do something with its +// `UID`. The implementation of this function emits a deleted +// system event so Sui knows to process the object deletion +public fun delete(id: UID) { + let UID { id: ID { bytes } } = id; + delete_impl(bytes) +} + +/// Get the underlying `ID` of `obj` +public fun id(obj: &T): ID { + borrow_uid(obj).id +} + +/// Borrow the underlying `ID` of `obj` +public fun borrow_id(obj: &T): &ID { + &borrow_uid(obj).id +} + +/// Get the raw bytes for the underlying `ID` of `obj` +public fun id_bytes(obj: &T): vector { + bcs::to_bytes(&borrow_uid(obj).id) +} + +/// Get the inner bytes for the underlying `ID` of `obj` +public fun id_address(obj: &T): address { + borrow_uid(obj).id.bytes +} + +/// Get the `UID` for `obj`. +/// Safe because Sui has an extra bytecode verifier pass that forces every struct with +/// the `key` ability to have a distinguished `UID` field. +/// Cannot be made public as the access to `UID` for a given object must be privileged, and +/// restrictable in the object's module. 
+native fun borrow_uid(obj: &T): &UID; + +/// Generate a new UID specifically used for creating a UID from a hash +public(package) fun new_uid_from_hash(bytes: address): UID { + record_new_uid(bytes); + UID { id: ID { bytes } } +} + +// === internal functions === + +// helper for delete +native fun delete_impl(id: address); + +// marks newly created UIDs from hash +native fun record_new_uid(id: address); + +#[test_only] +/// Return the most recent created object ID. +public fun last_created(ctx: &TxContext): ID { + ID { bytes: ctx.last_created_object_id() } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/tx_context.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/tx_context.move new file mode 100644 index 0000000000000..1fdef9ff83a81 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/tx_context.move @@ -0,0 +1,141 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module sui::tx_context; + +#[test_only] +/// Number of bytes in an tx hash (which will be the transaction digest) +const TX_HASH_LENGTH: u64 = 32; + +#[test_only] +/// Expected an tx hash of length 32, but found a different length +const EBadTxHashLength: u64 = 0; + +#[test_only] +/// Attempt to get the most recent created object ID when none has been created. +const ENoIDsCreated: u64 = 1; + +/// Information about the transaction currently being executed. +/// This cannot be constructed by a transaction--it is a privileged object created by +/// the VM and passed in to the entrypoint of the transaction as `&mut TxContext`. 
+public struct TxContext has drop { + /// The address of the user that signed the current transaction + sender: address, + /// Hash of the current transaction + tx_hash: vector, + /// The current epoch number + epoch: u64, + /// Timestamp that the epoch started at + epoch_timestamp_ms: u64, + /// Counter recording the number of fresh id's created while executing + /// this transaction. Always 0 at the start of a transaction + ids_created: u64, +} + +/// Return the address of the user that signed the current +/// transaction +public fun sender(self: &TxContext): address { + self.sender +} + +/// Return the transaction digest (hash of transaction inputs). +/// Please do not use as a source of randomness. +public fun digest(self: &TxContext): &vector { + &self.tx_hash +} + +/// Return the current epoch +public fun epoch(self: &TxContext): u64 { + self.epoch +} + +/// Return the epoch start time as a unix timestamp in milliseconds. +public fun epoch_timestamp_ms(self: &TxContext): u64 { + self.epoch_timestamp_ms +} + +/// Create an `address` that has not been used. As it is an object address, it will never +/// occur as the address for a user. +/// In other words, the generated address is a globally unique object ID. +public fun fresh_object_address(ctx: &mut TxContext): address { + let ids_created = ctx.ids_created; + let id = derive_id(*&ctx.tx_hash, ids_created); + ctx.ids_created = ids_created + 1; + id +} + +#[allow(unused_function)] +/// Return the number of id's created by the current transaction. 
+/// Hidden for now, but may expose later +fun ids_created(self: &TxContext): u64 { + self.ids_created +} + +/// Native function for deriving an ID via hash(tx_hash || ids_created) +native fun derive_id(tx_hash: vector, ids_created: u64): address; + +// ==== test-only functions ==== + +#[test_only] +/// Create a `TxContext` for testing +public fun new( + sender: address, + tx_hash: vector, + epoch: u64, + epoch_timestamp_ms: u64, + ids_created: u64, +): TxContext { + assert!(tx_hash.length() == TX_HASH_LENGTH, EBadTxHashLength); + TxContext { sender, tx_hash, epoch, epoch_timestamp_ms, ids_created } +} + +#[test_only] +/// Create a `TxContext` for testing, with a potentially non-zero epoch number. +public fun new_from_hint( + addr: address, + hint: u64, + epoch: u64, + epoch_timestamp_ms: u64, + ids_created: u64, +): TxContext { + new(addr, dummy_tx_hash_with_hint(hint), epoch, epoch_timestamp_ms, ids_created) +} + +#[test_only] +/// Create a dummy `TxContext` for testing +public fun dummy(): TxContext { + let tx_hash = x"3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532"; + new(@0x0, tx_hash, 0, 0, 0) +} + +#[test_only] +/// Utility for creating 256 unique input hashes. +/// These hashes are guaranteed to be unique given a unique `hint: u64` +fun dummy_tx_hash_with_hint(hint: u64): vector { + let mut tx_hash = std::bcs::to_bytes(&hint); + while (tx_hash.length() < TX_HASH_LENGTH) tx_hash.push_back(0); + tx_hash +} + +#[test_only] +public fun get_ids_created(self: &TxContext): u64 { + ids_created(self) +} + +#[test_only] +/// Return the most recent created object ID. 
+public fun last_created_object_id(self: &TxContext): address { + let ids_created = self.ids_created; + assert!(ids_created > 0, ENoIDsCreated); + derive_id(*&self.tx_hash, ids_created - 1) +} + +#[test_only] +public fun increment_epoch_number(self: &mut TxContext) { + self.epoch = self.epoch + 1 +} + +#[test_only] +public fun increment_epoch_timestamp(self: &mut TxContext, delta_ms: u64) { + self.epoch_timestamp_ms = self.epoch_timestamp_ms + delta_ms +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/m.move new file mode 100644 index 0000000000000..86c1a237632e8 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/m.move @@ -0,0 +1,22 @@ +// Test handling of global locations in the trace. +module global_loc::m; + +public struct SomeObject has key { + id: UID, + num: u8, +} + +fun foo(o: SomeObject, p: u8): u64 { + let n = object::id(&o).to_bytes()[0]; + let SomeObject { id, num } = o; + object::delete(id); + (n as u64) + (num as u64) + (p as u64) +} + +#[test] +fun test() { + let mut ctx = tx_context::dummy(); + let mut _res = foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); + // line below is to force another unoptimized read to keep `res` visible + _res = _res + foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/sources/m.move new file mode 100644 index 0000000000000..86c1a237632e8 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/sources/m.move @@ -0,0 +1,22 @@ +// Test handling of global locations in the trace. 
+module global_loc::m; + +public struct SomeObject has key { + id: UID, + num: u8, +} + +fun foo(o: SomeObject, p: u8): u64 { + let n = object::id(&o).to_bytes()[0]; + let SomeObject { id, num } = o; + object::delete(id); + (n as u64) + (num as u64) + (p as u64) +} + +#[test] +fun test() { + let mut ctx = tx_context::dummy(); + let mut _res = foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); + // line below is to force another unoptimized read to keep `res` visible + _res = _res + foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/test.exp new file mode 100644 index 0000000000000..fee44233ccc7a --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/test.exp @@ -0,0 +1,45 @@ +current frame stack: + function: test (line 21) + scope 0 : + ctx : (0x2::tx_context::TxContext) { + sender : 0000000000000000000000000000000000000000000000000000000000000000 + tx_hash : [ + 0 : 58 + 1 : 152 + 2 : 93 + 3 : 167 + 4 : 79 + 5 : 226 + 6 : 37 + 7 : 178 + 8 : 4 + 9 : 92 + 10 : 23 + 11 : 45 + 12 : 107 + 13 : 211 + 14 : 144 + 15 : 189 + 16 : 133 + 17 : 95 + 18 : 8 + 19 : 110 + 20 : 62 + 21 : 157 + 22 : 82 + 23 : 91 + 24 : 70 + 25 : 191 + 26 : 226 + 27 : 69 + 28 : 17 + 29 : 67 + 30 : 21 + 31 : 50 + ] + epoch : 0 + epoch_timestamp_ms : 0 + ids_created : 1 + } + type: 0x2::tx_context::TxContext + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/trace.spec.js new file mode 100644 index 0000000000000..12de693d855d9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/trace.spec.js @@ -0,0 +1,10 @@ +let action = (runtime) => { + let res = ''; + // step over context creation + runtime.step(true); + // 
step over function creating a global location + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/traces/global_loc__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/traces/global_loc__m__test.json new file mode 100644 index 0000000000000..844dd32b381ac --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/traces/global_loc__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"dummy","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":9,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999965,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999930,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,17
8,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999927,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999924,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999921,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999921,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":14,"function_name":"new","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":7,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}},{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},{"RuntimeValue":{"value":0}},{"RuntimeValue":{"value":0}},{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":"address","ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999921}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999910,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,1]},"root_value_read":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[14,1]},"snapshot":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,
133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999907,"instruction":"VEC_LEN"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[14,1]},"snapshot":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":32}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999897,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":32}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999862,"instruction":"EQ"}},{"Effect":{"Pop":{"RuntimeValue":{"value":32}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":32}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999861,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999860,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999826,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,0]},"root_value_read":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999792,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,1]},"root_value_read":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999774,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,2]},"root_value_read":{"Runti
meValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999756,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,3]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999738,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,4]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999734,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999733,"instruction":"RET"}},{"CloseFrame":{"frame_id":14,"return_":[{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"gas_left":999999733}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999732,"instruction":"RET"}},{"CloseFr
ame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"gas_left":999999732}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999731,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999721,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,2
1,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999721,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":63,"function_name":"new","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":14,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"}],"is_native":false},"gas_left":999999721}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999710,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[63,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_creat
ed":0}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999710,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":68,"function_name":"fresh_object_address","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":4,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"},{"type_":"address","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999710}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999699,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999689,"instruct
ion":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999671,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999670,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Write":{"location":{"Local":[68,2]},"root_value_after_write":{"RuntimeValue":{"value":0}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_le
ft":999999660,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999650,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999616,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,2
26,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999598,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,2]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999598,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":96,"function_name":"derive_id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":6,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},{"RuntimeValue":{"value":0}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null}],"is_native":true},"gas_left":999999598}},{"Effect":{"Push":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"CloseFrame":{"frame_id":96,"return_":[{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}],"gas_left":999999511}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999510,"instruction":"ST_L
OC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Effect":{"Write":{"location":{"Local":[68,1]},"root_value_after_write":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999492,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,2]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999489,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999486,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999476,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999466,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext",
"fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999448,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999414,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,1]},"root_value_read":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Instruction":{"type_parameters":[],"pc":17,"
gas_left":999999413,"instruction":"RET"}},{"CloseFrame":{"frame_id":68,"return_":[{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}],"gas_left":999999413}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999409,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999405,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999404,"instruction":"RET"}},{"CloseFrame":{"frame_id":63,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}],"gas_left":999999404}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999401,"instruction":"LD_U8"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999397,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"
381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999394,"instruction":"LD_U8"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999394,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":143,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":"u8","ref_type":null}],"is_native":false},"gas_left":999999394}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999383,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078
c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999383,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":148,"function_name":"id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":16,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999383}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999372,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[148,0]},"root_value_read":{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999372,"instruc
tion":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":153,"function_name":"borrow_uid","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":20,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":"Imm"}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":true},"gas_left":999999372}},{"Effect":{"DataLoad":{"ref_type":"Imm","location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"CloseFrame":{"frame_id":153,"return_":[{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}],"gas_left":999999309}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999299,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":
{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Global":154},0]},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999263,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Global":154},0]},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Global":154},0]},"root_value_read":{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999262,"instruction":"RET"}},{"CloseFrame":{"frame_id":148,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}],"gas_left":999999262}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999261,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Effect":{"Write":{"location":{"Local":[143,2]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999251,"instruction
":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,2]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999251,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":173,"function_name":"id_to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999251}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999240,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[173,0]},"root_value_read":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999230,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f4
09"}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[143,2]},0]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999230,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":181,"function_name":"to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"bcs"},"binary_member_index":0,"type_instantiation":["address"],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[143,2]},0]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":"address","ref_type":"Imm"}],"is_native":true},"gas_left":999999230}},{"Effect":{"Push":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"CloseFrame":{"frame_id":181,"return_":[{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}],"gas_left":999999131}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999130,"instruction":"RET"}},{"CloseFrame":{"frame_id":173,"return_":[{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}],"gas_left":999999130}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999129,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Effect":{"Write":{"location":{"Local":[143,3]},"root_value_after_write":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}}},{"Instruction":
{"type_parameters":[],"pc":6,"gas_left":999999119,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,3]},"root_value_read":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,3]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999116,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999106,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[143,3]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[143,3]},0]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999088,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[143,3]},0]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[143,3]},0]},"root_value_read":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":56}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999087,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":56}}}},{"Effect":{"Write":{"location":{"Local":[143,4]},"root_value_after_write":{"RuntimeValue":{"value":56}}}}},{"Instruction":{"type_parameter
s":[],"pc":11,"gas_left":999999031,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999028,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999027,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Local":[143,5]},"root_value_after_write":{"RuntimeValue":{"value":42}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999027,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":216,"function_name":"delete","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":15,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}],"ret
urn_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":999999027}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998988,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[216,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998986,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998984,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998984,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":227,"function_name":"delete_impl","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":22,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127
b29583851217f948d62131f409"}}],"return_types":[],"locals_types":[{"type_":"address","ref_type":null}],"is_native":true},"gas_left":999998984}},{"CloseFrame":{"frame_id":227,"return_":[],"gas_left":999998930}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998929,"instruction":"RET"}},{"CloseFrame":{"frame_id":216,"return_":[],"gas_left":999998929}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998911,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,4]},"root_value_read":{"RuntimeValue":{"value":56}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":56}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999998908,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":56}}}},{"Effect":{"Push":{"RuntimeValue":{"value":56}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998890,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,5]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999998887,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999998884,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":56}}}},{"Effect":{"Push":{"RuntimeValue":{"value":98}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999998866,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999998863,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":99999
8860,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":98}}}},{"Effect":{"Push":{"RuntimeValue":{"value":140}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999998859,"instruction":"RET"}},{"CloseFrame":{"frame_id":143,"return_":[{"RuntimeValue":{"value":140}}],"gas_left":999998859}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998849,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998849,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":263,"function_name":"new","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":14,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","mod
ule":"object","name":"UID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"}],"is_native":false},"gas_left":999998849}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998838,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[263,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998838,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":268,"function_name":"fresh_object_address","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":4,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"00000000000000000000000000000000000000000000
00000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"},{"type_":"address","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999998838}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998827,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998817,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998799,
"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998798,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Write":{"location":{"Local":[268,2]},"root_value_after_write":{"RuntimeValue":{"value":1}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998788,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{
"Instruction":{"type_parameters":[],"pc":5,"gas_left":999998778,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999998744,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999998726,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"lo
cation":{"Local":[268,2]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998726,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":296,"function_name":"derive_id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":6,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},{"RuntimeValue":{"value":1}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null}],"is_native":true},"gas_left":999998726}},{"Effect":{"Push":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"CloseFrame":{"frame_id":296,"return_":[{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}],"gas_left":999998639}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998638,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Effect":{"Write":{"location":{"Local":[268,1]},"root_value_after_write":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998620,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,2]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998617,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998614,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":
{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998604,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999998594,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998576,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0
x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":2}}}}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999998542,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,1]},"root_value_read":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998541,"instruction":"RET"}},{"CloseFrame":{"frame_id":268,"return_":[{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}],"gas_left":999998541}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998537,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998533,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Push
":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998532,"instruction":"RET"}},{"CloseFrame":{"frame_id":263,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}],"gas_left":999998532}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998529,"instruction":"LD_U8"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998525,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998522,"instruction":"LD_U8"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998522,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":343,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},{"RuntimeValue":{"value":42}}],"return_types":[
{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":"u8","ref_type":null}],"is_native":false},"gas_left":999998522}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998511,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998511,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":348,"function_name":"id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":16,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}],"return_types":[{"
type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999998511}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998500,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[348,0]},"root_value_read":{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998500,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":353,"function_name":"borrow_uid","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":20,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}
},"ref_type":"Imm"}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":true},"gas_left":999998500}},{"Effect":{"DataLoad":{"ref_type":"Imm","location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"CloseFrame":{"frame_id":353,"return_":[{"ImmRef":{"location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}],"gas_left":999998437}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998427,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Global":354},0]},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998391,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Global":354},0]},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Global":354},0]},"root_value_read":{"ImmRef":{"location":{"Global":354},"
snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998390,"instruction":"RET"}},{"CloseFrame":{"frame_id":348,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}],"gas_left":999998390}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998389,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Write":{"location":{"Local":[343,2]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998379,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,2]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998379,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":373,"function_name":"id_to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::ob
ject::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999998379}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998368,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[373,0]},"root_value_read":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998358,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[343,2]},0]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998358,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":381,"function_name":"to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"bcs"},"binary_member_index":0,"type_instantiation":["address"],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[343,2]},0]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":"addres
s","ref_type":"Imm"}],"is_native":true},"gas_left":999998358}},{"Effect":{"Push":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"CloseFrame":{"frame_id":381,"return_":[{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}],"gas_left":999998259}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998258,"instruction":"RET"}},{"CloseFrame":{"frame_id":373,"return_":[{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}],"gas_left":999998258}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999998257,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Effect":{"Write":{"location":{"Local":[343,3]},"root_value_after_write":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999998247,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,3]},"root_value_read":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,3]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999998244,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998234,"instruction":"VEC_IMM_BOR
ROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[343,3]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[343,3]},0]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998216,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[343,3]},0]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[343,3]},0]},"root_value_read":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":238}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998215,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":238}}}},{"Effect":{"Write":{"location":{"Local":[343,4]},"root_value_after_write":{"RuntimeValue":{"value":238}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998159,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruc
tion":{"type_parameters":[],"pc":12,"gas_left":999998156,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998155,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Local":[343,5]},"root_value_after_write":{"RuntimeValue":{"value":42}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999998155,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":416,"function_name":"delete","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":15,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}],"return_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":999998155}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998116,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[416,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::o
bject::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998114,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998112,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998112,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":427,"function_name":"delete_impl","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":22,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}],"return_types":[],"locals_types":[{"type_":"address","ref_type":null}],"is_native":true},"gas_left":999998112}},{"CloseFrame":{"frame_id":427,"return_":[],"gas_left":999998058}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998057,"instruction":"RET"}},{"CloseFrame":{"frame_id":416,"return_":[],"gas_left":999998057}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998039,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,4]},"root_value_read":{"RuntimeValue":{"value":238}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":238}}}},{"Instruction":{"type_parameters
":[],"pc":16,"gas_left":999998036,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":238}}}},{"Effect":{"Push":{"RuntimeValue":{"value":238}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998018,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,5]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999998015,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999998012,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":238}}}},{"Effect":{"Push":{"RuntimeValue":{"value":280}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999997994,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999997991,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999997988,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":280}}}},{"Effect":{"Push":{"RuntimeValue":{"value":322}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999997987,"instruction":"RET"}},{"CloseFrame":{"frame_id":343,"return_":[{"RuntimeValue":{"value":322}}],"gas_left":999997987}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999997984,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":322}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":140}}}},{"Effect":{"Push":{"RuntimeValue":{"value":462}}}},{"Instruction":{"type_parameters":[],"pc":15,
"gas_left":999997983,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":462}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999997982,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999997982}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/Move.toml new file mode 100644 index 0000000000000..bd6df8584e0bf --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "native_fun" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +native_fun = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..ed85ed416f69dcb7aa713b0da4c010ecef76082f GIT binary patch literal 369 zcmbtQOG*SW5Ph$bPBNWlGzha0(XDs_m+m}(xRhql^w=Z61I2Z@kLI)|{5;W#6kNvPaeH`O9$9C#>Lma2JOJkeklnbT0lw3bN#kV$fd@<9~ zpl5b<82Y@_9shalYwD*o=tJ(V%M1Upa;vGH<9?rN5ylzz^E~C(XtZb0jq-Y}g7UJF jq5*hh5wxYb`rRAQpe`Dfq<92uScYhbp5, + } + + /// An ASCII character. + public struct Char has copy, drop, store { + byte: u8, + } + + /// Convert a `byte` into a `Char` that is checked to make sure it is valid ASCII. + public fun char(byte: u8): Char { + assert!(is_valid_char(byte), EInvalidASCIICharacter); + Char { byte } + } + + /// Convert a vector of bytes `bytes` into an `String`. Aborts if + /// `bytes` contains non-ASCII characters. 
+ public fun string(bytes: vector): String { + let x = try_string(bytes); + assert!(x.is_some(), EInvalidASCIICharacter); + x.destroy_some() + } + + /// Convert a vector of bytes `bytes` into an `String`. Returns + /// `Some()` if the `bytes` contains all valid ASCII + /// characters. Otherwise returns `None`. + public fun try_string(bytes: vector): Option { + let is_valid = bytes.all!(|byte| is_valid_char(*byte)); + if (is_valid) option::some(String { bytes }) + else option::none() + } + + /// Returns `true` if all characters in `string` are printable characters + /// Returns `false` otherwise. Not all `String`s are printable strings. + public fun all_characters_printable(string: &String): bool { + string.bytes.all!(|byte| is_printable_char(*byte)) + } + + /// Push a `Char` to the end of the `string`. + public fun push_char(string: &mut String, char: Char) { + string.bytes.push_back(char.byte); + } + + /// Pop a `Char` from the end of the `string`. + public fun pop_char(string: &mut String): Char { + Char { byte: string.bytes.pop_back() } + } + + /// Returns the length of the `string` in bytes. + public fun length(string: &String): u64 { + string.as_bytes().length() + } + + /// Append the `other` string to the end of `string`. + public fun append(string: &mut String, other: String) { + string.bytes.append(other.into_bytes()) + } + + /// Insert the `other` string at the `at` index of `string`. + public fun insert(s: &mut String, at: u64, o: String) { + assert!(at <= s.length(), EInvalidIndex); + o.into_bytes().destroy!(|e| s.bytes.insert(e, at)); + } + + /// Copy the slice of the `string` from `i` to `j` into a new `String`. 
+ public fun substring(string: &String, i: u64, j: u64): String { + assert!(i <= j && j <= string.length(), EInvalidIndex); + let mut bytes = vector[]; + i.range_do!(j, |i| bytes.push_back(string.bytes[i])); + String { bytes } + } + + /// Get the inner bytes of the `string` as a reference + public fun as_bytes(string: &String): &vector { + &string.bytes + } + + /// Unpack the `string` to get its backing bytes + public fun into_bytes(string: String): vector { + let String { bytes } = string; + bytes + } + + /// Unpack the `char` into its underlying bytes. + public fun byte(char: Char): u8 { + let Char { byte } = char; + byte + } + + /// Returns `true` if `b` is a valid ASCII character. + /// Returns `false` otherwise. + public fun is_valid_char(b: u8): bool { + b <= 0x7F + } + + /// Returns `true` if `byte` is an printable ASCII character. + /// Returns `false` otherwise. + public fun is_printable_char(byte: u8): bool { + byte >= 0x20 && // Disallow metacharacters + byte <= 0x7E // Don't allow DEL metacharacter + } + + /// Returns `true` if `string` is empty. + public fun is_empty(string: &String): bool { + string.bytes.is_empty() + } + + /// Convert a `string` to its uppercase equivalent. + public fun to_uppercase(string: &String): String { + let bytes = string.as_bytes().map_ref!(|byte| char_to_uppercase(*byte)); + String { bytes } + } + + /// Convert a `string` to its lowercase equivalent. + public fun to_lowercase(string: &String): String { + let bytes = string.as_bytes().map_ref!(|byte| char_to_lowercase(*byte)); + String { bytes } + } + + /// Computes the index of the first occurrence of the `substr` in the `string`. + /// Returns the length of the `string` if the `substr` is not found. + /// Returns 0 if the `substr` is empty. 
+ public fun index_of(string: &String, substr: &String): u64 { + let mut i = 0; + let (n, m) = (string.length(), substr.length()); + if (n < m) return n; + while (i <= n - m) { + let mut j = 0; + while (j < m && string.bytes[i + j] == substr.bytes[j]) j = j + 1; + if (j == m) return i; + i = i + 1; + }; + n + } + + /// Convert a `char` to its lowercase equivalent. + fun char_to_uppercase(byte: u8): u8 { + if (byte >= 0x61 && byte <= 0x7A) byte - 0x20 + else byte + } + + /// Convert a `char` to its lowercase equivalent. + fun char_to_lowercase(byte: u8): u8 { + if (byte >= 0x41 && byte <= 0x5A) byte + 0x20 + else byte + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/string.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/string.move new file mode 100644 index 0000000000000..0939b2cbe45f3 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/string.move @@ -0,0 +1,137 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// The `string` module defines the `String` type which represents UTF8 encoded +/// strings. +module std::string { + use std::ascii; + + /// An invalid UTF8 encoding. + const EInvalidUTF8: u64 = 1; + + /// Index out of range. + const EInvalidIndex: u64 = 2; + + /// A `String` holds a sequence of bytes which is guaranteed to be in utf8 + /// format. + public struct String has copy, drop, store { + bytes: vector, + } + + /// Creates a new string from a sequence of bytes. Aborts if the bytes do + /// not represent valid utf8. 
+ public fun utf8(bytes: vector): String { + assert!(internal_check_utf8(&bytes), EInvalidUTF8); + String { bytes } + } + + /// Convert an ASCII string to a UTF8 string + public fun from_ascii(s: ascii::String): String { + String { bytes: s.into_bytes() } + } + + /// Convert an UTF8 string to an ASCII string. + /// Aborts if `s` is not valid ASCII + public fun to_ascii(s: String): ascii::String { + let String { bytes } = s; + bytes.to_ascii_string() + } + + /// Tries to create a new string from a sequence of bytes. + public fun try_utf8(bytes: vector): Option { + if (internal_check_utf8(&bytes)) option::some(String { bytes }) + else option::none() + } + + /// Returns a reference to the underlying byte vector. + public fun as_bytes(s: &String): &vector { + &s.bytes + } + + /// Unpack the `string` to get its underlying bytes. + public fun into_bytes(s: String): vector { + let String { bytes } = s; + bytes + } + + /// Checks whether this string is empty. + public fun is_empty(s: &String): bool { + s.bytes.is_empty() + } + + /// Returns the length of this string, in bytes. + public fun length(s: &String): u64 { + s.bytes.length() + } + + /// Appends a string. + public fun append(s: &mut String, r: String) { + s.bytes.append(r.bytes) + } + + /// Appends bytes which must be in valid utf8 format. + public fun append_utf8(s: &mut String, bytes: vector) { + s.append(utf8(bytes)) + } + + /// Insert the other string at the byte index in given string. The index + /// must be at a valid utf8 char boundary. 
+ public fun insert(s: &mut String, at: u64, o: String) { + let bytes = &s.bytes; + assert!( + at <= bytes.length() && internal_is_char_boundary(bytes, at), + EInvalidIndex, + ); + let l = s.length(); + let mut front = s.substring(0, at); + let end = s.substring(at, l); + front.append(o); + front.append(end); + *s = front; + } + + /// Returns a sub-string using the given byte indices, where `i` is the first + /// byte position and `j` is the start of the first byte not included (or the + /// length of the string). The indices must be at valid utf8 char boundaries, + /// guaranteeing that the result is valid utf8. + public fun substring(s: &String, i: u64, j: u64): String { + let bytes = &s.bytes; + let l = bytes.length(); + assert!( + j <= l && + i <= j && + internal_is_char_boundary(bytes, i) && + internal_is_char_boundary(bytes, j), + EInvalidIndex, + ); + String { bytes: internal_sub_string(bytes, i, j) } + } + + /// Computes the index of the first occurrence of a string. Returns `s.length()` + /// if no occurrence found. 
+ public fun index_of(s: &String, r: &String): u64 { + internal_index_of(&s.bytes, &r.bytes) + } + + // Native API + + native fun internal_check_utf8(v: &vector): bool; + native fun internal_is_char_boundary(v: &vector, i: u64): bool; + native fun internal_sub_string(v: &vector, i: u64, j: u64): vector; + native fun internal_index_of(v: &vector, r: &vector): u64; + + #[test_only] + public fun internal_sub_string_for_testing(v: &vector, i: u64, j: u64): vector { + internal_sub_string(v, i, j) + } + + // === Deprecated === + + #[deprecated(note = b"Use `std::string::as_bytes` instead.")] + public fun bytes(s: &String): &vector { s.as_bytes() } + + #[deprecated(note = b"Use `std::string::substring` instead.")] + public fun sub_string(s: &String, i: u64, j: u64): String { + s.substring(i, j) + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/vector.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. + /// This will return `None` if the vector is not valid utf8. 
+ public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. + public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty(): vector; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length(v: &vector): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow(v: &vector, i: u64): ∈ + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. + public native fun push_back(v: &mut vector, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut(v: &mut vector, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back(v: &mut vector): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty. + public native fun destroy_empty(v: vector); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap(v: &mut vector, i: u64, j: u64); + + /// Return an vector of size one containing element `e`. 
+ public fun singleton(e: Element): vector { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. + public fun reverse(v: &mut vector) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append(lhs: &mut vector, mut other: vector) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty(v: &vector): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. + public fun contains(v: &vector, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of(v: &vector, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun remove(v: &mut vector, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. 
+ /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i > v.length()` + public fun insert(v: &mut vector, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove(v: &mut vector, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. + public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector). + public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. 
+ /// The function `f` takes a mutable reference to the element. + public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. + /// Return a new vector containing only the elements for which `f` returns `true`. + public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. 
+ public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`. + public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. 
+ public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. 
+ /// The order of elements in the vectors is preserved. + public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/m.move new file mode 100644 index 0000000000000..1c64328ea361b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/m.move @@ -0,0 +1,14 @@ +// Test native function execution (vector length). +module native_fun::m; + +use std::string::{String, utf8, index_of}; + +fun foo(s: String, sub: vector, p: u64): u64 { + s.index_of(&utf8(sub)) + p +} + +#[test] +fun test() { + let mut _res = foo(utf8(b"hello"), b"e", 42); + _res = _res + foo(utf8(b"hello"), b"l", _res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/sources/m.move new file mode 100644 index 0000000000000..1c64328ea361b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/sources/m.move @@ -0,0 +1,14 @@ +// Test native function execution (vector length). 
+module native_fun::m; + +use std::string::{String, utf8, index_of}; + +fun foo(s: String, sub: vector, p: u64): u64 { + s.index_of(&utf8(sub)) + p +} + +#[test] +fun test() { + let mut _res = foo(utf8(b"hello"), b"e", 42); + _res = _res + foo(utf8(b"hello"), b"l", _res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/test.exp new file mode 100644 index 0000000000000..480d23d264c6f --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/test.exp @@ -0,0 +1,6 @@ +current frame stack: + function: test (line 13) + scope 0 : + _res : 43 + type: u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/trace.spec.js new file mode 100644 index 0000000000000..2c2fa18537ff9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/trace.spec.js @@ -0,0 +1,8 @@ +let action = (runtime) => { + let res = ''; + // step over a function containing a native call + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/traces/native_fun__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/traces/native_fun__m__test.json new file mode 100644 index 0000000000000..979d3eac38d65 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/traces/native_fun__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999992,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999992,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[104,101,108,108,111]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999992}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999981,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[4,0]},"snapshot":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999981,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":9,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[4,0]},"snapshot":[104,101,108,108,111]}}],"return_types":[{"type_":"bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is
_native":true},"gas_left":999999981}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":9,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999900}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999899,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999898,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999891,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999887,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999886,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}],"gas_left":999999886}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999882,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[101]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999879,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999879,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":28,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"RuntimeValue":{"value":[101]}},{"RuntimeValue":{"value":42}}],"return_type
s":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999879}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999868,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999867,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Write":{"location":{"Local":[28,4]},"root_value_after_write":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999864,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,1]},"root_value_read":{"RuntimeValue":{"value":[101]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[101]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999864,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":39,"function_name":"utf8","module":{"address":"00000000000000000000000000000000000000000000000000
00000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[101]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999864}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999853,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[39,0]},"root_value_read":{"RuntimeValue":{"value":[101]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[39,0]},"snapshot":[101]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999853,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":44,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[39,0]},"snapshot":[101]}}],"return_types":[{"type_":"bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999853}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":44,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999780}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999779,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999778,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999775,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[39,0]},"root_value_read":{"RuntimeValue":{"value":[101]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[101]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999771,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeVal
ue":{"value":[101]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999770,"instruction":"RET"}},{"CloseFrame":{"frame_id":39,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}],"gas_left":999999770}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999769,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Effect":{"Write":{"location":{"Local":[28,3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999759,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,4]},"root_value_read":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999749,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,3]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999749,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":68,"function_name":"index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":12,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"lo
cation":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999749}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999738,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999728,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[28,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999718,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,1]},"root_value_read":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999708,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"byte
s":[101]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[28,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999708,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":82,"function_name":"internal_index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":16,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[28,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"location":{"Indexed":[{"Local":[28,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"},{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999708}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"CloseFrame":{"frame_id":82,"return_":[{"RuntimeValue":{"value":1}}],"gas_left":999999633}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999632,"instruction":"RET"}},{"CloseFrame":{"frame_id":68,"return_":[{"RuntimeValue":{"value":1}}],"gas_left":999999632}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999614,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,2]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999611,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999610,"instruction":"RET"}},{"CloseFrame":{"frame_id":28,"return_":[{"RuntimeValue":{"value":43}}],"gas_left":999999610}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999609
,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":43}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":43}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999591,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":43}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999583,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999583,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":105,"function_name":"utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[104,101,108,108,111]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999583}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999572,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[105,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[105,0]},"snapshot":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999572,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":110,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[105,0]},"snapshot":[104,101,108,108,111]}}],"return_types":[{"type_":"
bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999572}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":110,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999491}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999490,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999489,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999482,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[105,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999478,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999477,"instruction":"RET"}},{"CloseFrame":{"frame_id":105,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}],"gas_left":999999477}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999473,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[108]}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999455,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":43}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999455,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":130,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type
_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"RuntimeValue":{"value":[108]}},{"RuntimeValue":{"value":43}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999455}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999444,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999443,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Write":{"location":{"Local":[130,4]},"root_value_after_write":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999440,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,1]},"root_value_read":{"RuntimeValue":{"value":[108]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[108]}}}},{"Ins
truction":{"type_parameters":[],"pc":3,"gas_left":999999440,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":141,"function_name":"utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[108]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999440}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999429,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[141,0]},"root_value_read":{"RuntimeValue":{"value":[108]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[141,0]},"snapshot":[108]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999429,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":146,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[141,0]},"snapshot":[108]}}],"return_types":[{"type_":"bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999429}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":146,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999356}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999355,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999354,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999351,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[141,0]},"root_value_read
":{"RuntimeValue":{"value":[108]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[108]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999347,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[108]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999346,"instruction":"RET"}},{"CloseFrame":{"frame_id":141,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}],"gas_left":999999346}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999345,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Effect":{"Write":{"location":{"Local":[130,3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999335,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,4]},"root_value_read":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999325,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,3]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999325,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":170,"function_name":"index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000
000000001","name":"string"},"binary_member_index":12,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999325}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999314,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[170,0]},"root_value_read":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999304,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[130,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999294,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[170,1]},"root_value_read":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fi
elds":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999284,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[130,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999284,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":184,"function_name":"internal_index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":16,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[130,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"location":{"Indexed":[{"Local":[130,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"},{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999284}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"CloseFrame":{"frame_id":184,"return_":[{"RuntimeValue":{"value":2}}],"gas_left":999999207}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999206,"instruction":"RET"}},{"CloseFrame":{"frame_id":170,"return_":[{"RuntimeValue":{"value":2}}],"gas_left":999999206}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999188,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,2]},"root_value_read":{"RuntimeValue":{"value":43}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999185,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":43}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Push":{"RuntimeValue":{"value":45}}}}
,{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999184,"instruction":"RET"}},{"CloseFrame":{"frame_id":130,"return_":[{"RuntimeValue":{"value":45}}],"gas_left":999999184}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999181,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":45}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":43}}}},{"Effect":{"Push":{"RuntimeValue":{"value":88}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999180,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":88}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999179,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999179}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/Move.toml new file mode 100644 index 0000000000000..441337ac1f606 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "references" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +references = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/dependencies/MoveStdlib/vector.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/dependencies/MoveStdlib/vector.mv new file mode 100644 index 0000000000000000000000000000000000000000..79c6d2eb99157a351717e0a6c0cc80cc20a46ce2 GIT binary patch literal 1123 zcmaJ=%}(4f5T3CeCmCm#-35WvB9-mZ_SQ4UQbGF|(XwkGDkM=7{vN0IK1vTg_0(%m zeTmLE5J4qs$)BD1`DQ#b`MUeZSpY~7j5f9DOmw@Bf8h^IzUe*tT~qc`|2D7%Gq8XN zKuR;Bi5N2$0jQY0!KK8=QpSlB+#s)MxVdr~z` zbwmA;UtV>|sw%r>QMNBujbC;?Z;P|iH*J1eHF@XTt|-q&Yd)LmF87ypcNYarigN95 
z^Xk;HuW!1fXnVE2@a0)|p43&HpDb5j()y}B4>#NR%j(+4jlbpqK5d!$!gp0^+M8wV z1oGY_y()_?M4t?|ysnD2D($sjbydUl``<#HaLalNR8k|Zj^e^9FuWt)mUqls+KTZ9 zdZ5d2gbt3#p-QD+ZwTHWQl?4E2!%B_j(e|$?@T#u**2`53`r3#%xmH?BK%BF4a#jc z#o>!hqp2{-c))ao2QbD1+%s7e6O8bfjwn)W;PBIs9}}=~cT7tDE&b}DKm64F2TlTqN7TQ5H}paO q+lOd_!>fr<>@bCt-A2@>6eu5M#np7^6jMmpDH;GXJZ(DTDf$cNL2%3f literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..d9ae7daf44225ae558ee3aa99f6872d33a2f5c10 GIT binary patch literal 612 zcmbtSJ5Iwu5S`C@ZLimf6WJ*c5)Gn20@{=axPcE~q2NT85<9XD_o1TW033mH&~XDQ z#&+b;v&HV{&6_tn`}XzpJJ0}N5Hx9zCui!4oSSnueM9dDZ_Niq_LIIad{-Vg7~lku zq<{ewAb>{_84}3KhJ+rF>u3}%yB@fPH6ue0v&-lM8XuTFFpS39wAk&pdD_;`tJb8| z?hu}=>in_Iv!>XTdEI2|s?OTHX^V2>tc!g6#OtcUoo)K^tcUzYvfI3^%AgUUtm#GV za10dHY531&(e`T57-jpaXsR-L$yd^|yV6jg1!${U2L8`NAoe}f9LmsyEKerSccCv2 zq6Vfg#l%o75hb#5XG*{B(eg+PI5GG)rH(0;1QZ7lQM?H7dL-RpydW`3Y#FinQDelG uFo(`YaHAR8BVj4VhNRu$Ht>LBM++f_G=Z7e&ZvOYC^l2wCy}l)Is5{zyH8vI literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/dependencies/MoveStdlib/vector.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/dependencies/MoveStdlib/vector.json new file mode 100644 index 0000000000000..126416a07eb7e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/dependencies/MoveStdlib/vector.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":261,"end":267},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","vector"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1202,"end":1207},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1208,"end":1215}]],"parameters":[],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1220,"end":1235}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1329,"end":1335},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1336,"end":1343}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1345,"end":1346}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1367,"end":1370}],"locals":[],"nops":{},"code_map":{},"is_native":true},"2":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1563,"end":1569},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,1
08,231,168,248,67,118,176,80,79],"start":1570,"end":1577}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1579,"end":1580}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1600,"end":1601}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1609,"end":1617}],"locals":[],"nops":{},"code_map":{},"is_native":true},"3":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1724,"end":1733},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1734,"end":1741}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1743,"end":1744}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1768,"end":1769}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"4":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1968,"end":1978},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1979,"end":1986}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1988,"end":1989}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164
,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2013,"end":2014}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2022,"end":2034}],"locals":[],"nops":{},"code_map":{},"is_native":true},"5":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2170,"end":2178},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2179,"end":2186}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2188,"end":2189}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2214,"end":2221}],"locals":[],"nops":{},"code_map":{},"is_native":true},"6":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2342,"end":2355},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2356,"end":2363}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2365,"end":2366}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2561,"end":2565},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,
152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2566,"end":2573}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2575,"end":2576}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2600,"end":2601}],["j#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2608,"end":2609}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"8":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2694,"end":2703},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2704,"end":2711}]],"parameters":[["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2713,"end":2714}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2726,"end":2741}],"locals":[["v#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2760,"end":2761}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2764,"end":2771},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2756,"end":2761},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,8
8,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2782},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2793,"end":2794},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2795},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2805,"end":2806}},"is_native":false},"9":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2900,"end":2907},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2908,"end":2915}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2917,"end":2918}]],"returns":[],"locals":[["back_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3057,"end":3067}],["front_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3024,"end":3035}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2963},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2972},"3":{"fil
e_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2986,"end":2989},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2993,"end":2994},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2990,"end":2992},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2982,"end":3005},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2996,"end":3005},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3038,"end":3039},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3020,"end":3035},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3070,"end":3073},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3076,"end":3077},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3074,"end":3075},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3053,"end":3067},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,8
8,201,108,231,168,248,67,118,176,80,79],"start":3094,"end":3105},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3108,"end":3118},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3106,"end":3107},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3135},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3141,"end":3152},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3154,"end":3164},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3165},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3193,"end":3204},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3207,"end":3208},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3205,"end":3206},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3179,"end":3190},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3235,"end":3245},"30":{"file_has
h":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3248,"end":3249},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3246,"end":3247},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3222,"end":3232},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260}},"is_native":false},"10":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3363,"end":3369},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3370,"end":3377}]],"parameters":[["lhs#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3379,"end":3382}],["other#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3410,"end":3415}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3449},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3459},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3482},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,16
8,248,67,118,176,80,79],"start":3477,"end":3493},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3476,"end":3477},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3498},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3514},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3525},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3526},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3541},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3557,"end":3558}},"is_native":false},"11":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3660,"end"
:3668},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3669,"end":3676}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3678,"end":3679}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3700,"end":3704}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3716},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3725},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3729,"end":3730},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3726,"end":3728},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3730}},"is_native":false},"12":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3836,"end":3844},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3845,"end":3852}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3854,"end":3855}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,
170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3875,"end":3876}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3889,"end":3893}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3912,"end":3913}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3916,"end":3917},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3908,"end":3913},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3938},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3947},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3964,"end":3965},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3968,"end":3971},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3966,"end":3967},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,
5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3992,"end":3993},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3994,"end":3995},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3991,"end":3996},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4000,"end":4001},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3997,"end":3999},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3987,"end":4014},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4010,"end":4014},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4032,"end":4033},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4036,"end":4037},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4034,"end":4035},"25":{"file_
hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4028,"end":4029},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4058,"end":4063}},"is_native":false},"13":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4196,"end":4204},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4205,"end":4212}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4214,"end":4215}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4235,"end":4236}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4250,"end":4254},{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4256,"end":4259}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4279,"end":4280}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,16
8,248,67,118,176,80,79],"start":4283,"end":4284},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4275,"end":4280},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4305},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4314},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4331,"end":4332},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4335,"end":4338},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4333,"end":4334},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4359,"end":4360},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4361,"end":4362},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4358,"end":4363},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4367,"end":4368},"14":{"file_hash":[42,57,43,60,190,49,1
64,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4364,"end":4366},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4354,"end":4386},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4378,"end":4382},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4384,"end":4385},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4404,"end":4405},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4408,"end":4409},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4406,"end":4407},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4400,"end":4401},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67
,118,176,80,79],"start":4431,"end":4436},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4438,"end":4439},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440}},"is_native":false},"14":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4660,"end":4666},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4667,"end":4674}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4676,"end":4677}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4705,"end":4706}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4714,"end":4721}],"locals":[["%#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4740,"end":4743}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4747},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,
218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4756},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4736,"end":4743},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4804,"end":4805},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4809,"end":4812},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4806,"end":4808},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4800,"end":4840},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4820,"end":4840},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4857,"end":4860},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4863,"end":4864},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4861,"end":4862},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"sta
rt":4851,"end":4854},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4881,"end":4882},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4885,"end":4888},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4883,"end":4884},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4906,"end":4907},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4910,"end":4911},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4908,"end":4909},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4902,"end":4903},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,17
0,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4913,"end":4914},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4917},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4928},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4939}},"is_native":false},"15":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5290,"end":5296},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5297,"end":5304}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5306,"end":5307}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5331,"end":5332}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5347,"end":5348}]],"returns":[],"locals":[["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375
,"end":5376},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5385},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5426,"end":5427},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5430,"end":5433},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5428,"end":5429},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5422,"end":5461},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5441,"end":5461},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5473},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5484,"end":5485},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5486},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,2
44,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5503,"end":5504},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5507,"end":5510},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5505,"end":5506},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5527},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5533,"end":5534},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5536,"end":5539},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5540},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5558,"end":5559},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5562,"end":5563},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5560,"end":5561},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5554,"end":5555},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":
5573}},"is_native":false},"16":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5812,"end":5823},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5824,"end":5831}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5833,"end":5834}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5858,"end":5859}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5867,"end":5874}],"locals":[["last_idx#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5895},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5906},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5893,"end":5894},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5908,"end":5928},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5
,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5955},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5964},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5967,"end":5968},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5965,"end":5966},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5979},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5985,"end":5986},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5988,"end":5996},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5997},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6008},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6019}},"is_native":false},"17":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80
,79],"start":249,"end":13981},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981}},"is_native":false}},"constant_map":{"EINDEX_OUT_OF_BOUNDS":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/m.json new file mode 100644 index 0000000000000..3b00c96ae5de9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":54,"end":55},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":72,"end":82},"type_parameters":[],"fields":[{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":98,"end":110},{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":130,"end":142},{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":153,"end":169}]},"1":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":201,"end":213},"type_parameters":[],"fields":[{"file_hash":[229,240,178,48,60,183,228,141,55,
148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":235,"end":240}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":254,"end":257},"type_parameters":[],"parameters":[["some_struct_ref#0#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":263,"end":278}],["vec_ref#0#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":301,"end":308}],["num_ref#0#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":332,"end":339}]],"returns":[{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":350,"end":353}],"locals":[["e1#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":455,"end":457}],["e2#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":545,"end":547}]],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":397,"end":399},"1":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":360,"end":375},"2":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":360,"end":394},"4":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234]
,"start":360,"end":399},"5":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":437,"end":444},"6":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":436,"end":444},"7":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":405,"end":420},"8":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":405,"end":433},"9":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":405,"end":444},"10":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":484,"end":499},"11":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":479,"end":516},"12":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":518,"end":519},"13":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":460,"end":520},"14":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":455,"end":457},"15":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":532,"end":534},"16":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":527,"end":529},"17":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,12
4,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":526,"end":534},"18":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":569,"end":576},"19":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":578,"end":579},"20":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":550,"end":580},"21":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":545,"end":547},"22":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":592,"end":594},"23":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":587,"end":589},"24":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":586,"end":594},"25":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":601,"end":608},"26":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":600,"end":608},"27":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":611,"end":626},"28":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":611,"end":639},"30":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":609,"end":610},"31":{"file_hash":[229,240,178,
48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":642,"end":649},"33":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":650,"end":651},"34":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":642,"end":652},"36":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":640,"end":641},"37":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":600,"end":652}},"is_native":false},"1":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":660,"end":671},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":675,"end":685}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":749,"end":750},"1":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":727,"end":752},"2":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":776,"end":777},"3":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":823,"end":824},"4":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":805,"end":825},"5":{"file_hash":[22
9,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":692,"end":832}},"is_native":false},"2":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":848,"end":852},"type_parameters":[],"parameters":[],"returns":[],"locals":[["num#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":982,"end":985}],["some_struct#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":869,"end":880}],["vec#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":910,"end":913}]],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":883,"end":896},"1":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":865,"end":880},"2":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":934,"end":935},"3":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":916,"end":936},"4":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":906,"end":913},"5":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":960,"end":968},"6":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"
start":970,"end":971},"7":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":942,"end":972},"8":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":988,"end":990},"9":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":982,"end":985},"10":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1000,"end":1016},"11":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1018,"end":1026},"12":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1028,"end":1032},"13":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":996,"end":1033},"15":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1033,"end":1034}},"is_native":false},"3":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":35,"end":1036},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":35,"end":1036}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/dependencies/MoveStdlib/vector.move 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. + /// This will return `None` if the vector is not valid utf8. + public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. + public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty(): vector; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length(v: &vector): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow(v: &vector, i: u64): ∈ + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. 
+ public native fun push_back(v: &mut vector, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut(v: &mut vector, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back(v: &mut vector): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty. + public native fun destroy_empty(v: vector); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap(v: &mut vector, i: u64, j: u64); + + /// Return an vector of size one containing element `e`. + public fun singleton(e: Element): vector { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. + public fun reverse(v: &mut vector) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append(lhs: &mut vector, mut other: vector) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty(v: &vector): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. 
+ public fun contains(v: &vector, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of(v: &vector, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun remove(v: &mut vector, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. + /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i > v.length()` + public fun insert(v: &mut vector, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove(v: &mut vector, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. 
+ public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector). + public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. + /// The function `f` takes a mutable reference to the element. + public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. 
+ /// Return a new vector containing only the elements for which `f` returns `true`. + public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. + public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`. 
+ public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. + public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. 
+ /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/m.move new file mode 100644 index 0000000000000..3b4c1e24e4170 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/m.move @@ -0,0 +1,45 @@ +// Test tracking reference values. 
+module references::m; + +public struct SomeStruct has drop { + struct_field: SimpleStruct, + simple_field: u64, + vec_simple_field: vector, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo( + some_struct_ref: &mut SomeStruct, + vec_ref: &mut vector, + num_ref: &u64, +): u64 { + some_struct_ref.struct_field.field = 42; + some_struct_ref.simple_field = *num_ref; + + let e1 = vector::borrow_mut(&mut some_struct_ref.vec_simple_field, 0); + *e1 = 42; + + let e2 = vector::borrow_mut(vec_ref, 0); + *e2 = 42; + *num_ref + some_struct_ref.simple_field + vec_ref[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: SimpleStruct { field: 0 }, + simple_field: 0, + vec_simple_field: vector::singleton(0), + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + let mut vec = vector::singleton(0); + vector::push_back(&mut vec, 7); + let num = 42; + foo(&mut some_struct, &mut vec, &num); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/sources/m.move new file mode 100644 index 0000000000000..3b4c1e24e4170 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/sources/m.move @@ -0,0 +1,45 @@ +// Test tracking reference values. 
+module references::m; + +public struct SomeStruct has drop { + struct_field: SimpleStruct, + simple_field: u64, + vec_simple_field: vector, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo( + some_struct_ref: &mut SomeStruct, + vec_ref: &mut vector, + num_ref: &u64, +): u64 { + some_struct_ref.struct_field.field = 42; + some_struct_ref.simple_field = *num_ref; + + let e1 = vector::borrow_mut(&mut some_struct_ref.vec_simple_field, 0); + *e1 = 42; + + let e2 = vector::borrow_mut(vec_ref, 0); + *e2 = 42; + *num_ref + some_struct_ref.simple_field + vec_ref[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: SimpleStruct { field: 0 }, + simple_field: 0, + vec_simple_field: vector::singleton(0), + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + let mut vec = vector::singleton(0); + vector::push_back(&mut vec, 7); + let num = 42; + foo(&mut some_struct, &mut vec, &num); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/test.exp new file mode 100644 index 0000000000000..77d861d3ef1d5 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/test.exp @@ -0,0 +1,90 @@ +current frame stack: + function: test (line 44) + scope 0 : + num : 42 + type: u64 + + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 0 + } + simple_field : 0 + vec_simple_field : [ + 0 : 0 + ] + } + type: 0x0::m::SomeStruct + + vec : [ + 0 : 0 + 1 : 7 + ] + type: vector + + function: foo (line 19) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 0 + } + simple_field : 0 + vec_simple_field : [ + 0 : 0 + ] + } + type: &mut 0x0::m::SomeStruct + + vec_ref : [ + 0 : 0 + 1 : 7 + ] + type: &mut vector + + num_ref : 42 + type: &u64 + +current frame stack: + function: test (line 44) + scope 0 
: + num : 42 + type: u64 + + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 42 + } + simple_field : 42 + vec_simple_field : [ + 0 : 42 + ] + } + type: 0x0::m::SomeStruct + + vec : [ + 0 : 42 + 1 : 7 + ] + type: vector + + function: foo (line 27) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 42 + } + simple_field : 42 + vec_simple_field : [ + 0 : 42 + ] + } + type: &mut 0x0::m::SomeStruct + + vec_ref : [ + 0 : 42 + 1 : 7 + ] + type: &mut vector + + num_ref : 42 + type: &u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/trace.spec.js new file mode 100644 index 0000000000000..6963fb9aafe86 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/trace.spec.js @@ -0,0 +1,21 @@ +let action = (runtime) => { + let res = ''; + // step over functions creating data to be referenced + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + // step into a function + runtime.step(false); + res += runtime.toString(); + // advance until all references are updated + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/traces/references__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/traces/references__m__test.json new file mode 100644 index 0000000000000..15a2d2aade33f --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/traces/references__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"some_struct","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999996,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999992,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999989,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999986,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999986,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":13,"function_name":"singleton","module":{"address":"000000000000000000000000000000000000000000
0000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999986}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999975,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999974,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[13,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999964,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[13,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[13,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999946,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[13,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999945,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[13,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[13,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999935,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[13,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999934,"instruction":"RET"}},{"CloseFrame":{"frame_id":13,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_
left":999999934}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999930,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999929,"instruction":"RET"}},{"CloseFrame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}],"gas_left":999999929}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999928,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Write":{"location":{"Local":[0,1]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999925,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999925,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":47,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref
_type":null}],"is_native":false},"gas_left":999999925}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999914,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999913,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[47,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999903,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[47,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[47,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999885,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[47,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999884,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[47,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[47,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999874,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[47,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999873,"instruction":"RET"}},{"CloseFrame":{"frame_id":47,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_left":999999873}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999872,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Write":{"location":{"Local":[0,2]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instructio
n":{"type_parameters":[],"pc":5,"gas_left":999999862,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,2]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999859,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999858,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0]}}}},{"Effect":{"Write":{"location":{"Local":[0,2]},"root_value_after_write":{"RuntimeValue":{"value":[0,7]}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999855,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999854,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":42}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999844,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999834,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,2]},"root_value_read":{"RuntimeValue":{"value":[0,7]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}}}},{"Instructi
on":{"type_parameters":[],"pc":12,"gas_left":999999824,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999824,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":95,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}},{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}},{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":"Mut"},{"type_":{"vector":"u64"},"ref_type":"Mut"},{"type_":"u64","ref_type":"Imm"},{"type_":"u64","ref_type":"Mut"},{"type_":"u64","ref_type":"Mut"}],"is_native":false},"gas_left":999999824}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999820,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999810,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"s
imple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999800,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999790,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999772,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999762,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,2]
},"root_value_read":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999744,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999734,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999724,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999706,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Pop":{"RuntimeV
alue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[0,1]},1]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999696,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999686,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999683,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999673,"instruction":"VEC_MUT_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef
":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999672,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Effect":{"Write":{"location":{"Local":[95,3]},"root_value_after_write":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999669,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999659,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,3]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999641,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":
42,"vec_simple_field":[0]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999631,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,1]},"root_value_read":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999628,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999618,"instruction":"VEC_MUT_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999617,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Effect":{"Write":{"location":{"Local":[95,4]},"root_value_after_write":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999614,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999604,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,4]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left"
:999999586,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[0,2]},0]},"root_value_after_write":{"RuntimeValue":{"value":[42,7]}}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999576,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,2]},"root_value_read":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999558,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999548,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999538,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type
":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999520,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,1]},1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999517,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999507,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,1]},"root_value_read":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999497,"instruction":"FREEZE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999494,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999484,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[42,
7]}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999466,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[42,7]}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,2]},0]},"root_value_read":{"RuntimeValue":{"value":[42,7]}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999463,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":126}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999462,"instruction":"RET"}},{"CloseFrame":{"frame_id":95,"return_":[{"RuntimeValue":{"value":126}}],"gas_left":999999462}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999461,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":126}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999460,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999460}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/Move.toml new file mode 100644 index 0000000000000..f6707c4981583 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "references_deep" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +references_deep = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/dependencies/MoveStdlib/vector.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/dependencies/MoveStdlib/vector.mv new file mode 
100644 index 0000000000000000000000000000000000000000..79c6d2eb99157a351717e0a6c0cc80cc20a46ce2 GIT binary patch literal 1123 zcmaJ=%}(4f5T3CeCmCm#-35WvB9-mZ_SQ4UQbGF|(XwkGDkM=7{vN0IK1vTg_0(%m zeTmLE5J4qs$)BD1`DQ#b`MUeZSpY~7j5f9DOmw@Bf8h^IzUe*tT~qc`|2D7%Gq8XN zKuR;Bi5N2$0jQY0!KK8=QpSlB+#s)MxVdr~z` zbwmA;UtV>|sw%r>QMNBujbC;?Z;P|iH*J1eHF@XTt|-q&Yd)LmF87ypcNYarigN95 z^Xk;HuW!1fXnVE2@a0)|p43&HpDb5j()y}B4>#NR%j(+4jlbpqK5d!$!gp0^+M8wV z1oGY_y()_?M4t?|ysnD2D($sjbydUl``<#HaLalNR8k|Zj^e^9FuWt)mUqls+KTZ9 zdZ5d2gbt3#p-QD+ZwTHWQl?4E2!%B_j(e|$?@T#u**2`53`r3#%xmH?BK%BF4a#jc z#o>!hqp2{-c))ao2QbD1+%s7e6O8bfjwn)W;PBIs9}}=~cT7tDE&b}DKm64F2TlTqN7TQ5H}paO q+lOd_!>fr<>@bCt-A2@>6eu5M#np7^6jMmpDH;GXJZ(DTDf$cNL2%3f literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..2f61b892a81f73ab4b1d647d7430c13b0a2788a7 GIT binary patch literal 504 zcmbtRu};G<5PfGmPU1MUsel0(nHU%l10w=&9Mt+39;0yQ( zE@@Sah!f@{pjc80GX1e;>Q`7s(?mey2j^WY;-((%nD7#^Z-G*{ryXA}O3F*42!$2MH zQTgiY&Nu0stG1n*QSdE`@E;=xN)jSL*02shlpFNQ4t7tL^u_Ly(NTuWeeI48dkrD2 z!3ZNNkYS85{cSiFtT|+~&)r}`6NrA3$_inT4_qDZc0BMu75o}S3R3N9MMVAQ0l#vm BLYn{p literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/dependencies/MoveStdlib/vector.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/dependencies/MoveStdlib/vector.json new file mode 100644 index 0000000000000..126416a07eb7e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/dependencies/MoveStdlib/vector.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":261,"end":267},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","vector"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1202,"end":1207},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1208,"end":1215}]],"parameters":[],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1220,"end":1235}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1329,"end":1335},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1336,"end":1343}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1345,"end":1346}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1367,"end":1370}],"locals":[],"nops":{},"code_map":{},"is_native":true},"2":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1563,"end":1569},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,1
08,231,168,248,67,118,176,80,79],"start":1570,"end":1577}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1579,"end":1580}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1600,"end":1601}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1609,"end":1617}],"locals":[],"nops":{},"code_map":{},"is_native":true},"3":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1724,"end":1733},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1734,"end":1741}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1743,"end":1744}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1768,"end":1769}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"4":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1968,"end":1978},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1979,"end":1986}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1988,"end":1989}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164
,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2013,"end":2014}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2022,"end":2034}],"locals":[],"nops":{},"code_map":{},"is_native":true},"5":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2170,"end":2178},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2179,"end":2186}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2188,"end":2189}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2214,"end":2221}],"locals":[],"nops":{},"code_map":{},"is_native":true},"6":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2342,"end":2355},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2356,"end":2363}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2365,"end":2366}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2561,"end":2565},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,
152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2566,"end":2573}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2575,"end":2576}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2600,"end":2601}],["j#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2608,"end":2609}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"8":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2694,"end":2703},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2704,"end":2711}]],"parameters":[["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2713,"end":2714}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2726,"end":2741}],"locals":[["v#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2760,"end":2761}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2764,"end":2771},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2756,"end":2761},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,8
8,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2782},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2793,"end":2794},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2795},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2805,"end":2806}},"is_native":false},"9":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2900,"end":2907},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2908,"end":2915}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2917,"end":2918}]],"returns":[],"locals":[["back_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3057,"end":3067}],["front_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3024,"end":3035}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2963},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2972},"3":{"fil
e_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2986,"end":2989},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2993,"end":2994},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2990,"end":2992},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2982,"end":3005},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2996,"end":3005},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3038,"end":3039},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3020,"end":3035},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3070,"end":3073},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3076,"end":3077},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3074,"end":3075},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3053,"end":3067},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,8
8,201,108,231,168,248,67,118,176,80,79],"start":3094,"end":3105},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3108,"end":3118},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3106,"end":3107},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3135},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3141,"end":3152},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3154,"end":3164},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3165},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3193,"end":3204},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3207,"end":3208},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3205,"end":3206},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3179,"end":3190},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3235,"end":3245},"30":{"file_has
h":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3248,"end":3249},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3246,"end":3247},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3222,"end":3232},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260}},"is_native":false},"10":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3363,"end":3369},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3370,"end":3377}]],"parameters":[["lhs#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3379,"end":3382}],["other#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3410,"end":3415}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3449},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3459},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3482},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,16
8,248,67,118,176,80,79],"start":3477,"end":3493},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3476,"end":3477},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3498},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3514},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3525},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3526},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3541},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3557,"end":3558}},"is_native":false},"11":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3660,"end"
:3668},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3669,"end":3676}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3678,"end":3679}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3700,"end":3704}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3716},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3725},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3729,"end":3730},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3726,"end":3728},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3730}},"is_native":false},"12":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3836,"end":3844},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3845,"end":3852}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3854,"end":3855}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,
170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3875,"end":3876}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3889,"end":3893}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3912,"end":3913}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3916,"end":3917},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3908,"end":3913},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3938},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3947},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3964,"end":3965},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3968,"end":3971},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3966,"end":3967},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,
5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3992,"end":3993},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3994,"end":3995},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3991,"end":3996},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4000,"end":4001},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3997,"end":3999},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3987,"end":4014},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4010,"end":4014},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4032,"end":4033},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4036,"end":4037},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4034,"end":4035},"25":{"file_
hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4028,"end":4029},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4058,"end":4063}},"is_native":false},"13":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4196,"end":4204},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4205,"end":4212}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4214,"end":4215}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4235,"end":4236}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4250,"end":4254},{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4256,"end":4259}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4279,"end":4280}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,16
8,248,67,118,176,80,79],"start":4283,"end":4284},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4275,"end":4280},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4305},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4314},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4331,"end":4332},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4335,"end":4338},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4333,"end":4334},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4359,"end":4360},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4361,"end":4362},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4358,"end":4363},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4367,"end":4368},"14":{"file_hash":[42,57,43,60,190,49,1
64,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4364,"end":4366},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4354,"end":4386},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4378,"end":4382},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4384,"end":4385},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4404,"end":4405},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4408,"end":4409},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4406,"end":4407},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4400,"end":4401},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67
,118,176,80,79],"start":4431,"end":4436},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4438,"end":4439},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440}},"is_native":false},"14":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4660,"end":4666},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4667,"end":4674}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4676,"end":4677}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4705,"end":4706}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4714,"end":4721}],"locals":[["%#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4740,"end":4743}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4747},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,
218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4756},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4736,"end":4743},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4804,"end":4805},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4809,"end":4812},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4806,"end":4808},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4800,"end":4840},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4820,"end":4840},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4857,"end":4860},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4863,"end":4864},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4861,"end":4862},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"sta
rt":4851,"end":4854},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4881,"end":4882},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4885,"end":4888},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4883,"end":4884},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4906,"end":4907},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4910,"end":4911},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4908,"end":4909},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4902,"end":4903},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,17
0,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4913,"end":4914},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4917},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4928},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4939}},"is_native":false},"15":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5290,"end":5296},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5297,"end":5304}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5306,"end":5307}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5331,"end":5332}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5347,"end":5348}]],"returns":[],"locals":[["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375
,"end":5376},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5385},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5426,"end":5427},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5430,"end":5433},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5428,"end":5429},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5422,"end":5461},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5441,"end":5461},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5473},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5484,"end":5485},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5486},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,2
44,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5503,"end":5504},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5507,"end":5510},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5505,"end":5506},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5527},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5533,"end":5534},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5536,"end":5539},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5540},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5558,"end":5559},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5562,"end":5563},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5560,"end":5561},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5554,"end":5555},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":
5573}},"is_native":false},"16":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5812,"end":5823},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5824,"end":5831}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5833,"end":5834}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5858,"end":5859}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5867,"end":5874}],"locals":[["last_idx#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5895},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5906},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5893,"end":5894},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5908,"end":5928},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5
,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5955},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5964},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5967,"end":5968},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5965,"end":5966},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5979},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5985,"end":5986},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5988,"end":5996},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5997},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6008},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6019}},"is_native":false},"17":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80
,79],"start":249,"end":13981},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981}},"is_native":false}},"constant_map":{"EINDEX_OUT_OF_BOUNDS":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/m.json new file mode 100644 index 0000000000000..e9db4cf3eeb04 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":107,"end":108},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":125,"end":135},"type_parameters":[],"fields":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":151,"end":163}]},"1":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":193,"end":202},"type_parameters":[],"fields":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":224,"end":233}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145
,209,204,76,225,26,126,10,201,143,108,136],"start":255,"end":258},"type_parameters":[],"parameters":[["vec_ref#0#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":259,"end":266}]],"returns":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":287,"end":290}],"locals":[["e#1#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":301,"end":302}]],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":324,"end":331},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":333,"end":334},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":305,"end":335},"3":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":301,"end":302},"4":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":346,"end":348},"5":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":342,"end":343},"6":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":341,"end":348},"7":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":354,"end":361},"9":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,12
5,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":362,"end":363},"10":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":354,"end":364}},"is_native":false},"1":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":372,"end":375},"type_parameters":[],"parameters":[["some_struct_ref#0#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":376,"end":391}]],"returns":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":411,"end":414}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":440,"end":455},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":440,"end":478},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":435,"end":478},"3":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":431,"end":479},"4":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":491,"end":506},"5":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":491,"end":532},"7":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":530,"end":531},"8":{"file_hash":
[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":491,"end":532},"10":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":489,"end":490},"11":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":485,"end":532}},"is_native":false},"2":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":540,"end":551},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":555,"end":565}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":648,"end":649},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":630,"end":650},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":607,"end":652},"3":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":572,"end":658}},"is_native":false},"3":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":674,"end":678},"type_parameters":[],"parameters":[],"returns":[],"locals":[["some_struct#1#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":695,"end":706}]],"nops":{},"co
de_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":709,"end":722},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":691,"end":706},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":728,"end":762},"5":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":773,"end":774},"6":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":728,"end":775},"7":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":785,"end":801},"8":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":781,"end":802},"10":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":802,"end":803}},"is_native":false},"4":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":83,"end":805},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":83,"end":805}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/dependencies/MoveStdlib/vector.move 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. + /// This will return `None` if the vector is not valid utf8. + public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. + public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty(): vector; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length(v: &vector): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow(v: &vector, i: u64): ∈ + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. 
+ public native fun push_back(v: &mut vector, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut(v: &mut vector, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back(v: &mut vector): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty. + public native fun destroy_empty(v: vector); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap(v: &mut vector, i: u64, j: u64); + + /// Return an vector of size one containing element `e`. + public fun singleton(e: Element): vector { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. + public fun reverse(v: &mut vector) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append(lhs: &mut vector, mut other: vector) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty(v: &vector): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. 
+ public fun contains(v: &vector, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of(v: &vector, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun remove(v: &mut vector, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. + /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i > v.length()` + public fun insert(v: &mut vector, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove(v: &mut vector, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. 
+ public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector). + public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. + /// The function `f` takes a mutable reference to the element. + public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. 
+ /// Return a new vector containing only the elements for which `f` returns `true`. + public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. + public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`. 
+ public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. + public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. 
+ /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/m.move new file mode 100644 index 0000000000000..f8e266674ec44 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/m.move @@ -0,0 +1,34 @@ +// Test tracking reference values when multiple levels of references are involved. 
+module references_deep::m; + +public struct SomeStruct has drop { + struct_field: VecStruct, +} + +public struct VecStruct has drop, copy { + vec_field: vector, +} + +fun bar(vec_ref: &mut vector): u64 { + let e = vector::borrow_mut(vec_ref, 0); + *e = 42; + vec_ref[0] +} + +fun foo(some_struct_ref: &mut SomeStruct): u64 { + let res = bar(&mut some_struct_ref.struct_field.vec_field); + res + some_struct_ref.struct_field.vec_field[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: VecStruct { vec_field: vector::singleton(0) } + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + some_struct.struct_field.vec_field.push_back(7); + foo(&mut some_struct); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/sources/m.move new file mode 100644 index 0000000000000..f8e266674ec44 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/sources/m.move @@ -0,0 +1,34 @@ +// Test tracking reference values when multiple levels of references are involved. 
+module references_deep::m; + +public struct SomeStruct has drop { + struct_field: VecStruct, +} + +public struct VecStruct has drop, copy { + vec_field: vector, +} + +fun bar(vec_ref: &mut vector): u64 { + let e = vector::borrow_mut(vec_ref, 0); + *e = 42; + vec_ref[0] +} + +fun foo(some_struct_ref: &mut SomeStruct): u64 { + let res = bar(&mut some_struct_ref.struct_field.vec_field); + res + some_struct_ref.struct_field.vec_field[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: VecStruct { vec_field: vector::singleton(0) } + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + some_struct.struct_field.vec_field.push_back(7); + foo(&mut some_struct); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/test.exp new file mode 100644 index 0000000000000..5327fe3b67341 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/test.exp @@ -0,0 +1,74 @@ +current frame stack: + function: test (line 33) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 0 + 1 : 7 + ] + } + } + type: 0x0::m::SomeStruct + + function: foo (line 19) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 0 + 1 : 7 + ] + } + } + type: &mut 0x0::m::SomeStruct + + function: bar (line 13) + scope 0 : + vec_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 0 + 1 : 7 + ] + } + } + type: &mut vector + +current frame stack: + function: test (line 33) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 42 + 1 : 7 + ] + } + } + type: 0x0::m::SomeStruct + + function: foo (line 19) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + 
vec_field : [ + 0 : 42 + 1 : 7 + ] + } + } + type: &mut 0x0::m::SomeStruct + + function: bar (line 15) + scope 0 : + vec_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 42 + 1 : 7 + ] + } + } + type: &mut vector + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/trace.spec.js new file mode 100644 index 0000000000000..88ef8e7c14f28 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/trace.spec.js @@ -0,0 +1,17 @@ +let action = (runtime) => { + let res = ''; + // step over functions creating data to be referenced + runtime.step(true); + runtime.step(true); + // step into a function taking a reference as an argument + runtime.step(false); + // step into another function taking a reference as an argument + runtime.step(false); + res += runtime.toString(); + // advance until all references are updated + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/traces/references_deep__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/traces/references_deep__m__test.json new file mode 100644 index 0000000000000..78359c58a39b8 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/traces/references_deep__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":3,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"some_struct","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999996,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999996,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":6,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999996}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999985,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999984,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"Runtime
Value":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[6,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999974,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[6,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[6,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999956,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[6,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999955,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[6,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[6,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999945,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[6,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999944,"instruction":"RET"}},{"CloseFrame":{"frame_id":6,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_left":999999944}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999940,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999936,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0
]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999935,"instruction":"RET"}},{"CloseFrame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}],"gas_left":999999935}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999934,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999924,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999914,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999904,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecS
truct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999901,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999900,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999890,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999890,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":57,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}],"return_types":[{"t
ype_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":"Mut"}],"is_native":false},"gas_left":999999890}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999879,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[57,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999869,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999859,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999859,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":68,"function_name":"bar","module":{"address":"0000000000000000000000000000000000000000000000000000
000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"vector":"u64"},"ref_type":"Mut"},{"type_":"u64","ref_type":"Mut"}],"is_native":false},"gas_left":999999859}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999848,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999845,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999835,"instruction":"VEC_MUT_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999834,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"t
ype":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Write":{"location":{"Local":[68,1]},"root_value_after_write":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999831,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999821,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,1]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999803,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999793,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Index
ed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999783,"instruction":"FREEZE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999780,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999770,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999752,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Read"
:{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999751,"instruction":"RET"}},{"CloseFrame":{"frame_id":68,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999751}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999741,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[57,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999731,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999721,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field
":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999718,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999708,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999690,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999687,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999686,"instruction":"RET"}},{"CloseFrame":{"frame_id":57,"return_":[{"RuntimeValue":{"value":84}}],"gas_left":999999686}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999685,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":10,"g
as_left":999999684,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999684}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/run_spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/run_spec.js new file mode 100644 index 0000000000000..07c3c12b10f3e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/run_spec.js @@ -0,0 +1,51 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +const assert = require('assert'); +const linediff = require('line-diff'); +const fs = require('fs'); +const path = require('path'); +const runtime = require('../out/runtime'); + +const UB = process.env['UB']; + +/** + * Testing harness, assuming that the tested function + * is the `test` function in the `m` module. It executes + * a given callback function and compares its result with + * the expected one stored in a file. + * + * @param dirname the directory where the test (its manifest file) is located + * @param action a function to be executed by the harness that + * takes DAP runtime as argument and returns a string representing + * test result + */ +global.run_spec = function (dirname, action) { + const test_dir = path.basename(dirname); + describe(test_dir, () => { + it(test_dir, () => { + const rt = new runtime.Runtime(); + // assume that the test is always in the `test` function + // of the `m` module + const traceInfo = test_dir + '::' + 'm::test'; + return rt.start(path.join(dirname, 'sources', `m.move`), traceInfo, true).then(() => { + const result = action(rt); + const exp_file = 'test.exp'; + const exp_path = path.join(dirname, exp_file); + if (UB === '1') { + // user asked to regenerate output + fs.writeFileSync(exp_path, result, 'utf8'); + return; + } + if (!fs.existsSync(exp_path)) { + assert.fail(`\n${result}\nNo expected output file`); + } + const exp_out = fs.readFileSync(exp_path, { encoding: 'utf8' }); + 
if (result !== exp_out) { + const out_diff = new linediff(exp_out, result).toString(); + assert.fail(`${out_diff}\nCurrent output does not match the expected one (run with UB=1 to save the current output)`); + } + }); + }); + }); +}; diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/Move.toml new file mode 100644 index 0000000000000..2eb5111ce9d09 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "shadowing" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +shadowing = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..67ca2edde8a592a63123c84ac4e288ad8bb4a638 GIT binary patch literal 352 zcmbtQ+ls<45S?36Hwmb)6j^)_6a>FQzodv%w-4JaZT%VFeDmM@2`5sD;G;7zIp;8$ zImvZ?_Z9#wg0Os>**IJ8M!(x|!wUkT0BQmg0T?aH1O%BG8MHFG@N;O&eODH3b*jsz zEsn7%y0Y!6`ozZ=@lO)_I@eYA)F%Tf{$tg~TK9hv>Q5c6(leeB#Dgq6I?jV}#%_9j uDRWQG { + let res = ''; + // step into a function + runtime.step(false); + // advance until first shadowed variable is created + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + // advance until second shadowed variable is created + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + // advance until second shadowed variable disappears + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + // advance until first shadowed variable disappears + runtime.step(true); + res += runtime.toString(); + + return res; +}; +run_spec(__dirname, action); diff 
--git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/traces/shadowing__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/traces/shadowing__m__test.json new file mode 100644 index 0000000000000..e7b4dad767010 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/traces/shadowing__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_TRUE"}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999994,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999991,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999988,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999988,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":10,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":true}},{"RuntimeValue":{"value":7}},{"RuntimeValue":{"value":7}},{"RuntimeValue":{"value":7}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"bool","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_
type":null}],"is_native":false},"gas_left":999999988}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999984,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999983,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":0}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999965,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,0]},"root_value_read":{"RuntimeValue":{"value":true}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999964,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999946,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,1]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999928,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,3]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999925,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999924,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Write":{"location":{"Local":[10,5]},"root_value_after_write":{"RuntimeValue":{"value":14}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999906,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,5]},"root_value_read":{"RuntimeValue":{"value":
14}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999903,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999900,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999899,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999881,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,2]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999863,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,5]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999860,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999859,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Write":{"location":{"Local":[10,6]},"root_value_after_write":{"RuntimeValue":{"value":21}}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999841,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,6]},"root_value_read":{"RuntimeValue":{"value":21}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999838,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_p
arameters":[],"pc":18,"gas_left":999999835,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999834,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999831,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999813,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,6]},"root_value_read":{"RuntimeValue":{"value":21}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999810,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":63}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999809,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":63}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":63}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999791,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,4]},"root_value_read":{"RuntimeValue":{"value":63}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":63}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999773,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,6]},"root_value_read":{"RuntimeValue":{"value":21}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999770,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":63}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"typ
e_parameters":[],"pc":27,"gas_left":999999769,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999751,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,4]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999733,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,5]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999730,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":98}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999729,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":98}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":98}}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999711,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,4]},"root_value_read":{"RuntimeValue":{"value":98}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":98}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999693,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,3]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999690,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":98}}}},{"Effect":{"Push":{"RuntimeValue":{"value":105}}}},{"Instruction":{"type_parameters":[],"pc":
35,"gas_left":999999689,"instruction":"RET"}},{"CloseFrame":{"frame_id":10,"return_":[{"RuntimeValue":{"value":105}}],"gas_left":999999689}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999688,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":105}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999687,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999687}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/Move.toml new file mode 100644 index 0000000000000..7eccf6f05afef --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "stepping" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +stepping = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..dcd6f02ab7588c8302b8d402f12d1d5bf2c77a92 GIT binary patch literal 256 zcmbtPK?=e!5S&fYsBIAoQbfcD_zDI85k%6GLla5jd&Kwo36miGfOFW{*@0o_efTsI zfFuYmw$)B9j+do6w=cSakpLwVoDx-OBw`>As@whD>s!<+n9*t<)Zn~|Iz(efJ~&5{ y$ZGYpCYJkV-qgdH;A{^51LZLiH5&A+h_yWZ@;bE0Mu&c0h?P1bw=L*6F?<1T*B+w) literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/source_maps/m.json new file mode 100644 index 0000000000000..29f3badfbee25 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/source_maps/m.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":136,"end":137},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":144,"end":147},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":148,"end":149}]],"returns":[{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":157,"end":160}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":167,"end":168},"1":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":171,"end":172},"2":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":169,"end":170},"3":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":167,"end":172}},"is_native":false},"1":{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":188,"end":192},"type_parameters":[],"parameters":[],"returns":[],"locals":[["_res#1#0",{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":209,"end":213}]],"nops":{},"code_map":{"0":{"file_hash":[167,251,62,162,22,201,105,5
,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":220,"end":222},"1":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":216,"end":223},"2":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":205,"end":213},"3":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":236,"end":240},"4":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":247,"end":251},"5":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":243,"end":252},"6":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":241,"end":242},"7":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":229,"end":233},"8":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":265,"end":269},"9":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":276,"end":280},"10":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":272,"end":281},"11":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":270,"end":271},"12":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":258,"end":262},"13":{"file_ha
sh":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":281,"end":282}},"is_native":false},"2":{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":119,"end":343},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":119,"end":343}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/sources/m.move new file mode 100644 index 0000000000000..55709169e5686 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/sources/m.move @@ -0,0 +1,16 @@ +// Test simple stepping functionality: +// - step into a function +// - step out of a function +// - step over a function +module stepping::m; + +fun foo(p: u64): u64 { + p + p +} + +#[test] +fun test() { + let mut _res = foo(42); + _res = _res + foo(_res); + _res = _res + foo(_res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/sources/m.move new file mode 100644 index 0000000000000..55709169e5686 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/sources/m.move @@ -0,0 +1,16 @@ +// Test simple stepping functionality: +// - step into a function +// - step out of a function +// - step over a function +module stepping::m; + +fun foo(p: u64): u64 { + p + p +} + +#[test] +fun test() { + let mut _res = 
foo(42); + _res = _res + foo(_res); + _res = _res + foo(_res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/test.exp new file mode 100644 index 0000000000000..69556b2e44ac9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/test.exp @@ -0,0 +1,20 @@ +current frame stack: + function: test (line 13) + scope 0 : + function: foo (line 8) + scope 0 : + p : 42 + type: u64 + +current frame stack: + function: test (line 14) + scope 0 : + _res : 84 + type: u64 + +current frame stack: + function: test (line 15) + scope 0 : + _res : 252 + type: u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/trace.spec.js new file mode 100644 index 0000000000000..f5296cd01981c --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/trace.spec.js @@ -0,0 +1,14 @@ +let action = (runtime) => { + let res = ''; + // step into a function + runtime.step(false); + res += runtime.toString(); + // step out of a function + runtime.stepOut(false); + res += runtime.toString(); + // step over a function + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/traces/stepping__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/traces/stepping__m__test.json new file mode 100644 index 0000000000000..028dbd2996d64 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/traces/stepping__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999997,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999997}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999978,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999960,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999957,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999956,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":84}}],"gas_left":999999956}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999955,"instruction":"ST_LOC"}},{"Effect":{"Pop"
:{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999937,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999919,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999919,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":27,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":84}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999919}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999900,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[27,0]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999882,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[27,0]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999879,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":168}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999878,"instruction":"RET"}},{"CloseFrame":{"frame_id":27,"return_":[{"Runtim
eValue":{"value":168}}],"gas_left":999999878}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999875,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":168}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999874,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":252}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999856,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999838,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999838,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":54,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":252}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999838}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999819,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[54,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999801,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[54,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":true}}},{"Effect":{"Push":{"
RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999798,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Push":{"RuntimeValue":{"value":504}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999797,"instruction":"RET"}},{"CloseFrame":{"frame_id":54,"return_":[{"RuntimeValue":{"value":504}}],"gas_left":999999797}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999794,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":504}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Push":{"RuntimeValue":{"value":756}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999793,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":756}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999792,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999792}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/Move.toml new file mode 100644 index 0000000000000..e79c6841d9106 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "stepping_call" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +stepping_call = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/bytecode_modules/m.mv new file mode 100644 index 0000000000000000000000000000000000000000..d86d93f23837895b36dac9554296cb3ff0675788 GIT binary patch literal 321 zcmbV|I}(C05J2}6KobgSC>*u0bUcKur(n&)PwBD`Z_dqXew{J 
zz8QSgx}ge<54P{Pbq;STcFVE1Ve+dVRd;8->*ve4H_y9mlU6iDm~1T6ojNuMKv2J68VG=F6JWd literal 0 HcmV?d00001 diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/source_maps/m.json new file mode 100644 index 0000000000000..94b0650f11d24 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":334,"end":335},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":342,"end":345},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":346,"end":347}]],"returns":[{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":355,"end":358}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":365,"end":366}},"is_native":false},"1":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":374,"end":377},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":378,"end":379}]],"returns":[{"file_hash":[155,146,46,
135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":387,"end":390}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":397,"end":398}},"is_native":false},"2":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":406,"end":409},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":410,"end":411}]],"returns":[{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":419,"end":422}],"locals":[["v1#1#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":433,"end":435}],["v2#1#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":470,"end":472}],["v3#1#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":500,"end":502}]],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":438,"end":439},"1":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":442,"end":443},"2":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":440,"end":441},"3":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":450,"end":451},"4":{"file_
hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":446,"end":452},"5":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":444,"end":445},"6":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":455,"end":456},"7":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":453,"end":454},"8":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":459,"end":460},"9":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":457,"end":458},"10":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":433,"end":435},"11":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":479,"end":480},"12":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":475,"end":481},"13":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":488,"end":489},"14":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":484,"end":490},"15":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":482,"end":483},"16":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":470,"end":472},"17":{"file_has
h":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":509,"end":510},"18":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":505,"end":511},"19":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":518,"end":519},"20":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":514,"end":520},"21":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":512,"end":513},"22":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":500,"end":502},"23":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":526,"end":528},"24":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":531,"end":533},"25":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":529,"end":530},"26":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":536,"end":538},"27":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":534,"end":535},"28":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":526,"end":538}},"is_native":false},"3":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,14
0,44],"start":554,"end":558},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":571,"end":573},"1":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":567,"end":574},"3":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":574,"end":575}},"is_native":false},"4":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":312,"end":577},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":312,"end":577}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/sources/m.move new file mode 100644 index 0000000000000..d1e63ee566f45 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/sources/m.move @@ -0,0 +1,26 @@ +// Test stepping functionality in presence of function calls: +// - with other instructions on the same line a call, step over line in one go +// - with two calls on the same line, step over both in one go +// - with two calls on the same line, step into the first and +// after stepping out, step over the second +module stepping_call::m; + +fun baz(p: u64): u64 { + p +} + +fun bar(p: u64): u64 { + p +} + +fun foo(p: u64): u64 { + let v1 = p + p + bar(p) + p + p; + let v2 = baz(p) 
+ bar(p); + let v3 = baz(p) + bar(p); + v1 + v2 + v3 +} + +#[test] +fun test() { + foo(42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/sources/m.move new file mode 100644 index 0000000000000..d1e63ee566f45 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/sources/m.move @@ -0,0 +1,26 @@ +// Test stepping functionality in presence of function calls: +// - with other instructions on the same line a call, step over line in one go +// - with two calls on the same line, step over both in one go +// - with two calls on the same line, step into the first and +// after stepping out, step over the second +module stepping_call::m; + +fun baz(p: u64): u64 { + p +} + +fun bar(p: u64): u64 { + p +} + +fun foo(p: u64): u64 { + let v1 = p + p + bar(p) + p + p; + let v2 = baz(p) + bar(p); + let v3 = baz(p) + bar(p); + v1 + v2 + v3 +} + +#[test] +fun test() { + foo(42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/test.exp new file mode 100644 index 0000000000000..01039187c0a5e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/test.exp @@ -0,0 +1,58 @@ +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 17) + scope 0 : + p : 42 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 18) + scope 0 : + p : 42 + type: u64 + + v1 : 210 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 19) + scope 0 : + p : 42 + type: u64 + + v1 : 210 + type: u64 + + v2 : 84 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 19) + scope 0 : + v1 : 210 + type: u64 + + v2 : 84 + 
type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 20) + scope 0 : + v1 : 210 + type: u64 + + v2 : 84 + type: u64 + + v3 : 84 + type: u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/trace.spec.js new file mode 100644 index 0000000000000..80eb43a64fb4d --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/trace.spec.js @@ -0,0 +1,27 @@ +let action = (runtime) => { + let res = ''; + // step into the main test function + runtime.step(false); + res += runtime.toString(); + + // step over a function to the next line + runtime.step(true); + res += runtime.toString(); + + // step over two functions to the next line + runtime.step(true); + res += runtime.toString(); + + // step into a function + runtime.step(false); + // step out of the function to the same line + runtime.stepOut(false); + res += runtime.toString(); + // step into a function + runtime.step(false); + // step out of the function to the next line + runtime.stepOut(false); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/traces/stepping_call__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/traces/stepping_call__m__test.json new file mode 100644 index 0000000000000..568616f20037b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/traces/stepping_call__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":3,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999997,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999997}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999978,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999960,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999957,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999939,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push"
:{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999939,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":19,"function_name":"bar","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999939}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999920,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[19,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999919,"instruction":"RET"}},{"CloseFrame":{"frame_id":19,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999919}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999916,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":126}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999898,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999895,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":126}}}},{"Effect":{"Push":{"RuntimeValue":{"value":168}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999877,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999874,"instructi
on":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":168}}}},{"Effect":{"Push":{"RuntimeValue":{"value":210}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999873,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":210}}}},{"Effect":{"Write":{"location":{"Local":[4,1]},"root_value_after_write":{"RuntimeValue":{"value":210}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999855,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999855,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":50,"function_name":"baz","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999855}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999836,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[50,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999835,"instruction":"RET"}},{"CloseFrame":{"frame_id":50,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999835}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999817,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999817,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":60,"function_name":"bar","module":{"address":"
0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999817}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999798,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[60,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999797,"instruction":"RET"}},{"CloseFrame":{"frame_id":60,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999797}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999794,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999793,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999775,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999775,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":77,"function_name":"baz","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999775}},{"Instruction":{"type_parameters":[],"pc
":0,"gas_left":999999756,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[77,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999755,"instruction":"RET"}},{"CloseFrame":{"frame_id":77,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999755}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999737,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999737,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":87,"function_name":"bar","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999737}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999718,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[87,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999717,"instruction":"RET"}},{"CloseFrame":{"frame_id":87,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999717}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999714,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999713,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[4,3]},"root_value_afte
r_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999695,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,1]},"root_value_read":{"RuntimeValue":{"value":210}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":210}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999677,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999674,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":210}}}},{"Effect":{"Push":{"RuntimeValue":{"value":294}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999656,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,3]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999653,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":294}}}},{"Effect":{"Push":{"RuntimeValue":{"value":378}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999652,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":378}}],"gas_left":999999652}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999651,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":378}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999650,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999650}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-debug/package.json b/external-crates/move/crates/move-analyzer/trace-debug/package.json index 3ae00120245d6..2a9c6eccf648c 100644 --- 
a/external-crates/move/crates/move-analyzer/trace-debug/package.json +++ b/external-crates/move/crates/move-analyzer/trace-debug/package.json @@ -20,6 +20,7 @@ ], "main": "./out/extension.js", "contributes": { + "breakpoints": [{ "language": "move" }], "debuggers": [ { "type": "move-debug", diff --git a/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs b/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs index a0848682ae405..d273f5a3d396b 100644 --- a/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs +++ b/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs @@ -100,7 +100,7 @@ pub trait CompatibilityMode: Default { ); /// Finish the compatibility check and return the error if one has been accumulated from individual errors. - fn finish(&self, _: &Compatibility) -> Result<(), Self::Error>; + fn finish(self, _: &Compatibility) -> Result<(), Self::Error>; } /// Compatibility mode impl for execution compatibility checks. @@ -240,7 +240,7 @@ impl CompatibilityMode for ExecutionCompatibilityMode { } /// Finish by comparing against the compatibility flags. 
- fn finish(&self, compatability: &Compatibility) -> Result<(), ()> { + fn finish(self, compatability: &Compatibility) -> Result<(), ()> { if !self.datatype_and_function_linking { return Err(()); } diff --git a/external-crates/move/crates/move-cli/Cargo.toml b/external-crates/move/crates/move-cli/Cargo.toml index 128c9493155fa..fae90ee1126d8 100644 --- a/external-crates/move/crates/move-cli/Cargo.toml +++ b/external-crates/move/crates/move-cli/Cargo.toml @@ -64,4 +64,4 @@ harness = false [features] tiered-gas = ["move-vm-test-utils/tiered-gas"] -gas-profiler = ["move-vm-runtime/gas-profiler"] +tracing = ["move-vm-runtime/tracing"] diff --git a/external-crates/move/crates/move-cli/src/base/test.rs b/external-crates/move/crates/move-cli/src/base/test.rs index e37309a8e9b1a..3133b799a9b10 100644 --- a/external-crates/move/crates/move-cli/src/base/test.rs +++ b/external-crates/move/crates/move-cli/src/base/test.rs @@ -196,7 +196,7 @@ pub fn run_move_unit_tests( let (files, comments_and_compiler_res) = compiler.run::().unwrap(); let (_, compiler) = diagnostics::unwrap_or_report_pass_diagnostics(&files, comments_and_compiler_res); - let (mut compiler, cfgir) = compiler.into_ast(); + let (compiler, cfgir) = compiler.into_ast(); let compilation_env = compiler.compilation_env(); let built_test_plan = construct_test_plan(compilation_env, Some(root_package), &cfgir); let mapped_files = compilation_env.mapped_files().clone(); diff --git a/external-crates/move/crates/move-cli/src/sandbox/cli.rs b/external-crates/move/crates/move-cli/src/sandbox/cli.rs index 30a4053bfdfb0..e8aee6887e169 100644 --- a/external-crates/move/crates/move-cli/src/sandbox/cli.rs +++ b/external-crates/move/crates/move-cli/src/sandbox/cli.rs @@ -11,15 +11,19 @@ use crate::{ }; use anyhow::Result; use clap::Parser; -use move_core_types::{ - language_storage::TypeTag, parser, transaction_argument::TransactionArgument, -}; +use move_core_types::parsing::values::ParsedValue; +use 
move_core_types::{language_storage::TypeTag, transaction_argument::TransactionArgument}; use move_package::compilation::package_layout::CompiledPackageLayout; use move_vm_test_utils::gas_schedule::CostTable; use std::{ fs, path::{Path, PathBuf}, }; +fn parse_transaction_argument(s: &str) -> Result { + let x: ParsedValue<()> = ParsedValue::parse(s)?; + let move_value = x.into_concrete_value(&|_| None)?; + TransactionArgument::try_from(move_value) +} #[derive(Parser)] pub enum SandboxCommand { @@ -75,7 +79,7 @@ pub enum SandboxCommand { /// ASCII strings (e.g., 'b"hi" will parse as the vector value [68, 69]). #[clap( long = "args", - value_parser = parser::parse_transaction_argument, + value_parser = parse_transaction_argument, num_args(1..), action = clap::ArgAction::Append, )] @@ -84,7 +88,6 @@ pub enum SandboxCommand { /// `main()`). Must match the type arguments kinds expected by `script_file`. #[clap( long = "type-args", - value_parser = parser::parse_type_tag, num_args(1..), action = clap::ArgAction::Append, )] @@ -155,7 +158,6 @@ pub struct StructLayoutOptions { /// Generate layout bindings for `struct` bound to these type arguments. #[clap( long = "type-args", - value_parser = parser::parse_type_tag, requires="struct", action = clap::ArgAction::Append, num_args(1..), diff --git a/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs b/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs index 5f3c556b6745c..61458bbb817ce 100644 --- a/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs +++ b/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs @@ -83,7 +83,7 @@ pub fn run( // script fun. parse module, extract script ID to pass to VM let module = CompiledModule::deserialize_with_defaults(&bytecode) .map_err(|e| anyhow!("Error deserializing module: {:?}", e))?; - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs b/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs index cfe95ec02ac3a..e59c522afac15 100644 --- a/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs +++ b/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs @@ -6,7 +6,7 @@ use std::path::Path; #[allow(unused_variables)] fn run_all(args_path: &Path) -> datatest_stable::Result<()> { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { use move_cli::sandbox::commands::test; use std::path::PathBuf; diff --git a/external-crates/move/crates/move-command-line-common/Cargo.toml b/external-crates/move/crates/move-command-line-common/Cargo.toml index 08679ba8a6e31..83aa969a204af 100644 --- a/external-crates/move/crates/move-command-line-common/Cargo.toml +++ b/external-crates/move/crates/move-command-line-common/Cargo.toml @@ -15,7 +15,6 @@ difference.workspace = true walkdir.workspace = true sha2.workspace = true hex.workspace = true -num-bigint.workspace = true once_cell.workspace = true serde.workspace = true dirs-next.workspace = true @@ -27,9 +26,3 @@ move-binary-format.workspace = true [dev-dependencies] proptest.workspace = true -# Ok to do this since: -# edition = 2021 ==> resolver = 2 -# * https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html#summary -# resolver = 2 ==> feature-resolver-version-2 which allows dev-dependencies to set features -# * https://doc.rust-lang.org/cargo/reference/resolver.html#feature-resolver-version-2 -move-core-types = { workspace = true, features = ["fuzzing"] } diff --git a/external-crates/move/crates/move-command-line-common/src/lib.rs b/external-crates/move/crates/move-command-line-common/src/lib.rs index 2b087266cf27f..6014194ab13ef 100644 --- a/external-crates/move/crates/move-command-line-common/src/lib.rs +++ 
b/external-crates/move/crates/move-command-line-common/src/lib.rs @@ -4,14 +4,10 @@ #![forbid(unsafe_code)] -pub mod address; pub mod character_sets; pub mod display; pub mod env; pub mod error_bitset; pub mod files; pub mod interactive; -pub mod parser; pub mod testing; -pub mod types; -pub mod values; diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.exp b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.exp new file mode 100644 index 0000000000000..b980699ad1b88 --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.exp @@ -0,0 +1,11 @@ +processed 2 tasks + +task 1, lines 6-11: +//# run +Error: Function execution failed with VMError: { + major_status: ABORTED, + sub_status: Some(9223372079804448767), + location: 0x42::m, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 1)], +} diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.move b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.move new file mode 100644 index 0000000000000..c9e4464e3ad8b --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.move @@ -0,0 +1,11 @@ +// NB: Do _not_ change the number of lines in this file. Any changes to the +// number of lines in this file may break the expected output of this test. 
+ +//# init --edition 2024.beta + +//# run +module 0x42::m { + fun f() { + abort + } +} diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.exp b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.exp new file mode 100644 index 0000000000000..760fa19edda51 --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.exp @@ -0,0 +1,21 @@ +processed 4 tasks + +task 2, line 25: +//# run 0x42::m::t_a +Error: Function execution failed with VMError: { + major_status: ABORTED, + sub_status: Some(9223372105574252543), + location: 0x42::m, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 1)], +} + +task 3, line 27: +//# run 0x42::m::t_calls_a +Error: Function execution failed with VMError: { + major_status: ABORTED, + sub_status: Some(9223372118459154431), + location: 0x42::m, + indices: [], + offsets: [(FunctionDefinitionIndex(1), 1)], +} diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.move b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.move new file mode 100644 index 0000000000000..fc2069b6d0a0c --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.move @@ -0,0 +1,27 @@ +// NB: Do _not_ change the number of lines in this file. Any changes to the +// number of lines in this file may break the expected output of this test. 
+ +//# init --edition 2024.beta + +//# publish +module 0x42::m { + macro fun a() { + abort + } + + macro fun calls_a() { + a!() + } + + entry fun t_a() { + a!() // assert should point to this line + } + + entry fun t_calls_a() { + calls_a!() // assert should point to this line + } +} + +//# run 0x42::m::t_a + +//# run 0x42::m::t_calls_a diff --git a/external-crates/move/crates/move-compiler/src/cfgir/ast.rs b/external-crates/move/crates/move-compiler/src/cfgir/ast.rs index 72507507b8142..e18c8b4e0233c 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/ast.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::{Attributes, Friend, ModuleIdent, Mutability, TargetKind}, hlir::ast::{ BaseType, Command, Command_, EnumDefinition, FunctionSignature, Label, SingleType, diff --git a/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs index 59e44f130a78b..3952c47e44f32 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs @@ -13,7 +13,7 @@ use crate::{ translate::{display_var, DisplayVar}, }, parser::ast::BinOp_, - shared::{unique_map::UniqueMap, CompilationEnv}, + shared::unique_map::UniqueMap, }; use move_proc_macros::growing_stack; @@ -90,7 +90,6 @@ impl TransferFunctions for BorrowSafety { impl AbstractInterpreter for BorrowSafety {} pub fn verify( - compilation_env: &mut CompilationEnv, context: &super::CFGContext, cfg: &super::cfg::MutForwardCFG, ) -> BTreeMap { @@ -100,21 +99,17 @@ pub fn verify( let mut safety = BorrowSafety::new(locals); // check for existing errors - let has_errors = compilation_env.has_errors(); + let has_errors = context.env.has_errors(); let mut initial_state = 
BorrowState::initial(locals, safety.mutably_used.clone(), has_errors); initial_state.bind_arguments(&signature.parameters); initial_state.canonicalize_locals(&safety.local_numbers); let (final_state, ds) = safety.analyze_function(cfg, initial_state); - compilation_env.add_diags(ds); - unused_mut_borrows(compilation_env, context, safety.mutably_used); + context.add_diags(ds); + unused_mut_borrows(context, safety.mutably_used); final_state } -fn unused_mut_borrows( - compilation_env: &mut CompilationEnv, - context: &super::CFGContext, - mutably_used: RefExpInfoMap, -) { +fn unused_mut_borrows(context: &super::CFGContext, mutably_used: RefExpInfoMap) { const MSG: &str = "Mutable reference is never used mutably, \ consider switching to an immutable reference '&' instead"; @@ -143,7 +138,7 @@ fn unused_mut_borrows( } else { diag!(UnusedItem::MutReference, (*loc, MSG)) }; - compilation_env.add_diag(diag) + context.add_diag(diag) } } } diff --git a/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs index fd32f21714754..f5aa20f2619b3 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs @@ -13,7 +13,7 @@ use crate::{ diagnostics::Diagnostics, expansion::ast::Mutability, hlir::ast::{self as H, *}, - shared::{unique_map::UniqueMap, CompilationEnv}, + shared::unique_map::UniqueMap, }; use move_ir_types::location::*; use move_proc_macros::growing_stack; @@ -168,11 +168,7 @@ fn exp(state: &mut LivenessState, parent_e: &Exp) { /// - Reports an error if an assignment/let was not used /// Switches it to an `Ignore` if it has the drop ability (helps with error messages for borrows) -pub fn last_usage( - compilation_env: &mut CompilationEnv, - context: &super::CFGContext, - cfg: &mut MutForwardCFG, -) { +pub fn last_usage(context: &super::CFGContext, cfg: &mut MutForwardCFG) { let super::CFGContext 
{ infinite_loop_starts, .. @@ -183,7 +179,7 @@ pub fn last_usage( .get(lbl) .unwrap_or_else(|| panic!("ICE no liveness states for {}", lbl)); let command_states = per_command_states.get(lbl).unwrap(); - last_usage::block(compilation_env, final_invariant, command_states, block) + last_usage::block(context, final_invariant, command_states, block) } } @@ -191,30 +187,29 @@ mod last_usage { use move_proc_macros::growing_stack; use crate::{ - cfgir::liveness::state::LivenessState, + cfgir::{liveness::state::LivenessState, CFGContext}, diag, hlir::{ ast::*, translate::{display_var, DisplayVar}, }, - shared::*, }; use std::collections::{BTreeSet, VecDeque}; struct Context<'a, 'b> { - env: &'a mut CompilationEnv, + outer: &'a CFGContext<'a>, next_live: &'b BTreeSet, dropped_live: BTreeSet, } impl<'a, 'b> Context<'a, 'b> { fn new( - env: &'a mut CompilationEnv, + outer: &'a CFGContext<'a>, next_live: &'b BTreeSet, dropped_live: BTreeSet, ) -> Self { Context { - env, + outer, next_live, dropped_live, } @@ -222,7 +217,7 @@ mod last_usage { } pub fn block( - compilation_env: &mut CompilationEnv, + context: &CFGContext, final_invariant: &LivenessState, command_states: &VecDeque, block: &mut BasicBlock, @@ -245,10 +240,7 @@ mod last_usage { .difference(next_data) .cloned() .collect::>(); - command( - &mut Context::new(compilation_env, next_data, dropped_live), - cmd, - ) + command(&mut Context::new(context, next_data, dropped_live), cmd) } } @@ -300,7 +292,7 @@ mod last_usage { '_{vstr}')", ); context - .env + .outer .add_diag(diag!(UnusedItem::Assignment, (l.loc, msg))); } *unused_assignment = true; diff --git a/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs index 1793e58dc9139..31a302ece25e1 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs @@ -6,6 +6,7 @@ pub mod state; use super::absint::*; 
use crate::{ + cfgir::CFGContext, diag, diagnostics::{Diagnostic, Diagnostics}, editions::Edition, @@ -16,15 +17,10 @@ use crate::{ }, naming::ast::{self as N, TParam}, parser::ast::{Ability_, DatatypeName}, - shared::{ - program_info::{DatatypeKind, TypingProgramInfo}, - unique_map::UniqueMap, - *, - }, + shared::{program_info::DatatypeKind, unique_map::UniqueMap}, }; use move_ir_types::location::*; use move_proc_macros::growing_stack; -use move_symbol_pool::Symbol; use state::*; use std::collections::BTreeMap; @@ -33,9 +29,7 @@ use std::collections::BTreeMap; //************************************************************************************************** struct LocalsSafety<'a> { - env: &'a CompilationEnv, - info: &'a TypingProgramInfo, - package: Option, + context: &'a CFGContext<'a>, local_types: &'a UniqueMap, signature: &'a FunctionSignature, unused_mut: BTreeMap, @@ -43,9 +37,7 @@ struct LocalsSafety<'a> { impl<'a> LocalsSafety<'a> { fn new( - env: &'a CompilationEnv, - info: &'a TypingProgramInfo, - package: Option, + context: &'a CFGContext<'a>, local_types: &'a UniqueMap, signature: &'a FunctionSignature, ) -> Self { @@ -60,9 +52,7 @@ impl<'a> LocalsSafety<'a> { }) .collect(); Self { - env, - info, - package, + context, local_types, signature, unused_mut, @@ -71,9 +61,7 @@ impl<'a> LocalsSafety<'a> { } struct Context<'a, 'b> { - env: &'a CompilationEnv, - info: &'a TypingProgramInfo, - package: Option, + outer: &'a CFGContext<'a>, local_types: &'a UniqueMap, unused_mut: &'a mut BTreeMap, local_states: &'b mut LocalStates, @@ -83,15 +71,12 @@ struct Context<'a, 'b> { impl<'a, 'b> Context<'a, 'b> { fn new(locals_safety: &'a mut LocalsSafety, local_states: &'b mut LocalStates) -> Self { - let env = locals_safety.env; - let info = locals_safety.info; + let outer = locals_safety.context; let local_types = locals_safety.local_types; let signature = locals_safety.signature; let unused_mut = &mut locals_safety.unused_mut; Self { - env, - info, - package: 
locals_safety.package, + outer, local_types, unused_mut, local_states, @@ -154,18 +139,18 @@ impl<'a, 'b> Context<'a, 'b> { // .unwrap(); fn datatype_decl_loc(&self, m: &ModuleIdent, n: &DatatypeName) -> Loc { - let kind = self.info.datatype_kind(m, n); + let kind = self.outer.info.datatype_kind(m, n); match kind { - DatatypeKind::Struct => self.info.struct_declared_loc(m, n), - DatatypeKind::Enum => self.info.enum_declared_loc(m, n), + DatatypeKind::Struct => self.outer.info.struct_declared_loc(m, n), + DatatypeKind::Enum => self.outer.info.enum_declared_loc(m, n), } } fn datatype_declared_abilities(&self, m: &ModuleIdent, n: &DatatypeName) -> &'a AbilitySet { - let kind = self.info.datatype_kind(m, n); + let kind = self.outer.info.datatype_kind(m, n); match kind { - DatatypeKind::Struct => self.info.struct_declared_abilities(m, n), - DatatypeKind::Enum => self.info.enum_declared_abilities(m, n), + DatatypeKind::Struct => self.outer.info.struct_declared_abilities(m, n), + DatatypeKind::Enum => self.outer.info.enum_declared_abilities(m, n), } } } @@ -189,7 +174,6 @@ impl<'a> TransferFunctions for LocalsSafety<'a> { impl<'a> AbstractInterpreter for LocalsSafety<'a> {} pub fn verify( - compilation_env: &mut CompilationEnv, context: &super::CFGContext, cfg: &super::cfg::MutForwardCFG, ) -> BTreeMap { @@ -197,22 +181,16 @@ pub fn verify( signature, locals, .. 
} = context; let initial_state = LocalStates::initial(&signature.parameters, locals); - let mut locals_safety = LocalsSafety::new( - compilation_env, - context.info, - context.package, - locals, - signature, - ); + let mut locals_safety = LocalsSafety::new(context, locals, signature); let (final_state, ds) = locals_safety.analyze_function(cfg, initial_state); - unused_let_muts(compilation_env, locals, locals_safety.unused_mut); - compilation_env.add_diags(ds); + unused_let_muts(context, locals, locals_safety.unused_mut); + context.add_diags(ds); final_state } /// Generates warnings for unused mut declarations fn unused_let_muts( - env: &mut CompilationEnv, + context: &CFGContext, locals: &UniqueMap, unused_mut_locals: BTreeMap, ) { @@ -226,7 +204,7 @@ fn unused_let_muts( let decl_loc = *locals.get_loc(&v).unwrap(); let decl_msg = format!("The variable '{vstr}' is never used mutably"); let mut_msg = "Consider removing the 'mut' declaration here"; - env.add_diag(diag!( + context.add_diag(diag!( UnusedItem::MutModifier, (decl_loc, decl_msg), (mut_loc, mut_msg) @@ -524,7 +502,7 @@ fn check_mutability( let usage_msg = format!("Invalid {usage} of immutable variable '{vstr}'"); let decl_msg = format!("To use the variable mutably, it must be declared 'mut', e.g. 
'mut {vstr}'"); - if context.env.edition(context.package) == Edition::E2024_MIGRATION { + if context.outer.env.edition(context.outer.package) == Edition::E2024_MIGRATION { context.add_diag(diag!(Migration::NeedsLetMut, (decl_loc, decl_msg.clone()))) } else { let mut diag = diag!( diff --git a/external-crates/move/crates/move-compiler/src/cfgir/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/mod.rs index 958e2a3abb3fc..5dd26eca887e6 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/mod.rs @@ -15,6 +15,7 @@ pub mod visitor; mod optimize; use crate::{ + diagnostics::warning_filters::WarningFiltersScope, expansion::ast::{Attributes, ModuleIdent, Mutability}, hlir::ast::{FunctionSignature, Label, SingleType, Var, Visibility}, shared::{program_info::TypingProgramInfo, unique_map::UniqueMap, CompilationEnv, Name}, @@ -26,6 +27,8 @@ use optimize::optimize; use std::collections::BTreeSet; pub struct CFGContext<'a> { + pub env: &'a CompilationEnv, + pub warning_filters_scope: WarningFiltersScope, pub info: &'a TypingProgramInfo, pub package: Option, pub module: ModuleIdent, @@ -43,16 +46,22 @@ pub enum MemberName { Function(Name), } -pub fn refine_inference_and_verify( - env: &mut CompilationEnv, - context: &CFGContext, - cfg: &mut MutForwardCFG, -) { - liveness::last_usage(env, context, cfg); - let locals_states = locals::verify(env, context, cfg); +pub fn refine_inference_and_verify(context: &CFGContext, cfg: &mut MutForwardCFG) { + liveness::last_usage(context, cfg); + let locals_states = locals::verify(context, cfg); liveness::release_dead_refs(context, &locals_states, cfg); - borrows::verify(env, context, cfg); + borrows::verify(context, cfg); +} + +impl CFGContext<'_> { + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + 
self.env.add_diags(&self.warning_filters_scope, diags); + } } impl MemberName { diff --git a/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs index db73ab7f19c72..72cd2c0e72ffc 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs @@ -43,7 +43,7 @@ const MOVE_2024_OPTIMIZATIONS: &[Optimization] = &[ #[growing_stack] pub fn optimize( - env: &mut CompilationEnv, + env: &CompilationEnv, package: Option, signature: &FunctionSignature, locals: &UniqueMap, diff --git a/external-crates/move/crates/move-compiler/src/cfgir/translate.rs b/external-crates/move/crates/move-compiler/src/cfgir/translate.rs index 93ec88edf4396..fb9692eacd400 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/translate.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/translate.rs @@ -10,9 +10,13 @@ use crate::{ visitor::{CFGIRVisitor, CFGIRVisitorConstructor, CFGIRVisitorContext}, }, diag, - diagnostics::Diagnostics, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, expansion::ast::{Attributes, ModuleIdent, Mutability}, hlir::ast::{self as H, BlockLabel, Label, Value, Value_, Var}, + ice_assert, parser::ast::{ConstantName, FunctionName}, shared::{program_info::TypingProgramInfo, unique_map::UniqueMap, CompilationEnv}, FullyCompiledProgram, @@ -42,8 +46,9 @@ enum NamedBlockType { } struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, info: &'env TypingProgramInfo, + warning_filters_scope: WarningFiltersScope, current_package: Option, label_count: usize, named_blocks: UniqueMap, @@ -52,9 +57,11 @@ struct Context<'env> { } impl<'env> Context<'env> { - pub fn new(env: &'env mut CompilationEnv, info: &'env TypingProgramInfo) -> Self { + pub fn new(env: &'env CompilationEnv, info: &'env 
TypingProgramInfo) -> Self { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, info, current_package: None, label_count: 0, @@ -63,6 +70,22 @@ impl<'env> Context<'env> { } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + fn new_label(&mut self) -> Label { let count = self.label_count; self.label_count += 1; @@ -121,7 +144,7 @@ impl<'env> Context<'env> { //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, _pre_compiled_lib: Option>, prog: H::Program, ) -> G::Program { @@ -170,10 +193,10 @@ fn module( constants: hconstants, } = mdef; context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let constants = constants(context, module_ident, hconstants); let functions = hfunctions.map(|name, f| function(context, module_ident, name, f)); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); context.current_package = None; ( module_ident, @@ -238,7 +261,7 @@ fn constants( "Cyclic constant defined here", )); } - context.env.add_diag(diag); + context.add_diag(diag); cycle_nodes.append(&mut scc.into_iter().collect()); } } @@ -251,7 +274,7 @@ fn constants( .filter(|node| !cycle_nodes.contains(node) && graph.contains_node(*node)) .collect(); for node in neighbors { - context.env.add_diag(diag!( + context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, ( 
*consts.get_loc(&node).unwrap(), @@ -402,7 +425,7 @@ fn constant( value: (locals, block), } = c; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let final_value = constant_( context, constant_values, @@ -427,7 +450,7 @@ fn constant( _ => None, }; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); G::Constant { warning_filter, index, @@ -471,6 +494,8 @@ fn constant_( }; let fake_infinite_loop_starts = BTreeSet::new(); let function_context = super::CFGContext { + env: context.env, + warning_filters_scope: context.warning_filters_scope.clone(), info: context.info, package: context.current_package, module, @@ -482,9 +507,11 @@ fn constant_( locals: &locals, infinite_loop_starts: &fake_infinite_loop_starts, }; - cfgir::refine_inference_and_verify(context.env, &function_context, &mut cfg); - assert!( + cfgir::refine_inference_and_verify(&function_context, &mut cfg); + ice_assert!( + context.env, num_previous_errors == context.env.count_diags(), + full_loc, "{}", ICE_MSG ); @@ -498,7 +525,7 @@ fn constant_( ); if blocks.len() != 1 { - context.env.add_diag(diag!( + context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, (full_loc, CANNOT_FOLD) )); @@ -510,7 +537,7 @@ fn constant_( let e = match cmd_ { C::IgnoreAndPop { exp, .. 
} => exp, _ => { - context.env.add_diag(diag!( + context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, (*cloc, CANNOT_FOLD) )); @@ -532,7 +559,7 @@ fn check_constant_value(context: &mut Context, e: &H::Exp) { use H::UnannotatedExp_ as E; match &e.exp.value { E::Value(_) => (), - _ => context.env.add_diag(diag!( + _ => context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, (e.exp.loc, CANNOT_FOLD) )), @@ -579,7 +606,7 @@ fn function( signature, body, } = f; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let body = function_body( context, module, @@ -590,7 +617,7 @@ fn function( &signature, body, ); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); G::Function { warning_filter, index, @@ -627,9 +654,11 @@ fn function_body( let (mut cfg, infinite_loop_starts, diags) = MutForwardCFG::new(start, &mut blocks, binfo); - context.env.add_diags(diags); + context.add_diags(diags); let function_context = super::CFGContext { + env: context.env, + warning_filters_scope: context.warning_filters_scope.clone(), info: context.info, package: context.current_package, module, @@ -641,7 +670,7 @@ fn function_body( locals: &locals, infinite_loop_starts: &infinite_loop_starts, }; - cfgir::refine_inference_and_verify(context.env, &function_context, &mut cfg); + cfgir::refine_inference_and_verify(&function_context, &mut cfg); // do not optimize if there are errors, warnings are okay if !context.env.has_errors() { cfgir::optimize( @@ -977,7 +1006,8 @@ fn visit_program(context: &mut Context, prog: &mut G::Program) { struct AbsintVisitor; struct AbsintVisitorContext<'a> { - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, info: Arc, current_package: Option, } @@ -985,22 +1015,35 @@ struct AbsintVisitorContext<'a> { impl CFGIRVisitorConstructor for AbsintVisitor { type Context<'a> = AbsintVisitorContext<'a>; - fn 
context<'a>(env: &'a mut CompilationEnv, program: &G::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &G::Program) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); AbsintVisitorContext { env, + warning_filters_scope, info: program.info.clone(), current_package: None, } } } +impl AbsintVisitorContext<'_> { + #[allow(unused)] + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } +} + impl<'a> CFGIRVisitorContext for AbsintVisitorContext<'a> { - fn add_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { - self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } fn visit_module_custom(&mut self, _ident: ModuleIdent, mdef: &G::ModuleDefinition) -> bool { @@ -1035,6 +1078,8 @@ impl<'a> CFGIRVisitorContext for AbsintVisitorContext<'a> { }; let (cfg, infinite_loop_starts) = ImmForwardCFG::new(*start, blocks, block_info.iter()); let function_context = super::CFGContext { + env: self.env, + warning_filters_scope: self.warning_filters_scope.clone(), info: &self.info, package: self.current_package, module: mident, @@ -1048,9 +1093,9 @@ impl<'a> CFGIRVisitorContext for AbsintVisitorContext<'a> { }; let mut ds = Diagnostics::new(); for v in &self.env.visitors().abs_int { - ds.extend(v.verify(self.env, &function_context, &cfg)); + ds.extend(v.verify(&function_context, &cfg)); } - self.env.add_diags(ds); + self.add_diags(ds); true } } diff --git a/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs 
b/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs index 1ec55b3cd3f82..7bebfa4435b02 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs @@ -11,7 +11,7 @@ use crate::{ CFGContext, }, command_line::compiler::Visitor, - diagnostics::{Diagnostic, Diagnostics, WarningFilters}, + diagnostics::{warning_filters::WarningFilters, Diagnostic, Diagnostics}, expansion::ast::ModuleIdent, hlir::ast::{self as H, Command, Exp, LValue, LValue_, Label, ModuleCall, Type, Type_, Var}, parser::ast::{ConstantName, DatatypeName, FunctionName}, @@ -24,7 +24,7 @@ pub type AbsIntVisitorObj = Box; pub type CFGIRVisitorObj = Box; pub trait CFGIRVisitor: Send + Sync { - fn visit(&self, env: &mut CompilationEnv, program: &G::Program); + fn visit(&self, env: &CompilationEnv, program: &G::Program); fn visitor(self) -> Visitor where @@ -35,12 +35,7 @@ pub trait CFGIRVisitor: Send + Sync { } pub trait AbstractInterpreterVisitor: Send + Sync { - fn verify( - &self, - env: &CompilationEnv, - context: &CFGContext, - cfg: &ImmForwardCFG, - ) -> Diagnostics; + fn verify(&self, context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics; fn visitor(self) -> Visitor where @@ -57,16 +52,16 @@ pub trait AbstractInterpreterVisitor: Send + Sync { pub trait CFGIRVisitorConstructor: Send { type Context<'a>: Sized + CFGIRVisitorContext; - fn context<'a>(env: &'a mut CompilationEnv, program: &G::Program) -> Self::Context<'a>; + fn context<'a>(env: &'a CompilationEnv, program: &G::Program) -> Self::Context<'a>; - fn visit(env: &mut CompilationEnv, program: &G::Program) { + fn visit(env: &CompilationEnv, program: &G::Program) { let mut context = Self::context(env, program); context.visit(program); } } pub trait CFGIRVisitorContext { - fn add_warning_filter_scope(&mut self, filter: WarningFilters); + fn push_warning_filter_scope(&mut self, filters: WarningFilters); fn pop_warning_filter_scope(&mut self); fn 
visit_module_custom(&mut self, _ident: ModuleIdent, _mdef: &G::ModuleDefinition) -> bool { @@ -78,7 +73,7 @@ pub trait CFGIRVisitorContext { /// required. fn visit(&mut self, program: &G::Program) { for (mident, mdef) in program.modules.key_cloned_iter() { - self.add_warning_filter_scope(mdef.warning_filter.clone()); + self.push_warning_filter_scope(mdef.warning_filter.clone()); if self.visit_module_custom(mident, mdef) { self.pop_warning_filter_scope(); continue; @@ -117,7 +112,7 @@ pub trait CFGIRVisitorContext { struct_name: DatatypeName, sdef: &H::StructDefinition, ) { - self.add_warning_filter_scope(sdef.warning_filter.clone()); + self.push_warning_filter_scope(sdef.warning_filter.clone()); if self.visit_struct_custom(module, struct_name, sdef) { self.pop_warning_filter_scope(); return; @@ -139,7 +134,7 @@ pub trait CFGIRVisitorContext { enum_name: DatatypeName, edef: &H::EnumDefinition, ) { - self.add_warning_filter_scope(edef.warning_filter.clone()); + self.push_warning_filter_scope(edef.warning_filter.clone()); if self.visit_enum_custom(module, enum_name, edef) { self.pop_warning_filter_scope(); return; @@ -161,7 +156,7 @@ pub trait CFGIRVisitorContext { constant_name: ConstantName, cdef: &G::Constant, ) { - self.add_warning_filter_scope(cdef.warning_filter.clone()); + self.push_warning_filter_scope(cdef.warning_filter.clone()); if self.visit_constant_custom(module, constant_name, cdef) { self.pop_warning_filter_scope(); return; @@ -183,7 +178,7 @@ pub trait CFGIRVisitorContext { function_name: FunctionName, fdef: &G::Function, ) { - self.add_warning_filter_scope(fdef.warning_filter.clone()); + self.push_warning_filter_scope(fdef.warning_filter.clone()); if self.visit_function_custom(module, function_name, fdef) { self.pop_warning_filter_scope(); return; @@ -322,11 +317,62 @@ impl From for CFGIRVisitorObj { } impl CFGIRVisitor for V { - fn visit(&self, env: &mut CompilationEnv, program: &G::Program) { + fn visit(&self, env: &CompilationEnv, program: 
&G::Program) { Self::visit(env, program) } } +macro_rules! simple_visitor { + ($visitor:ident, $($overrides:item),*) => { + pub struct $visitor; + + pub struct Context<'a> { + env: &'a crate::shared::CompilationEnv, + warning_filters_scope: crate::diagnostics::warning_filters::WarningFiltersScope, + } + + impl crate::cfgir::visitor::CFGIRVisitorConstructor for $visitor { + type Context<'a> = Context<'a>; + + fn context<'a>(env: &'a crate::shared::CompilationEnv, _program: &crate::cfgir::ast::Program) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + } + } + } + + impl Context<'_> { + #[allow(unused)] + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + } + + impl crate::cfgir::visitor::CFGIRVisitorContext for Context<'_> { + fn push_warning_filter_scope( + &mut self, + filters: crate::diagnostics::warning_filters::WarningFilters, + ) { + self.warning_filters_scope.push(filters) + } + + fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + + $($overrides)* + } + } +} +pub(crate) use simple_visitor; + //************************************************************************************************** // simple absint visitor //************************************************************************************************** @@ -448,13 +494,12 @@ pub trait SimpleAbsIntConstructor: Sized { /// Given the initial state/domain, construct a new abstract interpreter. 
/// Return None if it should not be run given this context fn new<'a>( - env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, init_state: &mut as SimpleAbsInt>::State, ) -> Option>; - fn verify(env: &CompilationEnv, context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics { + fn verify(context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics { let mut locals = context .locals .key_cloned_iter() @@ -473,7 +518,7 @@ pub trait SimpleAbsIntConstructor: Sized { ); } let mut init_state = as SimpleAbsInt>::State::new(context, locals); - let Some(mut ai) = Self::new(env, context, cfg, &mut init_state) else { + let Some(mut ai) = Self::new(context, cfg, &mut init_state) else { return Diagnostics::new(); }; let (final_state, ds) = ai.analyze_function(cfg, init_state); @@ -760,13 +805,8 @@ impl From for AbsIntVisitorObj { } impl AbstractInterpreterVisitor for V { - fn verify( - &self, - env: &CompilationEnv, - context: &CFGContext, - cfg: &ImmForwardCFG, - ) -> Diagnostics { - ::verify(env, context, cfg) + fn verify(&self, context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics { + ::verify(context, cfg) } } diff --git a/external-crates/move/crates/move-compiler/src/command_line/compiler.rs b/external-crates/move/crates/move-compiler/src/command_line/compiler.rs index 5ab1794628a81..22e7521153301 100644 --- a/external-crates/move/crates/move-compiler/src/command_line/compiler.rs +++ b/external-crates/move/crates/move-compiler/src/command_line/compiler.rs @@ -10,7 +10,8 @@ use crate::{ command_line::{DEFAULT_OUTPUT_DIR, MOVE_COMPILED_INTERFACES_DIR}, compiled_unit::{self, AnnotatedCompiledUnit}, diagnostics::{ - codes::{Severity, WarningFilter}, + codes::Severity, + warning_filters::{WarningFilter, WarningFilters}, *, }, editions::Edition, @@ -376,17 +377,19 @@ impl Compiler { interface_files_dir_opt, &compiled_module_named_address_mapping, )?; - let mut compilation_env = - CompilationEnv::new(flags, visitors, save_hooks, package_configs, 
default_config); - if let Some(filter) = warning_filter { - compilation_env.add_warning_filter_scope(filter); - } + let mut compilation_env = CompilationEnv::new( + flags, + visitors, + save_hooks, + warning_filter, + package_configs, + default_config, + ); for (prefix, filters) in known_warning_filters { compilation_env.add_custom_known_filters(prefix, filters)?; } - let (source_text, pprog, comments) = - parse_program(&mut compilation_env, maps, targets, deps)?; + let (source_text, pprog, comments) = parse_program(&compilation_env, maps, targets, deps)?; for (fhash, (fname, contents)) in &source_text { // TODO better support for bytecode interface file paths @@ -480,12 +483,12 @@ impl SteppedCompiler

{ "Invalid pass for run_to. Target pass precedes the current pass" ); let Self { - mut compilation_env, + compilation_env, pre_compiled_lib, program, } = self; let new_prog = run( - &mut compilation_env, + &compilation_env, pre_compiled_lib.clone(), program.unwrap(), TARGET, @@ -498,10 +501,7 @@ impl SteppedCompiler

{ }) } - pub fn compilation_env(&mut self) -> &mut CompilationEnv { - &mut self.compilation_env - } - pub fn compilation_env_ref(&self) -> &CompilationEnv { + pub fn compilation_env(&self) -> &CompilationEnv { &self.compilation_env } } @@ -657,9 +657,9 @@ pub fn construct_pre_compiled_lib, NamedAddress: Into Ok(Err((files, errors))), Ok(PassResult::Compilation(compiled, _)) => Ok(Ok(FullyCompiledProgram { files, @@ -886,7 +886,7 @@ pub fn move_check_for_errors( ) -> Result<(Vec, Diagnostics), (Pass, Diagnostics)> { let (_, compiler) = comments_and_compiler_res?; - let (mut compiler, cfgir) = compiler.run::()?.into_ast(); + let (compiler, cfgir) = compiler.run::()?.into_ast(); let compilation_env = compiler.compilation_env(); if compilation_env.flags().is_testing() { unit_test::plan_builder::construct_test_plan(compilation_env, None, &cfgir); @@ -922,7 +922,7 @@ impl PassResult { } } - pub fn save(&self, compilation_env: &mut CompilationEnv) { + pub fn save(&self, compilation_env: &CompilationEnv) { match self { PassResult::Parser(prog) => { compilation_env.save_parser_ast(prog); @@ -949,14 +949,14 @@ impl PassResult { } fn run( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, cur: PassResult, until: Pass, ) -> Result { #[growing_stack] fn rec( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, cur: PassResult, until: Pass, diff --git a/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs b/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs index a3d73b45893f9..436f81cfd82f3 100644 --- a/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs +++ b/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs @@ -6,8 +6,6 @@ // Main types //************************************************************************************************** -use crate::shared::FILTER_ALL; - #[derive(PartialEq, Eq, Clone, Copy, Debug, 
Hash, PartialOrd, Ord)] pub enum Severity { Note = 0, @@ -20,8 +18,6 @@ pub enum Severity { /// A an optional prefix to distinguish between different types of warnings (internal vs. possibly /// multiple externally provided ones). pub type ExternalPrefix = Option<&'static str>; -/// The name for a well-known filter. -pub type WellKnownFilterName = &'static str; /// The ID for a diagnostic, consisting of an optional prefix, a category, and a code. pub type DiagnosticsID = (ExternalPrefix, u8, u8); @@ -55,26 +51,6 @@ pub(crate) trait DiagnosticCode: Copy { } } -#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)] -/// Represents a single annotation for a diagnostic filter -pub enum WarningFilter { - /// Filters all warnings - All(ExternalPrefix), - /// Filters all warnings of a specific category. Only known filters have names. - Category { - prefix: ExternalPrefix, - category: u8, - name: Option, - }, - /// Filters a single warning, as defined by codes below. Only known filters have names. - Code { - prefix: ExternalPrefix, - category: u8, - code: u8, - name: Option, - }, -} - //************************************************************************************************** // Categories and Codes //************************************************************************************************** @@ -387,45 +363,6 @@ codes!( ], ); -//************************************************************************************************** -// Warning Filter -//************************************************************************************************** - -impl WarningFilter { - pub fn to_str(self) -> Option<&'static str> { - match self { - Self::All(_) => Some(FILTER_ALL), - Self::Category { name, .. } | Self::Code { name, .. 
} => name, - } - } - - pub fn code( - prefix: ExternalPrefix, - category: u8, - code: u8, - name: Option, - ) -> Self { - Self::Code { - prefix, - category, - code, - name, - } - } - - pub fn category( - prefix: ExternalPrefix, - category: u8, - name: Option, - ) -> Self { - Self::Category { - prefix, - category, - name, - } - } -} - //************************************************************************************************** // impls //************************************************************************************************** diff --git a/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs b/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs index 3c7cbe46de462..5c64209378d30 100644 --- a/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs +++ b/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs @@ -3,19 +3,12 @@ // SPDX-License-Identifier: Apache-2.0 pub mod codes; +pub mod warning_filters; use crate::{ command_line::COLOR_MODE_ENV_VAR, - diagnostics::codes::{ - Category, DiagnosticCode, DiagnosticInfo, ExternalPrefix, Severity, WarningFilter, - WellKnownFilterName, - }, - shared::{ - ast_debug::AstDebug, - files::{ByteSpan, FileByteSpan, FileId, MappedFiles}, - known_attributes, FILTER_UNUSED_CONST, FILTER_UNUSED_FUNCTION, FILTER_UNUSED_MUT_PARAM, - FILTER_UNUSED_MUT_REF, FILTER_UNUSED_STRUCT_FIELD, FILTER_UNUSED_TYPE_PARAMETER, - }, + diagnostics::codes::{Category, DiagnosticCode, DiagnosticInfo, Severity}, + shared::files::{ByteSpan, FileByteSpan, FileId, MappedFiles}, }; use codespan_reporting::{ self as csr, @@ -37,8 +30,6 @@ use std::{ path::PathBuf, }; -use self::codes::UnusedItem; - //************************************************************************************************** // Types //************************************************************************************************** @@ -84,28 +75,6 @@ struct JsonDiagnostic { msg: String, } -#[derive(PartialEq, Eq, Clone, Debug)] 
-/// Used to filter out diagnostics, specifically used for warning suppression -pub struct WarningFilters { - filters: BTreeMap, - for_dependency: bool, // if false, the filters are used for source code -} - -#[derive(PartialEq, Eq, Clone, Debug)] -/// Filters split by category and code -enum UnprefixedWarningFilters { - /// Remove all warnings - All, - Specified { - /// Remove all diags of this category with optional known name - categories: BTreeMap>, - /// Remove specific diags with optional known filter name - codes: BTreeMap<(u8, u8), Option>, - }, - /// No filter - Empty, -} - #[derive(PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] enum MigrationChange { AddMut, @@ -776,7 +745,7 @@ macro_rules! ice { macro_rules! ice_assert { ($env: expr, $cond: expr, $loc: expr, $($arg:tt)*) => {{ if !$cond { - $env.add_diag($crate::ice!(( + $env.add_error_diag($crate::ice!(( $loc, format!($($arg)*), ))); @@ -797,180 +766,6 @@ pub fn print_stack_trace() { } } -impl WarningFilters { - pub fn new_for_source() -> Self { - Self { - filters: BTreeMap::new(), - for_dependency: false, - } - } - - pub fn new_for_dependency() -> Self { - Self { - filters: BTreeMap::new(), - for_dependency: true, - } - } - - pub fn is_filtered(&self, diag: &Diagnostic) -> bool { - self.is_filtered_by_info(&diag.info) - } - - fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { - let prefix = info.external_prefix(); - self.filters - .get(&prefix) - .is_some_and(|filters| filters.is_filtered_by_info(info)) - } - - pub fn union(&mut self, other: &Self) { - for (prefix, filters) in &other.filters { - self.filters - .entry(*prefix) - .or_insert_with(UnprefixedWarningFilters::new) - .union(filters); - } - // if there is a dependency code filter on the stack, it means we are filtering dependent - // code and this information must be preserved when stacking up additional filters (which - // involves union of the current filter with the new one) - self.for_dependency = self.for_dependency || 
other.for_dependency; - } - - pub fn add(&mut self, filter: WarningFilter) { - let (prefix, category, code, name) = match filter { - WarningFilter::All(prefix) => { - self.filters.insert(prefix, UnprefixedWarningFilters::All); - return; - } - WarningFilter::Category { - prefix, - category, - name, - } => (prefix, category, None, name), - WarningFilter::Code { - prefix, - category, - code, - name, - } => (prefix, category, Some(code), name), - }; - self.filters - .entry(prefix) - .or_insert(UnprefixedWarningFilters::Empty) - .add(category, code, name) - } - - pub fn unused_warnings_filter_for_test() -> Self { - Self { - filters: BTreeMap::from([( - None, - UnprefixedWarningFilters::unused_warnings_filter_for_test(), - )]), - for_dependency: false, - } - } - - pub fn for_dependency(&self) -> bool { - self.for_dependency - } -} - -impl UnprefixedWarningFilters { - pub fn new() -> Self { - Self::Empty - } - - fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { - match self { - Self::All => info.severity() <= Severity::Warning, - Self::Specified { categories, codes } => { - info.severity() <= Severity::Warning - && (categories.contains_key(&info.category()) - || codes.contains_key(&(info.category(), info.code()))) - } - Self::Empty => false, - } - } - - pub fn union(&mut self, other: &Self) { - match (self, other) { - // if self is empty, just take the other filter - (s @ Self::Empty, _) => *s = other.clone(), - // if other is empty, or self is ALL, no change to the filter - (_, Self::Empty) => (), - (Self::All, _) => (), - // if other is all, self is now all - (s, Self::All) => *s = Self::All, - // category and code level union - ( - Self::Specified { categories, codes }, - Self::Specified { - categories: other_categories, - codes: other_codes, - }, - ) => { - categories.extend(other_categories); - // remove any codes covered by the category level filter - codes.extend( - other_codes - .iter() - .filter(|((category, _), _)| 
!categories.contains_key(category)), - ); - } - } - } - - /// Add a specific filter to the filter map. - /// If filter_code is None, then the filter applies to all codes in the filter_category. - fn add( - &mut self, - filter_category: u8, - filter_code: Option, - filter_name: Option, - ) { - match self { - Self::All => (), - Self::Empty => { - *self = Self::Specified { - categories: BTreeMap::new(), - codes: BTreeMap::new(), - }; - self.add(filter_category, filter_code, filter_name) - } - Self::Specified { categories, .. } if categories.contains_key(&filter_category) => (), - Self::Specified { categories, codes } => { - if let Some(filter_code) = filter_code { - codes.insert((filter_category, filter_code), filter_name); - } else { - categories.insert(filter_category, filter_name); - codes.retain(|(category, _), _| *category != filter_category); - } - } - } - } - - pub fn unused_warnings_filter_for_test() -> Self { - let filtered_codes = [ - (UnusedItem::Function, FILTER_UNUSED_FUNCTION), - (UnusedItem::StructField, FILTER_UNUSED_STRUCT_FIELD), - (UnusedItem::FunTypeParam, FILTER_UNUSED_TYPE_PARAMETER), - (UnusedItem::Constant, FILTER_UNUSED_CONST), - (UnusedItem::MutReference, FILTER_UNUSED_MUT_REF), - (UnusedItem::MutParam, FILTER_UNUSED_MUT_PARAM), - ] - .into_iter() - .map(|(item, filter)| { - let info = item.into_info(); - ((info.category(), info.code()), Some(filter)) - }) - .collect(); - Self::Specified { - categories: BTreeMap::new(), - codes: filtered_codes, - } - } -} - impl Migration { pub fn new( mapped_files: MappedFiles, @@ -1207,43 +1002,6 @@ impl From> for Diagnostics { } } -impl AstDebug for WarningFilters { - fn ast_debug(&self, w: &mut crate::shared::ast_debug::AstWriter) { - for (prefix, filters) in &self.filters { - let prefix_str = prefix.unwrap_or(known_attributes::DiagnosticAttribute::ALLOW); - match filters { - UnprefixedWarningFilters::All => w.write(format!( - "#[{}({})]", - prefix_str, - WarningFilter::All(*prefix).to_str().unwrap(), - 
)), - UnprefixedWarningFilters::Specified { categories, codes } => { - w.write(format!("#[{}(", prefix_str)); - let items = categories - .iter() - .map(|(cat, n)| WarningFilter::Category { - prefix: *prefix, - category: *cat, - name: *n, - }) - .chain(codes.iter().map(|((cat, code), n)| WarningFilter::Code { - prefix: *prefix, - category: *cat, - code: *code, - name: *n, - })); - w.list(items, ",", |w, filter| { - w.write(filter.to_str().unwrap()); - false - }); - w.write(")]") - } - UnprefixedWarningFilters::Empty => (), - } - } - } -} - impl From for DiagnosticInfo { fn from(value: C) -> Self { value.into_info() diff --git a/external-crates/move/crates/move-compiler/src/diagnostics/warning_filters.rs b/external-crates/move/crates/move-compiler/src/diagnostics/warning_filters.rs new file mode 100644 index 0000000000000..556277f5c5238 --- /dev/null +++ b/external-crates/move/crates/move-compiler/src/diagnostics/warning_filters.rs @@ -0,0 +1,442 @@ +use crate::{ + diagnostics::{ + codes::{Category, DiagnosticInfo, ExternalPrefix, Severity, UnusedItem}, + Diagnostic, DiagnosticCode, + }, + shared::{known_attributes, AstDebug}, +}; +use move_symbol_pool::Symbol; +use std::collections::{BTreeMap, BTreeSet}; + +pub const FILTER_ALL: &str = "all"; +pub const FILTER_UNUSED: &str = "unused"; +pub const FILTER_MISSING_PHANTOM: &str = "missing_phantom"; +pub const FILTER_UNUSED_USE: &str = "unused_use"; +pub const FILTER_UNUSED_VARIABLE: &str = "unused_variable"; +pub const FILTER_UNUSED_ASSIGNMENT: &str = "unused_assignment"; +pub const FILTER_UNUSED_TRAILING_SEMI: &str = "unused_trailing_semi"; +pub const FILTER_UNUSED_ATTRIBUTE: &str = "unused_attribute"; +pub const FILTER_UNUSED_TYPE_PARAMETER: &str = "unused_type_parameter"; +pub const FILTER_UNUSED_FUNCTION: &str = "unused_function"; +pub const FILTER_UNUSED_STRUCT_FIELD: &str = "unused_field"; +pub const FILTER_UNUSED_CONST: &str = "unused_const"; +pub const FILTER_DEAD_CODE: &str = "dead_code"; +pub const 
FILTER_UNUSED_LET_MUT: &str = "unused_let_mut"; +pub const FILTER_UNUSED_MUT_REF: &str = "unused_mut_ref"; +pub const FILTER_UNUSED_MUT_PARAM: &str = "unused_mut_parameter"; +pub const FILTER_IMPLICIT_CONST_COPY: &str = "implicit_const_copy"; +pub const FILTER_DUPLICATE_ALIAS: &str = "duplicate_alias"; +pub const FILTER_DEPRECATED: &str = "deprecated_usage"; +pub const FILTER_IDE_PATH_AUTOCOMPLETE: &str = "ide_path_autocomplete"; +pub const FILTER_IDE_DOT_AUTOCOMPLETE: &str = "ide_dot_autocomplete"; + +macro_rules! known_code_filter { + ($name:ident, $category:ident::$code:ident) => {{ + use crate::diagnostics::codes::*; + ( + move_symbol_pool::Symbol::from($name), + std::collections::BTreeSet::from([ + crate::diagnostics::warning_filters::WarningFilter::Code { + prefix: None, + category: Category::$category as u8, + code: $category::$code as u8, + name: Some($name), + }, + ]), + ) + }}; +} +pub(crate) use known_code_filter; + +//************************************************************************************************** +// Types +//************************************************************************************************** + +/// None for the default 'allow'. +/// Some(prefix) for a custom set of warnings, e.g. 'allow(lint(_))'. 
+pub type FilterPrefix = Option; +pub type FilterName = Symbol; + +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct WarningFiltersScope { + scopes: Vec, +} + +#[derive(PartialEq, Eq, Clone, Debug)] +/// Used to filter out diagnostics, specifically used for warning suppression +pub struct WarningFilters { + filters: BTreeMap, + for_dependency: bool, // if false, the filters are used for source code +} + +#[derive(PartialEq, Eq, Clone, Debug)] +/// Filters split by category and code +enum UnprefixedWarningFilters { + /// Remove all warnings + All, + Specified { + /// Remove all diags of this category with optional known name + categories: BTreeMap>, + /// Remove specific diags with optional known filter name + codes: BTreeMap<(u8, u8), Option>, + }, + /// No filter + Empty, +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)] +/// Represents a single annotation for a diagnostic filter +pub enum WarningFilter { + /// Filters all warnings + All(ExternalPrefix), + /// Filters all warnings of a specific category. Only known filters have names. + Category { + prefix: ExternalPrefix, + category: u8, + name: Option, + }, + /// Filters a single warning, as defined by codes below. Only known filters have names. + Code { + prefix: ExternalPrefix, + category: u8, + code: u8, + name: Option, + }, +} + +/// The name for a well-known filter. 
+pub type WellKnownFilterName = &'static str; + +//************************************************************************************************** +// impls +//************************************************************************************************** + +impl WarningFiltersScope { + /// Unsafe and should be used only for internal purposes, such as ide annotations + pub(crate) const EMPTY: &'static Self = &WarningFiltersScope { scopes: vec![] }; + + pub(crate) fn new(top_level_warning_filter: Option) -> Self { + Self { + scopes: top_level_warning_filter.into_iter().collect(), + } + } + + pub fn push(&mut self, filters: WarningFilters) { + self.scopes.push(filters) + } + + pub fn pop(&mut self) { + self.scopes.pop().unwrap(); + } + + pub fn is_filtered(&self, diag: &Diagnostic) -> bool { + self.scopes.iter().any(|filters| filters.is_filtered(diag)) + } + + pub fn is_filtered_for_dependency(&self) -> bool { + self.scopes.iter().any(|filters| filters.for_dependency()) + } +} + +impl WarningFilters { + pub const fn new_for_source() -> Self { + Self { + filters: BTreeMap::new(), + for_dependency: false, + } + } + + pub const fn new_for_dependency() -> Self { + Self { + filters: BTreeMap::new(), + for_dependency: true, + } + } + + pub fn is_filtered(&self, diag: &Diagnostic) -> bool { + self.is_filtered_by_info(&diag.info) + } + + fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { + let prefix = info.external_prefix(); + self.filters + .get(&prefix) + .is_some_and(|filters| filters.is_filtered_by_info(info)) + } + + pub fn union(&mut self, other: &Self) { + for (prefix, filters) in &other.filters { + self.filters + .entry(*prefix) + .or_insert_with(UnprefixedWarningFilters::new) + .union(filters); + } + // if there is a dependency code filter on the stack, it means we are filtering dependent + // code and this information must be preserved when stacking up additional filters (which + // involves union of the current filter with the new one) + 
self.for_dependency = self.for_dependency || other.for_dependency; + } + + pub fn add(&mut self, filter: WarningFilter) { + let (prefix, category, code, name) = match filter { + WarningFilter::All(prefix) => { + self.filters.insert(prefix, UnprefixedWarningFilters::All); + return; + } + WarningFilter::Category { + prefix, + category, + name, + } => (prefix, category, None, name), + WarningFilter::Code { + prefix, + category, + code, + name, + } => (prefix, category, Some(code), name), + }; + self.filters + .entry(prefix) + .or_insert(UnprefixedWarningFilters::Empty) + .add(category, code, name) + } + + pub fn unused_warnings_filter_for_test() -> Self { + Self { + filters: BTreeMap::from([( + None, + UnprefixedWarningFilters::unused_warnings_filter_for_test(), + )]), + for_dependency: false, + } + } + + pub fn for_dependency(&self) -> bool { + self.for_dependency + } +} + +impl UnprefixedWarningFilters { + pub fn new() -> Self { + Self::Empty + } + + fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { + match self { + Self::All => info.severity() <= Severity::Warning, + Self::Specified { categories, codes } => { + info.severity() <= Severity::Warning + && (categories.contains_key(&info.category()) + || codes.contains_key(&(info.category(), info.code()))) + } + Self::Empty => false, + } + } + + pub fn union(&mut self, other: &Self) { + match (self, other) { + // if self is empty, just take the other filter + (s @ Self::Empty, _) => *s = other.clone(), + // if other is empty, or self is ALL, no change to the filter + (_, Self::Empty) => (), + (Self::All, _) => (), + // if other is all, self is now all + (s, Self::All) => *s = Self::All, + // category and code level union + ( + Self::Specified { categories, codes }, + Self::Specified { + categories: other_categories, + codes: other_codes, + }, + ) => { + categories.extend(other_categories); + // remove any codes covered by the category level filter + codes.extend( + other_codes + .iter() + 
.filter(|((category, _), _)| !categories.contains_key(category)), + ); + } + } + } + + /// Add a specific filter to the filter map. + /// If filter_code is None, then the filter applies to all codes in the filter_category. + fn add( + &mut self, + filter_category: u8, + filter_code: Option, + filter_name: Option, + ) { + match self { + Self::All => (), + Self::Empty => { + *self = Self::Specified { + categories: BTreeMap::new(), + codes: BTreeMap::new(), + }; + self.add(filter_category, filter_code, filter_name) + } + Self::Specified { categories, .. } if categories.contains_key(&filter_category) => (), + Self::Specified { categories, codes } => { + if let Some(filter_code) = filter_code { + codes.insert((filter_category, filter_code), filter_name); + } else { + categories.insert(filter_category, filter_name); + codes.retain(|(category, _), _| *category != filter_category); + } + } + } + } + + pub fn unused_warnings_filter_for_test() -> Self { + let filtered_codes = [ + (UnusedItem::Function, FILTER_UNUSED_FUNCTION), + (UnusedItem::StructField, FILTER_UNUSED_STRUCT_FIELD), + (UnusedItem::FunTypeParam, FILTER_UNUSED_TYPE_PARAMETER), + (UnusedItem::Constant, FILTER_UNUSED_CONST), + (UnusedItem::MutReference, FILTER_UNUSED_MUT_REF), + (UnusedItem::MutParam, FILTER_UNUSED_MUT_PARAM), + ] + .into_iter() + .map(|(item, filter)| { + let info = item.into_info(); + ((info.category(), info.code()), Some(filter)) + }) + .collect(); + Self::Specified { + categories: BTreeMap::new(), + codes: filtered_codes, + } + } +} + +impl WarningFilter { + pub fn to_str(self) -> Option<&'static str> { + match self { + Self::All(_) => Some(FILTER_ALL), + Self::Category { name, .. } | Self::Code { name, .. 
} => name, + } + } + + pub fn code( + prefix: ExternalPrefix, + category: u8, + code: u8, + name: Option, + ) -> Self { + Self::Code { + prefix, + category, + code, + name, + } + } + + pub fn category( + prefix: ExternalPrefix, + category: u8, + name: Option, + ) -> Self { + Self::Category { + prefix, + category, + name, + } + } + + pub fn compiler_known_filters() -> BTreeMap> { + BTreeMap::from([ + ( + FILTER_ALL.into(), + BTreeSet::from([WarningFilter::All(None)]), + ), + ( + FILTER_UNUSED.into(), + BTreeSet::from([WarningFilter::Category { + prefix: None, + category: Category::UnusedItem as u8, + name: Some(FILTER_UNUSED), + }]), + ), + known_code_filter!(FILTER_MISSING_PHANTOM, Declarations::InvalidNonPhantomUse), + known_code_filter!(FILTER_UNUSED_USE, UnusedItem::Alias), + known_code_filter!(FILTER_UNUSED_VARIABLE, UnusedItem::Variable), + known_code_filter!(FILTER_UNUSED_ASSIGNMENT, UnusedItem::Assignment), + known_code_filter!(FILTER_UNUSED_TRAILING_SEMI, UnusedItem::TrailingSemi), + known_code_filter!(FILTER_UNUSED_ATTRIBUTE, UnusedItem::Attribute), + known_code_filter!(FILTER_UNUSED_FUNCTION, UnusedItem::Function), + known_code_filter!(FILTER_UNUSED_STRUCT_FIELD, UnusedItem::StructField), + ( + FILTER_UNUSED_TYPE_PARAMETER.into(), + BTreeSet::from([ + WarningFilter::Code { + prefix: None, + category: Category::UnusedItem as u8, + code: UnusedItem::StructTypeParam as u8, + name: Some(FILTER_UNUSED_TYPE_PARAMETER), + }, + WarningFilter::Code { + prefix: None, + category: Category::UnusedItem as u8, + code: UnusedItem::FunTypeParam as u8, + name: Some(FILTER_UNUSED_TYPE_PARAMETER), + }, + ]), + ), + known_code_filter!(FILTER_UNUSED_CONST, UnusedItem::Constant), + known_code_filter!(FILTER_DEAD_CODE, UnusedItem::DeadCode), + known_code_filter!(FILTER_UNUSED_LET_MUT, UnusedItem::MutModifier), + known_code_filter!(FILTER_UNUSED_MUT_REF, UnusedItem::MutReference), + known_code_filter!(FILTER_UNUSED_MUT_PARAM, UnusedItem::MutParam), + 
known_code_filter!(FILTER_IMPLICIT_CONST_COPY, TypeSafety::ImplicitConstantCopy), + known_code_filter!(FILTER_DUPLICATE_ALIAS, Declarations::DuplicateAlias), + known_code_filter!(FILTER_DEPRECATED, TypeSafety::DeprecatedUsage), + ]) + } + + pub fn ide_known_filters() -> BTreeMap> { + BTreeMap::from([ + known_code_filter!(FILTER_IDE_PATH_AUTOCOMPLETE, IDE::PathAutocomplete), + known_code_filter!(FILTER_IDE_DOT_AUTOCOMPLETE, IDE::DotAutocomplete), + ]) + } +} + +impl AstDebug for WarningFilters { + fn ast_debug(&self, w: &mut crate::shared::ast_debug::AstWriter) { + for (prefix, filters) in &self.filters { + let prefix_str = prefix.unwrap_or(known_attributes::DiagnosticAttribute::ALLOW); + match filters { + UnprefixedWarningFilters::All => w.write(format!( + "#[{}({})]", + prefix_str, + WarningFilter::All(*prefix).to_str().unwrap(), + )), + UnprefixedWarningFilters::Specified { categories, codes } => { + w.write(format!("#[{}(", prefix_str)); + let items = categories + .iter() + .map(|(cat, n)| WarningFilter::Category { + prefix: *prefix, + category: *cat, + name: *n, + }) + .chain(codes.iter().map(|((cat, code), n)| WarningFilter::Code { + prefix: *prefix, + category: *cat, + code: *code, + name: *n, + })); + w.list(items, ",", |w, filter| { + w.write(filter.to_str().unwrap()); + false + }); + w.write(")]") + } + UnprefixedWarningFilters::Empty => (), + } + } + } +} diff --git a/external-crates/move/crates/move-compiler/src/editions/mod.rs b/external-crates/move/crates/move-compiler/src/editions/mod.rs index 5cfea4d99d59d..910a977791337 100644 --- a/external-crates/move/crates/move-compiler/src/editions/mod.rs +++ b/external-crates/move/crates/move-compiler/src/editions/mod.rs @@ -72,13 +72,13 @@ pub const UPGRADE_NOTE: &str = /// Returns true if the feature is present in the given edition. /// Adds an error to the environment. 
pub fn check_feature_or_error( - env: &mut CompilationEnv, + env: &CompilationEnv, edition: Edition, feature: FeatureGate, loc: Loc, ) -> bool { if !edition.supports(feature) { - env.add_diag(create_feature_error(edition, feature, loc)); + env.add_error_diag(create_feature_error(edition, feature, loc)); false } else { true diff --git a/external-crates/move/crates/move-compiler/src/expansion/ast.rs b/external-crates/move/crates/move-compiler/src/expansion/ast.rs index 1fb7b04aad199..9a04042389e1a 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/ast.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, parser::ast::{ self as P, Ability, Ability_, BinOp, BlockLabel, ConstantName, DatatypeName, Field, FunctionName, ModuleName, QuantKind, UnaryOp, Var, VariantName, ENTRY_MODIFIER, @@ -397,7 +397,7 @@ pub enum Exp_ { Pack(ModuleAccess, Option>, Fields), Vector(Loc, Option>, Spanned>), - IfElse(Box, Box, Box), + IfElse(Box, Box, Option>), Match(Box, Spanned>), While(Option, Box, Box), Loop(Option, Box), @@ -414,7 +414,7 @@ pub enum Exp_ { Assign(LValueList, Box), FieldMutate(Box, Box), Mutate(Box, Box), - Abort(Box), + Abort(Option>), Return(Option, Box), Break(Option, Box), Continue(Option), @@ -1571,13 +1571,15 @@ impl AstDebug for Exp_ { w.comma(elems, |w, e| e.ast_debug(w)); w.write("]"); } - E::IfElse(b, t, f) => { + E::IfElse(b, t, f_opt) => { w.write("if ("); b.ast_debug(w); w.write(") "); t.ast_debug(w); - w.write(" else "); - f.ast_debug(w); + if let Some(f) = f_opt { + w.write(" else "); + f.ast_debug(w); + } } E::Match(subject, arms) => { w.write("match ("); @@ -1650,8 +1652,11 @@ impl AstDebug for Exp_ { } E::Abort(e) => { - w.write("abort "); - e.ast_debug(w); + w.write("abort"); + if let Some(e) = e { + w.write(" "); + e.ast_debug(w); + } } E::Return(name, e) => { 
w.write("return "); diff --git a/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs b/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs index 1991ab59a7f98..dcd9de94e0d93 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs @@ -103,7 +103,7 @@ impl NameCase { #[allow(clippy::result_unit_err)] pub fn check_valid_address_name( - env: &mut CompilationEnv, + env: &CompilationEnv, sp!(_, ln_): &P::LeadingNameAccess, ) -> Result<(), ()> { use P::LeadingNameAccess_ as LN; @@ -120,11 +120,7 @@ pub fn valid_local_variable_name(s: Symbol) -> bool { } #[allow(clippy::result_unit_err)] -pub fn check_valid_function_parameter_name( - env: &mut CompilationEnv, - is_macro: Option, - v: &Var, -) { +pub fn check_valid_function_parameter_name(env: &CompilationEnv, is_macro: Option, v: &Var) { const SYNTAX_IDENTIFIER_NOTE: &str = "'macro' parameters start with '$' to indicate that their arguments are not evaluated \ before the macro is expanded, meaning the entire expression is substituted. \ @@ -144,7 +140,7 @@ pub fn check_valid_function_parameter_name( (macro_loc, macro_msg), ); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } } else if is_syntax_identifier { let msg = format!( @@ -153,26 +149,26 @@ pub fn check_valid_function_parameter_name( ); let mut diag = diag!(Declarations::InvalidName, (v.loc(), msg)); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } else if !is_valid_local_variable_name(v.value()) { let msg = format!( "Invalid parameter name '{}'. 
Local variable names must start with 'a'..'z', '_', \ or be a valid name quoted with backticks (`name`)", v, ); - env.add_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); } let _ = check_restricted_name_all_cases(env, NameCase::Variable, &v.0); } -pub fn check_valid_local_name(env: &mut CompilationEnv, v: &Var) { +pub fn check_valid_local_name(env: &CompilationEnv, v: &Var) { if !is_valid_local_variable_name(v.value()) { let msg = format!( "Invalid local name '{}'. Local variable names must start with 'a'..'z', '_', \ or be a valid name quoted with backticks (`name`)", v, ); - env.add_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); } let _ = check_restricted_name_all_cases(env, NameCase::Variable, &v.0); } @@ -182,7 +178,7 @@ fn is_valid_local_variable_name(s: Symbol) -> bool { } pub fn check_valid_module_member_name( - env: &mut CompilationEnv, + env: &CompilationEnv, member: ModuleMemberKind, name: Name, ) -> Option { @@ -193,7 +189,7 @@ pub fn check_valid_module_member_name( } pub fn check_valid_module_member_alias( - env: &mut CompilationEnv, + env: &CompilationEnv, member: ModuleMemberKind, alias: Name, ) -> Option { @@ -209,7 +205,7 @@ pub fn check_valid_module_member_alias( } fn check_valid_module_member_name_impl( - env: &mut CompilationEnv, + env: &CompilationEnv, member: ModuleMemberKind, n: &Name, case: NameCase, @@ -231,7 +227,7 @@ fn check_valid_module_member_name_impl( n, upper_first_letter(case.name()), ); - env.add_diag(diag!(Declarations::InvalidName, (n.loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (n.loc, msg))); return Err(()); } } @@ -243,7 +239,7 @@ fn check_valid_module_member_name_impl( n, upper_first_letter(case.name()), ); - env.add_diag(diag!(Declarations::InvalidName, (n.loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (n.loc, msg))); return 
Err(()); } } @@ -272,14 +268,14 @@ fn check_valid_module_member_name_impl( #[allow(clippy::result_unit_err)] pub fn check_valid_type_parameter_name( - env: &mut CompilationEnv, + env: &CompilationEnv, is_macro: Option, n: &Name, ) -> Result<(), ()> { // TODO move these names to a more central place? if n.value == symbol!("_") { let diag = restricted_name_error(NameCase::TypeParameter, n.loc, "_"); - env.add_diag(diag); + env.add_error_diag(diag); return Err(()); } @@ -302,7 +298,7 @@ pub fn check_valid_type_parameter_name( (macro_loc, macro_msg), ); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } else { let next_char = n.value.chars().nth(1).unwrap(); if !next_char.is_ascii_alphabetic() { @@ -314,7 +310,7 @@ pub fn check_valid_type_parameter_name( ); let mut diag = diag!(Declarations::InvalidName, (n.loc, msg)); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } } } else if is_syntax_ident { @@ -325,7 +321,7 @@ pub fn check_valid_type_parameter_name( ); let mut diag = diag!(Declarations::InvalidName, (n.loc, msg)); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } // TODO move these names to a more central place? 
@@ -353,7 +349,7 @@ pub fn is_valid_datatype_or_constant_name(s: &str) -> bool { // Checks for a restricted name in any decl case // Self and vector are not allowed pub fn check_restricted_name_all_cases( - env: &mut CompilationEnv, + env: &CompilationEnv, case: NameCase, n: &Name, ) -> Result<(), ()> { @@ -373,7 +369,7 @@ pub fn check_restricted_name_all_cases( case.name(), n, ); - env.add_diag(diag!(Declarations::InvalidName, (n.loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (n.loc, msg))); return Err(()); } } @@ -385,7 +381,7 @@ pub fn check_restricted_name_all_cases( if n_str == ModuleName::SELF_NAME || (!can_be_vector && n_str == crate::naming::ast::BuiltinTypeName_::VECTOR) { - env.add_diag(restricted_name_error(case, n.loc, n_str)); + env.add_error_diag(restricted_name_error(case, n.loc, n_str)); Err(()) } else { Ok(()) @@ -393,13 +389,13 @@ pub fn check_restricted_name_all_cases( } fn check_restricted_names( - env: &mut CompilationEnv, + env: &CompilationEnv, case: NameCase, sp!(loc, n_): &Name, all_names: &BTreeSet, ) -> Result<(), ()> { if all_names.contains(n_) { - env.add_diag(restricted_name_error(case, *loc, n_)); + env.add_error_diag(restricted_name_error(case, *loc, n_)); Err(()) } else { Ok(()) diff --git a/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs b/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs index 70e7cde321745..3c5c2ebadd262 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs @@ -247,7 +247,7 @@ impl Move2024PathExpander { NR::Address(name.loc, make_address(context, name, name.loc, address)) } Some(AliasEntry::TypeParam(_)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( name.loc, "ICE alias map misresolved name as type param" ))); @@ -270,7 +270,7 @@ impl Move2024PathExpander { NR::ModuleAccess(name.loc, mident, mem) } 
AliasEntry::TypeParam(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( name.loc, "ICE alias map misresolved name as type param" ))); @@ -318,7 +318,7 @@ impl Move2024PathExpander { .join(","); diag.add_note(format!("Type arguments are used with the enum, as '{mident}::{name}<{tys}>::{variant}'")) } - context.env.add_diag(diag); + context.add_diag(diag); } } } @@ -326,7 +326,7 @@ impl Move2024PathExpander { fn check_is_macro(context: &mut DefnContext, is_macro: &Option, result: &NR) { if let NR::Address(_, _) | NR::ModuleIdent(_, _) = result { if let Some(loc) = is_macro { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::InvalidTypeParameter, ( *loc, @@ -385,7 +385,7 @@ impl Move2024PathExpander { && root.tyargs.is_none() => { if let Some(address) = top_level_address_opt(context, root.name) { - context.env.add_diag(diag!( + context.add_diag(diag!( Migration::NeedsGlobalQualification, (root.name.loc, "Must globally qualify name") )); @@ -467,9 +467,7 @@ impl Move2024PathExpander { is_macro = entry.is_macro; } NR::UnresolvedName(_, _) => { - context - .env - .add_diag(ice!((loc, "ICE access chain expansion failed"))); + context.add_diag(ice!((loc, "ICE access chain expansion failed"))); break; } NR::ResolutionFailure(_, _) => break, @@ -553,7 +551,6 @@ impl PathExpander for Move2024PathExpander { m_res.err_name() ); context - .env .add_diag(diag!(Attributes::AmbiguousAttributeValue, (loc, msg))); return None; } @@ -561,7 +558,7 @@ impl PathExpander for Move2024PathExpander { match result { NR::ModuleIdent(_, mident) => { if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (loc, format!("Unbound module '{}'", mident)) )); @@ -581,11 +578,11 @@ impl PathExpander for Move2024PathExpander { } NR::Address(_, a) => EV::Address(a), result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + 
context.add_diag(access_chain_resolution_error(result)); return None; } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); return None; } } @@ -628,7 +625,7 @@ impl PathExpander for Move2024PathExpander { access, ); diag.add_note("Variants may not be used as types. Use the enum instead."); - context.env.add_diag(diag); + context.add_diag(diag); // We could try to use the member access to try to keep going. return None; } @@ -637,7 +634,7 @@ impl PathExpander for Move2024PathExpander { (access, tyargs, is_macro) } NR::Address(_, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), resolved_name.name(), access, @@ -658,15 +655,15 @@ impl PathExpander for Move2024PathExpander { base_str, realized_str )); } - context.env.add_diag(diag); + context.add_diag(diag); return None; } result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + context.add_diag(access_chain_resolution_error(result)); return None; } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); return None; } } @@ -692,7 +689,7 @@ impl PathExpander for Move2024PathExpander { (access, tyargs, is_macro) } NR::Address(_, _) | NR::ModuleIdent(_, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), resolved_name.name(), access, @@ -700,18 +697,18 @@ impl PathExpander for Move2024PathExpander { return None; } result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + context.add_diag(access_chain_resolution_error(result)); return None; } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); return None; } } } 
}, Access::Module => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "ICE module access should never resolve to a module member" ))); @@ -734,11 +731,11 @@ impl PathExpander for Move2024PathExpander { match resolved_name { NR::ModuleIdent(_, mident) => Some(mident), NR::UnresolvedName(_, name) => { - context.env.add_diag(unbound_module_error(name)); + context.add_diag(unbound_module_error(name)); None } NR::Address(_, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), "address".to_string(), Access::Module, @@ -746,7 +743,7 @@ impl PathExpander for Move2024PathExpander { None } NR::ModuleAccess(_, _, _) | NR::Variant(_, _, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), "module member".to_string(), Access::Module, @@ -754,11 +751,11 @@ impl PathExpander for Move2024PathExpander { None } result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + context.add_diag(access_chain_resolution_error(result)); None } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); None } } @@ -767,9 +764,7 @@ impl PathExpander for Move2024PathExpander { fn ide_autocomplete_suggestion(&mut self, context: &mut DefnContext, loc: Loc) { if context.env.ide_mode() { let info = self.aliases.get_ide_alias_information(); - context - .env - .add_ide_annotation(loc, IDEAnnotation::PathAutocompleteInfo(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::PathAutocompleteInfo(Box::new(info))); } } } @@ -934,7 +929,7 @@ impl PathExpander for LegacyPathExpander { let sp!(_, mident_) = self.aliases.module_alias_get(&name).unwrap(); let mident = sp(ident_loc, mident_); if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( 
NameResolution::UnboundModule, (ident_loc, format!("Unbound module '{}'", mident)) )); @@ -949,7 +944,7 @@ impl PathExpander for LegacyPathExpander { let addr = Address::anonymous(*aloc, *a); let mident = sp(ident_loc, ModuleIdent_::new(addr, ModuleName(n.name))); if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (ident_loc, format!("Unbound module '{}'", mident)) )); @@ -971,7 +966,7 @@ impl PathExpander for LegacyPathExpander { let mident = sp(ident_loc, ModuleIdent_::new(addr, ModuleName(n2.name))); if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (ident_loc, format!("Unbound module '{}'", mident)) )); @@ -1007,7 +1002,7 @@ impl PathExpander for LegacyPathExpander { let tn_: ModuleAccessResult = match (access, ptn_) { (Access::Pattern, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Attempted to expand a variant with the legacy path expander" ))); @@ -1042,7 +1037,7 @@ impl PathExpander for LegacyPathExpander { make_access_result(sp(name.loc, EN::Name(name)), tyargs, is_macro) } (Access::Module, single_entry!(_name, _tyargs, _is_macro)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "ICE path resolution produced an impossible path for a module" ))); @@ -1056,7 +1051,7 @@ impl PathExpander for LegacyPathExpander { // Error cases (sp!(aloc, LN::AnonymousAddress(_)), [_]) => { let diag = unexpected_address_module_error(loc, *aloc, access); - context.env.add_diag(diag); + context.add_diag(diag); return None; } (sp!(_aloc, LN::GlobalAddress(_)), [_]) => { @@ -1069,7 +1064,7 @@ impl PathExpander for LegacyPathExpander { loc, "Paths that start with `::` are not valid in legacy move.", )); - context.env.add_diag(diag); + context.add_diag(diag); return None; } // Others @@ -1077,7 +1072,7 @@ impl PathExpander for LegacyPathExpander { 
self.ide_autocomplete_suggestion(context, n1.loc); if let Some(mident) = self.aliases.module_alias_get(n1) { let n2_name = n2.name; - let (tyargs, is_macro) = if !(path.has_tyargs_last()) { + let (tyargs, is_macro) = if !path.has_tyargs_last() { let mut diag = diag!( Syntax::InvalidName, (path.tyargs_loc().unwrap(), "Invalid type argument position") @@ -1085,7 +1080,7 @@ impl PathExpander for LegacyPathExpander { diag.add_note( "Type arguments may only be used with module members", ); - context.env.add_diag(diag); + context.add_diag(diag); (None, path.is_macro()) } else { (path.take_tyargs(), path.is_macro()) @@ -1096,7 +1091,7 @@ impl PathExpander for LegacyPathExpander { is_macro.copied(), ) } else { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (n1.loc, format!("Unbound module alias '{}'", n1)) )); @@ -1120,7 +1115,7 @@ impl PathExpander for LegacyPathExpander { (path.tyargs_loc().unwrap(), "Invalid type argument position") ); diag.add_note("Type arguments may only be used with module members"); - context.env.add_diag(diag); + context.add_diag(diag); (None, path.is_macro()) } else { (path.take_tyargs(), path.is_macro()) @@ -1129,14 +1124,14 @@ impl PathExpander for LegacyPathExpander { } (_ln, []) => { let diag = ice!((loc, "Found a root path with no additional entries")); - context.env.add_diag(diag); + context.add_diag(diag); return None; } (ln, [_n1, _n2, ..]) => { self.ide_autocomplete_suggestion(context, ln.loc); let mut diag = diag!(Syntax::InvalidName, (loc, "Too many name segments")); diag.add_note("Names may only have 0, 1, or 2 segments separated by '::'"); - context.env.add_diag(diag); + context.add_diag(diag); return None; } } @@ -1157,7 +1152,7 @@ impl PathExpander for LegacyPathExpander { ice_assert!(context.env, single.is_macro.is_none(), loc, "Found macro"); match self.aliases.module_alias_get(&single.name) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, ( 
single.name.loc, @@ -1182,9 +1177,7 @@ impl PathExpander for LegacyPathExpander { } // Error cases (_ln, []) => { - context - .env - .add_diag(ice!((loc, "Found path with no path entries"))); + context.add_diag(ice!((loc, "Found path with no path entries"))); None } (ln, [n, m, ..]) => { @@ -1199,7 +1192,7 @@ impl PathExpander for LegacyPathExpander { module: ModuleName(n.name), }; let _ = module_ident(context, sp(ident_loc, pmident_)); - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::NamePositionMismatch, if path.entries.len() < 3 { (m.name.loc, "Unexpected module member access. Expected a module identifier only") @@ -1227,7 +1220,7 @@ impl PathExpander for LegacyPathExpander { info.members.insert((*name, *mident, *member)); } let annotation = IDEAnnotation::PathAutocompleteInfo(Box::new(info)); - context.env.add_ide_annotation(loc, annotation) + context.add_ide_annotation(loc, annotation) } } } diff --git a/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs b/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs index cc1b92013c358..503a7c656fd51 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs @@ -20,7 +20,7 @@ use super::ast::Attribute_; /// Gather primitive defines from module declarations, erroring on duplicates for a given base /// type or for unknown base types. 
pub fn modules( - env: &mut CompilationEnv, + env: &CompilationEnv, pre_compiled_lib_opt: Option>, modules: &UniqueMap, ) { @@ -49,7 +49,7 @@ pub fn modules( } fn check_prim_definer( - env: &mut CompilationEnv, + env: &CompilationEnv, allow_shadowing: bool, definers: &mut BTreeMap, mident: ModuleIdent, @@ -61,12 +61,16 @@ fn check_prim_definer( let Some(sp!(attr_loc, attr_)) = defines_prim_attr else { return; }; + let warning_filters = env.top_level_warning_filter_scope(); let Attribute_::Parameterized(_, params) = attr_ else { let msg = format!( "Expected a primitive type parameterization, e.g. '{}()'", DefinesPrimitive::DEFINES_PRIM ); - env.add_diag(diag!(Attributes::InvalidUsage, (*attr_loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (*attr_loc, msg)), + ); return; }; if params.len() != 1 { @@ -74,7 +78,10 @@ fn check_prim_definer( "Expected a single primitive type parameterization, e.g. '{}()'", DefinesPrimitive::DEFINES_PRIM ); - env.add_diag(diag!(Attributes::InvalidUsage, (*attr_loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (*attr_loc, msg)), + ); return; } let (_, _, sp!(param_loc, param_)) = params.into_iter().next().unwrap(); @@ -83,7 +90,10 @@ fn check_prim_definer( "Expected a primitive type parameterization, e.g. 
'{}()'", DefinesPrimitive::DEFINES_PRIM ); - env.add_diag(diag!(Attributes::InvalidUsage, (*param_loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (*param_loc, msg)), + ); return; }; let Some(prim) = BuiltinTypeName_::resolve(name.value.as_str()) else { @@ -92,18 +102,24 @@ fn check_prim_definer( DefinesPrimitive::DEFINES_PRIM, name, ); - env.add_diag(diag!(Attributes::InvalidUsage, (name.loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (name.loc, msg)), + ); return; }; if let Some(prev) = definers.get(&prim) { if !allow_shadowing { let msg = format!("Duplicate definer annotated for primitive type '{}'", prim); - env.add_diag(diag!( - Attributes::InvalidUsage, - (*attr_loc, msg), - (prev.loc, "Previously declared here") - )); + env.add_diag( + warning_filters, + diag!( + Attributes::InvalidUsage, + (*attr_loc, msg), + (prev.loc, "Previously declared here") + ), + ); } } else { definers.insert(prim, mident); diff --git a/external-crates/move/crates/move-compiler/src/expansion/translate.rs b/external-crates/move/crates/move-compiler/src/expansion/translate.rs index 40eb01fe7a98c..baa9438dd8ecf 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/translate.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/translate.rs @@ -4,7 +4,12 @@ use crate::{ diag, - diagnostics::{codes::WarningFilter, Diagnostic, WarningFilters}, + diagnostics::{ + warning_filters::{ + WarningFilter, WarningFilters, WarningFiltersScope, FILTER_ALL, FILTER_UNUSED, + }, + Diagnostic, Diagnostics, + }, editions::{self, Edition, FeatureGate, Flavor}, expansion::{ alias_map_builder::{ @@ -33,6 +38,7 @@ use crate::{ NATIVE_MODIFIER, }, shared::{ + ide::{IDEAnnotation, IDEInfo}, known_attributes::AttributePosition, string_utils::{is_pascal_case, is_upper_snake_case}, unique_map::UniqueMap, @@ -40,8 +46,8 @@ use crate::{ }, FullyCompiledProgram, }; -use move_command_line_common::parser::{parse_u16, 
parse_u256, parse_u32}; use move_core_types::account_address::AccountAddress; +use move_core_types::parsing::parser::{parse_u16, parse_u256, parse_u32}; use move_ir_types::location::*; use move_proc_macros::growing_stack; use move_symbol_pool::Symbol; @@ -65,10 +71,11 @@ type ModuleMembers = BTreeMap; pub(super) struct DefnContext<'env, 'map> { pub(super) named_address_mapping: Option<&'map NamedAddressMap>, pub(super) module_members: UniqueMap, - pub(super) env: &'env mut CompilationEnv, + pub(super) env: &'env CompilationEnv, pub(super) address_conflicts: BTreeSet, pub(super) current_package: Option, pub(super) is_source_definition: bool, + warning_filters_scope: WarningFiltersScope, } struct Context<'env, 'map> { @@ -82,7 +89,7 @@ struct Context<'env, 'map> { impl<'env, 'map> Context<'env, 'map> { fn new( - compilation_env: &'env mut CompilationEnv, + compilation_env: &'env CompilationEnv, module_members: UniqueMap, address_conflicts: BTreeSet, ) -> Self { @@ -92,6 +99,7 @@ impl<'env, 'map> Context<'env, 'map> { all_filter_alls.add(f); } } + let warning_filters_scope = compilation_env.top_level_warning_filter_scope().clone(); let defn_context = DefnContext { env: compilation_env, named_address_mapping: None, @@ -99,6 +107,7 @@ impl<'env, 'map> Context<'env, 'map> { module_members, current_package: None, is_source_definition: false, + warning_filters_scope, }; Context { defn_context, @@ -108,7 +117,7 @@ impl<'env, 'map> Context<'env, 'map> { } } - fn env(&mut self) -> &mut CompilationEnv { + fn env(&mut self) -> &CompilationEnv { self.defn_context.env } @@ -141,7 +150,7 @@ impl<'env, 'map> Context<'env, 'map> { .unwrap() .push_alias_scope(loc, new_scope); match res { - Err(diag) => self.env().add_diag(*diag), + Err(diag) => self.add_diag(*diag), Ok(unnecessaries) => unnecessary_alias_errors(self, unnecessaries), } } @@ -242,7 +251,7 @@ impl<'env, 'map> Context<'env, 'map> { pub fn spec_deprecated(&mut self, loc: Loc, is_error: bool) { let diag = 
self.spec_deprecated_diag(loc, is_error); - self.env().add_diag(diag); + self.add_diag(diag); } pub fn spec_deprecated_diag(&mut self, loc: Loc, is_error: bool) -> Diagnostic { @@ -258,6 +267,60 @@ impl<'env, 'map> Context<'env, 'map> { ) ) } + + pub fn add_diag(&self, diag: Diagnostic) { + self.defn_context.add_diag(diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.defn_context.add_diags(diags); + } + + #[allow(unused)] + pub fn extend_ide_info(&self, info: IDEInfo) { + self.defn_context.extend_ide_info(info); + } + + #[allow(unused)] + pub fn add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.defn_context.add_ide_annotation(loc, info); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.defn_context.push_warning_filter_scope(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.defn_context.pop_warning_filter_scope() + } +} + +impl DefnContext<'_, '_> { + pub(super) fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + pub(super) fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub(super) fn extend_ide_info(&self, info: IDEInfo) { + self.env.extend_ide_info(&self.warning_filters_scope, info); + } + + pub(super) fn add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.env + .add_ide_annotation(&self.warning_filters_scope, loc, info); + } + + pub(super) fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub(super) fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } } fn unnecessary_alias_errors(context: &mut Context, unnecessaries: Vec) { @@ -297,7 +360,7 @@ fn unnecessary_alias_error(context: &mut Context, unnecessary: UnnecessaryAlias) // nothing to point to for the default case diag.add_secondary_label((prev, "The same alias was previously declared 
here")) } - context.env().add_diag(diag); + context.add_diag(diag); } /// We mark named addresses as having a conflict if there is not a bidirectional mapping between @@ -402,12 +465,13 @@ fn default_aliases(context: &mut Context) -> AliasMapBuilder { //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: P::Program, ) -> E::Program { let address_conflicts = compute_address_conflicts(pre_compiled_lib.clone(), &prog); + let warning_filters_scope = compilation_env.top_level_warning_filter_scope().clone(); let mut member_computation_context = DefnContext { env: compilation_env, named_address_mapping: None, @@ -415,6 +479,7 @@ pub fn program( address_conflicts, current_package: None, is_source_definition: false, + warning_filters_scope, }; let module_members = { @@ -477,7 +542,7 @@ pub fn program( // should never fail if let Err(diag) = path_expander.push_alias_scope(Loc::invalid(), aliases) { - context.env().add_diag(*diag); + context.add_diag(*diag); } context.defn_context.named_address_mapping = Some(named_address_map); @@ -511,7 +576,7 @@ pub fn program( let aliases = named_addr_map_to_alias_map_builder(&mut context, named_address_map); // should never fail if let Err(diag) = path_expander.push_alias_scope(Loc::invalid(), aliases) { - context.env().add_diag(*diag); + context.add_diag(*diag); } context.defn_context.named_address_mapping = Some(named_address_map); context.path_expander = Some(Box::new(path_expander)); @@ -611,7 +676,7 @@ fn top_level_address_( // This should have been handled elsewhere in alias resolution for user-provided paths, and // should never occur in compiler-generated ones. 
P::LeadingNameAccess_::GlobalAddress(name) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Found an address in top-level address position that uses a global name" ))); @@ -622,7 +687,7 @@ fn top_level_address_( Some(addr) => make_address(context, name, loc, addr), None => { if name_res.is_ok() { - context.env.add_diag(address_without_value_error( + context.add_diag(address_without_value_error( suggest_declaration, loc, &name, @@ -650,7 +715,7 @@ pub(super) fn top_level_address_opt( // This should have been handled elsewhere in alias resolution for user-provided paths, and // should never occur in compiler-generated ones. P::LeadingNameAccess_::GlobalAddress(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Found an address in top-level address position that uses a global name" ))); @@ -730,7 +795,7 @@ fn check_module_address( } else { "Multiple addresses specified for module" }; - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (other_loc, msg), (loc, "Address previously specified here") @@ -750,7 +815,7 @@ fn duplicate_module( let old_mident = module_map.get_key(&mident).unwrap(); let dup_msg = format!("Duplicate definition for module '{}'", mident); let prev_msg = format!("Module previously defined here, with '{}'", old_mident); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (mident.loc, dup_msg), (old_loc, prev_msg), @@ -791,9 +856,7 @@ fn set_module_address( address 'module

::{}''", module_name ); - context - .env() - .add_diag(diag!(Declarations::InvalidModule, (loc, msg))); + context.add_diag(diag!(Declarations::InvalidModule, (loc, msg))); Address::anonymous(loc, NumericalAddress::DEFAULT_ERROR_ADDRESS) } }) @@ -819,9 +882,7 @@ fn module_( let config = context.env().package_config(package_name); warning_filter.union(&config.warning_filter); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); assert!(context.address.is_none()); assert!(address.is_none()); set_module_address(context, &name, module_address); @@ -831,9 +892,7 @@ fn module_( "Invalid module name '{}'. Module names cannot start with '_'", name, ); - context - .env() - .add_diag(diag!(Declarations::InvalidName, (name.loc(), msg))); + context.add_diag(diag!(Declarations::InvalidName, (name.loc(), msg))); } let name_loc = name.0.loc; @@ -906,7 +965,7 @@ fn module_( functions, warning_filter, }; - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (current_module, def) } @@ -936,15 +995,13 @@ fn check_visibility_modifiers( let loc = friend_decl.loc; let diag = if edition == Edition::E2024_MIGRATION { for aloc in &friend_decl.attr_locs { - context - .env() - .add_diag(diag!(Migration::RemoveFriend, (*aloc, friend_msg))); + context.add_diag(diag!(Migration::RemoveFriend, (*aloc, friend_msg))); } diag!(Migration::RemoveFriend, (loc, friend_msg)) } else { diag!(Editions::DeprecatedFeature, (loc, friend_msg)) }; - context.env().add_diag(diag); + context.add_diag(diag); } for (_, _, function) in functions { let E::Visibility::Friend(loc) = function.visibility else { @@ -955,7 +1012,7 @@ fn check_visibility_modifiers( } else { diag!(Editions::DeprecatedFeature, (loc, pub_msg)) }; - context.env().add_diag(diag); + context.add_diag(diag); } } @@ -985,7 +1042,7 @@ fn check_visibility_modifiers( ); let package_definition_msg = format!("'{}' visibility used here", 
E::Visibility::PACKAGE); for (_, _, friend) in friends { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidVisibilityModifier, (friend.loc, friend_error_msg.clone()), ( @@ -1007,7 +1064,7 @@ fn check_visibility_modifiers( for (_, _, function) in functions { match function.visibility { E::Visibility::Friend(loc) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidVisibilityModifier, (loc, friend_error_msg.clone()), ( @@ -1017,7 +1074,7 @@ fn check_visibility_modifiers( )); } E::Visibility::Package(loc) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidVisibilityModifier, (loc, package_error_msg.clone()), ( @@ -1058,9 +1115,7 @@ fn known_attributes( e.g. #[{ext}({n})]", ext = known_attributes::ExternalAttribute::EXTERNAL ); - context - .env() - .add_diag(diag!(Declarations::UnknownAttribute, (loc, msg))); + context.add_diag(diag!(Declarations::UnknownAttribute, (loc, msg))); None } sp!(loc, E::AttributeName_::Known(n)) => { @@ -1111,9 +1166,7 @@ fn unique_attributes( let msg = format!( "Known attribute '{known}' is not expected in a nested attribute position" ); - context - .env() - .add_diag(diag!(Declarations::InvalidAttribute, (nloc, msg))); + context.add_diag(diag!(Declarations::InvalidAttribute, (nloc, msg))); continue; } @@ -1133,7 +1186,7 @@ fn unique_attributes( "Expected to be used with one of the following: {}", all_expected ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidAttribute, (nloc, msg), (nloc, expected_msg) @@ -1151,7 +1204,7 @@ fn unique_attributes( } if let Err((_, old_loc)) = attr_map.add(sp(nloc, name_), sp(loc, attr_)) { let msg = format!("Duplicate attribute '{}' attached to the same item", name_); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, msg), (old_loc, "Attribute previously given here"), @@ -1235,9 +1288,7 @@ fn warning_filter(context: &mut Context, attributes: 
&E::Attributes) -> WarningF DiagnosticAttribute::ALLOW, n ); - context - .env() - .add_diag(diag!(Declarations::InvalidAttribute, (inner_attr_loc, msg))); + context.add_diag(diag!(Declarations::InvalidAttribute, (inner_attr_loc, msg))); (None, vec![*n]) } }; @@ -1262,9 +1313,7 @@ fn warning_filter(context: &mut Context, attributes: &E::Attributes) -> WarningF ) } }; - context - .env() - .add_diag(diag!(Attributes::ValueWarning, (nloc, msg))); + context.add_diag(diag!(Attributes::ValueWarning, (nloc, msg))); continue; }; for f in filters { @@ -1295,9 +1344,7 @@ fn get_allow_attribute_inners<'a>( .to_str() .unwrap(), ); - context - .env() - .add_diag(diag!(Attributes::ValueWarning, (allow_attr.loc, msg))); + context.add_diag(diag!(Attributes::ValueWarning, (allow_attr.loc, msg))); None } } @@ -1322,9 +1369,7 @@ fn prefixed_warning_filters( prefix, n ); - context - .env() - .add_diag(diag!(Attributes::ValueWarning, (*loc, msg))); + context.add_diag(diag!(Attributes::ValueWarning, (*loc, msg))); *n } }) @@ -1534,7 +1579,7 @@ fn use_( otherwise they must internal to declared scope.", P::Visibility::PUBLIC ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg), (vis_loc, vis_msg) @@ -1592,7 +1637,7 @@ fn module_use( P::ModuleUse::Module(alias_opt) => { let mident = module_ident(&mut context.defn_context, in_mident); if !context.defn_context.module_members.contains_key(&mident) { - context.env().add_diag(unbound_module(&mident)); + context.add_diag(unbound_module(&mident)); return; }; let alias = alias_opt @@ -1605,7 +1650,7 @@ fn module_use( let members = match context.defn_context.module_members.get(&mident) { Some(members) => members, None => { - context.env().add_diag(unbound_module(&mident)); + context.add_diag(unbound_module(&mident)); return; } }; @@ -1644,7 +1689,7 @@ fn module_use( "Invalid 'use'. 
Unbound member '{}' in module '{}'", member, mident ); - context.env().add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModuleMember, (member.loc, msg), (mloc, format!("Module '{}' declared here", mident)), @@ -1686,7 +1731,7 @@ fn module_use( P::ModuleUse::Partial { .. } => { let mident = module_ident(&mut context.defn_context, in_mident); if !context.defn_context.module_members.contains_key(&mident) { - context.env().add_diag(unbound_module(&mident)); + context.add_diag(unbound_module(&mident)); return; }; add_module_alias!(mident, mident.value.module.0) @@ -1762,7 +1807,7 @@ fn duplicate_module_alias(context: &mut Context, old_loc: Loc, alias: Name) { "Duplicate module alias '{}'. Module aliases must be unique within a given namespace", alias ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (alias.loc, msg), (old_loc, "Alias previously defined here"), @@ -1774,7 +1819,7 @@ fn duplicate_module_member(context: &mut Context, old_loc: Loc, alias: Name) { "Duplicate module member or alias '{}'. 
Top level names in a namespace must be unique", alias ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (alias.loc, msg), (old_loc, "Alias previously defined here"), @@ -1803,7 +1848,7 @@ fn unused_alias(context: &mut Context, _kind: &str, alias: Name) { alias )); } - context.env().add_diag(diag); + context.add_diag(diag); } //************************************************************************************************** @@ -1836,9 +1881,7 @@ fn struct_def_( } = pstruct; let attributes = flatten_attributes(context, AttributePosition::Struct, attributes); let warning_filter = warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, pty_params); context.push_type_parameters(type_parameters.iter().map(|tp| &tp.name)); let abilities = ability_set(context, "modifier", abilities_vec); @@ -1853,7 +1896,7 @@ fn struct_def_( fields, }; context.pop_alias_scope(None); - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, sdef) } @@ -1874,7 +1917,7 @@ fn struct_fields( for (idx, (field, pt)) in pfields_vec.into_iter().enumerate() { let t = type_(context, pt); if let Err((field, old_loc)) = field_map.add(field, (idx, t)) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, ( field.loc(), @@ -1920,9 +1963,7 @@ fn enum_def_( } = penum; let attributes = flatten_attributes(context, AttributePosition::Enum, attributes); let warning_filter = warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, pty_params); context.push_type_parameters(type_parameters.iter().map(|tp| &tp.name)); let abilities = ability_set(context, "modifier", 
abilities_vec); @@ -1937,7 +1978,7 @@ fn enum_def_( variants, }; context.pop_alias_scope(None); - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, edef) } @@ -1949,7 +1990,7 @@ fn enum_variants( ) -> UniqueMap { let mut variants = UniqueMap::new(); if pvariants.is_empty() { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidEnum, (eloc, "An 'enum' must define at least one variant") )) @@ -1962,7 +2003,7 @@ fn enum_variants( "Duplicate definition for variant '{}' in enum '{}'", vname, ename ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, msg), (old_loc.1, "Variant previously defined here") @@ -2000,7 +2041,7 @@ fn variant_fields( for (idx, (field, pt)) in pfields_vec.into_iter().enumerate() { let t = type_(context, pt); if let Err((field, old_loc)) = field_map.add(field, (idx, t)) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, ( field.loc(), @@ -2034,7 +2075,7 @@ fn friend( unique", mident ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (friend.loc, msg), (old_friend.loc, "Friend previously declared here"), @@ -2096,9 +2137,7 @@ fn constant_( } = pconstant; let attributes = flatten_attributes(context, AttributePosition::Constant, pattributes); let warning_filter = warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let signature = type_(context, psignature); let value = *exp(context, Box::new(pvalue)); let constant = E::Constant { @@ -2109,7 +2148,7 @@ fn constant_( signature, value, }; - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, constant) } @@ -2147,9 +2186,7 @@ fn function_( } = pfunction; let attributes = flatten_attributes(context, AttributePosition::Function, pattributes); let warning_filter = 
warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); if let (Some(entry_loc), Some(macro_loc)) = (entry, macro_) { let e_msg = format!( "Invalid function declaration. \ @@ -2157,7 +2194,7 @@ fn function_( are fully-expanded inline during compilation" ); let m_msg = format!("Function declared as '{MACRO_MODIFIER}' here"); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFunction, (entry_loc, e_msg), (macro_loc, m_msg), @@ -2169,7 +2206,7 @@ fn function_( '{NATIVE_MODIFIER}' functions cannot be '{MACRO_MODIFIER}'", ); let m_msg = format!("Function declared as '{MACRO_MODIFIER}' here"); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFunction, (*native_loc, n_msg), (macro_loc, m_msg), @@ -2208,7 +2245,7 @@ fn function_( body, }; context.pop_alias_scope(None); - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, fdef) } @@ -2267,7 +2304,7 @@ fn ability_set(context: &mut Context, case: &str, abilities_vec: Vec) - for ability in abilities_vec { let loc = ability.loc; if let Err(prev_loc) = set.add(ability) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, format!("Duplicate '{}' ability {}", ability, case)), (prev_loc, "Ability previously given here") @@ -2501,9 +2538,7 @@ fn exp(context: &mut Context, pe: Box) -> Box { PE::Name(pn) if pn.value.has_tyargs() => { let msg = "Expected name to be followed by a brace-enclosed list of field expressions \ or a parenthesized list of arguments for a function call"; - context - .env() - .add_diag(diag!(NameResolution::NamePositionMismatch, (loc, msg))); + context.add_diag(diag!(NameResolution::NamePositionMismatch, (loc, msg))); EE::UnresolvedError } PE::Name(pn) => { @@ -2548,11 +2583,8 @@ fn exp(context: &mut Context, pe: Box) -> Box { PE::IfElse(pb, pt, pf_opt) => { let eb 
= exp(context, pb); let et = exp(context, pt); - let ef = match pf_opt { - None => Box::new(sp(loc, EE::Unit { trailing: false })), - Some(pf) => exp(context, pf), - }; - EE::IfElse(eb, et, ef) + let ef_opt = pf_opt.map(|pf| exp(context, pf)); + EE::IfElse(eb, et, ef_opt) } PE::Match(subject, sp!(aloc, arms)) => EE::Match( exp(context, subject), @@ -2603,7 +2635,8 @@ fn exp(context: &mut Context, pe: Box) -> Box { Some(LValue::FieldMutate(edotted)) => EE::FieldMutate(edotted, er), } } - PE::Abort(pe) => EE::Abort(exp(context, pe)), + PE::Abort(None) => EE::Abort(None), + PE::Abort(Some(pe)) => EE::Abort(Some(exp(context, pe))), PE::Return(name_opt, pe_opt) => { let ev = match pe_opt { None => Box::new(sp(loc, EE::Unit { trailing: false })), @@ -2683,7 +2716,7 @@ fn exp(context: &mut Context, pe: Box) -> Box { consider updating your Move edition to '{valid_editions}'" )); diag.add_note(editions::UPGRADE_NOTE); - context.env().add_diag(diag); + context.add_diag(diag); EE::UnresolvedError } else { match exp_dotted(context, Box::new(sp(loc, pdotted_))) { @@ -2775,9 +2808,7 @@ fn exp_cast(context: &mut Context, in_parens: bool, plhs: Box, pty: P::T .check_feature(current_package, FeatureGate::NoParensCast, loc); if supports_feature && ambiguous_cast(&plhs) { let msg = "Potentially ambiguous 'as'. Add parentheses to disambiguate"; - context - .env() - .add_diag(diag!(Syntax::AmbiguousCast, (loc, msg))); + context.add_diag(diag!(Syntax::AmbiguousCast, (loc, msg))); } } EE::Cast(exp(context, plhs), type_(context, pty)) @@ -2807,9 +2838,7 @@ fn maybe_labeled_exp( _ => { let msg = "Invalid label. 
Labels can only be used on 'while', 'loop', or block '{{}}' \ expressions"; - context - .env() - .add_diag(diag!(Syntax::InvalidLabel, (loc, msg))); + context.add_diag(diag!(Syntax::InvalidLabel, (loc, msg))); E::Exp_::UnresolvedError } }; @@ -2823,7 +2852,7 @@ fn ensure_unique_label( label_opt: Option, ) { if let Some(old_label) = label_opt { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidLabel, (loc, "Multiple labels for a single expression"), (old_label.0.loc, "Label previously given here"), @@ -2869,7 +2898,7 @@ fn move_or_copy_path_(context: &mut Context, case: PathCase, pe: Box) -> if !matches!(&inner.value, E::Exp_::Name(_, _)) { let cmsg = format!("Invalid '{}' of expression", case.case()); let emsg = "Expected a name or path access, e.g. 'x' or 'e.f'"; - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidMoveOrCopy, (cloc, cmsg), (inner.loc, emsg) @@ -2940,7 +2969,7 @@ fn check_ellipsis_usage(context: &mut Context, ellipsis_locs: &[Loc]) { diag.add_secondary_label((*loc, "Ellipsis pattern used again here")); } diag.add_note("An ellipsis pattern can only appear once in a constructor's pattern."); - context.env().add_diag(diag); + context.add_diag(diag); } } @@ -2974,7 +3003,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M EM::Variant(_, _) | EM::ModuleAccess(_, _) => Some(name), EM::Name(_) if identifier_okay => Some(name), EM::Name(_) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::UnexpectedToken, ( name.loc, @@ -2999,7 +3028,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M } = context.name_access_chain_to_module_access(Access::Pattern, name_chain)?; let name = head_ctor_okay(context, access, identifier_okay)?; if let Some(loc) = is_macro { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidMacro, (loc, "Macros are not allowed in patterns.") )); @@ -3105,14 +3134,14 @@ fn 
match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M resolve this constant's name", ); } - context.env().add_diag(diag); + context.add_diag(diag); error_pattern!() } else { if let Some(_tys) = pts_opt { let msg = "Invalid type arguments on a pattern variable"; let mut diag = diag!(Declarations::InvalidName, (name.loc, msg)); diag.add_note("Type arguments cannot appear on pattern variables"); - context.env().add_diag(diag); + context.add_diag(diag); } sp(loc, EP::Binder(mutability(context, loc, mut_), Var(name))) } @@ -3122,7 +3151,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M let msg = "'mut' can only be used with variable bindings in patterns"; let nmsg = "Expected a valid 'enum' variant, 'struct', or 'const', not a variable"; - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidName, (mloc, msg), (head_ctor_name.loc, nmsg) @@ -3156,7 +3185,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M ), PP::At(x, inner) => { if x.is_underscore() { - context.env().add_diag(diag!( + context.add_diag(diag!( NameResolution::InvalidPattern, (x.loc(), "Can't use '_' as a binder in an '@' pattern") )); @@ -3183,42 +3212,42 @@ pub(super) fn value(context: &mut DefnContext, sp!(loc, pvalue_): P::Value) -> O PV::Num(s) if s.ends_with("u8") => match parse_u8(&s[..s.len() - 2]) { Ok((u, _format)) => EV::U8(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u8'")); + context.add_diag(num_too_big_error(loc, "'u8'")); return None; } }, PV::Num(s) if s.ends_with("u16") => match parse_u16(&s[..s.len() - 3]) { Ok((u, _format)) => EV::U16(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u16'")); + context.add_diag(num_too_big_error(loc, "'u16'")); return None; } }, PV::Num(s) if s.ends_with("u32") => match parse_u32(&s[..s.len() - 3]) { Ok((u, _format)) => EV::U32(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u32'")); + 
context.add_diag(num_too_big_error(loc, "'u32'")); return None; } }, PV::Num(s) if s.ends_with("u64") => match parse_u64(&s[..s.len() - 3]) { Ok((u, _format)) => EV::U64(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u64'")); + context.add_diag(num_too_big_error(loc, "'u64'")); return None; } }, PV::Num(s) if s.ends_with("u128") => match parse_u128(&s[..s.len() - 4]) { Ok((u, _format)) => EV::U128(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u128'")); + context.add_diag(num_too_big_error(loc, "'u128'")); return None; } }, PV::Num(s) if s.ends_with("u256") => match parse_u256(&s[..s.len() - 4]) { Ok((u, _format)) => EV::U256(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u256'")); + context.add_diag(num_too_big_error(loc, "'u256'")); return None; } }, @@ -3226,7 +3255,7 @@ pub(super) fn value(context: &mut DefnContext, sp!(loc, pvalue_): P::Value) -> O PV::Num(s) => match parse_u256(&s) { Ok((u, _format)) => EV::InferredNum(u), Err(_) => { - context.env.add_diag(num_too_big_error( + context.add_diag(num_too_big_error( loc, "the largest possible integer type, 'u256'", )); @@ -3237,14 +3266,14 @@ pub(super) fn value(context: &mut DefnContext, sp!(loc, pvalue_): P::Value) -> O PV::HexString(s) => match hex_string::decode(loc, &s) { Ok(v) => EV::Bytearray(v), Err(e) => { - context.env.add_diag(*e); + context.add_diag(*e); return None; } }, PV::ByteString(s) => match byte_string::decode(loc, &s) { Ok(v) => EV::Bytearray(v), Err(e) => { - context.env.add_diags(e); + context.add_diags(e); return None; } }, @@ -3281,7 +3310,7 @@ fn named_fields( let mut fmap = UniqueMap::new(); for (idx, (field, x)) in xs.into_iter().enumerate() { if let Err((field, old_loc)) = fmap.add(field, (idx, x)) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, format!("Invalid {}", case)), ( @@ -3398,7 +3427,7 @@ fn lvalues(context: &mut Context, e: Box) -> Option { L::FieldMutate(dotted) } 
PE::Index(_, _) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidLValue, ( loc, @@ -3425,14 +3454,14 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { "If you are trying to unpack a struct, try adding fields, e.g.'{} {{}}'", name )); - context.env().add_diag(diag); + context.add_diag(diag); None } Some(access_result!(_, _ptys_opt, Some(_))) => { let msg = "Unexpected assignment of name with macro invocation"; let mut diag = diag!(Syntax::InvalidLValue, (loc, msg)); diag.add_note("Macro invocation '!' must appear on an invocation"); - context.env().add_diag(diag); + context.add_diag(diag); None } Some(access_result!(sp!(_, name @ M::Name(_)), None, None)) => { @@ -3445,7 +3474,7 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { "If you are trying to unpack a struct, try adding fields, e.g.'{} {{}}'", name )); - context.env().add_diag(diag); + context.add_diag(diag); None } Some(access_result!(sp!(loc, M::Variant(_, _)), _tys_opt, _is_macro)) => { @@ -3457,7 +3486,7 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { let msg = "Unexpected assignment of variant"; let mut diag = diag!(Syntax::InvalidLValue, (loc, msg)); diag.add_note("If you are trying to unpack an enum variant, use 'match'"); - context.env().add_diag(diag); + context.add_diag(diag); None } else { assert!(context.env().has_errors()); @@ -3507,7 +3536,7 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { )) } _ => { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidLValue, ( loc, diff --git a/external-crates/move/crates/move-compiler/src/hlir/ast.rs b/external-crates/move/crates/move-compiler/src/hlir/ast.rs index 36db981f504e8..91db32f3fde74 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/ast.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + 
diagnostics::warning_filters::WarningFilters, expansion::ast::{ ability_modifiers_ast_debug, AbilitySet, Attributes, Friend, ModuleIdent, Mutability, TargetKind, diff --git a/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs b/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs index 00e1f6109a5ee..37134f460b777 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs @@ -3,6 +3,10 @@ use crate::{ diag, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, expansion::ast::ModuleIdent, ice, naming::ast::{self as N, BlockLabel}, @@ -188,15 +192,37 @@ impl ControlFlow { } struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, + warning_filters_scope: WarningFiltersScope, // loops: Vec, } impl<'env> Context<'env> { - pub fn new(env: &'env mut CompilationEnv) -> Self { + pub fn new(env: &'env CompilationEnv) -> Self { // let loops = vec![]; // Context { env , loops } - Context { env } + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + } + } + + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() } fn maybe_report_value_error(&mut self, error: &mut ControlFlow) -> bool { @@ -208,8 +234,7 @@ impl<'env> Context<'env> { reported, } if !*reported => { *reported = true; - self.env - .add_diag(diag!(UnusedItem::DeadCode, (*loc, VALUE_UNREACHABLE_MSG))); + self.add_diag(diag!(UnusedItem::DeadCode, (*loc, 
VALUE_UNREACHABLE_MSG))); true } CF::Divergent { .. } | CF::None | CF::Possible => false, @@ -225,8 +250,7 @@ impl<'env> Context<'env> { reported, } if !*reported => { *reported = true; - self.env - .add_diag(diag!(UnusedItem::DeadCode, (*loc, DIVERGENT_MSG))); + self.add_diag(diag!(UnusedItem::DeadCode, (*loc, DIVERGENT_MSG))); true } CF::Divergent { .. } | CF::None | CF::Possible => false, @@ -250,7 +274,7 @@ impl<'env> Context<'env> { if let Some(next_loc) = next_stmt { diag.add_secondary_label((*next_loc, UNREACHABLE_MSG)); } - self.env.add_diag(diag); + self.add_diag(diag); true } CF::Divergent { .. } | CF::None | CF::Possible => false, @@ -271,7 +295,7 @@ impl<'env> Context<'env> { reported, } if !*reported => { *reported = true; - self.env.add_diag(diag!( + self.add_diag(diag!( UnusedItem::TrailingSemi, (tail_exp.exp.loc, SEMI_MSG), (*loc, DIVERGENT_MSG), @@ -344,7 +368,7 @@ fn infinite_loop(loc: Loc) -> ControlFlow { // Entry //************************************************************************************************** -pub fn program(compilation_env: &mut CompilationEnv, prog: &T::Program) { +pub fn program(compilation_env: &CompilationEnv, prog: &T::Program) { let mut context = Context::new(compilation_env); modules(&mut context, &prog.modules); } @@ -356,16 +380,14 @@ fn modules(context: &mut Context, modules: &UniqueMap ControlFlow { // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => do_if( + E::IfElse(test, conseq, alt_opt) => do_if( context, - (eloc, test, conseq, alt), + (eloc, test, conseq, alt_opt.as_deref()), /* tail_pos */ true, tail, |context, flow| context.maybe_report_tail_error(flow), @@ -452,9 +472,7 @@ fn tail(context: &mut Context, e: &T::Exp) -> ControlFlow { |context, flow| context.maybe_report_tail_error(flow), ), E::VariantMatch(..) 
=> { - context - .env - .add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); + context.add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); CF::None } @@ -508,7 +526,7 @@ fn tail_block(context: &mut Context, seq: &VecDeque) -> Control None => ControlFlow::None, Some(sp!(_, S::Seq(last))) => tail(context, last), Some(sp!(loc, _)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, "ICE last sequence item should have been an exp in dead code analysis" ))); @@ -547,7 +565,7 @@ fn value(context: &mut Context, e: &T::Exp) -> ControlFlow { // ----------------------------------------------------------------------------------------- E::IfElse(test, conseq, alt) => do_if( context, - (eloc, test, conseq, alt), + (eloc, test, conseq, alt.as_deref()), /* tail_pos */ false, value, |context, flow| context.maybe_report_value_error(flow), @@ -560,9 +578,7 @@ fn value(context: &mut Context, e: &T::Exp) -> ControlFlow { |context, flow| context.maybe_report_value_error(flow), ), E::VariantMatch(_subject, _, _arms) => { - context - .env - .add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); + context.add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); CF::None } E::While(..) 
=> statement(context, e), @@ -618,7 +634,7 @@ fn value(context: &mut Context, e: &T::Exp) -> ControlFlow { context.maybe_report_value_error(&mut flow); } T::ExpListItem::Splat(_, _, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *eloc, "ICE splat exp unsupported by dead code analysis" ))); @@ -686,7 +702,7 @@ fn value_block(context: &mut Context, seq: &VecDeque) -> Contro None => ControlFlow::None, Some(sp!(_, S::Seq(last))) => value(context, last), Some(sp!(loc, _)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, "ICE last sequence item should have been an exp in dead code analysis" ))); @@ -726,7 +742,7 @@ fn statement(context: &mut Context, e: &T::Exp) -> ControlFlow { // about the final, total view of them. E::IfElse(test, conseq, alt) => do_if( context, - (eloc, test, conseq, alt), + (eloc, test, conseq, alt.as_deref()), /* tail_pos */ false, statement, |_, _| false, @@ -739,9 +755,7 @@ fn statement(context: &mut Context, e: &T::Exp) -> ControlFlow { |_, _| false, ), E::VariantMatch(_subject, _, _arms) => { - context - .env - .add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); + context.add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); CF::None } E::While(name, test, body) => { @@ -826,9 +840,7 @@ fn statement(context: &mut Context, e: &T::Exp) -> ControlFlow { // odds and ends -- things we need to deal with but that don't do much // ----------------------------------------------------------------------------------------- E::Use(_) => { - context - .env - .add_diag(ice!((*eloc, "ICE found unexpanded use"))); + context.add_diag(ice!((*eloc, "ICE found unexpanded use"))); CF::None } } @@ -910,7 +922,7 @@ fn has_trailing_unit(seq: &VecDeque) -> bool { fn do_if( context: &mut Context, - (loc, test, conseq, alt): (&Loc, &T::Exp, &T::Exp, &T::Exp), + (loc, test, conseq, alt_opt): (&Loc, &T::Exp, &T::Exp, Option<&T::Exp>), tail_pos: bool, arm_recur: F1, arm_error: F2, @@ -926,10 +938,15 @@ where 
}; let conseq_flow = arm_recur(context, conseq); - let alt_flow = arm_recur(context, alt); + let alt_flow = alt_opt + .map(|alt| arm_recur(context, alt)) + .unwrap_or(CF::None); if tail_pos && matches!(conseq.ty, sp!(_, N::Type_::Unit | N::Type_::Anything)) - && matches!(alt.ty, sp!(_, N::Type_::Unit | N::Type_::Anything)) + && matches!( + alt_opt.map(|alt| &alt.ty), + None | Some(sp!(_, N::Type_::Unit | N::Type_::Anything)) + ) { return CF::None; }; diff --git a/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs b/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs index 2ffad708230e8..2783fce778fd9 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs @@ -724,7 +724,7 @@ fn make_arm_unpack( let Some((queue_entries, unpack)) = arm_variant_unpack(context, None, ploc, m, e, tys, v, fs, entry) else { - context.hlir_context.env.add_diag(ice!(( + context.hlir_context.add_diag(ice!(( ploc, "Did not build an arm unpack for a value variant" ))); @@ -750,7 +750,7 @@ fn make_arm_unpack( let Some((queue_entries, unpack)) = arm_struct_unpack(context, None, ploc, m, s, tys, fs, entry) else { - context.hlir_context.env.add_diag(ice!(( + context.hlir_context.add_diag(ice!(( ploc, "Did not build an arm unpack for a value struct" ))); @@ -1277,7 +1277,7 @@ fn make_if_else(test: T::Exp, conseq: T::Exp, alt: T::Exp, result_ty: Type) -> T result_ty, sp( loc, - T::UnannotatedExp_::IfElse(Box::new(test), Box::new(conseq), Box::new(alt)), + T::UnannotatedExp_::IfElse(Box::new(test), Box::new(conseq), Some(Box::new(alt))), ), ) } diff --git a/external-crates/move/crates/move-compiler/src/hlir/translate.rs b/external-crates/move/crates/move-compiler/src/hlir/translate.rs index 26c07e9d20152..1c5441c387c6f 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/translate.rs +++ 
b/external-crates/move/crates/move-compiler/src/hlir/translate.rs @@ -4,6 +4,10 @@ use crate::{ debug_display, debug_display_verbose, diag, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, editions::{FeatureGate, Flavor}, expansion::ast::{self as E, Fields, ModuleIdent, Mutability, TargetKind}, hlir::{ @@ -127,9 +131,10 @@ pub(super) struct HLIRDebugFlags { } pub(super) struct Context<'env> { - pub env: &'env mut CompilationEnv, + pub env: &'env CompilationEnv, pub info: Arc, pub debug: HLIRDebugFlags, + warning_filters_scope: WarningFiltersScope, current_package: Option, function_locals: UniqueMap, signature: Option, @@ -142,7 +147,7 @@ pub(super) struct Context<'env> { impl<'env> Context<'env> { pub fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, _pre_compiled_lib_opt: Option>, prog: &T::Program, ) -> Self { @@ -154,8 +159,10 @@ impl<'env> Context<'env> { match_specialization: false, match_work_queue: false, }; + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, info: prog.info.clone(), debug, current_package: None, @@ -168,6 +175,23 @@ impl<'env> Context<'env> { } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + pub fn has_empty_locals(&self) -> bool { self.function_locals.is_empty() } @@ -250,7 +274,7 @@ impl<'env> Context<'env> { } impl MatchContext for Context<'_> { - fn env(&mut self) -> &mut CompilationEnv { + fn env(&mut self) -> &CompilationEnv { self.env } @@ -288,7 +312,7 @@ impl MatchContext for Context<'_> { 
//************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: T::Program, ) -> H::Program { @@ -337,7 +361,7 @@ fn module( constants: tconstants, } = mdef; context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let structs = tstructs.map(|name, s| struct_def(context, name, s)); let enums = tenums.map(|name, s| enum_def(context, name, s)); @@ -353,7 +377,7 @@ fn module( gen_unused_warnings(context, target_kind, &structs); context.current_package = None; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); ( module_ident, H::ModuleDefinition { @@ -391,10 +415,10 @@ fn function(context: &mut Context, _name: FunctionName, f: T::Function) -> H::Fu body, } = f; assert!(macro_.is_none(), "ICE macros filtered above"); - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let signature = function_signature(context, signature); let body = function_body(context, &signature, _name, body); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::Function { warning_filter, index, @@ -499,7 +523,7 @@ fn constant(context: &mut Context, _name: ConstantName, cdef: T::Constant) -> H: signature: tsignature, value: tvalue, } = cdef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let signature = base_type(context, tsignature); let eloc = tvalue.exp.loc; let tseq = { @@ -513,7 +537,7 @@ fn constant(context: &mut Context, _name: ConstantName, cdef: T::Constant) -> H: return_type: H::Type_::base(signature.clone()), }; let (locals, body) = function_body_defined(context, &function_signature, loc, tseq); - 
context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::Constant { warning_filter, index, @@ -542,9 +566,9 @@ fn struct_def( type_parameters, fields, } = sdef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let fields = struct_fields(context, fields); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::StructDefinition { warning_filter, index, @@ -586,13 +610,13 @@ fn enum_def( type_parameters, variants, } = edef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let variants = variants.map(|_, defn| H::VariantDefinition { index: defn.index, loc: defn.loc, fields: variant_fields(context, defn.fields), }); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::EnumDefinition { warning_filter, index, @@ -648,7 +672,7 @@ fn base_type(context: &mut Context, sp!(loc, nb_): N::Type) -> H::BaseType { use N::Type_ as NT; let b_ = match nb_ { NT::Var(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!( "ICE type inf. 
var not expanded: {}", @@ -658,7 +682,7 @@ fn base_type(context: &mut Context, sp!(loc, nb_): N::Type) -> H::BaseType { return error_base_type(loc); } NT::Apply(None, _, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!("ICE kind not expanded: {}", debug_display_verbose!(nb_)) ))); @@ -669,7 +693,7 @@ fn base_type(context: &mut Context, sp!(loc, nb_): N::Type) -> H::BaseType { NT::UnresolvedError => HB::UnresolvedError, NT::Anything => HB::Unreachable, NT::Ref(_, _) | NT::Unit | NT::Fun(_, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!( "ICE base type constraint failed: {}", @@ -716,7 +740,7 @@ fn type_(context: &mut Context, sp!(loc, ty_): N::Type) -> H::Type { let t_ = match ty_ { NT::Unit => HT::Unit, NT::Apply(None, _, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!("ICE kind not expanded: {}", debug_display_verbose!(ty_)) ))); @@ -793,11 +817,12 @@ fn tail( // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => { + E::IfElse(test, conseq, alt_opt) => { let cond = value(context, block, Some(&tbool(eloc)), *test); let mut if_block = make_block!(); let conseq_exp = tail(context, &mut if_block, Some(&out_type), *conseq); let mut else_block = make_block!(); + let alt = alt_opt.unwrap_or_else(|| Box::new(typing_unit_exp(eloc))); let alt_exp = tail(context, &mut else_block, Some(&out_type), *alt); let (binders, bound_exp) = make_binders(context, eloc, out_type.clone()); @@ -971,9 +996,7 @@ fn tail( | E::Continue(_) | E::Assign(_, _, _) | E::Mutate(_, _) => { - context - .env - .add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); + context.add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); None } @@ -1003,9 +1026,7 @@ fn tail_block( None => None, Some(sp!(_, 
S::Seq(last))) => tail(context, block, expected_type, *last), Some(sp!(loc, _)) => { - context - .env - .add_diag(ice!((loc, "ICE statement mishandled in HLIR lowering"))); + context.add_diag(ice!((loc, "ICE statement mishandled in HLIR lowering"))); None } } @@ -1067,18 +1088,14 @@ fn value( let [cond_item, code_item]: [TI; 2] = match arguments.exp.value { E::ExpList(arg_list) => arg_list.try_into().unwrap(), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; let (econd, ecode) = match (cond_item, code_item) { (TI::Single(econd, _), TI::Single(ecode, _)) => (econd, ecode), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; @@ -1105,18 +1122,14 @@ fn value( let [cond_item, code_item]: [TI; 2] = match arguments.exp.value { E::ExpList(arg_list) => arg_list.try_into().unwrap(), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; let (econd, ecode) = match (cond_item, code_item) { (TI::Single(econd, _), TI::Single(ecode, _)) => (econd, ecode), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; @@ -1139,13 +1152,13 @@ fn value( // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => { + E::IfElse(test, conseq, alt_opt) => { let cond = value(context, block, Some(&tbool(eloc)), *test); let mut if_block = make_block!(); let conseq_exp = value(context, &mut 
if_block, Some(&out_type), *conseq); let mut else_block = make_block!(); + let alt = alt_opt.unwrap_or_else(|| Box::new(typing_unit_exp(eloc))); let alt_exp = value(context, &mut else_block, Some(&out_type), *alt); - let (binders, bound_exp) = make_binders(context, eloc, out_type.clone()); let arms_unreachable = conseq_exp.is_unreachable() && alt_exp.is_unreachable(); @@ -1513,7 +1526,7 @@ fn value( var, } => var, _ => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( eloc, format!( "ICE invalid bind_exp for single value: {}", @@ -1537,7 +1550,7 @@ fn value( | Some(bt @ sp!(_, BT::U128)) | Some(bt @ sp!(_, BT::U256)) => *bt, _ => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( eloc, format!( "ICE typing failed for cast: {} : {}", @@ -1602,9 +1615,7 @@ fn value( | E::Continue(_) | E::Assign(_, _, _) | E::Mutate(_, _) => { - context - .env - .add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); + context.add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); error_exp(eloc) } @@ -1612,7 +1623,7 @@ fn value( // odds and ends -- things we need to deal with but that don't do much // ----------------------------------------------------------------------------------------- E::Use(_) => { - context.env.add_diag(ice!((eloc, "ICE unexpanded use"))); + context.add_diag(ice!((eloc, "ICE unexpanded use"))); error_exp(eloc) } E::UnresolvedError => { @@ -1636,15 +1647,11 @@ fn value_block( match last_exp { Some(sp!(_, S::Seq(last))) => value(context, block, expected_type, *last), Some(sp!(loc, _)) => { - context - .env - .add_diag(ice!((loc, "ICE last sequence item should be an exp"))); + context.add_diag(ice!((loc, "ICE last sequence item should be an exp"))); error_exp(loc) } None => { - context - .env - .add_diag(ice!((seq_loc, "ICE empty sequence in value position"))); + context.add_diag(ice!((seq_loc, "ICE empty sequence in value position"))); error_exp(seq_loc) } } @@ -1809,11 +1816,12 @@ fn statement(context: &mut Context, 
block: &mut Block, e: T::Exp) { // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => { + E::IfElse(test, conseq, alt_opt) => { let cond = value(context, block, Some(&tbool(eloc)), *test); let mut if_block = make_block!(); statement(context, &mut if_block, *conseq); let mut else_block = make_block!(); + let alt = alt_opt.unwrap_or_else(|| Box::new(typing_unit_exp(eloc))); statement(context, &mut else_block, *alt); block.push_back(sp( eloc, @@ -1978,7 +1986,7 @@ fn statement(context: &mut Context, block: &mut Block, e: T::Exp) { // odds and ends -- things we need to deal with but that don't do much // ----------------------------------------------------------------------------------------- E::Use(_) => { - context.env.add_diag(ice!((eloc, "ICE unexpanded use"))); + context.add_diag(ice!((eloc, "ICE unexpanded use"))); } } } @@ -2043,6 +2051,13 @@ fn tunit(loc: Loc) -> H::Type { sp(loc, H::Type_::Unit) } +fn typing_unit_exp(loc: Loc) -> T::Exp { + T::exp( + sp(loc, N::Type_::Unit), + sp(loc, T::UnannotatedExp_::Unit { trailing: false }), + ) +} + fn unit_exp(loc: Loc) -> H::Exp { H::exp( tunit(loc), @@ -2494,7 +2509,7 @@ fn bind_value_in_block( match lvalue { H::LValue_::Var { .. 
} => (), lv => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, format!( "ICE tried bind_value for non-var lvalue {}", @@ -2594,9 +2609,7 @@ fn process_value(context: &mut Context, sp!(loc, ev_): E::Value) -> H::Value { use H::Value_ as HV; let v_ = match ev_ { EV::InferredNum(_) => { - context - .env - .add_diag(ice!((loc, "ICE not expanded to value"))); + context.add_diag(ice!((loc, "ICE not expanded to value"))); HV::U64(0) } EV::Address(a) => HV::Address(a.into_addr_bytes()), @@ -2932,7 +2945,7 @@ fn needs_freeze( format!("Expected type: {}", debug_display_verbose!(_expected)) ), ); - context.env.add_diag(diag); + context.add_diag(diag); } Freeze::NotNeeded } @@ -2973,7 +2986,7 @@ fn freeze(context: &mut Context, expected_type: &H::Type, e: H::Exp) -> (Block, "ICE list item has Multple type: {}", debug_display_verbose!(e.ty) ); - context.env.add_diag(ice!((e.ty.loc, msg))); + context.add_diag(ice!((e.ty.loc, msg))); H::SingleType_::base(error_base_type(e.ty.loc)) } }) @@ -3035,9 +3048,7 @@ fn gen_unused_warnings( let is_sui_mode = context.env.package_config(context.current_package).flavor == Flavor::Sui; for (_, sname, sdef) in structs { - context - .env - .add_warning_filter_scope(sdef.warning_filter.clone()); + context.push_warning_filter_scope(sdef.warning_filter.clone()); let has_key = sdef.abilities.has_ability_(Ability_::Key); @@ -3053,13 +3064,11 @@ fn gen_unused_warnings( .is_some_and(|names| names.contains(&f.value())) { let msg = format!("The '{}' field of the '{sname}' type is unused", f.value()); - context - .env - .add_diag(diag!(UnusedItem::StructField, (f.loc(), msg))); + context.add_diag(diag!(UnusedItem::StructField, (f.loc(), msg))); } } } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } } diff --git a/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs b/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs index cbdde8085887b..49f8399e8324c 100644 --- 
a/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs +++ b/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs @@ -6,6 +6,7 @@ use move_ir_types::location::Loc; use move_symbol_pool::Symbol; +use crate::diagnostics::warning_filters::{WarningFilters, WarningFiltersScope}; use crate::linters::StyleCodes; use crate::{ cfgir::{ @@ -13,7 +14,7 @@ use crate::{ visitor::{CFGIRVisitorConstructor, CFGIRVisitorContext}, }, diag, - diagnostics::WarningFilters, + diagnostics::{Diagnostic, Diagnostics}, editions::FeatureGate, hlir::ast as H, shared::CompilationEnv, @@ -23,29 +24,46 @@ pub struct AssertAbortNamedConstants; pub struct Context<'a> { package_name: Option, - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, } impl CFGIRVisitorConstructor for AssertAbortNamedConstants { type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, program: &G::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &G::Program) -> Self::Context<'a> { let package_name = program .modules .iter() .next() .and_then(|(_, _, mdef)| mdef.package_name); - Context { env, package_name } + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + package_name, + } + } +} + +impl Context<'_> { + fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); } } impl CFGIRVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } fn 
visit_command_custom(&mut self, cmd: &H::Command) -> bool { @@ -76,7 +94,7 @@ impl Context<'_> { diag.add_note("Consider using an error constant with the '#[error]' to allow for a more descriptive error."); } - self.env.add_diag(diag); + self.add_diag(diag); } } } diff --git a/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs b/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs index a5960014b5c29..2fb17efe6fba6 100644 --- a/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs +++ b/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs @@ -6,30 +6,14 @@ //! within a module against this convention. use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::ModuleIdent, linters::StyleCodes, parser::ast::ConstantName, - shared::CompilationEnv, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, visitor::simple_visitor}, }; -pub struct ConstantNamingVisitor; -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} -impl TypingVisitorConstructor for ConstantNamingVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { +simple_visitor!( + ConstantNamingVisitor, fn visit_constant_custom( &mut self, _module: ModuleIdent, @@ -41,19 +25,11 @@ impl TypingVisitorContext for Context<'_> { let uid_msg = format!("'{name}' should be ALL_CAPS. 
Or for error constants, use PascalCase",); let diagnostic = diag!(StyleCodes::ConstantNaming.diag_info(), (cdef.loc, uid_msg)); - self.env.add_diag(diagnostic); + self.add_diag(diagnostic); } false } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); /// Returns `true` if the string is in all caps snake case, including numeric characters. fn is_valid_name(name: &str) -> bool { diff --git a/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs b/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs index 17935658b908c..3a70cccdba7a9 100644 --- a/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs +++ b/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs @@ -4,36 +4,14 @@ use super::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, - shared::CompilationEnv, typing::{ ast::{self as T, UnannotatedExp_}, - visitor::{exp_satisfies, TypingVisitorConstructor, TypingVisitorContext}, + visitor::{exp_satisfies, simple_visitor}, }, }; -pub struct LoopWithoutExit; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for LoopWithoutExit { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + LoopWithoutExit, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { // we do not care about `while` since there is another lint that handles reporting // that `while (true)` should be `loop` @@ -57,10 +35,10 @@ impl 
TypingVisitorContext for Context<'_> { This code will until it errors, e.g. reaching an 'abort' or running out of gas" ) ); - self.env.add_diag(diag); + self.add_diag(diag); false } -} +); fn has_return(e: &T::Exp) -> bool { exp_satisfies(e, |e| matches!(e.exp.value, UnannotatedExp_::Return(_))) diff --git a/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs b/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs index de5d816695894..21fc0a9e19bcb 100644 --- a/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs +++ b/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs @@ -4,40 +4,17 @@ //! Detects meaningless math operations like `x * 0`, `x << 0`, `x >> 0`, `x * 1`, `x + 0`, `x - 0` //! Aims to reduce code redundancy and improve clarity by flagging operations with no effect. use crate::{ - cfgir::ast as G, - cfgir::visitor::{CFGIRVisitorConstructor, CFGIRVisitorContext}, + cfgir::visitor::simple_visitor, diag, - diagnostics::WarningFilters, hlir::ast::{self as H, Value_}, linters::StyleCodes, parser::ast::BinOp_, - shared::CompilationEnv, }; use move_core_types::u256::U256; use move_ir_types::location::Loc; -pub struct MeaninglessMathOperation; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl CFGIRVisitorConstructor for MeaninglessMathOperation { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &G::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl CFGIRVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + MeaninglessMathOperation, fn visit_exp_custom(&mut self, exp: &H::Exp) -> bool { let H::UnannotatedExp_::BinopExp(lhs, op, rhs) = &exp.exp.value else { return 
false; @@ -54,7 +31,7 @@ impl CFGIRVisitorContext for Context<'_> { }; if let Some(meaningless_operand) = is_unchanged { let msg = "This operation has no effect and can be removed"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::MeaninglessMath.diag_info(), (exp.exp.loc, msg), (meaningless_operand, "Because of this operand"), @@ -70,7 +47,7 @@ impl CFGIRVisitorContext for Context<'_> { }; if let Some(zero_operand) = is_always_zero { let msg = "This operation is always zero and can be replaced with '0'"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::MeaninglessMath.diag_info(), (exp.exp.loc, msg), (zero_operand, "Because of this operand"), @@ -84,7 +61,7 @@ impl CFGIRVisitorContext for Context<'_> { }; if let Some(one_operand) = is_always_one { let msg = "This operation is always one and can be replaced with '1'"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::MeaninglessMath.diag_info(), (exp.exp.loc, msg), (one_operand, "Because of this operand"), @@ -95,7 +72,7 @@ impl CFGIRVisitorContext for Context<'_> { false } -} +); fn is_zero(exp: &H::Exp) -> Option { let H::UnannotatedExp_::Value(sp!(loc, value_)) = &exp.exp.value else { diff --git a/external-crates/move/crates/move-compiler/src/linters/mod.rs b/external-crates/move/crates/move-compiler/src/linters/mod.rs index 1aaf95a668c27..6f02e2e2cd4a9 100644 --- a/external-crates/move/crates/move-compiler/src/linters/mod.rs +++ b/external-crates/move/crates/move-compiler/src/linters/mod.rs @@ -6,8 +6,10 @@ use move_symbol_pool::Symbol; use crate::{ cfgir::visitor::CFGIRVisitor, command_line::compiler::Visitor, - diagnostics::codes::WarningFilter, - diagnostics::codes::{custom, DiagnosticInfo, Severity}, + diagnostics::{ + codes::{custom, DiagnosticInfo, Severity}, + warning_filters::WarningFilter, + }, typing::visitor::TypingVisitor, }; @@ -18,6 +20,7 @@ pub mod meaningless_math_operation; pub mod redundant_ref_deref; pub mod self_assignment; pub mod 
unnecessary_conditional; +pub mod unnecessary_unit; pub mod unnecessary_while_loop; pub mod unneeded_return; @@ -152,7 +155,13 @@ lints!( LinterDiagnosticCategory::Complexity, "redundant_ref_deref", "redundant reference/dereference" - ) + ), + ( + UnnecessaryUnit, + LinterDiagnosticCategory::Style, + "unnecessary_unit", + "unit `()` expression can be removed or simplified" + ), ); pub const ALLOW_ATTR_CATEGORY: &str = "lint"; @@ -189,6 +198,7 @@ pub fn linter_visitors(level: LintLevel) -> Vec { unnecessary_conditional::UnnecessaryConditional.visitor(), self_assignment::SelfAssignmentVisitor.visitor(), redundant_ref_deref::RedundantRefDerefVisitor.visitor(), + unnecessary_unit::UnnecessaryUnit.visitor(), ] } } diff --git a/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs b/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs index cf1ed98301767..4029661735e0a 100644 --- a/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs +++ b/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs @@ -8,41 +8,19 @@ use crate::linters::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, - shared::CompilationEnv, typing::{ - ast::{self as T, Exp, UnannotatedExp_ as TE}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + ast::{Exp, UnannotatedExp_ as TE}, + visitor::simple_visitor, }, }; -pub struct RedundantRefDerefVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for RedundantRefDerefVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + 
RedundantRefDerefVisitor, fn visit_exp_custom(&mut self, exp: &Exp) -> bool { self.check_redundant_ref_deref(exp); false } -} +); impl Context<'_> { // Check for &* pattern @@ -59,7 +37,7 @@ impl Context<'_> { return; } match &deref_exp.exp.value { - TE::TempBorrow(_, inner) if is_simple_deref_ref_exp(inner) => self.env.add_diag(diag!( + TE::TempBorrow(_, inner) if is_simple_deref_ref_exp(inner) => self.add_diag(diag!( StyleCodes::RedundantRefDeref.diag_info(), ( exp.exp.loc, @@ -67,7 +45,7 @@ impl Context<'_> { Remove this borrow-deref and use the expression directly." ) )), - TE::TempBorrow(_, inner) if all_deref_borrow(inner) => self.env.add_diag(diag!( + TE::TempBorrow(_, inner) if all_deref_borrow(inner) => self.add_diag(diag!( StyleCodes::RedundantRefDeref.diag_info(), ( exp.exp.loc, @@ -75,17 +53,15 @@ impl Context<'_> { Use the inner expression directly." ) )), - TE::Borrow(false, _, _) if exp.exp.loc != deref_exp.exp.loc => { - self.env.add_diag(diag!( - StyleCodes::RedundantRefDeref.diag_info(), - ( - exp.exp.loc, - "Redundant borrow-dereference detected. \ + TE::Borrow(false, _, _) if exp.exp.loc != deref_exp.exp.loc => self.add_diag(diag!( + StyleCodes::RedundantRefDeref.diag_info(), + ( + exp.exp.loc, + "Redundant borrow-dereference detected. \ Use the field access directly." 
- ) - )) - } - TE::Borrow(_, _, _) | TE::BorrowLocal(_, _) => self.env.add_diag(diag!( + ) + )), + TE::Borrow(_, _, _) | TE::BorrowLocal(_, _) => self.add_diag(diag!( StyleCodes::RedundantRefDeref.diag_info(), ( exp.exp.loc, diff --git a/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs b/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs index 808b55ee9ff44..029fdda924026 100644 --- a/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs +++ b/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs @@ -6,40 +6,17 @@ use super::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, naming::ast::Var, - shared::CompilationEnv, typing::{ ast::{self as T}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + visitor::simple_visitor, }, }; use move_ir_types::location::Loc; use move_proc_macros::growing_stack; -pub struct SelfAssignmentVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for SelfAssignmentVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + SelfAssignmentVisitor, fn visit_exp_custom(&mut self, e: &T::Exp) -> bool { use T::UnannotatedExp_ as E; match &e.exp.value { @@ -49,7 +26,7 @@ impl TypingVisitorContext for Context<'_> { } false } -} +); fn check_mutate(context: &mut Context, loc: Loc, lhs: &T::Exp, rhs: &T::Exp) { #[growing_stack] @@ -202,7 +179,7 @@ fn exp_list_items(e: &T::Exp) -> Vec<&T::Exp> { fn report_self_assignment(context: &mut Context, case: &str, eloc: Loc, lloc: Loc, rloc: Loc) { let msg = format!("Unnecessary 
self-{case}. The {case} is redundant and will not change the value"); - context.env.add_diag(diag!( + context.add_diag(diag!( StyleCodes::SelfAssignment.diag_info(), (eloc, msg), (lloc, "This location"), diff --git a/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs b/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs index 490b8eb57f33c..f3b14b3ed2725 100644 --- a/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs +++ b/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs @@ -8,37 +8,15 @@ use crate::expansion::ast::Value; use crate::linters::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::Value_, - shared::CompilationEnv, typing::{ ast::{self as T, SequenceItem_, UnannotatedExp_}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + visitor::simple_visitor, }, }; -pub struct UnnecessaryConditional; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for UnnecessaryConditional { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + UnnecessaryConditional, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { let UnannotatedExp_::IfElse(_, etrue, efalse) = &exp.exp.value else { return false; @@ -46,7 +24,7 @@ impl TypingVisitorContext for Context<'_> { let Some(vtrue) = extract_value(etrue) else { return false; }; - let Some(vfalse) = extract_value(efalse) else { + let Some(vfalse) = efalse.as_ref().and_then(|efalse| extract_value(efalse)) else { return false; }; @@ -58,7 +36,7 @@ impl 
TypingVisitorContext for Context<'_> { "Detected an unnecessary conditional expression 'if (cond)'. Consider using \ the condition directly, i.e. '{negation}cond'", ); - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::UnnecessaryConditional.diag_info(), (exp.exp.loc, msg) )); @@ -67,7 +45,7 @@ impl TypingVisitorContext for Context<'_> { let msg = "Detected a redundant conditional expression 'if (..) v else v', where each \ branch results in the same value 'v'. Consider using the value directly"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::UnnecessaryConditional.diag_info(), (exp.exp.loc, msg), (vtrue.loc, "This value"), @@ -97,7 +75,7 @@ impl TypingVisitorContext for Context<'_> { // } false } -} +); #[growing_stack] fn extract_value(block: &T::Exp) -> Option<&Value> { diff --git a/external-crates/move/crates/move-compiler/src/linters/unnecessary_unit.rs b/external-crates/move/crates/move-compiler/src/linters/unnecessary_unit.rs new file mode 100644 index 0000000000000..0451c47a83de3 --- /dev/null +++ b/external-crates/move/crates/move-compiler/src/linters/unnecessary_unit.rs @@ -0,0 +1,100 @@ +//! Detects an unnecessary unit expression in a block, sequence, if, or else. 
+ +use crate::{ + diag, ice, + linters::StyleCodes, + typing::{ + ast::{self as T, SequenceItem_, UnannotatedExp_}, + visitor::simple_visitor, + }, +}; +use move_ir_types::location::Loc; + +simple_visitor!( + UnnecessaryUnit, + fn visit_seq_custom(&mut self, loc: Loc, (_, seq_): &T::Sequence) -> bool { + let n = seq_.len(); + match n { + 0 => { + self.add_diag(ice!((loc, "Unexpected empty block without a value"))); + } + 1 => { + // TODO probably too noisy for now, we would need more information about + // whether blocks were added by the programmer + // self.env.add_diag(diag!( + // StyleCodes::UnnecessaryBlock.diag_info(), + // (e.exp.loc, "Unnecessary block expression '{}')" + // (e.exp.loc, if_msg), + // )); + } + n => { + let last = n - 1; + for (i, stmt) in seq_.iter().enumerate() { + if i != last && is_unit_seq(self, stmt) { + let msg = "Unnecessary unit in sequence '();'. Consider removing"; + self.add_diag(diag!( + StyleCodes::UnnecessaryUnit.diag_info(), + (stmt.loc, msg), + )); + } + } + } + } + false + }, + fn visit_exp_custom(&mut self, e: &T::Exp) -> bool { + use UnannotatedExp_ as TE; + let TE::IfElse(e_cond, e_true, e_false_opt) = &e.exp.value else { + return false; + }; + if is_unit(self, e_true) { + let u_msg = "Unnecessary unit '()'"; + let if_msg = "Consider negating the 'if' condition and simplifying"; + let mut diag = diag!( + StyleCodes::UnnecessaryUnit.diag_info(), + (e_true.exp.loc, u_msg), + (e_cond.exp.loc, if_msg), + ); + diag.add_note("For example 'if (cond) () else e' can be simplified to 'if (!cond) e'"); + self.add_diag(diag); + } + if let Some(e_false) = e_false_opt { + if is_unit(self, e_false) { + let u_msg = "Unnecessary 'else ()'."; + let if_msg = "An 'if' without an 'else' has an implicit 'else ()'.
\ + Consider removing the 'else' branch"; + let mut diag = diag!( + StyleCodes::UnnecessaryUnit.diag_info(), + (e_false.exp.loc, u_msg), + (e.exp.loc, if_msg), + ); + diag.add_note( + "For example 'if (cond) e else ()' can be simplified to 'if (cond) e'", + ); + self.add_diag(diag); + } + } + false + } +); + +fn is_unit_seq(context: &mut Context, s: &T::SequenceItem) -> bool { + match &s.value { + SequenceItem_::Seq(e) => is_unit(context, e), + SequenceItem_::Declare(_) | SequenceItem_::Bind(_, _, _) => false, + } +} + +fn is_unit(context: &mut Context, e: &T::Exp) -> bool { + use UnannotatedExp_ as TE; + match &e.exp.value { + TE::Unit { .. } => true, + TE::Annotate(inner, _) => is_unit(context, inner), + TE::Block((_, seq)) if seq.is_empty() => { + context.add_diag(ice!((e.exp.loc, "Unexpected empty block without a value"))); + false + } + TE::Block((_, seq)) if seq.len() == 1 => is_unit_seq(context, &seq[0]), + _ => false, + } +} diff --git a/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs b/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs index b13449cd9f15b..ce5a0809a2ca9 100644 --- a/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs +++ b/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs @@ -3,38 +3,16 @@ //! Aims to enhance code readability and adherence to Rust idioms. 
use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::Value_, linters::StyleCodes, - shared::CompilationEnv, typing::{ ast::{self as T, UnannotatedExp_}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + visitor::simple_visitor, }, }; -pub struct WhileTrueToLoop; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for WhileTrueToLoop { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + WhileTrueToLoop, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { let UnannotatedExp_::While(_, cond, _) = &exp.exp.value else { return false; @@ -49,8 +27,8 @@ impl TypingVisitorContext for Context<'_> { "A 'loop' is more useful in these cases. Unlike 'while', 'loop' can have a \ 'break' with a value, e.g. 
'let x = loop { break 42 };'", ); - self.env.add_diag(diag); + self.add_diag(diag); false } -} +); diff --git a/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs b/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs index 5e974615629b5..2a62c7bf94e28 100644 --- a/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs +++ b/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs @@ -6,15 +6,10 @@ use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::ModuleIdent, linters::StyleCodes, parser::ast::FunctionName, - shared::CompilationEnv, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, visitor::simple_visitor}, }; use move_ir_types::location::Loc; @@ -22,21 +17,8 @@ use move_proc_macros::growing_stack; use std::collections::VecDeque; -pub struct UnneededReturnVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for UnneededReturnVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { +simple_visitor!( + UnneededReturnVisitor, fn visit_function_custom( &mut self, _module: ModuleIdent, @@ -48,15 +30,7 @@ impl TypingVisitorContext for Context<'_> { }; true } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); /// Recur down the tail (last) position of the sequence, looking for returns that /// might occur in the function's taul/return position.. 
@@ -75,9 +49,11 @@ fn tail_block(context: &mut Context, seq: &VecDeque) { #[growing_stack] fn tail(context: &mut Context, exp: &T::Exp) { match &exp.exp.value { - T::UnannotatedExp_::IfElse(_, conseq, alt) => { + T::UnannotatedExp_::IfElse(_, conseq, alt_opt) => { tail(context, conseq); - tail(context, alt); + if let Some(alt) = alt_opt { + tail(context, alt); + } } T::UnannotatedExp_::Match(_, arms) => { for arm in &arms.value { @@ -209,7 +185,7 @@ fn returnable_value(context: &mut Context, exp: &T::Exp) -> bool { } fn report_unneeded_return(context: &mut Context, loc: Loc) { - context.env.add_diag(diag!( + context.add_diag(diag!( StyleCodes::UnneededReturn.diag_info(), ( loc, diff --git a/external-crates/move/crates/move-compiler/src/naming/ast.rs b/external-crates/move/crates/move-compiler/src/naming/ast.rs index 52ef5c5aae880..aca723dc12584 100644 --- a/external-crates/move/crates/move-compiler/src/naming/ast.rs +++ b/external-crates/move/crates/move-compiler/src/naming/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::{ ability_constraints_ast_debug, ability_modifiers_ast_debug, AbilitySet, Attributes, DottedUsage, Fields, Friend, ImplicitUseFunCandidate, ModuleIdent, Mutability, TargetKind, @@ -426,7 +426,7 @@ pub enum Exp_ { Builtin(BuiltinFunction, Spanned>), Vector(Loc, Option, Spanned>), - IfElse(Box, Box, Box), + IfElse(Box, Box, Option>), Match(Box, Spanned>), While(BlockLabel, Box, Box), Loop(BlockLabel, Box), @@ -1704,13 +1704,15 @@ impl AstDebug for Exp_ { }); w.write("}"); } - E::IfElse(b, t, f) => { + E::IfElse(b, t, f_opt) => { w.write("if ("); b.ast_debug(w); w.write(") "); t.ast_debug(w); - w.write(" else "); - f.ast_debug(w); + if let Some(f) = f_opt { + w.write(" else "); + f.ast_debug(w); + } } E::Match(subject, arms) => { w.write("match ("); diff --git a/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs 
b/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs index 814174cb72e6b..56574e6e7408b 100644 --- a/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs +++ b/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs @@ -19,7 +19,7 @@ use move_symbol_pool::symbol; /// verify fake native attribute usage usage pub fn function( - env: &mut CompilationEnv, + env: &CompilationEnv, module: ModuleIdent, function_name: FunctionName, function: &N::Function, @@ -45,7 +45,7 @@ pub fn function( (loc, attr_msg), (function_name.loc(), name_msg), ); - env.add_diag(diag); + env.add_error_diag(diag); } match &function.body.value { N::FunctionBody_::Native => (), @@ -55,7 +55,7 @@ pub fn function( NativeAttribute::BYTECODE_INSTRUCTION ); let diag = diag!(Attributes::InvalidBytecodeInst, (loc, attr_msg)); - env.add_diag(diag); + env.add_error_diag(diag); } } } diff --git a/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs b/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs index 2248154e6fcdf..5262a67858ec5 100644 --- a/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs +++ b/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs @@ -1,6 +1,8 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 +use crate::diagnostics::warning_filters::{WarningFilters, WarningFiltersScope}; +use crate::diagnostics::{Diagnostic, Diagnostics}; use crate::expansion::ast::{self as E, ModuleIdent}; use crate::naming::ast as N; use crate::parser::ast::{FunctionName, Visibility}; @@ -15,30 +17,50 @@ use move_proc_macros::growing_stack; //************************************************************************************************** struct Context<'env, 'info> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, info: &'info NamingProgramInfo, + warning_filters_scope: WarningFiltersScope, current_module: ModuleIdent, } impl<'env, 'info> 
Context<'env, 'info> { fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, info: &'info NamingProgramInfo, current_module: ModuleIdent, ) -> Self { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Self { env, info, + warning_filters_scope, current_module, } } + + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } } //************************************************************************************************** // Entry //************************************************************************************************** -pub fn program(env: &mut CompilationEnv, info: &mut NamingProgramInfo, inner: &mut N::Program_) { +pub fn program(env: &CompilationEnv, info: &mut NamingProgramInfo, inner: &mut N::Program_) { let N::Program_ { modules } = inner; for (mident, mdef) in modules.key_cloned_iter_mut() { module(env, info, mident, mdef); @@ -59,15 +81,13 @@ pub fn program(env: &mut CompilationEnv, info: &mut NamingProgramInfo, inner: &m } fn module( - env: &mut CompilationEnv, + env: &CompilationEnv, info: &mut NamingProgramInfo, mident: ModuleIdent, mdef: &mut N::ModuleDefinition, ) { let context = &mut Context::new(env, info, mident); - context - .env - .add_warning_filter_scope(mdef.warning_filter.clone()); + context.push_warning_filter_scope(mdef.warning_filter.clone()); use_funs(context, &mut mdef.use_funs); for (_, _, c) in &mut mdef.constants { constant(context, c); @@ -75,25 +95,21 @@ fn module( for (_, _, f) in &mut mdef.functions { function(context, f); } - context.env.pop_warning_filter_scope(); + 
context.pop_warning_filter_scope(); } fn constant(context: &mut Context, c: &mut N::Constant) { - context - .env - .add_warning_filter_scope(c.warning_filter.clone()); + context.push_warning_filter_scope(c.warning_filter.clone()); exp(context, &mut c.value); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } fn function(context: &mut Context, function: &mut N::Function) { - context - .env - .add_warning_filter_scope(function.warning_filter.clone()); + context.push_warning_filter_scope(function.warning_filter.clone()); if let N::FunctionBody_::Defined(seq) = &mut function.body.value { sequence(context, seq) } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } //************************************************************************************************** @@ -128,7 +144,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { if let Some(public_loc) = nuf.is_public { let defining_module = match &tn.value { N::TypeName_::Multiple(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( tn.loc, "ICE tuple type should not be reachable from use fun" ))); @@ -155,7 +171,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { format!("The type '{tn}' is defined here"), )) } - context.env.add_diag(diag); + context.add_diag(diag); nuf.is_public = None; } } @@ -173,7 +189,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { } None => format!("But '{m}::{f}' takes no arguments"), }; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg), (first_ty_loc, first_tn_msg), @@ -199,9 +215,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { let Some((target_f, tn)) = is_valid_method(context, &target_m, target_f) else { if matches!(ekind, E::ImplicitUseFunKind::UseAlias { used: false }) { let msg = format!("Unused 'use' of alias '{}'. 
Consider removing it", method); - context - .env - .add_diag(diag!(UnusedItem::Alias, (method.loc, msg),)) + context.add_diag(diag!(UnusedItem::Alias, (method.loc, msg),)) } continue; }; @@ -238,7 +252,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { argument is a type defined in the same module" } }; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (nuf_loc, msg), (prev, "Previously declared here"), @@ -335,10 +349,12 @@ fn exp(context: &mut Context, sp!(_, e_): &mut N::Exp) { use_fun_color: _, body: e, }) => exp(context, e), - N::Exp_::IfElse(econd, et, ef) => { + N::Exp_::IfElse(econd, et, ef_opt) => { exp(context, econd); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef); + } } N::Exp_::Match(esubject, arms) => { exp(context, esubject); diff --git a/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs b/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs index d5c19587d6af8..38a46961b5625 100644 --- a/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs +++ b/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs @@ -68,7 +68,7 @@ pub(super) fn resolve_syntax_attributes( if let Some(macro_loc) = function.macro_ { let msg = "Syntax attributes may not appear on macro definitions"; let fn_msg = "This function is a macro"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (attr_loc, msg), (macro_loc, fn_msg) @@ -125,7 +125,7 @@ fn prev_syntax_defn_error( kind_string, type_name ); let prev_msg = "This syntax method was previously defined here."; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidAttribute, (sloc, msg), (prev.loc, prev_msg) @@ -147,7 +147,7 @@ fn attr_param_from_str(loc: Loc, name_str: &str) -> Option /// Resolve the mapping for a function + syntax attribute into a SyntaxMethodKind. 
fn resolve_syntax_method_prekind( - env: &mut CompilationEnv, + env: &CompilationEnv, sp!(loc, attr_): &Attribute, ) -> Option> { match attr_ { @@ -157,7 +157,7 @@ fn resolve_syntax_method_prekind( SyntaxAttribute::SYNTAX, SyntaxAttribute::INDEX ); - env.add_diag(diag!(Declarations::InvalidAttribute, (*loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (*loc, msg))); None } Attribute_::Parameterized(_, inner) => { @@ -169,7 +169,7 @@ fn resolve_syntax_method_prekind( if let Some(prev_kind) = kinds.replace(kind) { let msg = "Repeated syntax method identifier".to_string(); let prev = "Initially defined here".to_string(); - env.add_diag(diag!( + env.add_error_diag(diag!( Declarations::InvalidAttribute, (loc, msg), (prev_kind.loc, prev) @@ -177,7 +177,7 @@ fn resolve_syntax_method_prekind( } } else { let msg = format!("Invalid syntax method identifier '{}'", name); - env.add_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); } } Attribute_::Assigned(n, _) => { @@ -186,7 +186,7 @@ fn resolve_syntax_method_prekind( SyntaxAttribute::SYNTAX, n ); - env.add_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); } Attribute_::Parameterized(n, _) => { let msg = format!( @@ -194,7 +194,7 @@ fn resolve_syntax_method_prekind( SyntaxAttribute::SYNTAX, n ); - env.add_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); } } } @@ -221,7 +221,7 @@ fn determine_valid_kind( SyntaxAttribute::INDEX, ); let ty_msg = "This type is not a reference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidAttribute, (sloc, msg), (subject_type.loc, ty_msg) @@ -231,9 +231,7 @@ fn determine_valid_kind( } SyntaxMethodPrekind_::For => { let msg = "'for' syntax attributes are not currently supported"; - context - .env - 
.add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); + context.add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); return None; } // SyntaxMethodPrekind_::For => match mut_opt { @@ -243,9 +241,7 @@ fn determine_valid_kind( // }, SyntaxMethodPrekind_::Assign => { let msg = "'assign' syntax attributes are not currently supported"; - context - .env - .add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); + context.add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); return None; } // SyntaxMethodPrekind_::Assign => match mut_opt { // Some((loc, true)) => SK::Assign, @@ -255,7 +251,7 @@ fn determine_valid_kind( // SyntaxAttribute::INDEX, // ); // let ty_msg = "This type is not a reference"; - // context.env.add_diag(diag!( + // context.add_diag(diag!( // Declarations::InvalidAttribute, // (sloc, msg), // (*ty_loc, msg) @@ -287,7 +283,7 @@ fn determine_subject_type_name( let msg = "Invalid type for syntax method definition"; let mut diag = diag!(Declarations::InvalidSyntaxMethod, (*loc, msg)); diag.add_note("Syntax methods may only be defined for single base types"); - context.env.add_diag(diag); + context.add_diag(diag); return None; } N::TypeName_::Builtin(sp!(_, bt_)) => context.env.primitive_definer(*bt_), @@ -296,7 +292,7 @@ fn determine_subject_type_name( if Some(cur_module) == defining_module { Some(type_name.clone()) } else { - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*ann_loc, INVALID_MODULE_MSG), (*loc, INVALID_MODULE_TYPE_MSG) @@ -314,7 +310,7 @@ fn determine_subject_type_name( "But '{}' was declared as a type parameter here", param.user_specified_name ); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*ann_loc, msg), (*loc, tmsg) @@ -329,7 +325,7 @@ fn determine_subject_type_name( let msg = "Invalid type for syntax method definition"; let mut diag = diag!(Declarations::InvalidSyntaxMethod, (*loc, msg)); diag.add_note("Syntax 
methods may only be defined for single base types"); - context.env.add_diag(diag); + context.add_diag(diag); None } } @@ -349,7 +345,7 @@ fn valid_return_type( let msg = format!("Invalid {} annotation", SyntaxAttribute::SYNTAX); let tmsg = "This syntax method must return an immutable reference to match its subject type"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -362,7 +358,7 @@ fn valid_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "This is not an immutable reference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -379,7 +375,7 @@ fn valid_return_type( let msg = format!("Invalid {} annotation", SyntaxAttribute::SYNTAX); let tmsg = "This syntax method must return a mutable reference to match its subject type"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -392,7 +388,7 @@ fn valid_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "This is not a mutable reference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -426,7 +422,7 @@ fn valid_index_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "Unit type occurs as the return type for this function"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*kind_loc, msg), (*tloc, tmsg) @@ -439,7 +435,7 @@ fn valid_index_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "But a function type appears in this return type"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*kind_loc, msg), (*tloc, tmsg) @@ -466,9 +462,7 @@ fn get_first_type( "Invalid attribute. 
{} is only valid if the function takes at least one parameter", SyntaxAttribute::SYNTAX ); - context - .env - .add_diag(diag!(Declarations::InvalidAttribute, (*attr_loc, msg))); + context.add_diag(diag!(Declarations::InvalidAttribute, (*attr_loc, msg))); None } } diff --git a/external-crates/move/crates/move-compiler/src/naming/translate.rs b/external-crates/move/crates/move-compiler/src/naming/translate.rs index c69876a26eb4a..a4c7f6e08b98f 100644 --- a/external-crates/move/crates/move-compiler/src/naming/translate.rs +++ b/external-crates/move/crates/move-compiler/src/naming/translate.rs @@ -7,7 +7,8 @@ use crate::{ diagnostics::{ self, codes::{self, *}, - Diagnostic, + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, }, editions::FeatureGate, expansion::{ @@ -24,7 +25,10 @@ use crate::{ self as P, ConstantName, DatatypeName, Field, FunctionName, VariantName, MACRO_MODIFIER, }, shared::{ - ide::EllipsisMatchEntries, program_info::NamingProgramInfo, unique_map::UniqueMap, *, + ide::{EllipsisMatchEntries, IDEAnnotation, IDEInfo}, + program_info::NamingProgramInfo, + unique_map::UniqueMap, + *, }, FullyCompiledProgram, }; @@ -519,13 +523,19 @@ pub fn build_member_map( // Context //************************************************************************************************** -pub(super) struct Context<'env> { - pub env: &'env mut CompilationEnv, - current_module: Option, +pub(super) struct OuterContext { /// Nothing should ever use this directly, and should instead go through /// `resolve_module_access` because it preserves source location information. module_members: ModuleMembers, + unscoped_types: BTreeMap, +} + +pub(super) struct Context<'outer, 'env> { + pub env: &'env CompilationEnv, + outer: &'outer OuterContext, unscoped_types: Vec>, + warning_filters_scope: WarningFiltersScope, + current_module: ModuleIdent, local_scopes: Vec>, local_count: BTreeMap, used_locals: BTreeSet, @@ -546,7 +556,7 @@ macro_rules! 
resolve_from_module_access { Some(other) => { let diag = make_invalid_module_member_kind_error($context, &$expected_kind, $loc, &other); - $context.env.add_diag(diag); + $context.add_diag(diag); None } None => { @@ -557,26 +567,43 @@ macro_rules! resolve_from_module_access { }}; } -impl<'env> Context<'env> { +impl OuterContext { fn new( - compilation_env: &'env mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: &E::Program, ) -> Self { use ResolvedType as RT; let module_members = build_member_map(compilation_env, pre_compiled_lib, prog); - let unscoped_types = vec![N::BuiltinTypeName_::all_names() + let unscoped_types = N::BuiltinTypeName_::all_names() .iter() .map(|s| { let b_ = RT::BuiltinType(N::BuiltinTypeName_::resolve(s.as_str()).unwrap()); (*s, b_) }) - .collect()]; + .collect(); Self { - env: compilation_env, - current_module: None, module_members, unscoped_types, + } + } +} + +impl<'outer, 'env> Context<'outer, 'env> { + fn new( + env: &'env CompilationEnv, + outer: &'outer OuterContext, + current_package: Option, + current_module: ModuleIdent, + ) -> Self { + let unscoped_types = vec![outer.unscoped_types.clone()]; + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Self { + env, + outer, + unscoped_types, + current_module, + warning_filters_scope, local_scopes: vec![], local_count: BTreeMap::new(), nominal_blocks: vec![], @@ -584,15 +611,42 @@ impl<'env> Context<'env> { used_locals: BTreeSet::new(), used_fun_tparams: BTreeSet::new(), translating_fun: false, - current_package: None, + current_package, } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + #[allow(unused)] + pub fn extend_ide_info(&self, info: IDEInfo) { + self.env.extend_ide_info(&self.warning_filters_scope, info); + } + + pub fn 
add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.env + .add_ide_annotation(&self.warning_filters_scope, loc, info); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + fn valid_module(&mut self, m: &ModuleIdent) -> bool { - let resolved = self.module_members.contains_key(m); + let resolved = self.outer.module_members.contains_key(m); if !resolved { let diag = make_unbound_module_error(self, m.loc, m); - self.env.add_diag(diag); + self.add_diag(diag); } resolved } @@ -606,14 +660,14 @@ impl<'env> Context<'env> { m: &ModuleIdent, n: &Name, ) -> Option { - let Some(members) = self.module_members.get(m) else { - self.env.add_diag(make_unbound_module_error(self, m.loc, m)); + let Some(members) = self.outer.module_members.get(m) else { + self.add_diag(make_unbound_module_error(self, m.loc, m)); return None; }; let result = members.get(&n.value); if result.is_none() { let diag = make_unbound_module_member_error(self, kind, loc, *m, n.value); - self.env.add_diag(diag); + self.env.add_diag(&self.warning_filters_scope, diag); } result.map(|inner| { let mut result = inner.clone(); @@ -690,7 +744,7 @@ impl<'env> Context<'env> { } EN::Name(n) => match self.resolve_unscoped_type(nloc, n) { ResolvedType::ModuleType(mut module_type) => { - module_type.set_name_info(self.current_module.unwrap(), nloc); + module_type.set_name_info(self.current_module, nloc); ResolvedType::ModuleType(module_type) } ty @ (ResolvedType::BuiltinType(_) @@ -725,7 +779,7 @@ impl<'env> Context<'env> { { None => { let diag = make_unbound_local_name_error(self, &ErrorKind::Type, loc, n); - self.env.add_diag(diag); + self.add_diag(diag); ResolvedType::Unbound } Some(rn) => rn.clone(), @@ -748,7 +802,7 @@ impl<'env> Context<'env> { Some(c @ ResolvedModuleMember::Constant(_)) => { let diag = 
make_invalid_module_member_kind_error(self, &EK::Function, mloc, &c); - self.env.add_diag(diag); + self.add_diag(diag); ResolvedCallSubject::Unbound } Some(e @ ResolvedModuleMember::Datatype(ResolvedDatatype::Enum(_))) => { @@ -758,7 +812,7 @@ impl<'env> Context<'env> { "Enums cannot be instantiated directly. \ Instead, you must instantiate a variant.", ); - self.env.add_diag(diag); + self.add_diag(diag); ResolvedCallSubject::Unbound } None => { @@ -776,7 +830,7 @@ impl<'env> Context<'env> { _ => { let diag = make_unbound_local_name_error(self, &EK::Function, n.loc, n.value); - self.env.add_diag(diag); + self.add_diag(diag); return ResolvedCallSubject::Unbound; } }; @@ -815,7 +869,7 @@ impl<'env> Context<'env> { ResolvedCallSubject::Constructor(Box::new(variant)) } Some(ResolvedConstructor::Struct(struct_)) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (sloc, "Invalid constructor. Expected an enum".to_string()), ( @@ -862,7 +916,7 @@ impl<'env> Context<'env> { _ => { let diag = make_unbound_local_name_error(self, &ErrorKind::Function, n.loc, n); - self.env.add_diag(diag); + self.add_diag(diag); return ResolvedUseFunFunction::Unbound; } }; @@ -872,11 +926,11 @@ impl<'env> Context<'env> { } EA::Name(n) => { let diag = make_unbound_local_name_error(self, &ErrorKind::Function, n.loc, n); - self.env.add_diag(diag); + self.add_diag(diag); ResolvedUseFunFunction::Unbound } EA::Variant(_, _) => { - self.env.add_diag(ice!(( + self.add_diag(ice!(( mloc, "Tried to resolve variant '{}' as a function in current scope" ),)); @@ -924,7 +978,7 @@ impl<'env> Context<'env> { } else { format!("Invalid {}. Expected a struct name", verb) }; - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (ma.loc, msg), (rtloc, rtmsg) @@ -944,7 +998,7 @@ impl<'env> Context<'env> { "Invalid {verb}. 
Variant '{variant_name}' is not part of this enum", ); let decl_msg = format!("Enum '{}' is defined here", enum_type.name); - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::UnboundVariant, (ma.loc, primary_msg), (enum_type.decl_loc, decl_msg), @@ -960,7 +1014,7 @@ impl<'env> Context<'env> { Some(ResolvedConstructor::Variant(Box::new(variant_info))) } (EN::Name(_) | EN::ModuleAccess(_, _), D::Enum(enum_type)) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (ma.loc, format!("Invalid {verb}. Expected a struct")), ( @@ -971,7 +1025,7 @@ impl<'env> Context<'env> { None } (EN::Variant(sp!(sloc, _), _), D::Struct(stype)) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (*sloc, format!("Invalid {verb}. Expected an enum")), (stype.decl_loc, format!("But '{}' is an struct", stype.name)) @@ -1003,7 +1057,7 @@ impl<'env> Context<'env> { } } E::ModuleAccess_::Name(name) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::UnboundUnscopedName, (mloc, format!("Unbound constant '{}'", name)), )); @@ -1065,7 +1119,7 @@ impl<'env> Context<'env> { } ResolvedModuleMember::Constant(_) => (), }; - self.env.add_diag(diag); + self.add_diag(diag); ResolvedTerm::Unbound } }, @@ -1098,13 +1152,12 @@ impl<'env> Context<'env> { fn resolve_pattern_term(&mut self, sp!(mloc, ma_): E::ModuleAccess) -> ResolvedPatternTerm { match ma_ { E::ModuleAccess_::Name(name) if !is_constant_name(&name.value) => { - self.env - .add_diag(ice!((mloc, "This should have become a binder"))); + self.add_diag(ice!((mloc, "This should have become a binder"))); ResolvedPatternTerm::Unbound } // If we have a name, try to resolve it in our module. 
E::ModuleAccess_::Name(name) => { - let mut mident = self.current_module.unwrap(); + let mut mident = self.current_module; mident.loc = mloc; let maccess = sp(mloc, E::ModuleAccess_::ModuleAccess(mident, name)); self.resolve_pattern_term(maccess) @@ -1181,7 +1234,7 @@ impl<'env> Context<'env> { match id_opt { None => { let msg = variable_msg(name); - self.env.add_diag(diag!(code, (loc, msg))); + self.add_diag(diag!(code, (loc, msg))); None } Some(id) => { @@ -1202,7 +1255,7 @@ impl<'env> Context<'env> { match id_opt { None => { let msg = format!("Failed to resolve pattern binder {}", name); - self.env.add_diag(ice!((loc, msg))); + self.add_diag(ice!((loc, msg))); None } Some(id) => { @@ -1242,8 +1295,7 @@ impl<'env> Context<'env> { "Invalid usage of '{usage}'. \ '{usage}' can only be used inside a loop body or lambda", ); - self.env - .add_diag(diag!(TypeSafety::InvalidLoopControl, (loc, msg))); + self.add_diag(diag!(TypeSafety::InvalidLoopControl, (loc, msg))); return None; }; if *name_type == NominalBlockType::LambdaLoopCapture { @@ -1281,7 +1333,7 @@ impl<'env> Context<'env> { }; diag.add_secondary_label((loop_label.label.loc, msg)); } - self.env.add_diag(diag); + self.add_diag(diag); return None; } Some(*label) @@ -1337,13 +1389,12 @@ impl<'env> Context<'env> { not 'continue'." } }); - self.env.add_diag(diag); + self.add_diag(diag); None } } else { let msg = format!("Invalid {usage}. 
Unbound label '{name}"); - self.env - .add_diag(diag!(NameResolution::UnboundLabel, (loc, msg))); + self.add_diag(diag!(NameResolution::UnboundLabel, (loc, msg))); None } } @@ -1549,7 +1600,7 @@ fn make_unbound_module_member_error( name: impl std::fmt::Display, ) -> Diagnostic { let expected = expected.as_ref().unwrap_or(&ErrorKind::ModuleMember); - let same_module = context.current_module == Some(mident); + let same_module = context.current_module == mident; let (prefix, postfix) = if same_module { ("", " in current scope".to_string()) } else { @@ -1572,7 +1623,7 @@ fn make_invalid_module_member_kind_error( actual: &ResolvedModuleMember, ) -> Diagnostic { let mident = actual.mident(); - let same_module = context.current_module == Some(mident); + let same_module = context.current_module == mident; let (prefix, postfix) = if same_module { ("", " in current scope".to_string()) } else { @@ -1604,13 +1655,13 @@ fn arity_string(arity: usize) -> &'static str { //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: E::Program, ) -> N::Program { - let mut context = Context::new(compilation_env, pre_compiled_lib.clone(), &prog); + let outer_context = OuterContext::new(compilation_env, pre_compiled_lib.clone(), &prog); let E::Program { modules: emodules } = prog; - let modules = modules(&mut context, emodules); + let modules = modules(compilation_env, &outer_context, emodules); let mut inner = N::Program_ { modules }; let mut info = NamingProgramInfo::new(pre_compiled_lib, &inner); super::resolve_use_funs::program(compilation_env, &mut info, &mut inner); @@ -1618,18 +1669,19 @@ pub fn program( } fn modules( - context: &mut Context, + env: &CompilationEnv, + outer: &OuterContext, modules: UniqueMap, ) -> UniqueMap { - modules.map(|ident, mdef| module(context, ident, mdef)) + modules.map(|ident, mdef| 
module(env, outer, ident, mdef)) } fn module( - context: &mut Context, + env: &CompilationEnv, + outer: &OuterContext, ident: ModuleIdent, mdef: E::ModuleDefinition, ) -> N::ModuleDefinition { - context.current_module = Some(ident); let E::ModuleDefinition { loc, warning_filter, @@ -1643,8 +1695,8 @@ fn module( functions: efunctions, constants: econstants, } = mdef; - context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + let context = &mut Context::new(env, outer, package_name, ident); + context.push_warning_filter_scope(warning_filter.clone()); let mut use_funs = use_funs(context, euse_funs); let mut syntax_methods = N::SyntaxMethods::new(); let friends = efriends.filter_map(|mident, f| friend(context, mident, f)); @@ -1699,8 +1751,7 @@ fn module( if has_macro { mark_all_use_funs_as_used(&mut use_funs); } - context.env.pop_warning_filter_scope(); - context.current_package = None; + context.pop_warning_filter_scope(); N::ModuleDefinition { loc, warning_filter, @@ -1736,7 +1787,7 @@ fn use_funs(context: &mut Context, eufs: E::UseFuns) -> N::UseFuns { let nuf_loc = nuf.loc; if let Err((_, prev)) = methods.add(method, nuf) { let msg = format!("Duplicate 'use fun' for '{}.{}'", tn, method); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (nuf_loc, msg), (prev, "Previously declared here"), @@ -1774,9 +1825,7 @@ fn explicit_use_fun( } ResolvedUseFunFunction::Builtin(_) => { let msg = "Invalid 'use fun'. Cannot use a builtin function as a method"; - context - .env - .add_diag(diag!(Declarations::InvalidUseFun, (loc, msg))); + context.add_diag(diag!(Declarations::InvalidUseFun, (loc, msg))); None } ResolvedUseFunFunction::Unbound => { @@ -1809,7 +1858,7 @@ fn explicit_use_fun( ResolvedType::Hole => { let msg = "Invalid 'use fun'. 
Cannot associate a method with an inferred type"; let tmsg = "The '_' type is a placeholder for type inference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg), (ty_loc, tmsg) @@ -1822,7 +1871,7 @@ fn explicit_use_fun( "But '{}' was declared as a type parameter here", tp.user_specified_name ); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg,), (tloc, tmsg) @@ -1855,7 +1904,7 @@ fn check_use_fun_scope( return true; }; let current_module = context.current_module; - let Err(def_loc_opt) = use_fun_module_defines(context, use_fun_loc, current_module, rtype) + let Err(def_loc_opt) = use_fun_module_defines(context, use_fun_loc, ¤t_module, rtype) else { return true; }; @@ -1874,22 +1923,19 @@ fn check_use_fun_scope( if let Some(def_loc) = def_loc_opt { diag.add_secondary_label((def_loc, "Type defined in another module here")); } - context.env.add_diag(diag); + context.add_diag(diag); false } fn use_fun_module_defines( context: &mut Context, use_fun_loc: &Loc, - specified: Option, + specified: &ModuleIdent, rtype: &ResolvedType, ) -> Result<(), Option> { match rtype { ResolvedType::ModuleType(mtype) => { - if specified - .as_ref() - .is_some_and(|mident| mident == &mtype.mident()) - { + if specified == &mtype.mident() { Ok(()) } else { Err(Some(mtype.decl_loc())) @@ -1897,11 +1943,10 @@ fn use_fun_module_defines( } ResolvedType::BuiltinType(b_) => { let definer_opt = context.env.primitive_definer(*b_); - match (definer_opt, &specified) { - (None, _) => Err(None), - (Some(d), None) => Err(Some(d.loc)), - (Some(d), Some(s)) => { - if d == s { + match definer_opt { + None => Err(None), + Some(d) => { + if d == specified { Ok(()) } else { Err(Some(d.loc)) @@ -1910,7 +1955,7 @@ fn use_fun_module_defines( } } ResolvedType::TParam(_, _) | ResolvedType::Hole | ResolvedType::Unbound => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *use_fun_loc, "Tried to validate use fun for 
invalid type" ))); @@ -1943,20 +1988,20 @@ fn mark_all_use_funs_as_used(use_funs: &mut N::UseFuns) { //************************************************************************************************** fn friend(context: &mut Context, mident: ModuleIdent, friend: E::Friend) -> Option { - let current_mident = context.current_module.as_ref().unwrap(); + let current_mident = &context.current_module; if mident.value.address != current_mident.value.address { // NOTE: in alignment with the bytecode verifier, this constraint is a policy decision // rather than a technical requirement. The compiler, VM, and bytecode verifier DO NOT // rely on the assumption that friend modules must reside within the same account address. let msg = "Cannot declare modules out of the current address as a friend"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFriendDeclaration, (friend.loc, "Invalid friend declaration"), (mident.loc, msg), )); None } else if &mident == current_mident { - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFriendDeclaration, (friend.loc, "Invalid friend declaration"), (mident.loc, "Cannot declare the module itself as a friend"), @@ -1998,7 +2043,7 @@ fn function( assert!(context.nominal_block_id == 0); assert!(context.used_fun_tparams.is_empty()); assert!(context.used_locals.is_empty()); - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); context.local_scopes = vec![BTreeMap::new()]; context.local_count = BTreeMap::new(); context.translating_fun = true; @@ -2015,9 +2060,7 @@ fn function( if !context.used_fun_tparams.contains(&tparam.id) { let sp!(loc, n) = tparam.user_specified_name; let msg = format!("Unused type parameter '{}'.", n); - context - .env - .add_diag(diag!(UnusedItem::FunTypeParam, (loc, msg))) + context.add_diag(diag!(UnusedItem::FunTypeParam, (loc, msg))) } } } @@ -2042,7 +2085,7 @@ fn function( context.nominal_block_id 
= 0; context.used_fun_tparams = BTreeSet::new(); context.used_locals = BTreeSet::new(); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); context.translating_fun = false; f } @@ -2073,14 +2116,14 @@ fn function_signature( ); let mut diag = diag!(NameResolution::InvalidMacroParameter, (mutloc, msg)); diag.add_note(ASSIGN_SYNTAX_IDENTIFIER_NOTE); - context.env.add_diag(diag); + context.add_diag(diag); mut_ = Mutability::Imm; } } if let Err((param, prev_loc)) = declared.add(param, ()) { if !is_underscore { let msg = format!("Duplicate parameter with name '{}'", param); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (param.loc(), msg), (prev_loc, "Previously declared here"), @@ -2129,10 +2172,10 @@ fn struct_def( type_parameters, fields, } = sdef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, type_parameters); let fields = struct_fields(context, fields); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); N::StructDefinition { warning_filter, index, @@ -2187,10 +2230,10 @@ fn enum_def( type_parameters, variants, } = edef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, type_parameters); let variants = enum_variants(context, variants); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); N::EnumDefinition { warning_filter, index, @@ -2259,7 +2302,7 @@ fn constant(context: &mut Context, _name: ConstantName, econstant: E::Constant) assert!(context.local_scopes.is_empty()); assert!(context.local_count.is_empty()); assert!(context.used_locals.is_empty()); - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); 
context.local_scopes = vec![BTreeMap::new()]; let signature = type_(context, TypeAnnotation::ConstantSignature, esignature); let value = *exp(context, Box::new(evalue)); @@ -2267,7 +2310,7 @@ fn constant(context: &mut Context, _name: ConstantName, econstant: E::Constant) context.local_count = BTreeMap::new(); context.used_locals = BTreeSet::new(); context.nominal_block_id = 0; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); N::Constant { warning_filter, index, @@ -2325,7 +2368,7 @@ fn type_parameter( context.bind_type(name.value, ResolvedType::TParam(loc, tp.clone())); if let Err((name, old_loc)) = unique_tparams.add(name, ()) { let msg = format!("Duplicate type parameter declared with name '{}'", name); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, msg), (old_loc, "Type parameter previously defined here"), @@ -2392,7 +2435,7 @@ fn type_(context: &mut Context, case: TypeAnnotation, sp!(loc, ety_): E::Type) - if let TypeAnnotation::FunctionSignature = case { diag.add_note("Only 'macro' functions can use '_' in their signatures"); } - context.env.add_diag(diag); + context.add_diag(diag); NT::UnresolvedError } else { // replaced with a type variable during type instantiation @@ -2408,7 +2451,7 @@ fn type_(context: &mut Context, case: TypeAnnotation, sp!(loc, ety_): E::Type) - } RT::TParam(_, tp) => { if !tys.is_empty() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::NamePositionMismatch, (loc, "Generic type parameters cannot take type arguments"), )); @@ -2483,7 +2526,7 @@ fn check_type_instantiation_arity String>( arity, args_len ); - context.env.add_diag(diag!(diag_code, (loc, msg))); + context.add_diag(diag!(diag_code, (loc, msg))); } while ty_args.len() > arity { @@ -2616,7 +2659,11 @@ fn exp(context: &mut Context, e: Box) -> Box { } } - EE::IfElse(eb, et, ef) => NE::IfElse(exp(context, eb), exp(context, et), exp(context, ef)), + EE::IfElse(eb, et, ef_opt) => 
NE::IfElse( + exp(context, eb), + exp(context, et), + ef_opt.map(|ef| exp(context, ef)), + ), // EE::Match(esubject, sp!(_aloc, arms)) if arms.is_empty() => { // exp(context, esubject); // for error effect // let msg = "Invalid 'match' form. 'match' must have at least one arm"; @@ -2720,7 +2767,19 @@ fn exp(context: &mut Context, e: Box) -> Box { NE::Mutate(nel, ner) } - EE::Abort(es) => NE::Abort(exp(context, es)), + EE::Abort(Some(es)) => NE::Abort(exp(context, es)), + EE::Abort(None) => { + context + .env + .check_feature(context.current_package, FeatureGate::CleverAssertions, eloc); + let abort_const_expr = sp( + eloc, + N::Exp_::ErrorConstant { + line_number_loc: eloc, + }, + ); + NE::Abort(Box::new(abort_const_expr)) + } EE::Return(Some(block_name), es) => { let out_rhs = exp(context, es); context @@ -2888,7 +2947,7 @@ fn exp(context: &mut Context, e: Box) -> Box { "ICE compiler should not have parsed this form as a specification" )); diag.add_note(format!("Compiler parsed: {}", debug_display!(e))); - context.env.add_diag(diag); + context.add_diag(diag); NE::UnresolvedError } }; @@ -2917,7 +2976,7 @@ fn dotted(context: &mut Context, edot: E::ExpDotted) -> Option { modified by path operations.\n\ Path operations include 'move', 'copy', '&', '&mut', and field references", ); - context.env.add_diag(diag); + context.add_diag(diag); N::ExpDotted_::Exp(Box::new(sp(ne.loc, N::Exp_::UnresolvedError))) } _ => N::ExpDotted_::Exp(ne), @@ -3004,7 +3063,7 @@ fn check_constructor_form( } else { diag.add_note(named_note!()); } - context.env.add_diag(diag); + context.add_diag(diag); } CF::Parens if stype.field_info.is_positional() => (), CF::Parens => { @@ -3014,7 +3073,7 @@ fn check_constructor_form( (loc, &msg), (stype.decl_loc, defn_loc_error(&name)), ); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces if stype.field_info.is_positional() => { let msg = invalid_inst_msg!("struct", POSNL_UPCASE, POSNL); @@ -3023,7 +3082,7 @@ fn check_constructor_form( 
(loc, &msg), (stype.decl_loc, defn_loc_error(&name)), ); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces => (), }, @@ -3049,7 +3108,7 @@ fn check_constructor_form( } else { diag.add_note(named_note!()); } - context.env.add_diag(diag); + context.add_diag(diag); } CF::Parens if vfields.is_empty() => { let msg = invalid_inst_msg!("variant", EMPTY_UPCASE, EMPTY); @@ -3059,7 +3118,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(format!("Remove '()' arguments from this {position}")); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Parens if vfields.is_positional() => (), CF::Parens => { @@ -3070,7 +3129,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(named_note!()); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces if vfields.is_empty() => { let msg = invalid_inst_msg!("variant", EMPTY_UPCASE, EMPTY); @@ -3080,7 +3139,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(format!("Remove '{{ }}' arguments from this {position}")); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces if vfields.is_positional() => { let msg = invalid_inst_msg!("variant", POSNL_UPCASE, POSNL); @@ -3090,7 +3149,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(posnl_note!()); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces => (), } @@ -3209,7 +3268,7 @@ fn unique_pattern_binders( diag.add_secondary_label((*loc, "and repeated here")); } diag.add_note("A pattern variable must be unique, and must appear once in each or-pattern alternative."); - context.env.add_diag(diag); + context.add_diag(diag); } enum OrPosn { @@ -3227,7 +3286,7 @@ fn unique_pattern_binders( let mut diag = diag!(NameResolution::InvalidPattern, (var.loc(), primary_msg)); diag.add_secondary_label((other_loc, secondary_msg)); diag.add_note("Both sides of an or-pattern must bind the same variables."); - 
context.env.add_diag(diag); + context.add_diag(diag); } fn report_mismatched_or_mutability( @@ -3248,7 +3307,7 @@ fn unique_pattern_binders( diag.add_note( "Both sides of an or-pattern must bind the same variables with the same mutability.", ); - context.env.add_diag(diag); + context.add_diag(diag); } type Bindings = BTreeMap>; @@ -3410,7 +3469,7 @@ fn expand_positional_ellipsis( let entries = (0..=missing).map(|_| "_".into()).collect::>(); let info = EllipsisMatchEntries::Positional(entries); let info = ide::IDEAnnotation::EllipsisMatchEntries(Box::new(info)); - context.env.add_ide_annotation(eloc, info); + context.add_ide_annotation(eloc, info); } result } @@ -3447,7 +3506,7 @@ fn expand_named_ellipsis( let entries = fields.iter().map(|field| field.value()).collect::>(); let info = EllipsisMatchEntries::Named(entries); let info = ide::IDEAnnotation::EllipsisMatchEntries(Box::new(info)); - context.env.add_ide_annotation(ellipsis_loc, info); + context.add_ide_annotation(ellipsis_loc, info); } let start_idx = args.len(); @@ -3549,7 +3608,7 @@ fn match_pattern(context: &mut Context, in_pat: Box) -> Box { if etys_opt.is_some() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::TooManyTypeArguments, (ploc, "Constants in patterns do not take type arguments") )); @@ -3659,9 +3718,7 @@ fn lvalue( ((var.loc, msg), (prev_loc, "Previously assigned here")) } }; - context - .env - .add_diag(diag!(Declarations::DuplicateItem, primary, secondary)); + context.add_diag(diag!(Declarations::DuplicateItem, primary, secondary)); } if v.is_syntax_identifier() { debug_assert!( @@ -3675,7 +3732,7 @@ fn lvalue( ); let mut diag = diag!(TypeSafety::CannotExpandMacro, (loc, msg)); diag.add_note(ASSIGN_SYNTAX_IDENTIFIER_NOTE); - context.env.add_diag(diag); + context.add_diag(diag); return None; } let nv = match case { @@ -3721,7 +3778,7 @@ fn lvalue( stype } Some(ResolvedConstructor::Variant(variant)) => { - context.env.add_diag(diag!( + context.add_diag(diag!( 
NameResolution::NamePositionMismatch, (tn.loc, format!("Invalid {}. Expected a struct", msg)), ( @@ -3791,7 +3848,7 @@ fn lvalue( "ICE compiler should not have parsed this form as a specification" )); diag.add_note(format!("Compiler parsed: {}", debug_display!(e))); - context.env.add_diag(diag); + context.add_diag(diag); NL::Ignore } }; @@ -3804,9 +3861,7 @@ fn check_mut_underscore(context: &mut Context, mut_: Option) { return; }; let msg = "Invalid 'mut' declaration. 'mut' is applied to variables and cannot be applied to the '_' pattern"; - context - .env - .add_diag(diag!(NameResolution::InvalidMut, (loc, msg))); + context.add_diag(diag!(NameResolution::InvalidMut, (loc, msg))); } fn bind_list(context: &mut Context, ls: E::LValueList) -> Option { @@ -3903,9 +3958,7 @@ fn resolve_call( match tyargs_opt.as_deref() { Some([ty]) => B::Freeze(Some(ty.clone())), Some(_tys) => { - context - .env - .add_diag(ice!((call_loc, "Builtin tyarg arity failure"))); + context.add_diag(ice!((call_loc, "Builtin tyarg arity failure"))); return N::Exp_::UnresolvedError; } None => B::Freeze(None), @@ -3926,7 +3979,7 @@ fn resolve_call( let mut diag = diag!(Uncategorized::DeprecatedWillBeRemoved, (call_loc, dep_msg),); diag.add_note(help_msg); - context.env.add_diag(diag); + context.add_diag(diag); } exp_types_opt_with_arity_check( context, @@ -4009,7 +4062,7 @@ fn resolve_call( check_is_not_macro(context, is_macro, &var.value.name); let tyargs_opt = types_opt(context, TypeAnnotation::Expression, in_tyargs_opt); if tyargs_opt.is_some() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::TooManyTypeArguments, ( subject_loc, @@ -4030,7 +4083,7 @@ fn resolve_call( ); let mut diag = diag!(TypeSafety::InvalidCallTarget, (var.loc, msg)); diag.add_note(note); - context.env.add_diag(diag); + context.add_diag(diag); N::Exp_::UnresolvedError } else if var.value.id != 0 { let msg = format!( @@ -4038,9 +4091,7 @@ fn resolve_call( Only lambda-typed syntax parameters may be 
invoked", var.value.name ); - context - .env - .add_diag(diag!(TypeSafety::InvalidCallTarget, (var.loc, msg))); + context.add_diag(diag!(TypeSafety::InvalidCallTarget, (var.loc, msg))); N::Exp_::UnresolvedError } else { N::Exp_::VarCall(sp(subject_loc, var.value), args) @@ -4061,9 +4112,7 @@ fn check_is_not_macro(context: &mut Context, is_macro: Option, name: &str) macro", name ); - context - .env - .add_diag(diag!(TypeSafety::InvalidCallTarget, (mloc, msg))); + context.add_diag(diag!(TypeSafety::InvalidCallTarget, (mloc, msg))); } } @@ -4073,9 +4122,7 @@ fn report_invalid_macro(context: &mut Context, is_macro: Option, kind: &str "Unexpected macro invocation. {} cannot be invoked as macros", kind ); - context - .env - .add_diag(diag!(NameResolution::PositionalCallMismatch, (mloc, msg))); + context.add_diag(diag!(NameResolution::PositionalCallMismatch, (mloc, msg))); } } @@ -4100,7 +4147,7 @@ fn exp_types_opt_with_arity_check( }; let msg = fmsg(); let targs_msg = format!("Expected {} type argument(s) but got {}", arity, args_len); - context.env.add_diag(diag!( + context.add_diag(diag!( diag_code, (msg_loc, msg), (tyarg_error_loc, targs_msg) @@ -4223,10 +4270,12 @@ fn remove_unused_bindings_exp( | N::Exp_::Loop(_, e) | N::Exp_::Give(_, _, e) | N::Exp_::Annotate(e, _) => remove_unused_bindings_exp(context, used, e), - N::Exp_::IfElse(econd, et, ef) => { + N::Exp_::IfElse(econd, et, ef_opt) => { remove_unused_bindings_exp(context, used, econd); remove_unused_bindings_exp(context, used, et); - remove_unused_bindings_exp(context, used, ef); + if let Some(ef) = ef_opt { + remove_unused_bindings_exp(context, used, ef); + } } N::Exp_::Match(esubject, arms) => { remove_unused_bindings_exp(context, used, esubject); @@ -4372,7 +4421,5 @@ fn report_unused_local(context: &mut Context, sp!(loc, unused_): &N::Var) { let msg = format!( "Unused {kind} '{name}'. 
Consider removing or prefixing with an underscore: '_{name}'", ); - context - .env - .add_diag(diag!(UnusedItem::Variable, (*loc, msg))); + context.add_diag(diag!(UnusedItem::Variable, (*loc, msg))); } diff --git a/external-crates/move/crates/move-compiler/src/parser/ast.rs b/external-crates/move/crates/move-compiler/src/parser/ast.rs index ee3cc68d11b61..c04017cab5d6b 100644 --- a/external-crates/move/crates/move-compiler/src/parser/ast.rs +++ b/external-crates/move/crates/move-compiler/src/parser/ast.rs @@ -609,7 +609,7 @@ pub enum Exp_ { Assign(Box, Box), // abort e - Abort(Box), + Abort(Option>), // return e Return(Option, Option>), // break @@ -2079,8 +2079,11 @@ impl AstDebug for Exp_ { rhs.ast_debug(w); } E::Abort(e) => { - w.write("abort "); - e.ast_debug(w); + w.write("abort"); + if let Some(e) = e { + w.write(" "); + e.ast_debug(w); + } } E::Return(name, e) => { w.write("return"); diff --git a/external-crates/move/crates/move-compiler/src/parser/lexer.rs b/external-crates/move/crates/move-compiler/src/parser/lexer.rs index 610d978e733fb..18271da49805d 100644 --- a/external-crates/move/crates/move-compiler/src/parser/lexer.rs +++ b/external-crates/move/crates/move-compiler/src/parser/lexer.rs @@ -482,10 +482,7 @@ impl<'input> Lexer<'input> { // At the end of parsing, checks whether there are any unmatched documentation comments, // producing errors if so. Otherwise returns a map from file position to associated // documentation. 
- pub fn check_and_get_doc_comments( - &mut self, - env: &mut CompilationEnv, - ) -> MatchedFileCommentMap { + pub fn check_and_get_doc_comments(&mut self, env: &CompilationEnv) -> MatchedFileCommentMap { let msg = "Documentation comment cannot be matched to a language item"; let diags = self .doc_comments @@ -495,7 +492,8 @@ impl<'input> Lexer<'input> { diag!(Syntax::InvalidDocComment, (loc, msg)) }) .collect(); - env.add_diags(diags); + let warning_filters = env.top_level_warning_filter_scope(); + env.add_diags(warning_filters, diags); std::mem::take(&mut self.matched_doc_comments) } diff --git a/external-crates/move/crates/move-compiler/src/parser/mod.rs b/external-crates/move/crates/move-compiler/src/parser/mod.rs index 5b2e299d813e2..724e7e8012b9f 100644 --- a/external-crates/move/crates/move-compiler/src/parser/mod.rs +++ b/external-crates/move/crates/move-compiler/src/parser/mod.rs @@ -25,7 +25,7 @@ use vfs::VfsPath; /// Parses program's targets and dependencies, both of which are read from different virtual file /// systems (vfs and deps_out_vfs, respectively). 
pub(crate) fn parse_program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, named_address_maps: NamedAddressMaps, mut targets: Vec, mut deps: Vec, @@ -113,7 +113,7 @@ fn ensure_targets_deps_dont_intersect( fn parse_file( path: &VfsPath, - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, files: &mut MappedFiles, package: Option, ) -> anyhow::Result<( @@ -126,8 +126,9 @@ fn parse_file( let file_hash = FileHash::new(&source_buffer); let fname = Symbol::from(path.as_str()); let source_str = Arc::from(source_buffer); + let warning_filters = compilation_env.top_level_warning_filter_scope(); if let Err(ds) = verify_string(file_hash, &source_str) { - compilation_env.add_diags(ds); + compilation_env.add_diags(warning_filters, ds); files.add(file_hash, fname, source_str); return Ok((vec![], MatchedFileCommentMap::new(), file_hash)); } @@ -135,7 +136,7 @@ fn parse_file( { Ok(defs_and_comments) => defs_and_comments, Err(ds) => { - compilation_env.add_diags(ds); + compilation_env.add_diags(warning_filters, ds); (vec![], MatchedFileCommentMap::new()) } }; diff --git a/external-crates/move/crates/move-compiler/src/parser/syntax.rs b/external-crates/move/crates/move-compiler/src/parser/syntax.rs index 7fdf9221d598a..a7d296fd19a01 100644 --- a/external-crates/move/crates/move-compiler/src/parser/syntax.rs +++ b/external-crates/move/crates/move-compiler/src/parser/syntax.rs @@ -22,14 +22,14 @@ use move_symbol_pool::{symbol, Symbol}; struct Context<'env, 'lexer, 'input> { current_package: Option, - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, tokens: &'lexer mut Lexer<'input>, stop_set: TokenSet, } impl<'env, 'lexer, 'input> Context<'env, 'lexer, 'input> { fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, tokens: &'lexer mut Lexer<'input>, package_name: Option, ) -> Self { @@ -71,8 +71,9 @@ impl<'env, 'lexer, 'input> Context<'env, 'lexer, 'input> { } } - fn add_diag(&mut self, diag: Diagnostic) { 
- self.env.add_diag(diag); + fn add_diag(&self, diag: Diagnostic) { + let warning_filters = self.env.top_level_warning_filter_scope(); + self.env.add_diag(warning_filters, diag); } } @@ -758,15 +759,13 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( "Macro invocation are disallowed here. Expected {}", item_description() ); - context - .env - .add_diag(diag!(Syntax::InvalidName, (*loc, msg))); + context.add_diag(diag!(Syntax::InvalidName, (*loc, msg))); is_macro = None; } } if let Some(sp!(ty_loc, _)) = tys { if !tyargs_allowed { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, ( ty_loc, @@ -845,7 +844,7 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( parse_macro_opt_and_tyargs_opt(context, tyargs_whitespace_allowed, name.loc); if let Some(loc) = &is_macro { if !macros_allowed { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, ( *loc, @@ -857,7 +856,7 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( } if let Some(sp!(ty_loc, _)) = tys { if !tyargs_allowed { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, ( ty_loc, @@ -870,7 +869,7 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( path.push_path_entry(name, tys, is_macro) .into_iter() - .for_each(|diag| context.env.add_diag(diag)); + .for_each(|diag| context.add_diag(diag)); } Ok(NameAccessChain_::Path(path)) } @@ -1912,8 +1911,13 @@ fn parse_control_exp(context: &mut Context) -> Result<(Exp, bool), Box { context.tokens.advance()?; - let (e, ends_in_block) = parse_exp_or_sequence(context)?; - (Exp_::Abort(Box::new(e)), ends_in_block) + let (e, ends_in_block) = if !at_start_of_exp(context) { + (None, false) + } else { + let (e, ends_in_block) = parse_exp_or_sequence(context)?; + (Some(Box::new(e)), ends_in_block) + }; + (Exp_::Abort(e), ends_in_block) } Tok::Break => { context.tokens.advance()?; @@ -2124,7 +2128,7 @@ fn parse_match_pattern(context: &mut Context) -> Result { if context.at_stop_set() { - 
context - .env - .add_diag(*unexpected_token_error(context.tokens, "a type name")); + context.add_diag(*unexpected_token_error(context.tokens, "a type name")); Type_::UnresolvedError } else { let tn = if whitespace_sensitive_ty_args { @@ -3502,7 +3504,7 @@ fn check_enum_visibility(visibility: Option, context: &mut Context) let note = "Visibility annotations are required on enum declarations."; let mut err = diag!(Syntax::InvalidModifier, (loc, msg)); err.add_note(note); - context.env.add_diag(err); + context.add_diag(err); } } } @@ -3952,9 +3954,7 @@ fn parse_address_block( addr_name.loc.start() as usize, context.tokens.current_token_loc().end() as usize, ); - context - .env - .add_diag(diag!(Migration::AddressRemove, (loc, "address decl"))); + context.add_diag(diag!(Migration::AddressRemove, (loc, "address decl"))); } context.tokens.advance()?; let mut modules = vec![]; @@ -3969,7 +3969,7 @@ fn parse_address_block( let (module, next_mod_attributes) = parse_module(attributes, context)?; if in_migration_mode { - context.env.add_diag(diag!( + context.add_diag(diag!( Migration::AddressAdd, ( module.name.loc(), @@ -3989,7 +3989,7 @@ fn parse_address_block( } for module in &modules { if matches!(module.definition_mode, ModuleDefinitionMode::Semicolon) { - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidModule, ( module.name.loc(), @@ -4001,9 +4001,7 @@ fn parse_address_block( if in_migration_mode { let loc = context.tokens.current_token_loc(); - context - .env - .add_diag(diag!(Migration::AddressRemove, (loc, "close lbrace"))); + context.add_diag(diag!(Migration::AddressRemove, (loc, "close lbrace"))); } consume_token(context.tokens, context.tokens.peek())?; @@ -4023,7 +4021,7 @@ fn parse_address_block( format!("Replace with '{}::{}'", addr, module.name), )); } - context.env.add_diag(diag); + context.add_diag(diag); } Ok(AddressDefinition { @@ -4054,7 +4052,7 @@ fn parse_friend_decl( || "a friend declaration", )?; if 
friend.value.is_macro().is_some() || friend.value.has_tyargs() { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, (friend.loc, "Invalid 'friend' name") )) @@ -4669,7 +4667,7 @@ fn parse_file_def( "Either move each 'module' label and definitions into its own file or \ define each as 'module { contents }'", ); - context.env.add_diag(diag); + context.add_diag(diag); } } defs.push(Definition::Module(module)); @@ -4692,7 +4690,7 @@ fn parse_file_def( /// result as either a pair of FileDefinition and doc comments or some Diagnostics. The `file` name /// is used to identify source locations in error messages. pub fn parse_file_string( - env: &mut CompilationEnv, + env: &CompilationEnv, file_hash: FileHash, input: &str, package: Option, diff --git a/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs b/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs index 46c94e75991dd..4e23c1a5003b8 100644 --- a/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs +++ b/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs @@ -15,13 +15,13 @@ use crate::{ }; struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, is_source_def: bool, current_package: Option, } impl<'env> Context<'env> { - fn new(env: &'env mut CompilationEnv) -> Self { + fn new(env: &'env CompilationEnv) -> Self { Self { env, is_source_def: false, @@ -56,6 +56,7 @@ impl FilterContext for Context<'_> { // expansion // Ideally we would just have a warning filter scope here // (but again, need expansion for that) + let top_warning_filter_scope = self.env.top_level_warning_filter_scope(); let silence_warning = !self.is_source_def || self.env.package_config(self.current_package).is_dependency; if !silence_warning { @@ -64,8 +65,10 @@ impl FilterContext for Context<'_> { "The '{}' attribute has been deprecated along with specification 
blocks", VerificationAttribute::VERIFY_ONLY ); - self.env - .add_diag(diag!(Uncategorized::DeprecatedWillBeRemoved, (*loc, msg))); + self.env.add_diag( + top_warning_filter_scope, + diag!(Uncategorized::DeprecatedWillBeRemoved, (*loc, msg)), + ); } } should_remove @@ -79,7 +82,7 @@ impl FilterContext for Context<'_> { // This filters out all AST elements annotated with verify-only annotated from `prog` // if the `verify` flag in `compilation_env` is not set. If the `verify` flag is set, // no filtering is performed. -pub fn program(compilation_env: &mut CompilationEnv, prog: P::Program) -> P::Program { +pub fn program(compilation_env: &CompilationEnv, prog: P::Program) -> P::Program { let mut context = Context::new(compilation_env); filter_program(&mut context, prog) } diff --git a/external-crates/move/crates/move-compiler/src/shared/ide.rs b/external-crates/move/crates/move-compiler/src/shared/ide.rs index 6816278d34ed8..895f57da06985 100644 --- a/external-crates/move/crates/move-compiler/src/shared/ide.rs +++ b/external-crates/move/crates/move-compiler/src/shared/ide.rs @@ -16,7 +16,7 @@ use crate::{ unit_test::filter_test_members::UNIT_TEST_POISON_FUN_NAME, }; -use move_command_line_common::address::NumericalAddress; +use move_core_types::parsing::address::NumericalAddress; use move_ir_types::location::Loc; use move_symbol_pool::Symbol; diff --git a/external-crates/move/crates/move-compiler/src/shared/matching.rs b/external-crates/move/crates/move-compiler/src/shared/matching.rs index 26ca069a61b9b..9beb38564ab7b 100644 --- a/external-crates/move/crates/move-compiler/src/shared/matching.rs +++ b/external-crates/move/crates/move-compiler/src/shared/matching.rs @@ -67,7 +67,7 @@ pub struct ArmResult { /// A shared match context trait for use with counterexample generation in Typing and match /// compilation in HLIR lowering. 
pub trait MatchContext { - fn env(&mut self) -> &mut CompilationEnv; + fn env(&mut self) -> &CompilationEnv; fn env_ref(&self) -> &CompilationEnv; fn new_match_var(&mut self, name: String, loc: Loc) -> N::Var; fn program_info(&self) -> &ProgramInfo; @@ -481,7 +481,7 @@ impl PatternMatrix { // Make a match pattern that only holds guard binders let guard_binders = guard_binders.union_with(&const_binders, |k, _, x| { let msg = "Match compilation made a binder for this during const compilation"; - context.env().add_diag(ice!((k.loc, msg))); + context.env().add_error_diag(ice!((k.loc, msg))); *x }); let pat = apply_pattern_subst(pat, &guard_binders); diff --git a/external-crates/move/crates/move-compiler/src/shared/mod.rs b/external-crates/move/crates/move-compiler/src/shared/mod.rs index a242bcb7565ef..0c0147b8d08de 100644 --- a/external-crates/move/crates/move-compiler/src/shared/mod.rs +++ b/external-crates/move/crates/move-compiler/src/shared/mod.rs @@ -9,8 +9,12 @@ use crate::{ }, command_line as cli, diagnostics::{ - codes::{Category, Declarations, DiagnosticsID, Severity, WarningFilter}, - Diagnostic, Diagnostics, DiagnosticsFormat, WarningFilters, + codes::{DiagnosticsID, Severity}, + warning_filters::{ + FilterName, FilterPrefix, WarningFilter, WarningFilters, WarningFiltersScope, + FILTER_ALL, + }, + Diagnostic, Diagnostics, DiagnosticsFormat, }, editions::{check_feature_or_error, feature_edition_error_msg, Edition, FeatureGate, Flavor}, expansion::ast as E, @@ -33,14 +37,12 @@ use move_ir_types::location::*; use move_symbol_pool::Symbol; use petgraph::{algo::astar as petgraph_astar, graphmap::DiGraphMap}; use std::{ - cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, hash::Hash, - rc::Rc, sync::{ atomic::{AtomicUsize, Ordering as AtomicOrdering}, - Arc, + Arc, Mutex, OnceLock, RwLock, }, }; use vfs::{VfsError, VfsPath}; @@ -62,7 +64,7 @@ pub use ast_debug::AstDebug; // Numbers 
//************************************************************************************************** -pub use move_command_line_common::parser::{ +pub use move_core_types::parsing::parser::{ parse_address_number as parse_address, parse_u128, parse_u16, parse_u256, parse_u32, parse_u64, parse_u8, NumberFormat, }; @@ -71,7 +73,7 @@ pub use move_command_line_common::parser::{ // Address //************************************************************************************************** -pub use move_command_line_common::address::NumericalAddress; +pub use move_core_types::parsing::address::NumericalAddress; pub fn parse_named_address(s: &str) -> anyhow::Result<(String, NumericalAddress)> { let before_after = s.split('=').collect::>(); @@ -169,28 +171,6 @@ pub fn shortest_cycle<'a, T: Ord + Hash>( // Compilation Env //************************************************************************************************** -pub const FILTER_ALL: &str = "all"; -pub const FILTER_UNUSED: &str = "unused"; -pub const FILTER_MISSING_PHANTOM: &str = "missing_phantom"; -pub const FILTER_UNUSED_USE: &str = "unused_use"; -pub const FILTER_UNUSED_VARIABLE: &str = "unused_variable"; -pub const FILTER_UNUSED_ASSIGNMENT: &str = "unused_assignment"; -pub const FILTER_UNUSED_TRAILING_SEMI: &str = "unused_trailing_semi"; -pub const FILTER_UNUSED_ATTRIBUTE: &str = "unused_attribute"; -pub const FILTER_UNUSED_TYPE_PARAMETER: &str = "unused_type_parameter"; -pub const FILTER_UNUSED_FUNCTION: &str = "unused_function"; -pub const FILTER_UNUSED_STRUCT_FIELD: &str = "unused_field"; -pub const FILTER_UNUSED_CONST: &str = "unused_const"; -pub const FILTER_DEAD_CODE: &str = "dead_code"; -pub const FILTER_UNUSED_LET_MUT: &str = "unused_let_mut"; -pub const FILTER_UNUSED_MUT_REF: &str = "unused_mut_ref"; -pub const FILTER_UNUSED_MUT_PARAM: &str = "unused_mut_parameter"; -pub const FILTER_IMPLICIT_CONST_COPY: &str = "implicit_const_copy"; -pub const FILTER_DUPLICATE_ALIAS: &str = "duplicate_alias"; -pub 
const FILTER_DEPRECATED: &str = "deprecated_usage"; -pub const FILTER_IDE_PATH_AUTOCOMPLETE: &str = "ide_path_autocomplete"; -pub const FILTER_IDE_DOT_AUTOCOMPLETE: &str = "ide_dot_autocomplete"; - pub type NamedAddressMap = BTreeMap; #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -232,17 +212,11 @@ pub struct PackagePaths = Symbol, NamedAddress: Into pub named_address_map: BTreeMap, } -/// None for the default 'allow'. -/// Some(prefix) for a custom set of warnings, e.g. 'allow(lint(_))'. -pub type FilterPrefix = Option; -pub type FilterName = Symbol; - pub struct CompilationEnv { flags: Flags, - // filters warnings when added. - warning_filter: Vec, - diags: Diagnostics, - visitors: Rc, + top_level_warning_filter_scope: &'static WarningFiltersScope, + diags: RwLock, + visitors: Visitors, package_configs: BTreeMap, /// Config for any package not found in `package_configs`, or for inputs without a package. default_config: PackageConfig, @@ -250,27 +224,12 @@ pub struct CompilationEnv { known_filters: BTreeMap>>, /// Maps a diagnostics ID to a known filter name. known_filter_names: BTreeMap, - prim_definers: - BTreeMap, + prim_definers: OnceLock>, // TODO(tzakian): Remove the global counter and use this counter instead // pub counter: u64, mapped_files: MappedFiles, save_hooks: Vec, - pub ide_information: IDEInfo, -} - -macro_rules! 
known_code_filter { - ($name:ident, $category:ident::$code:ident) => { - ( - Symbol::from($name), - BTreeSet::from([WarningFilter::Code { - prefix: None, - category: Category::$category as u8, - code: $category::$code as u8, - name: Some($name), - }]), - ) - }; + ide_information: RwLock, } impl CompilationEnv { @@ -278,66 +237,18 @@ impl CompilationEnv { flags: Flags, mut visitors: Vec, save_hooks: Vec, + warning_filters: Option, package_configs: BTreeMap, default_config: Option, ) -> Self { - use crate::diagnostics::codes::{TypeSafety, UnusedItem, IDE}; visitors.extend([ sui_mode::id_leak::IDLeakVerifier.visitor(), sui_mode::typing::SuiTypeChecks.visitor(), ]); - let mut known_filters_: BTreeMap> = BTreeMap::from([ - ( - FILTER_ALL.into(), - BTreeSet::from([WarningFilter::All(None)]), - ), - ( - FILTER_UNUSED.into(), - BTreeSet::from([WarningFilter::Category { - prefix: None, - category: Category::UnusedItem as u8, - name: Some(FILTER_UNUSED), - }]), - ), - known_code_filter!(FILTER_MISSING_PHANTOM, Declarations::InvalidNonPhantomUse), - known_code_filter!(FILTER_UNUSED_USE, UnusedItem::Alias), - known_code_filter!(FILTER_UNUSED_VARIABLE, UnusedItem::Variable), - known_code_filter!(FILTER_UNUSED_ASSIGNMENT, UnusedItem::Assignment), - known_code_filter!(FILTER_UNUSED_TRAILING_SEMI, UnusedItem::TrailingSemi), - known_code_filter!(FILTER_UNUSED_ATTRIBUTE, UnusedItem::Attribute), - known_code_filter!(FILTER_UNUSED_FUNCTION, UnusedItem::Function), - known_code_filter!(FILTER_UNUSED_STRUCT_FIELD, UnusedItem::StructField), - ( - FILTER_UNUSED_TYPE_PARAMETER.into(), - BTreeSet::from([ - WarningFilter::Code { - prefix: None, - category: Category::UnusedItem as u8, - code: UnusedItem::StructTypeParam as u8, - name: Some(FILTER_UNUSED_TYPE_PARAMETER), - }, - WarningFilter::Code { - prefix: None, - category: Category::UnusedItem as u8, - code: UnusedItem::FunTypeParam as u8, - name: Some(FILTER_UNUSED_TYPE_PARAMETER), - }, - ]), - ), - known_code_filter!(FILTER_UNUSED_CONST, 
UnusedItem::Constant), - known_code_filter!(FILTER_DEAD_CODE, UnusedItem::DeadCode), - known_code_filter!(FILTER_UNUSED_LET_MUT, UnusedItem::MutModifier), - known_code_filter!(FILTER_UNUSED_MUT_REF, UnusedItem::MutReference), - known_code_filter!(FILTER_UNUSED_MUT_PARAM, UnusedItem::MutParam), - known_code_filter!(FILTER_IMPLICIT_CONST_COPY, TypeSafety::ImplicitConstantCopy), - known_code_filter!(FILTER_DUPLICATE_ALIAS, Declarations::DuplicateAlias), - known_code_filter!(FILTER_DEPRECATED, TypeSafety::DeprecatedUsage), - ]); + let mut known_filters_: BTreeMap> = + WarningFilter::compiler_known_filters(); if flags.ide_mode() { - known_filters_.extend([ - known_code_filter!(FILTER_IDE_PATH_AUTOCOMPLETE, IDE::PathAutocomplete), - known_code_filter!(FILTER_IDE_DOT_AUTOCOMPLETE, IDE::DotAutocomplete), - ]); + known_filters_.extend(WarningFilter::ide_known_filters()); } let known_filters: BTreeMap>> = BTreeMap::from([(None, known_filters_)]); @@ -363,30 +274,32 @@ impl CompilationEnv { }) .collect(); - let warning_filter = if flags.silence_warnings() { + let top_level_warning_filter = if flags.silence_warnings() { let mut f = WarningFilters::new_for_source(); f.add(WarningFilter::All(None)); - vec![f] + Some(f) } else { - vec![] + warning_filters }; + let top_level_warning_filter_scope = + Box::leak(Box::new(WarningFiltersScope::new(top_level_warning_filter))); let mut diags = Diagnostics::new(); if flags.json_errors() { diags.set_format(DiagnosticsFormat::JSON); } Self { flags, - warning_filter, - diags, - visitors: Rc::new(Visitors::new(visitors)), + top_level_warning_filter_scope, + diags: RwLock::new(diags), + visitors: Visitors::new(visitors), package_configs, default_config: default_config.unwrap_or_default(), known_filters, known_filter_names, - prim_definers: BTreeMap::new(), + prim_definers: OnceLock::new(), mapped_files: MappedFiles::empty(), save_hooks, - ide_information: IDEInfo::new(), + ide_information: RwLock::new(IDEInfo::new()), } } @@ -403,10 +316,16 @@ 
impl CompilationEnv { &self.mapped_files } - pub fn add_diag(&mut self, mut diag: Diagnostic) { + pub fn top_level_warning_filter_scope(&self) -> &'static WarningFiltersScope { + self.top_level_warning_filter_scope + } + + pub fn add_diag(&self, warning_filters: &WarningFiltersScope, mut diag: Diagnostic) { if diag.info().severity() <= Severity::NonblockingError && self .diags + .read() + .unwrap() .any_syntax_error_with_primary_loc(diag.primary_loc()) { // do not report multiple diags for the same location (unless they are blocking) to @@ -417,7 +336,7 @@ impl CompilationEnv { return; } - if !self.is_filtered(&diag) { + if !warning_filters.is_filtered(&diag) { // add help to suppress warning, if applicable // TODO do we want a centralized place for tips like this? if diag.info().severity() == Severity::Warning { @@ -434,21 +353,34 @@ impl CompilationEnv { diag = diag.set_severity(Severity::NonblockingError) } } - self.diags.add(diag) - } else if !self.filter_for_dependency() { + self.diags.write().unwrap().add(diag) + } else if !warning_filters.is_filtered_for_dependency() { // unwrap above is safe as the filter has been used (thus it must exist) - self.diags.add_source_filtered(diag) + self.diags.write().unwrap().add_source_filtered(diag) } } - pub fn add_diags(&mut self, diags: Diagnostics) { + pub fn add_diags(&self, warning_filters: &WarningFiltersScope, diags: Diagnostics) { for diag in diags.into_vec() { - self.add_diag(diag) + self.add_diag(warning_filters, diag) + } + } + + /// Aborts if the diagnostic is a warning + pub fn add_error_diag(&self, diag: Diagnostic) { + assert!(diag.info().severity() > Severity::Warning); + self.add_diag(WarningFiltersScope::EMPTY, diag) + } + + /// Aborts if any diagnostic is a warning + pub fn add_error_diags(&self, diags: Diagnostics) { + for diag in diags.into_vec() { + self.add_error_diag(diag) } } pub fn has_warnings_or_errors(&self) -> bool { - !self.diags.is_empty() + !self.diags.read().unwrap().is_empty() } pub fn 
has_errors(&self) -> bool { @@ -457,63 +389,45 @@ impl CompilationEnv { } pub fn count_diags(&self) -> usize { - self.diags.len() + self.diags.read().unwrap().len() } pub fn count_diags_at_or_above_severity(&self, threshold: Severity) -> usize { - self.diags.count_diags_at_or_above_severity(threshold) + self.diags + .read() + .unwrap() + .count_diags_at_or_above_severity(threshold) } pub fn has_diags_at_or_above_severity(&self, threshold: Severity) -> bool { - self.diags.max_severity_at_or_above_severity(threshold) + self.diags + .read() + .unwrap() + .max_severity_at_or_above_severity(threshold) } - pub fn check_diags_at_or_above_severity( - &mut self, - threshold: Severity, - ) -> Result<(), Diagnostics> { + pub fn check_diags_at_or_above_severity(&self, threshold: Severity) -> Result<(), Diagnostics> { if self.has_diags_at_or_above_severity(threshold) { - Err(std::mem::take(&mut self.diags)) + let diagnostics: &mut Diagnostics = &mut self.diags.write().unwrap(); + Err(std::mem::take(diagnostics)) } else { Ok(()) } } /// Should only be called after compilation is finished - pub fn take_final_diags(&mut self) -> Diagnostics { - std::mem::take(&mut self.diags) + pub fn take_final_diags(&self) -> Diagnostics { + let diagnostics: &mut Diagnostics = &mut self.diags.write().unwrap(); + std::mem::take(diagnostics) } /// Should only be called after compilation is finished - pub fn take_final_warning_diags(&mut self) -> Diagnostics { + pub fn take_final_warning_diags(&self) -> Diagnostics { let final_diags = self.take_final_diags(); debug_assert!(final_diags.max_severity_at_or_under_severity(Severity::Warning)); final_diags } - /// Add a new filter for warnings - pub fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.warning_filter.push(filter) - } - - pub fn pop_warning_filter_scope(&mut self) { - self.warning_filter.pop().unwrap(); - } - - fn is_filtered(&self, diag: &Diagnostic) -> bool { - self.warning_filter - .iter() - .rev() - .any(|filter| 
filter.is_filtered(diag)) - } - - fn filter_for_dependency(&self) -> bool { - self.warning_filter - .iter() - .rev() - .any(|filter| filter.for_dependency()) - } - pub fn known_filter_names(&self) -> impl IntoIterator + '_ { self.known_filters.keys().copied() } @@ -573,24 +487,19 @@ impl CompilationEnv { &self.flags } - pub fn visitors(&self) -> Rc { - self.visitors.clone() + pub fn visitors(&self) -> &Visitors { + &self.visitors } // Logs an error if the feature isn't supported. Returns `false` if the feature is not // supported, and `true` otherwise. - pub fn check_feature( - &mut self, - package: Option, - feature: FeatureGate, - loc: Loc, - ) -> bool { + pub fn check_feature(&self, package: Option, feature: FeatureGate, loc: Loc) -> bool { check_feature_or_error(self, self.package_config(package).edition, feature, loc) } // Returns an error string if if the feature isn't supported, or None otherwise. pub fn feature_edition_error_msg( - &mut self, + &self, feature: FeatureGate, package: Option, ) -> Option { @@ -619,15 +528,12 @@ impl CompilationEnv { ) } - pub fn set_primitive_type_definers( - &mut self, - m: BTreeMap, - ) { - self.prim_definers = m + pub fn set_primitive_type_definers(&self, m: BTreeMap) { + self.prim_definers.set(m).unwrap() } pub fn primitive_definer(&self, t: N::BuiltinTypeName_) -> Option<&E::ModuleIdent> { - self.prim_definers.get(&t) + self.prim_definers.get().and_then(|m| m.get(&t)) } pub fn save_parser_ast(&self, ast: &P::Program) { @@ -678,22 +584,34 @@ impl CompilationEnv { self.flags.ide_mode() } - pub fn extend_ide_info(&mut self, info: IDEInfo) { + pub fn extend_ide_info(&self, warning_filters: &WarningFiltersScope, info: IDEInfo) { if self.flags().ide_test_mode() { for entry in info.annotations.iter() { let diag = entry.clone().into(); - self.add_diag(diag); + self.add_diag(warning_filters, diag); } } - self.ide_information.extend(info); + self.ide_information.write().unwrap().extend(info); } - pub fn add_ide_annotation(&mut 
self, loc: Loc, info: IDEAnnotation) { + pub fn add_ide_annotation( + &self, + warning_filters: &WarningFiltersScope, + loc: Loc, + info: IDEAnnotation, + ) { if self.flags().ide_test_mode() { let diag = (loc, info.clone()).into(); - self.add_diag(diag); + self.add_diag(warning_filters, diag); } - self.ide_information.add_ide_annotation(loc, info); + self.ide_information + .write() + .unwrap() + .add_ide_annotation(loc, info); + } + + pub fn ide_information(&self) -> std::sync::RwLockReadGuard<'_, IDEInfo> { + self.ide_information.read().unwrap() } } @@ -974,6 +892,7 @@ fn check() {} fn check_all() { check::(); check::<&Visitors>(); + check::<&CompilationEnv>(); } //************************************************************************************************** @@ -981,7 +900,7 @@ fn check_all() { //************************************************************************************************** #[derive(Clone)] -pub struct SaveHook(Rc>); +pub struct SaveHook(Arc>); #[derive(Clone)] pub(crate) struct SavedInfo { @@ -1009,7 +928,7 @@ pub enum SaveFlag { impl SaveHook { pub fn new(flags: impl IntoIterator) -> Self { let flags = flags.into_iter().collect(); - Self(Rc::new(RefCell::new(SavedInfo { + Self(Arc::new(Mutex::new(SavedInfo { flags, parser: None, expansion: None, @@ -1022,56 +941,56 @@ impl SaveHook { } pub(crate) fn save_parser_ast(&self, ast: &P::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.parser.is_none() && r.flags.contains(&SaveFlag::Parser) { r.parser = Some(ast.clone()) } } pub(crate) fn save_expansion_ast(&self, ast: &E::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.expansion.is_none() && r.flags.contains(&SaveFlag::Expansion) { r.expansion = Some(ast.clone()) } } pub(crate) fn save_naming_ast(&self, ast: &N::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.naming.is_none() && 
r.flags.contains(&SaveFlag::Naming) { r.naming = Some(ast.clone()) } } pub(crate) fn save_typing_ast(&self, ast: &T::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.typing.is_none() && r.flags.contains(&SaveFlag::Typing) { r.typing = Some(ast.clone()) } } pub(crate) fn save_typing_info(&self, info: &Arc) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.typing_info.is_none() && r.flags.contains(&SaveFlag::TypingInfo) { r.typing_info = Some(info.clone()) } } pub(crate) fn save_hlir_ast(&self, ast: &H::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.hlir.is_none() && r.flags.contains(&SaveFlag::HLIR) { r.hlir = Some(ast.clone()) } } pub(crate) fn save_cfgir_ast(&self, ast: &G::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.cfgir.is_none() && r.flags.contains(&SaveFlag::CFGIR) { r.cfgir = Some(ast.clone()) } } pub fn take_parser_ast(&self) -> P::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Parser), "Parser AST not saved. Please set the flag when creating the SaveHook" @@ -1080,7 +999,7 @@ impl SaveHook { } pub fn take_expansion_ast(&self) -> E::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Expansion), "Expansion AST not saved. Please set the flag when creating the SaveHook" @@ -1089,7 +1008,7 @@ impl SaveHook { } pub fn take_naming_ast(&self) -> N::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Naming), "Naming AST not saved. 
Please set the flag when creating the SaveHook" @@ -1098,7 +1017,7 @@ impl SaveHook { } pub fn take_typing_ast(&self) -> T::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Typing), "Typing AST not saved. Please set the flag when creating the SaveHook" @@ -1107,7 +1026,7 @@ impl SaveHook { } pub fn take_typing_info(&self) -> Arc { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::TypingInfo), "Typing info not saved. Please set the flag when creating the SaveHook" @@ -1116,7 +1035,7 @@ impl SaveHook { } pub fn take_hlir_ast(&self) -> H::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::HLIR), "HLIR AST not saved. Please set the flag when creating the SaveHook" @@ -1125,7 +1044,7 @@ impl SaveHook { } pub fn take_cfgir_ast(&self) -> G::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::CFGIR), "CFGIR AST not saved. 
Please set the flag when creating the SaveHook" diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs b/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs index 2a67686ce4ba2..03d7d42a388c6 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs @@ -20,7 +20,7 @@ use crate::{ expansion::ast::{ModuleIdent, TargetKind}, hlir::ast::{self as H, Exp, Label, ModuleCall, SingleType, Type, Type_, Var}, parser::ast::Ability_, - shared::{program_info::TypingProgramInfo, CompilationEnv, Identifier}, + shared::{program_info::TypingProgramInfo, Identifier}, sui_mode::{OBJECT_NEW, TEST_SCENARIO_MODULE_NAME, TS_NEW_OBJECT}, }; use std::collections::BTreeMap; @@ -94,7 +94,6 @@ impl SimpleAbsIntConstructor for IDLeakVerifier { type AI<'a> = IDLeakVerifierAI<'a>; fn new<'a>( - env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, _init_state: &mut as SimpleAbsInt>::State, @@ -102,7 +101,7 @@ impl SimpleAbsIntConstructor for IDLeakVerifier { let module = &context.module; let minfo = context.info.module(module); let package_name = minfo.package; - let config = env.package_config(package_name); + let config = context.env.package_config(package_name); if config.flavor != Flavor::Sui { // Skip if not sui return None; diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/info.rs b/external-crates/move/crates/move-compiler/src/sui_mode/info.rs index 2bfaedeafef51..f4068b4c67048 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/info.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/info.rs @@ -10,6 +10,7 @@ use std::{ }; use crate::{ + diagnostics::warning_filters::WarningFilters, expansion::ast::{Fields, ModuleIdent}, naming::ast as N, parser::ast::{Ability_, DatatypeName, Field}, @@ -271,7 +272,7 @@ fn add_private_transfers( transferred: &'a mut BTreeMap<(ModuleIdent, DatatypeName), 
TransferKind>, } impl<'a> TypingVisitorContext for TransferVisitor<'a> { - fn add_warning_filter_scope(&mut self, _: crate::diagnostics::WarningFilters) { + fn push_warning_filter_scope(&mut self, _: WarningFilters) { unreachable!("no warning filters in function bodies") } @@ -303,6 +304,6 @@ fn add_private_transfers( let mut visitor = TransferVisitor { transferred }; match &fdef.body.value { T::FunctionBody_::Native | &T::FunctionBody_::Macro => (), - T::FunctionBody_::Defined(seq) => visitor.visit_seq(seq), + T::FunctionBody_::Defined(seq) => visitor.visit_seq(fdef.body.loc, seq), } } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs index 2af05f7f6e247..1553e71292c2c 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs @@ -7,12 +7,11 @@ use crate::{ diag, diagnostics::codes::{custom, DiagnosticInfo, Severity}, + expansion::ast::ModuleIdent, naming::ast as N, - shared::CompilationEnv, - typing::{ast as T, visitor::TypingVisitor}, + parser::ast::DatatypeName, + typing::{ast as T, visitor::simple_visitor}, }; -use move_ir_types::location::Loc; -use move_symbol_pool::Symbol; use super::{ LinterDiagnosticCategory, LinterDiagnosticCode, COIN_MOD_NAME, COIN_STRUCT_NAME, @@ -27,40 +26,35 @@ const COIN_FIELD_DIAG: DiagnosticInfo = custom( "sub-optimal 'sui::coin::Coin' field type", ); -pub struct CoinFieldVisitor; - -impl TypingVisitor for CoinFieldVisitor { - fn visit(&self, env: &mut CompilationEnv, program: &T::Program) { - for (_, _, mdef) in program.modules.iter() { - if mdef.attributes.is_test_or_test_only() { - continue; - } - env.add_warning_filter_scope(mdef.warning_filter.clone()); - mdef.structs - .iter() - .filter(|(_, _, sdef)| !sdef.attributes.is_test_or_test_only()) - .for_each(|(sloc, sname, sdef)| struct_def(env, 
*sname, sdef, sloc)); - env.pop_warning_filter_scope(); +simple_visitor!( + CoinFieldVisitor, + fn visit_module_custom(&mut self, _ident: ModuleIdent, mdef: &T::ModuleDefinition) -> bool { + // skip if test only + mdef.attributes.is_test_or_test_only() + }, + // TODO enums + fn visit_struct_custom( + &mut self, + _module: ModuleIdent, + _sname: DatatypeName, + sdef: &N::StructDefinition, + ) -> bool { + if sdef.attributes.is_test_or_test_only() { + return false; } - } -} -fn struct_def(env: &mut CompilationEnv, sname: Symbol, sdef: &N::StructDefinition, sloc: Loc) { - env.add_warning_filter_scope(sdef.warning_filter.clone()); - - if let N::StructFields::Defined(_, sfields) = &sdef.fields { - for (floc, fname, (_, ftype)) in sfields.iter() { - if is_field_coin_type(ftype) { - let msg = format!("The field '{fname}' of '{sname}' has type 'sui::coin::Coin'"); - let uid_msg = "Storing 'sui::balance::Balance' in this field will typically be more space-efficient"; - let d = diag!(COIN_FIELD_DIAG, (sloc, msg), (floc, uid_msg)); - env.add_diag(d); + if let N::StructFields::Defined(_, sfields) = &sdef.fields { + for (_floc, _fname, (_, ftype)) in sfields { + if is_field_coin_type(ftype) { + let msg = "Sub-optimal 'sui::coin::Coin' field type. 
Using \ + 'sui::balance::Balance' instead will be more space efficient"; + self.add_diag(diag!(COIN_FIELD_DIAG, (ftype.loc, msg))); + } } } + false } - - env.pop_warning_filter_scope(); -} +); fn is_field_coin_type(sp!(_, t): &N::Type) -> bool { use N::Type_ as T; diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs index ab61b891ca6b3..083cdb8b5831a 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs @@ -7,17 +7,11 @@ use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, naming::ast as N, parser::ast as P, - shared::{CompilationEnv, Identifier}, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + shared::Identifier, + typing::{ast as T, visitor::simple_visitor}, }; use super::{ @@ -55,20 +49,8 @@ const COLLECTION_TYPES: &[(&str, &str, &str)] = &[ (SUI_PKG_NAME, VEC_SET_MOD_NAME, VEC_SET_STRUCT_NAME), ]; -pub struct CollectionEqualityVisitor; -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for CollectionEqualityVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { +simple_visitor!( + CollectionEqualityVisitor, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { use T::UnannotatedExp_ as E; if let E::BinopExp(_, op, t, _) = &exp.exp.value { @@ -98,18 +80,10 @@ impl TypingVisitorContext for Context<'_> { format!("Equality for collections of type '{caddr}::{cmodule}::{cname}' IS NOT a structural check based on content"); let mut d = 
diag!(COLLECTIONS_EQUALITY_DIAG, (op.loc, msg),); d.add_note(note_msg); - self.env.add_diag(d); + self.add_diag(d); return true; } } false } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs index 085135f676b0d..f0e542d5a50af 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs @@ -29,7 +29,7 @@ use crate::{ BaseType_, Label, ModuleCall, SingleType, SingleType_, Type, TypeName_, Type_, Var, }, parser::ast::Ability_, - shared::{CompilationEnv, Identifier}, + shared::Identifier, }; use std::collections::BTreeMap; @@ -87,7 +87,6 @@ impl SimpleAbsIntConstructor for CustomStateChangeVerifier { type AI<'a> = CustomStateChangeVerifierAI; fn new<'a>( - _env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, init_state: &mut State, diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs index 717355f78c823..705a09750291d 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs @@ -11,7 +11,8 @@ use crate::{ diag, diagnostics::{ codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, }, expansion::ast as E, naming::ast as N, @@ -74,7 +75,8 @@ type WrappingFields = pub struct FreezeWrappedVisitor; pub struct Context<'a> { - env: &'a mut CompilationEnv, + 
env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, program_info: Arc, /// Memoizes information about struct fields wrapping other objects as they are discovered wrapping_fields: WrappingFields, @@ -83,15 +85,28 @@ pub struct Context<'a> { impl TypingVisitorConstructor for FreezeWrappedVisitor { type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, program_info: program.info.clone(), wrapping_fields: WrappingFields::new(), } } } +impl Context<'_> { + fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } +} + impl<'a> TypingVisitorContext for Context<'a> { fn visit_module_custom(&mut self, _ident: E::ModuleIdent, mdef: &T::ModuleDefinition) -> bool { // skips if true @@ -128,7 +143,7 @@ impl<'a> TypingVisitorContext for Context<'a> { }; if let Some(wrapping_field_info) = self.find_wrapping_field_loc(mident, sname) { add_diag( - self.env, + self, fun.arguments.exp.loc, sname.value(), wrapping_field_info, @@ -140,12 +155,12 @@ impl<'a> TypingVisitorContext for Context<'a> { false } - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } } @@ -233,7 +248,7 @@ impl<'a> Context<'a> { } fn add_diag( - env: &mut CompilationEnv, + context: &mut Context, freeze_arg_loc: Loc, frozen_struct_name: Symbol, info: WrappingFieldInfo, @@ -261,5 
+276,5 @@ fn add_diag( if !direct { d.add_secondary_label((wrapped_tloc, "Indirectly wrapped object is of this type")); } - env.add_diag(d); + context.add_diag(d); } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs index 3f77fa2417a19..34e9deb90d126 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs @@ -7,17 +7,11 @@ use super::{LinterDiagnosticCategory, LinterDiagnosticCode, LINT_WARNING_PREFIX}; use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, naming::ast::TypeName_, - shared::{CompilationEnv, Identifier}, + shared::Identifier, sui_mode::linters::{FREEZE_FUN, PUBLIC_FREEZE_FUN, SUI_PKG_NAME, TRANSFER_MOD_NAME}, - typing::{ - ast as T, core, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, core, visitor::simple_visitor}, }; use move_ir_types::location::*; use once_cell::sync::Lazy; @@ -36,22 +30,10 @@ const FREEZE_FUNCTIONS: &[(&str, &str, &str)] = &[ (SUI_PKG_NAME, TRANSFER_MOD_NAME, FREEZE_FUN), ]; -pub struct WarnFreezeCapability; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - static REGEX: Lazy = Lazy::new(|| Regex::new(r".*Cap(?:[A-Z0-9_]+|ability|$).*").unwrap()); -impl TypingVisitorConstructor for WarnFreezeCapability { - type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl<'a> TypingVisitorContext for Context<'a> { +simple_visitor!( + WarnFreezeCapability, fn visit_module_custom( &mut self, _ident: crate::expansion::ast::ModuleIdent, @@ -59,8 +41,7 @@ impl<'a> TypingVisitorContext for Context<'a> { ) -> bool { // 
skips if true mdef.attributes.is_test_or_test_only() - } - + }, fn visit_function_custom( &mut self, _module: crate::expansion::ast::ModuleIdent, @@ -69,8 +50,7 @@ impl<'a> TypingVisitorContext for Context<'a> { ) -> bool { // skips if true fdef.attributes.is_test_or_test_only() - } - + }, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { if let T::UnannotatedExp_::ModuleCall(fun) = &exp.exp.value { if is_freeze_function(fun) { @@ -79,15 +59,7 @@ impl<'a> TypingVisitorContext for Context<'a> { } false } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); fn is_freeze_function(fun: &T::ModuleCall) -> bool { FREEZE_FUNCTIONS.iter().any(|(addr, module, fname)| { @@ -110,7 +82,7 @@ fn check_type_arguments(context: &mut Context, fun: &T::ModuleCall, loc: Loc) { "Freezing a capability might lock out critical operations \ or otherwise open access to operations that otherwise should be restricted", ); - context.env.add_diag(diag); + context.add_diag(diag); }; } } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs index fae0bb94446db..8289ee38f7cc2 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs @@ -8,17 +8,10 @@ use crate::expansion::ast::ModuleIdent; use crate::parser::ast::DatatypeName; use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, naming::ast::{StructDefinition, StructFields}, parser::ast::Ability_, - shared::CompilationEnv, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::visitor::simple_visitor, }; 
const MISSING_KEY_ABILITY_DIAG: DiagnosticInfo = custom( @@ -29,28 +22,8 @@ const MISSING_KEY_ABILITY_DIAG: DiagnosticInfo = custom( "struct with id but missing key ability", ); -pub struct MissingKeyVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} -impl TypingVisitorConstructor for MissingKeyVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + MissingKeyVisitor, fn visit_struct_custom( &mut self, _module: ModuleIdent, @@ -61,11 +34,11 @@ impl TypingVisitorContext for Context<'_> { let uid_msg = "Struct's first field has an 'id' field of type 'sui::object::UID' but is missing the 'key' ability."; let diagnostic = diag!(MISSING_KEY_ABILITY_DIAG, (sdef.loc, uid_msg)); - self.env.add_diag(diagnostic); + self.add_diag(diagnostic); } false } -} +); fn first_field_has_id_field_of_type_uid(sdef: &StructDefinition) -> bool { match &sdef.fields { diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs index d8e21d6bb3167..d308381283f1e 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs @@ -4,7 +4,7 @@ use crate::{ cfgir::visitor::AbstractInterpreterVisitor, command_line::compiler::Visitor, - diagnostics::codes::WarningFilter, + diagnostics::warning_filters::WarningFilter, expansion::ast as E, hlir::ast::{BaseType_, SingleType, SingleType_}, linters::{LintLevel, LinterDiagnosticCategory, ALLOW_ATTR_CATEGORY, LINT_WARNING_PREFIX}, diff --git 
a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs index b150e5deb8d0f..80c11583af028 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs @@ -4,23 +4,16 @@ //! Enforces that public functions use `&mut TxContext` instead of `&TxContext` to ensure upgradability. //! Detects and reports instances where a non-mutable reference to `TxContext` is used in public function signatures. //! Promotes best practices for future-proofing smart contract code by allowing mutation of the transaction context. -use super::{LinterDiagnosticCategory, LinterDiagnosticCode, LINT_WARNING_PREFIX}; +use super::{LinterDiagnosticCategory, LinterDiagnosticCode, LINT_WARNING_PREFIX}; use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, expansion::ast::{ModuleIdent, Visibility}, naming::ast::Type_, parser::ast::FunctionName, - shared::CompilationEnv, sui_mode::{SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME, TX_CONTEXT_TYPE_NAME}, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, visitor::simple_visitor}, }; use move_ir_types::location::Loc; @@ -32,33 +25,12 @@ const REQUIRE_MUTABLE_TX_CONTEXT_DIAG: DiagnosticInfo = custom( "prefer '&mut TxContext' over '&TxContext'", ); -pub struct PreferMutableTxContext; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for PreferMutableTxContext { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: 
WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + PreferMutableTxContext, fn visit_module_custom(&mut self, ident: ModuleIdent, _mdef: &T::ModuleDefinition) -> bool { // skip if in 'sui::tx_context' ident.value.is(SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME) - } - + }, fn visit_function_custom( &mut self, _module: ModuleIdent, @@ -74,15 +46,15 @@ impl TypingVisitorContext for Context<'_> { param_ty_, Type_::Ref(false, t) if t.value.is(SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME, TX_CONTEXT_TYPE_NAME), ) { - report_non_mutable_tx_context(self.env, *loc); + report_non_mutable_tx_context(self, *loc); } } false } -} +); -fn report_non_mutable_tx_context(env: &mut CompilationEnv, loc: Loc) { +fn report_non_mutable_tx_context(context: &mut Context, loc: Loc) { let msg = format!( "'public' functions should prefer '&mut {0}' over '&{0}' for better upgradability.", TX_CONTEXT_TYPE_NAME @@ -93,5 +65,5 @@ fn report_non_mutable_tx_context(env: &mut CompilationEnv, loc: Loc) { of '&TxContext'. As such, it is recommended to consider using '&mut TxContext' to \ future-proof the function.", ); - env.add_diag(diag); + context.add_diag(diag); } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs index 59cb91685f371..5f7df11f22137 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs @@ -3,17 +3,15 @@ //! This analysis flags uses of random::Random and random::RandomGenerator in public functions. 
-use crate::diagnostics::WarningFilters; use crate::expansion::ast::ModuleIdent; use crate::parser::ast::FunctionName; use crate::sui_mode::SUI_ADDR_NAME; -use crate::typing::visitor::{TypingVisitorConstructor, TypingVisitorContext}; +use crate::typing::visitor::simple_visitor; use crate::{ diag, diagnostics::codes::{custom, DiagnosticInfo, Severity}, expansion::ast::Visibility, naming::ast as N, - shared::CompilationEnv, typing::ast as T, }; @@ -30,33 +28,12 @@ const PUBLIC_RANDOM_DIAG: DiagnosticInfo = custom( "Risky use of 'sui::random'", ); -pub struct PublicRandomVisitor; -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for PublicRandomVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + PublicRandomVisitor, fn visit_module_custom(&mut self, ident: ModuleIdent, mdef: &T::ModuleDefinition) -> bool { // skips if true mdef.attributes.is_test_or_test_only() || ident.value.address.is(SUI_ADDR_NAME) - } - + }, fn visit_function_custom( &mut self, _module: ModuleIdent, @@ -78,12 +55,12 @@ impl TypingVisitorContext for Context<'_> { SUI_PKG_NAME, RANDOM_MOD_NAME, struct_name); d.add_note(note); d.add_note("Non-public functions are preferred"); - self.env.add_diag(d); + self.add_diag(d); } } true } -} +); fn is_random_or_random_generator(sp!(_, t): &N::Type) -> Option<&str> { use N::Type_ as T; diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs index 84f8ca01f41e7..45ab199583213 100644 --- 
a/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs @@ -23,7 +23,6 @@ use crate::{ }, hlir::ast::{Label, ModuleCall, Type, Type_, Var}, parser::ast::Ability_, - shared::CompilationEnv, }; use std::collections::BTreeMap; @@ -80,7 +79,6 @@ impl SimpleAbsIntConstructor for SelfTransferVerifier { type AI<'a> = SelfTransferVerifierAI; fn new<'a>( - _env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, _init_state: &mut as SimpleAbsInt>::State, diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs index a3e44f87a3a2d..86003cd647ca6 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs @@ -32,7 +32,7 @@ use crate::{ parser::ast::{Ability_, DatatypeName}, shared::{ program_info::{DatatypeKind, TypingProgramInfo}, - CompilationEnv, Identifier, + Identifier, }, sui_mode::{ info::{SuiInfo, TransferKind}, @@ -96,7 +96,6 @@ impl SimpleAbsIntConstructor for ShareOwnedVerifier { type AI<'a> = ShareOwnedVerifierAI<'a>; fn new<'a>( - _env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, _init_state: &mut as SimpleAbsInt>::State, diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs b/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs index b895758f2602f..a15563e19050d 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs @@ -8,7 +8,10 @@ use move_symbol_pool::Symbol; use crate::{ diag, - diagnostics::{Diagnostic, WarningFilters}, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, editions::Flavor, 
expansion::ast::{AbilitySet, Fields, ModuleIdent, Mutability, TargetKind, Visibility}, naming::ast::{ @@ -32,7 +35,7 @@ pub struct SuiTypeChecks; impl TypingVisitorConstructor for SuiTypeChecks { type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a> { Context::new(env, program.info.clone()) } } @@ -43,7 +46,8 @@ impl TypingVisitorConstructor for SuiTypeChecks { #[allow(unused)] pub struct Context<'a> { - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, info: Arc, sui_transfer_ident: Option, current_module: Option, @@ -53,14 +57,16 @@ pub struct Context<'a> { } impl<'a> Context<'a> { - fn new(env: &'a mut CompilationEnv, info: Arc) -> Self { + fn new(env: &'a CompilationEnv, info: Arc) -> Self { let sui_module_ident = info .modules .key_cloned_iter() .find(|(m, _)| m.value.is(SUI_ADDR_NAME, TRANSFER_MODULE_NAME)) .map(|(m, _)| m); + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, info, sui_transfer_ident: sui_module_ident, current_module: None, @@ -70,6 +76,15 @@ impl<'a> Context<'a> { } } + fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + fn set_module(&mut self, current_module: ModuleIdent) { self.current_module = Some(current_module); self.otw_name = Some(Symbol::from( @@ -98,12 +113,12 @@ const OTW_NOTE: &str = "One-time witness types are structs with the following re //************************************************************************************************** impl<'a> TypingVisitorContext for Context<'a> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - 
self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } fn visit_module_custom(&mut self, ident: ModuleIdent, mdef: &T::ModuleDefinition) -> bool { @@ -205,9 +220,7 @@ fn struct_def(context: &mut Context, name: DatatypeName, sdef: &N::StructDefinit }; if let Some(loc) = invalid_first_field { // no fields or an invalid 'id' field - context - .env - .add_diag(invalid_object_id_field_diag(key_loc, loc, name)); + context.add_diag(invalid_object_id_field_diag(key_loc, loc, name)); return; }; @@ -223,7 +236,7 @@ fn struct_def(context: &mut Context, name: DatatypeName, sdef: &N::StructDefinit ); let mut diag = invalid_object_id_field_diag(key_loc, *id_field_loc, name); diag.add_secondary_label((id_field_type.loc, actual)); - context.env.add_diag(diag); + context.add_diag(diag); } } @@ -261,7 +274,7 @@ fn enum_def(context: &mut Context, name: DatatypeName, edef: &N::EnumDefinition) let msg = format!("Invalid object '{name}'"); let key_msg = format!("Enums cannot have the '{}' ability.", Ability_::Key); let diag = diag!(OBJECT_DECL_DIAG, (name.loc(), msg), (key_loc, key_msg)); - context.env.add_diag(diag); + context.add_diag(diag); }; } @@ -293,7 +306,7 @@ fn function(context: &mut Context, name: FunctionName, fdef: &T::Function) { entry_signature(context, *entry_loc, name, signature); } if let sp!(_, T::FunctionBody_::Defined(seq)) = body { - context.visit_seq(seq) + context.visit_seq(body.loc, seq) } context.in_test = prev_in_test; } @@ -309,17 +322,16 @@ fn init_visibility( entry: Option, ) { match visibility { - Visibility::Public(loc) | Visibility::Friend(loc) | Visibility::Package(loc) => { - context.env.add_diag(diag!( + Visibility::Public(loc) | Visibility::Friend(loc) | Visibility::Package(loc) => context + .add_diag(diag!( INIT_FUN_DIAG, (name.loc(), 
"Invalid 'init' function declaration"), (loc, "'init' functions must be internal to their module"), - )) - } + )), Visibility::Internal => (), } if let Some(entry) = entry { - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (entry, "'init' functions cannot be 'entry' functions"), @@ -335,7 +347,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio } = signature; if !type_parameters.is_empty() { let tp_loc = type_parameters[0].user_specified_name.loc; - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (tp_loc, "'init' functions cannot have type parameters"), @@ -346,7 +358,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio "'init' functions must have a return type of {}", error_format_(&Type_::Unit, &Subst::empty()) ); - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (return_type.loc, msg), @@ -368,7 +380,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio m = TX_CONTEXT_MODULE_NAME, t = TX_CONTEXT_TYPE_NAME, ); - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (last_loc, msg), @@ -397,7 +409,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio (otw_loc, otw_msg), ); diag.add_note(OTW_NOTE); - context.env.add_diag(diag) + context.add_diag(diag) } else if parameters.len() > 1 { // if there is more than one parameter, the first must be the OTW let (_, first_var, first_ty) = parameters.first().unwrap(); @@ -421,7 +433,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio (first_ty.loc, msg) ); diag.add_note(OTW_NOTE); - context.env.add_diag(diag) + context.add_diag(diag) } else if let Some(sdef) = info 
.module(context.current_module()) .structs @@ -439,7 +451,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio if parameters.len() > 2 { // no init function can take more than 2 parameters (the OTW and the TxContext) let (_, third_var, _) = ¶meters[2]; - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), ( @@ -474,7 +486,7 @@ fn check_otw_type( let mut valid = true; if let Some(tp) = sdef.type_parameters.first() { let msg = "One-time witness types cannot have type parameters"; - context.env.add_diag(otw_diag(diag!( + context.add_diag(otw_diag(diag!( OTW_DECL_DIAG, (name.loc(), "Invalid one-time witness declaration"), (tp.param.user_specified_name.loc, msg), @@ -496,7 +508,7 @@ fn check_otw_type( (loc, format!("Found more than one field. {msg_base}")) } }; - context.env.add_diag(otw_diag(diag!( + context.add_diag(otw_diag(diag!( OTW_DECL_DIAG, (name.loc(), "Invalid one-time witness declaration"), (invalid_loc, invalid_msg), @@ -527,7 +539,7 @@ fn check_otw_type( "One-time witness types can only have the have the '{}' ability", Ability_::Drop ); - context.env.add_diag(otw_diag(diag!( + context.add_diag(otw_diag(diag!( OTW_DECL_DIAG, (name.loc(), "Invalid one-time witness declaration"), (loc, msg), @@ -691,7 +703,7 @@ fn entry_param_ty( .to_owned() }; let emsg = format!("'{name}' was declared 'entry' here"); - context.env.add_diag(diag!( + context.add_diag(diag!( ENTRY_FUN_SIGNATURE_DIAG, (param.loc, pmsg), (param_ty.loc, tmsg), @@ -843,7 +855,7 @@ fn entry_return( Type_::Ref(_, _) => { let fmsg = format!("Invalid return type for entry function '{}'", name); let tmsg = "Expected a non-reference type"; - context.env.add_diag(diag!( + context.add_diag(diag!( ENTRY_FUN_SIGNATURE_DIAG, (entry_loc, fmsg), (*tloc, tmsg) @@ -917,7 +929,7 @@ fn invalid_entry_return_ty<'a>( declared_abilities, ty_args, ); - context.env.add_diag(diag) + context.add_diag(diag) } 
//************************************************************************************************** @@ -941,7 +953,7 @@ fn exp(context: &mut Context, e: &T::Exp) { consider extracting the logic into a new function and \ calling that instead.", ); - context.env.add_diag(diag) + context.add_diag(diag) } if module.value.is(SUI_ADDR_NAME, EVENT_MODULE_NAME) && name.value() == EVENT_FUNCTION_NAME @@ -965,7 +977,7 @@ fn exp(context: &mut Context, e: &T::Exp) { cannot be created manually, but are passed as an argument 'init'"; let mut diag = diag!(OTW_USAGE_DIAG, (e.exp.loc, msg)); diag.add_note(OTW_NOTE); - context.env.add_diag(diag) + context.add_diag(diag) } } _ => (), @@ -1005,7 +1017,7 @@ fn check_event_emit(context: &mut Context, loc: Loc, mcall: &ModuleCall) { "The type {} is not declared in the current module", error_format(first_ty, &Subst::empty()), ); - context.env.add_diag(diag!( + context.add_diag(diag!( EVENT_EMIT_CALL_DIAG, (loc, msg), (first_ty.loc, ty_msg) @@ -1083,6 +1095,6 @@ fn check_private_transfer(context: &mut Context, loc: Loc, mcall: &ModuleCall) { ); diag.add_secondary_label((store_loc, store_msg)) } - context.env.add_diag(diag) + context.add_diag(diag) } } diff --git a/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs b/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs index 627c30fa3049c..cd99815152253 100644 --- a/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs +++ b/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs @@ -27,7 +27,7 @@ pub type DatatypeDeclarations = /// Compilation context for a single compilation unit (module). 
/// Contains all of the dependencies actually used in the module pub struct Context<'a> { - pub env: &'a mut CompilationEnv, + pub env: &'a CompilationEnv, current_package: Option, current_module: Option<&'a ModuleIdent>, seen_datatypes: BTreeSet<(ModuleIdent, DatatypeName)>, @@ -36,7 +36,7 @@ pub struct Context<'a> { impl<'a> Context<'a> { pub fn new( - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, current_package: Option, current_module: Option<&'a ModuleIdent>, ) -> Self { diff --git a/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs b/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs index bb1ba8ab8727b..d024c40bf6b03 100644 --- a/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs +++ b/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs @@ -38,7 +38,7 @@ type CollectedInfos = UniqueMap; type CollectedInfo = (Vec<(Mutability, Var, H::SingleType)>, Attributes); fn extract_decls( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: &G::Program, ) -> ( @@ -127,7 +127,7 @@ fn extract_decls( //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: G::Program, ) -> Vec { @@ -153,7 +153,7 @@ pub fn program( } fn module( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, ident: ModuleIdent, mdef: G::ModuleDefinition, dependency_orderings: &HashMap, @@ -227,7 +227,7 @@ fn module( match move_ir_to_bytecode::compiler::compile_module(ir_module, deps) { Ok(res) => res, Err(e) => { - compilation_env.add_diag(diag!( + compilation_env.add_error_diag(diag!( Bug::BytecodeGeneration, (ident_loc, format!("IR ERROR: {}", e)) )); diff --git a/external-crates/move/crates/move-compiler/src/typing/ast.rs 
b/external-crates/move/crates/move-compiler/src/typing/ast.rs index d847ba757f9ca..5bc529e4b73eb 100644 --- a/external-crates/move/crates/move-compiler/src/typing/ast.rs +++ b/external-crates/move/crates/move-compiler/src/typing/ast.rs @@ -3,23 +3,19 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - debug_display, - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::{ Address, Attributes, Fields, Friend, ModuleIdent, Mutability, TargetKind, Value, Visibility, }, - ice, naming::ast::{ BlockLabel, EnumDefinition, FunctionSignature, Neighbor, StructDefinition, SyntaxMethods, - Type, TypeName_, Type_, UseFuns, Var, + Type, Type_, UseFuns, Var, }, parser::ast::{ BinOp, ConstantName, DatatypeName, Field, FunctionName, UnaryOp, VariantName, ENTRY_MODIFIER, MACRO_MODIFIER, NATIVE_MODIFIER, }, - shared::{ - ast_debug::*, program_info::TypingProgramInfo, unique_map::UniqueMap, CompilationEnv, Name, - }, + shared::{ast_debug::*, program_info::TypingProgramInfo, unique_map::UniqueMap, Name}, }; use move_ir_types::location::*; use move_symbol_pool::Symbol; @@ -191,7 +187,7 @@ pub enum UnannotatedExp_ { Builtin(Box, Box), Vector(Loc, usize, Box, Box), - IfElse(Box, Box, Box), + IfElse(Box, Box, Option>), Match(Box, Spanned>), VariantMatch( Box, @@ -361,20 +357,6 @@ pub fn single_item(e: Exp) -> ExpListItem { ExpListItem::Single(e, ty) } -pub fn splat_item(env: &mut CompilationEnv, splat_loc: Loc, e: Exp) -> ExpListItem { - let ss = match &e.ty { - sp!(_, Type_::Unit) => vec![], - sp!(_, Type_::Apply(_, sp!(_, TypeName_::Multiple(_)), ss)) => ss.clone(), - _ => { - let mut diag = ice!((splat_loc, "ICE called `splat_item` on a non-list type")); - diag.add_note(format!("Expression: {}", debug_display!(e))); - env.add_diag(diag); - vec![] - } - }; - ExpListItem::Splat(splat_loc, e, ss) -} - pub fn pat(ty: Type, pat: UnannotatedPat) -> MatchPattern { MatchPattern { ty, pat } } @@ -683,13 +665,15 @@ impl AstDebug for UnannotatedExp_ { 
}); w.write("}"); } - E::IfElse(b, t, f) => { + E::IfElse(b, t, f_opt) => { w.write("if ("); b.ast_debug(w); w.write(") "); t.ast_debug(w); - w.write(" else "); - f.ast_debug(w); + if let Some(f) = f_opt { + w.write(" else "); + f.ast_debug(w); + } } E::Match(esubject, arms) => { w.write("match ("); diff --git a/external-crates/move/crates/move-compiler/src/typing/core.rs b/external-crates/move/crates/move-compiler/src/typing/core.rs index 8c99ff9e66074..67f52ac8c3b67 100644 --- a/external-crates/move/crates/move-compiler/src/typing/core.rs +++ b/external-crates/move/crates/move-compiler/src/typing/core.rs @@ -6,7 +6,8 @@ use crate::{ debug_display, diag, diagnostics::{ codes::{NameResolution, TypeSafety}, - Diagnostic, + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, }, editions::FeatureGate, expansion::ast::{AbilitySet, ModuleIdent, ModuleIdent_, Mutability, Visibility}, @@ -91,7 +92,8 @@ pub(super) struct TypingDebugFlags { pub struct Context<'env> { pub modules: NamingProgramInfo, macros: UniqueMap>, - pub env: &'env mut CompilationEnv, + pub env: &'env CompilationEnv, + warning_filters_scope: WarningFiltersScope, pub(super) debug: TypingDebugFlags, deprecations: Deprecations, @@ -179,7 +181,7 @@ impl UseFunsScope { impl<'env> Context<'env> { pub fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, _pre_compiled_lib: Option>, info: NamingProgramInfo, ) -> Self { @@ -191,6 +193,7 @@ impl<'env> Context<'env> { function_translation: false, type_elaboration: false, }; + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { use_funs: vec![global_use_funs], subst: Subst::empty(), @@ -206,6 +209,7 @@ impl<'env> Context<'env> { macros: UniqueMap::new(), named_block_map: BTreeMap::new(), env, + warning_filters_scope, debug, next_match_var_id: 0, new_friends: BTreeSet::new(), @@ -217,6 +221,31 @@ impl<'env> Context<'env> { } } + pub fn add_diag(&self, diag: Diagnostic) { + 
self.env.add_diag(&self.warning_filters_scope, diag); + } + + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn extend_ide_info(&self, info: IDEInfo) { + self.env.extend_ide_info(&self.warning_filters_scope, info); + } + + pub fn add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.env + .add_ide_annotation(&self.warning_filters_scope, loc, info); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + pub fn set_macros( &mut self, macros: UniqueMap>, @@ -266,7 +295,7 @@ impl<'env> Context<'env> { let (target_m, target_f) = &use_fun.target_function; let msg = format!("{case} method alias '{tn}.{method}' for '{target_m}::{target_f}'"); - self.env.add_diag(diag!( + self.add_diag(diag!( Declarations::DuplicateAlias, (use_fun.loc, msg), (prev_loc, "The same alias was previously declared here") @@ -306,18 +335,18 @@ impl<'env> Context<'env> { UseFunKind::Explicit => { let msg = format!("Unused 'use fun' of '{tn}.{method}'. Consider removing it"); - self.env.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) + self.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) } UseFunKind::UseAlias => { let msg = format!("Unused 'use' of alias '{method}'. 
Consider removing it"); - self.env.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) + self.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) } UseFunKind::FunctionDeclaration => { let diag = ice!(( *loc, "ICE fun declaration 'use' funs should never be added to 'use' funs" )); - self.env.add_diag(diag); + self.add_diag(diag); } } } @@ -411,7 +440,7 @@ impl<'env> Context<'env> { }; diag.add_secondary_label((*prev_loc, msg)); } - self.env.add_diag(diag); + self.add_diag(diag); false } else { self.macro_expansion @@ -433,7 +462,7 @@ impl<'env> Context<'env> { loc, "ICE macro expansion stack should have a call when leaving a macro expansion" )); - self.env.add_diag(diag); + self.add_diag(diag); return false; } }; @@ -471,7 +500,7 @@ impl<'env> Context<'env> { loc, "ICE macro expansion stack should have a lambda when leaving a lambda", )); - self.env.add_diag(diag); + self.add_diag(diag); } } } @@ -507,8 +536,7 @@ impl<'env> Context<'env> { self.lambda_expansion = vec![]; if !self.ide_info.is_empty() { - self.env - .add_diag(ice!((loc, "IDE info should be cleared after each item"))); + self.add_diag(ice!((loc, "IDE info should be cleared after each item"))); self.ide_info = IDEInfo::new(); } } @@ -575,15 +603,14 @@ impl<'env> Context<'env> { pub fn declare_local(&mut self, _: Mutability, var: Var, ty: Type) { if let Err((_, prev_loc)) = self.locals.add(var, ty) { let msg = format!("ICE duplicate {var:?}. Should have been made unique in naming"); - self.env - .add_diag(ice!((var.loc, msg), (prev_loc, "Previously declared here"))); + self.add_diag(ice!((var.loc, msg), (prev_loc, "Previously declared here"))); } } pub fn get_local_type(&mut self, var: &Var) -> Type { if !self.locals.contains_key(var) { let msg = format!("ICE unbound {var:?}. 
Should have failed in naming"); - self.env.add_diag(ice!((var.loc, msg))); + self.add_diag(ice!((var.loc, msg))); return self.error_type(var.loc); } @@ -659,7 +686,8 @@ impl<'env> Context<'env> { if deprecation.location == AttributePosition::Module && in_same_module { return; } - deprecation.emit_deprecation_warning(self.env, name, method_opt); + let diags = deprecation.deprecation_warnings(name, method_opt); + self.add_diags(diags); } } @@ -847,7 +875,7 @@ impl<'env> Context<'env> { } impl MatchContext for Context<'_> { - fn env(&mut self) -> &mut CompilationEnv { + fn env(&mut self) -> &CompilationEnv { self.env } @@ -1102,7 +1130,7 @@ fn debug_abilities_info(context: &mut Context, ty: &Type) -> (Option, Abili loc, "ICE did not call unfold_type before debug_abiliites_info" )); - context.env.add_diag(diag); + context.add_diag(diag); (None, AbilitySet::all(loc), vec![]) } T::UnresolvedError | T::Anything => (None, AbilitySet::all(loc), vec![]), @@ -1238,7 +1266,7 @@ pub fn make_struct_field_type( N::StructFields::Native(nloc) => { let nloc = *nloc; let msg = format!("Unbound field '{}' for native struct '{}::{}'", field, m, n); - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, (loc, msg), (nloc, "Struct declared 'native' here") @@ -1249,7 +1277,7 @@ pub fn make_struct_field_type( }; match fields_map.get(field).cloned() { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, (loc, format!("Unbound field '{}' in '{}::{}'", field, m, n)), )); @@ -1364,7 +1392,7 @@ pub fn make_constant_type( let msg = format!("Invalid access of '{}::{}'", m, c); let internal_msg = "Constants are internal to their module, and cannot can be accessed \ outside of their module"; - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::Visibility, (loc, msg), (defined_loc, internal_msg) @@ -1396,7 +1424,7 @@ pub fn make_method_call_type( loc, format!("ICE method on tuple type {}", debug_display!(tn)) 
)); - context.env.add_diag(diag); + context.add_diag(diag); return None; } TypeName_::Builtin(sp!(_, bt_)) => context.env.primitive_definer(*bt_), @@ -1433,7 +1461,7 @@ pub fn make_method_call_type( No known method '{method}' on type '{lhs_ty_str}'" ); let fmsg = format!("The function '{m}::{method}' exists, {arg_msg}"); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMethodCall, (loc, msg), (first_ty_loc, fmsg) @@ -1451,7 +1479,7 @@ pub fn make_method_call_type( }; let fmsg = format!("No local 'use fun' alias was found for '{lhs_ty_str}.{method}'{decl_msg}"); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMethodCall, (loc, msg), (method.loc, fmsg) @@ -1739,7 +1767,7 @@ fn report_visibility_error_( diag.add_secondary_label((call.invocation, "While expanding this macro")); } _ => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( call_loc, "Error when dealing with macro visibilities" ))); @@ -1752,7 +1780,7 @@ fn report_visibility_error_( "Visibility inside of expanded macros is resolved in the scope of the caller.", ); } - context.env.add_diag(diag); + context.add_diag(diag); } pub fn check_call_arity S>( @@ -1777,7 +1805,7 @@ pub fn check_call_arity S>( arity, given_len ); - context.env.add_diag(diag!( + context.add_diag(diag!( code, (loc, cmsg), (argloc, format!("Found {} argument(s) here", given_len)), @@ -1873,7 +1901,7 @@ fn solve_ability_constraint( format!("'{}' constraint declared here", constraint), )); } - context.env.add_diag(diag) + context.add_diag(diag) } } @@ -1973,7 +2001,7 @@ fn solve_builtin_type_constraint( } _ => { let tmsg = mk_tmsg(); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::BuiltinOperation, (loc, format!("Invalid argument to '{}'", op)), (tloc, tmsg) @@ -1991,7 +2019,7 @@ fn solve_base_type_constraint(context: &mut Context, loc: Loc, msg: String, ty: Unit | Ref(_, _) | Apply(_, sp!(_, Multiple(_)), _) => { let tystr = error_format(ty, &context.subst); 
let tmsg = format!("Expected a single non-reference type, but found: {}", tystr); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::ExpectedBaseType, (loc, msg), (tyloc, tmsg) @@ -2012,7 +2040,7 @@ fn solve_single_type_constraint(context: &mut Context, loc: Loc, msg: String, ty "Expected a single type, but found expression list type: {}", error_format(ty, &context.subst) ); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::ExpectedSingleType, (loc, msg), (tyloc, tmsg) @@ -2363,7 +2391,7 @@ fn check_type_argument_arity String>( arity, args_len ); - context.env.add_diag(diag!(code, (loc, msg))); + context.add_diag(diag!(code, (loc, msg))); } while ty_args.len() > arity { diff --git a/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs b/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs index be058f0afac57..0db5e1b0f4a2b 100644 --- a/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs +++ b/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs @@ -20,7 +20,7 @@ use std::collections::{BTreeMap, BTreeSet}; //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, modules: &mut UniqueMap, ) { let imm_modules = &modules; @@ -38,7 +38,7 @@ pub fn program( Err(cycle_node) => { let cycle_ident = *cycle_node.node_id(); let error = cycle_error(&module_neighbors, cycle_ident); - compilation_env.add_diag(error); + compilation_env.add_error_diag(error); } Ok(ordered_ids) => { for (order, mident) in ordered_ids.iter().rev().enumerate() { @@ -63,7 +63,7 @@ enum DepType { } struct Context<'a, 'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, modules: &'a UniqueMap, // A union of uses and friends for modules (used for cyclyc dependency checking) // - if A uses B, add edge A -> B @@ -79,7 +79,7 @@ 
struct Context<'a, 'env> { impl<'a, 'env> Context<'a, 'env> { fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, modules: &'a UniqueMap, ) -> Self { Context { @@ -372,7 +372,7 @@ fn lvalue(context: &mut Context, sp!(loc, lv_): &T::LValue) { } } L::BorrowUnpackVariant(..) | L::UnpackVariant(..) => { - context.env.add_diag(ice!(( + context.env.add_error_diag(ice!(( *loc, "variant unpacking shouldn't occur before match expansion" ))); @@ -402,10 +402,12 @@ fn exp(context: &mut Context, e: &T::Exp) { type_(context, ty); exp(context, e); } - E::IfElse(e1, e2, e3) => { + E::IfElse(e1, e2, e3_opt) => { exp(context, e1); exp(context, e2); - exp(context, e3); + if let Some(e3) = e3_opt { + exp(context, e3); + } } E::Match(esubject, arms) => { exp(context, esubject); @@ -418,7 +420,7 @@ fn exp(context: &mut Context, e: &T::Exp) { } } E::VariantMatch(..) => { - context.env.add_diag(ice!(( + context.env.add_error_diag(ice!(( e.exp.loc, "shouldn't find variant match before HLIR lowering" ))); diff --git a/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs b/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs index 9ba6765d66dc3..e4df3a5fe4aec 100644 --- a/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs +++ b/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs @@ -3,6 +3,7 @@ use crate::{ diag, + diagnostics::Diagnostics, expansion::ast::{self as E, ModuleIdent}, ice, shared::{ @@ -43,7 +44,7 @@ pub struct Deprecations { impl Deprecations { /// Index the modules and their members for deprecation attributes and register each /// deprecation attribute for use later on. 
- pub fn new(env: &mut CompilationEnv, info: &NamingProgramInfo) -> Self { + pub fn new(env: &CompilationEnv, info: &NamingProgramInfo) -> Self { let mut deprecated_members = HashMap::new(); for (mident, module_info) in info.modules.key_cloned_iter() { @@ -120,12 +121,7 @@ impl Deprecations { impl Deprecation { /// Emit a warning for the deprecation of a module member. - pub fn emit_deprecation_warning( - &self, - env: &mut CompilationEnv, - member_name: Name, - method_opt: Option, - ) { + pub fn deprecation_warnings(&self, member_name: Name, method_opt: Option) -> Diagnostics { let mident_string = self.module_ident.to_string(); let location_string = match (self.location, method_opt) { (AttributePosition::Module, None) => { @@ -159,7 +155,10 @@ impl Deprecation { let location = method_opt.map_or(member_name.loc, |method| method.loc); - env.add_diag(diag!(TypeSafety::DeprecatedUsage, (location, message))); + Diagnostics::from(vec![diag!( + TypeSafety::DeprecatedUsage, + (location, message) + )]) } } @@ -168,7 +167,7 @@ impl Deprecation { // #[deprecated] attributes (malformed, or multiple on the member), add an error diagnostic to // `env` and return None. fn deprecations( - env: &mut CompilationEnv, + env: &CompilationEnv, attr_position: AttributePosition, attrs: &E::Attributes, source_location: Loc, @@ -184,7 +183,7 @@ fn deprecations( } if deprecations.len() != 1 { - env.add_diag(ice!(( + env.add_error_diag(ice!(( source_location, "ICE: verified that there is at at least one deprecation attribute above, \ and expansion should have failed if there were multiple deprecation attributes." 
@@ -196,7 +195,7 @@ fn deprecations( .last() .expect("Verified deprecations is not empty above"); - let mut make_invalid_deprecation_diag = || { + let make_invalid_deprecation_diag = || { let mut diag = diag!( Attributes::InvalidUsage, ( @@ -209,7 +208,7 @@ fn deprecations( DeprecationAttribute.name() ); diag.add_note(note); - env.add_diag(diag); + env.add_error_diag(diag); None }; diff --git a/external-crates/move/crates/move-compiler/src/typing/expand.rs b/external-crates/move/crates/move-compiler/src/typing/expand.rs index 9f36764277e55..bae757038b6c7 100644 --- a/external-crates/move/crates/move-compiler/src/typing/expand.rs +++ b/external-crates/move/crates/move-compiler/src/typing/expand.rs @@ -69,14 +69,14 @@ pub fn type_(context: &mut Context, ty: &mut Type) { ty.loc, "ICE unfold_type_base failed to expand type inf. var" )); - context.env.add_diag(diag); + context.env.add_error_diag(diag); sp(loc, UnresolvedError) } sp!(loc, Anything) => { let msg = "Could not infer this type. Try adding an annotation"; context .env - .add_diag(diag!(TypeSafety::UninferredType, (ty.loc, msg))); + .add_error_diag(diag!(TypeSafety::UninferredType, (ty.loc, msg))); sp(loc, UnresolvedError) } sp!(loc, Fun(_, _)) if !context.in_macro_function => { @@ -96,7 +96,7 @@ pub fn type_(context: &mut Context, ty: &mut Type) { ty.loc, format!("ICE expanding pre-expanded type {}", debug_display!(aty)) )); - context.env.add_diag(diag); + context.env.add_error_diag(diag); *ty = sp(ty.loc, UnresolvedError) } Apply(None, _, _) => { @@ -108,7 +108,7 @@ pub fn type_(context: &mut Context, ty: &mut Type) { } _ => { let diag = ice!((ty.loc, "ICE type-apply switched to non-apply")); - context.env.add_diag(diag); + context.env.add_error_diag(diag); *ty = sp(ty.loc, UnresolvedError) } } @@ -134,7 +134,7 @@ fn unexpected_lambda_type(context: &mut Context, loc: Loc) { Lambdas can only be used with 'macro' functions, as parameters or direct arguments"; context .env - 
.add_diag(diag!(TypeSafety::UnexpectedFunctionType, (loc, msg))); + .add_error_diag(diag!(TypeSafety::UnexpectedFunctionType, (loc, msg))); } } @@ -234,10 +234,12 @@ pub fn exp(context: &mut Context, e: &mut T::Exp) { exp(context, args); } - E::IfElse(eb, et, ef) => { + E::IfElse(eb, et, ef_opt) => { exp(context, eb); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } } E::Match(esubject, arms) => { exp(context, esubject); @@ -246,7 +248,7 @@ pub fn exp(context: &mut Context, e: &mut T::Exp) { } } E::VariantMatch(subject, _, arms) => { - context.env.add_diag(ice!(( + context.env.add_error_diag(ice!(( e.exp.loc, "shouldn't find variant match before match compilation" ))); @@ -355,7 +357,7 @@ fn inferred_numerical_value( "Annotating the literal might help inference: '{value}{type}'", type=fix_bt, ); - context.env.add_diag(diag!( + context.env.add_error_diag(diag!( TypeSafety::InvalidNum, (eloc, "Invalid numerical literal"), (ty.loc, msg), diff --git a/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs b/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs index 03e98d4e657cb..0e9745d4b4f1f 100644 --- a/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs +++ b/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs @@ -140,7 +140,7 @@ impl<'a> Context<'a> { //************************************************************************************************** pub fn modules( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, modules: &UniqueMap, ) { let tparams = modules @@ -171,7 +171,7 @@ macro_rules! 
scc_edges { } fn module<'a>( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, tparams: &'a BTreeMap>>, mname: ModuleIdent, module: &T::ModuleDefinition, @@ -188,7 +188,7 @@ fn module<'a>( petgraph_scc(&graph) .into_iter() .filter(|scc| scc_edges!(&graph, scc).any(|(_, e, _)| e == Edge::Nested)) - .for_each(|scc| compilation_env.add_diag(cycle_error(context, &graph, scc))) + .for_each(|scc| compilation_env.add_error_diag(cycle_error(context, &graph, scc))) } //************************************************************************************************** @@ -239,10 +239,12 @@ fn exp(context: &mut Context, e: &T::Exp) { exp(context, &call.arguments) } - E::IfElse(eb, et, ef) => { + E::IfElse(eb, et, ef_opt) => { exp(context, eb); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } } E::Match(esubject, arms) => { exp(context, esubject); diff --git a/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs b/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs index db7dcb2a7fb35..58fb3c512d551 100644 --- a/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs +++ b/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs @@ -64,7 +64,7 @@ pub(crate) fn call( let reloc_clever_errors = match &context.macro_expansion[0] { core::MacroExpansion::Call(call) => call.invocation, core::MacroExpansion::Argument { .. } => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( call_loc, "ICE top level macro scope should never be an argument" ))); @@ -92,7 +92,7 @@ pub(crate) fn call( return None; } Err(Some(diag)) => { - context.env.add_diag(*diag); + context.add_diag(*diag); return None; } }; @@ -288,9 +288,7 @@ fn bind_lambda( "Unable to bind lambda to parameter '{}'. 
The lambda must be passed directly", param.name ); - context - .env - .add_diag(diag!(TypeSafety::CannotExpandMacro, (arg.loc, msg))); + context.add_diag(diag!(TypeSafety::CannotExpandMacro, (arg.loc, msg))); None } } @@ -551,10 +549,12 @@ fn recolor_exp(ctx: &mut Recolor, sp!(_, e_): &mut N::Exp) { recolor_lvalues(ctx, lvalues); recolor_exp(ctx, e) } - N::Exp_::IfElse(econd, et, ef) => { + N::Exp_::IfElse(econd, et, ef_opt) => { recolor_exp(ctx, econd); recolor_exp(ctx, et); - recolor_exp(ctx, ef); + if let Some(ef) = ef_opt { + recolor_exp(ctx, ef); + } } N::Exp_::Match(subject, arms) => { recolor_exp(ctx, subject); @@ -745,9 +745,7 @@ fn report_unused_argument(context: &mut core::Context, loc: EvalStrategy { + N::Exp_::IfElse(econd, et, ef_opt) => { exp(context, econd); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } } N::Exp_::Match(subject, arms) => { macro_rules! take_and_mut_replace { @@ -1061,7 +1061,6 @@ fn exp(context: &mut Context, sp!(eloc, e_): &mut N::Exp) { if context.core.env.ide_mode() { context .core - .env .add_ide_annotation(*eloc, IDEAnnotation::ExpandedLambda); } *e_ = block; @@ -1100,7 +1099,7 @@ fn exp(context: &mut Context, sp!(eloc, e_): &mut N::Exp) { N::Exp_::VarCall(sp!(_, v_), _) if context.by_name_args.contains_key(v_) => { context.mark_used(v_); let (arg, _expected_ty) = context.by_name_args.get(v_).unwrap(); - context.core.env.add_diag(diag!( + context.core.add_diag(diag!( TypeSafety::CannotExpandMacro, (*eloc, "Cannot call non-lambda argument"), (arg.loc, "Expected a lambda argument") diff --git a/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs b/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs index 5c3fcc78e31d1..7bbf7b29032da 100644 --- a/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs +++ b/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs @@ -3,6 +3,7 @@ use crate::{ diag, + 
diagnostics::warning_filters::WarningFilters, expansion::ast::{ModuleIdent, Value_}, ice, naming::ast::BuiltinTypeName_, @@ -70,12 +71,12 @@ impl TypingMutVisitorContext for MatchCompiler<'_, '_> { } } - fn add_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { - self.context.env.add_warning_filter_scope(filter); + fn push_warning_filter_scope(&mut self, filter: WarningFilters) { + self.context.push_warning_filter_scope(filter); } fn pop_warning_filter_scope(&mut self) { - self.context.env.pop_warning_filter_scope(); + self.context.pop_warning_filter_scope(); } } @@ -564,7 +565,7 @@ fn find_counterexample_impl( } else { // An error case: no entry on the fringe but no if !context.env.has_errors() { - context.env.add_diag(ice!(( + context.add_diag(ice!(( matrix.loc, "Non-empty matrix with non errors but no type" ))); @@ -593,7 +594,7 @@ fn find_counterexample_impl( if has_guards { diag.add_note("Match arms with guards are not considered for coverage."); } - context.env.add_diag(diag); + context.add_diag(diag); true } else { false @@ -622,9 +623,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr if !unused.is_empty() { let arms = unused.into_iter().map(PS::Value).collect::>(); let info = MissingMatchArmsInfo { arms }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } @@ -635,9 +634,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr let info = MissingMatchArmsInfo { arms: vec![PS::Wildcard], }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } @@ -657,7 +654,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr // If the matrix _is_ empty, we suggest adding an unpack. 
let is_positional = context.modules.struct_is_positional(&mident, &name); let Some(fields) = context.modules.struct_fields(&mident, &name) else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Tried to look up fields for this struct and found none" ))); @@ -684,9 +681,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr let info = MissingMatchArmsInfo { arms: vec![suggestion], }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } else { // If there's a default arm, no suggestion is necessary. if matrix.has_default_arm() { @@ -722,7 +717,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr .modules .enum_variant_fields(&mident, &name, &variant) else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Tried to look up fields for this enum and found none" ))); @@ -752,14 +747,12 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr arms.push(suggestion); } let info = MissingMatchArmsInfo { arms }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } let Some(ty) = matrix.tys.first() else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Pattern matrix with no types handed to IDE function" ))); @@ -778,7 +771,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr } else { if !context.env.has_errors() { // It's unclear how we got here, so report an ICE and suggest a wildcard. 
- context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!( "Found non-matchable type {} as match subject", @@ -790,9 +783,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr let info = MissingMatchArmsInfo { arms: vec![PS::Wildcard], }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } } diff --git a/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs b/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs index 347376fe9eda9..bb80f3f350b3f 100644 --- a/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs +++ b/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs @@ -61,8 +61,8 @@ impl TypingVisitorContext for MatchCompiler<'_, '_> { } } - fn add_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { - self.context.env.add_warning_filter_scope(filter); + fn push_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { + self.context.env.push_warning_filter_scope(filter); } fn pop_warning_filter_scope(&mut self) { diff --git a/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs b/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs index 58a9053120255..11f053a366236 100644 --- a/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs +++ b/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs @@ -54,7 +54,7 @@ impl Context { //************************************************************************************************** pub fn modules( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, modules: &UniqueMap, ) { modules @@ -62,7 +62,7 @@ pub fn modules( .for_each(|(mname, m)| module(compilation_env, mname, m)) } -fn module(compilation_env: &mut 
CompilationEnv, mname: ModuleIdent, module: &T::ModuleDefinition) { +fn module(compilation_env: &CompilationEnv, mname: ModuleIdent, module: &T::ModuleDefinition) { let context = &mut Context::new(mname); module .structs @@ -79,7 +79,7 @@ fn module(compilation_env: &mut CompilationEnv, mname: ModuleIdent, module: &T:: petgraph_scc(&graph) .into_iter() .filter(|scc| scc.len() > 1 || graph.contains_edge(scc[0], scc[0])) - .for_each(|scc| compilation_env.add_diag(cycle_error(context, &graph, scc[0]))) + .for_each(|scc| compilation_env.add_error_diag(cycle_error(context, &graph, scc[0]))) } fn struct_def(context: &mut Context, sname: DatatypeName, sdef: &N::StructDefinition) { diff --git a/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs b/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs index d1ba995249756..6e8b839c068de 100644 --- a/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs +++ b/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs @@ -73,7 +73,7 @@ fn validate_index_syntax_methods( diag.add_note( "Index operations on the same type must take the name number of type arguments", ); - context.env.add_diag(diag); + context.add_diag(diag); return false; } @@ -92,7 +92,7 @@ fn validate_index_syntax_methods( (index_mut.loc, index_mut_msg), ); diag.add_note("Index operations on the same type must take the name number of parameters"); - context.env.add_diag(diag); + context.add_diag(diag); return false; } @@ -121,7 +121,7 @@ fn validate_index_syntax_methods( diag.add_note( "Index operations on use the same abilities for their type parameters", ); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } @@ -142,7 +142,7 @@ fn validate_index_syntax_methods( diag.add_note( "Index operations on use the same abilities for their type parameters", ); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } @@ -200,7 +200,7 @@ fn 
validate_index_syntax_methods( let N::Type_::Ref(false, inner) = core::ready_tvars(&subst, subject_ref_type.clone()).value else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( index_finfo.signature.return_type.loc, "This index function got to type verification with an invalid type" ))); @@ -228,7 +228,7 @@ fn validate_index_syntax_methods( diag.add_note( "These functions must take the same subject type, differing only by mutability", ); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } else { @@ -262,7 +262,7 @@ fn validate_index_syntax_methods( &mut_finfo.signature.type_parameters, ); diag.add_note("Index operation non-subject parameter types must match exactly"); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } @@ -282,7 +282,7 @@ fn validate_index_syntax_methods( let index_msg = format!("This index function returns type {}", ty_str(index_type)); let N::Type_::Ref(false, inner) = core::ready_tvars(&subst, index_ty.return_.clone()).value else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( index_finfo.signature.return_type.loc, "This index function got to type verification with an invalid type" ))); @@ -308,7 +308,7 @@ fn validate_index_syntax_methods( &mut_finfo.signature.type_parameters, ); diag.add_note("These functions must return the same type, differing only by mutability"); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } diff --git a/external-crates/move/crates/move-compiler/src/typing/translate.rs b/external-crates/move/crates/move-compiler/src/typing/translate.rs index f5e374f2a1817..ba75d91516ec4 100644 --- a/external-crates/move/crates/move-compiler/src/typing/translate.rs +++ b/external-crates/move/crates/move-compiler/src/typing/translate.rs @@ -53,7 +53,7 @@ use std::{ //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: 
&CompilationEnv, pre_compiled_lib: Option>, prog: N::Program, ) -> T::Program { @@ -226,7 +226,7 @@ fn module( } = mdef; context.current_module = Some(ident); context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); context.add_use_funs_scope(use_funs); structs .iter_mut() @@ -238,7 +238,7 @@ fn module( assert!(context.constraints.is_empty()); context.current_package = None; let use_funs = context.pop_use_funs_scope(); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); let typed_module = T::ModuleDefinition { loc, warning_filter, @@ -270,7 +270,7 @@ fn finalize_ide_info(context: &mut Context) { for (_loc, ann) in info.iter_mut() { expand::ide_annotation(context, ann); } - context.env.extend_ide_info(info); + context.extend_ide_info(info); } //************************************************************************************************** @@ -289,7 +289,7 @@ fn function(context: &mut Context, name: FunctionName, f: N::Function) -> T::Fun mut signature, body: n_body, } = f; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); assert!(context.constraints.is_empty()); context.reset_for_module_item(name.loc()); context.current_function = Some(name); @@ -310,7 +310,7 @@ fn function(context: &mut Context, name: FunctionName, f: N::Function) -> T::Fun finalize_ide_info(context); context.current_function = None; context.in_macro_function = false; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); T::Function { warning_filter, index, @@ -394,7 +394,7 @@ fn constant(context: &mut Context, name: ConstantName, nconstant: N::Constant) - signature, value: nvalue, } = nconstant; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); process_attributes(context, &attributes); @@ 
-426,7 +426,7 @@ fn constant(context: &mut Context, name: ConstantName, nconstant: N::Constant) - if context.env.ide_mode() { finalize_ide_info(context); } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); T::Constant { warning_filter, @@ -503,9 +503,7 @@ mod check_valid_constant { core::error_format(ty, &Subst::empty()), format_comma(tys), ); - context - .env - .add_diag(diag!(code, (sloc, fmsg()), (loc, tmsg))) + context.add_diag(diag!(code, (sloc, fmsg()), (loc, tmsg))) } pub fn exp(context: &mut Context, e: &T::Exp) { @@ -572,10 +570,12 @@ mod check_valid_constant { s = format!("'{}' is", b); &s } - E::IfElse(eb, et, ef) => { + E::IfElse(eb, et, ef_opt) => { exp(context, eb); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } "'if' expressions are" } E::Match(esubject, sp!(_, arms)) => { @@ -589,7 +589,7 @@ mod check_valid_constant { "'match' expressions are" } E::VariantMatch(_subject, _, _arms) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, "shouldn't find variant match before match compilation" ))); @@ -642,7 +642,7 @@ mod check_valid_constant { "Enum variants are" } }; - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UnsupportedConstant, (*loc, format!("{} not supported in constants", error_case)) )); @@ -689,9 +689,7 @@ mod check_valid_constant { } }; let msg = format!("{} are not supported in constants", error_case); - context - .env - .add_diag(diag!(TypeSafety::UnsupportedConstant, (*loc, msg),)) + context.add_diag(diag!(TypeSafety::UnsupportedConstant, (*loc, msg),)) } } @@ -702,9 +700,7 @@ mod check_valid_constant { fn struct_def(context: &mut Context, sloc: Loc, s: &mut N::StructDefinition) { assert!(context.constraints.is_empty()); context.reset_for_module_item(sloc); - context - .env - .add_warning_filter_scope(s.warning_filter.clone()); + context.push_warning_filter_scope(s.warning_filter.clone()); let field_map = match &mut s.fields { 
N::StructFields::Native(_) => return, @@ -747,15 +743,13 @@ fn struct_def(context: &mut Context, sloc: Loc, s: &mut N::StructDefinition) { expand::type_(context, &mut idx_ty.1); } check_type_params_usage(context, &s.type_parameters, field_map); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } fn enum_def(context: &mut Context, enum_: &mut N::EnumDefinition) { assert!(context.constraints.is_empty()); - context - .env - .add_warning_filter_scope(enum_.warning_filter.clone()); + context.push_warning_filter_scope(enum_.warning_filter.clone()); let enum_abilities = &enum_.abilities; let enum_type_params = &enum_.type_parameters; @@ -768,7 +762,7 @@ fn enum_def(context: &mut Context, enum_: &mut N::EnumDefinition) { } check_variant_type_params_usage(context, enum_type_params, field_types); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } fn variant_def( @@ -1029,7 +1023,7 @@ fn invalid_phantom_use_error( } }; let decl_msg = format!("'{}' declared here as phantom", ¶m.user_specified_name); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidPhantomUse, (ty_loc, msg), (param.user_specified_name.loc, decl_msg), @@ -1048,9 +1042,7 @@ fn check_non_phantom_param_usage( "Unused type parameter '{}'. 
Consider declaring it as phantom", name ); - context - .env - .add_diag(diag!(UnusedItem::StructTypeParam, (name.loc, msg))) + context.add_diag(diag!(UnusedItem::StructTypeParam, (name.loc, msg))) } Some(false) => { let msg = format!( @@ -1058,9 +1050,7 @@ fn check_non_phantom_param_usage( adding a phantom declaration here", name ); - context - .env - .add_diag(diag!(Declarations::InvalidNonPhantomUse, (name.loc, msg))) + context.add_diag(diag!(Declarations::InvalidNonPhantomUse, (name.loc, msg))) } Some(true) => {} } @@ -1246,7 +1236,7 @@ fn subtype_impl T>( Err(e) => { context.subst = subst; let diag = typing_error(context, /* from_subtype */ true, loc, msg, e); - context.env.add_diag(diag); + context.add_diag(diag); Err(rhs) } Ok((next_subst, ty)) => { @@ -1296,7 +1286,7 @@ fn join_opt T>( Err(e) => { context.subst = subst; let diag = typing_error(context, /* from_subtype */ false, loc, msg, e); - context.env.add_diag(diag); + context.add_diag(diag); None } Ok((next_subst, ty)) => { @@ -1348,7 +1338,7 @@ fn invariant_impl T>( Err(e) => { context.subst = subst; let diag = typing_error(context, /* from_subtype */ false, loc, msg, e); - context.env.add_diag(diag); + context.add_diag(diag); Err(rhs) } Ok((next_subst, ty)) => { @@ -1575,7 +1565,7 @@ fn exp(context: &mut Context, ne: Box) -> Box { vector_pack(context, eloc, vec_loc, ty_opt, argloc, args_) } - NE::IfElse(nb, nt, nf) => { + NE::IfElse(nb, nt, nf_opt) => { let eb = exp(context, nb); let bloc = eb.exp.loc; subtype( @@ -1586,15 +1576,24 @@ fn exp(context: &mut Context, ne: Box) -> Box { Type_::bool(bloc), ); let et = exp(context, nt); - let ef = exp(context, nf); - let ty = join( - context, - eloc, - || "Incompatible branches", - et.ty.clone(), - ef.ty.clone(), - ); - (ty, TE::IfElse(eb, et, ef)) + let ef_opt = nf_opt.map(|nf| exp(context, nf)); + let ty = match &ef_opt { + Some(ef) => join( + context, + eloc, + || "Incompatible branches", + et.ty.clone(), + ef.ty.clone(), + ), + None => { + let ty = 
sp(eloc, Type_::Unit); + let msg = + "Invalid 'if'. The body of an 'if' without an 'else' must have type '()'"; + subtype(context, eloc, || msg, et.ty.clone(), ty.clone()); + ty + } + }; + (ty, TE::IfElse(eb, et, ef_opt)) } NE::Match(nsubject, sp!(aloc, narms_)) => { let esubject = exp(context, nsubject); @@ -1612,7 +1611,7 @@ fn exp(context: &mut Context, ne: Box) -> Box { } }; let result_type = core::make_tvar(context, aloc); - let earms = match_arms(context, &subject_type, &result_type, narms_, &ref_mut); + let earms = match_arms(context, &esubject.ty, &result_type, narms_, &ref_mut); (result_type, TE::Match(esubject, sp(aloc, earms))) } NE::While(name, nb, nloop) => { @@ -1670,9 +1669,7 @@ fn exp(context: &mut Context, ne: Box) -> Box { .check_feature(context.current_package, FeatureGate::Lambda, eloc) { let msg = "Lambdas can only be used directly as arguments to 'macro' functions"; - context - .env - .add_diag(diag!(TypeSafety::UnexpectedLambda, (eloc, msg))) + context.add_diag(diag!(TypeSafety::UnexpectedLambda, (eloc, msg))) } (context.error_type(eloc), TE::UnresolvedError) } @@ -2032,9 +2029,7 @@ fn binop( } Range | Implies | Iff => { - context - .env - .add_diag(ice!((loc, "ICE unexpect specification operator"))); + context.add_diag(ice!((loc, "ICE unexpect specification operator"))); (context.error_type(loc), context.error_type(loc)) } }; @@ -2312,9 +2307,7 @@ fn match_pattern_( matched in the module in which they are declared", &m, &struct_, ); - context - .env - .add_diag(diag!(TypeSafety::Visibility, (loc, msg))); + context.add_diag(diag!(TypeSafety::Visibility, (loc, msg))); } let bt = rtype!(bt); let pat_ = if field_error { @@ -2797,7 +2790,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty match core::ready_tvars(&context.subst, ty) { sp!(_, UnresolvedError) => context.error_type(loc), sp!(tloc, Anything) => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (tloc, 
UNINFERRED_MSG), @@ -2805,7 +2798,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty context.error_type(loc) } sp!(tloc, Var(i)) if !context.subst.is_num_var(i) => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (tloc, UNINFERRED_MSG), @@ -2818,9 +2811,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty "Invalid access of field '{field}' on the struct '{m}::{n}'. The field '{field}' can only \ be accessed within the module '{m}' since it defines '{n}'" ); - context - .env - .add_diag(diag!(TypeSafety::Visibility, (loc, msg))); + context.add_diag(diag!(TypeSafety::Visibility, (loc, msg))); } match context.datatype_kind(&m, &n) { DatatypeKind::Struct => { @@ -2832,9 +2823,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty structs, not enums", field, &m, &n ); - context - .env - .add_diag(diag!(TypeSafety::ExpectedSpecificType, (loc, msg))); + context.add_diag(diag!(TypeSafety::ExpectedSpecificType, (loc, msg))); context.error_type(loc) } } @@ -2844,7 +2833,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty "Expected a struct type in the current module but got: {}", core::error_format(&t, &context.subst) ); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::ExpectedSpecificType, (loc, msg()), (t.loc, smsg), @@ -2872,7 +2861,7 @@ fn add_struct_field_types( constructed/deconstructed, and their fields cannot be dirctly accessed", verb, m, n ); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidNativeUsage, (loc, msg), (nloc, "Struct declared 'native' here") @@ -2883,15 +2872,13 @@ fn add_struct_field_types( for (_, f_, _) in &fields_ty { if fields.get_(f_).is_none() { let msg = format!("Missing {} for field '{}' in '{}::{}'", verb, f_, m, n); - context - .env - .add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) + 
context.add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) } } fields.map(|f, (idx, x)| { let fty = match fields_ty.remove(&f) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, (loc, format!("Unbound field '{}' in '{}::{}'", &f, m, n)) )); @@ -2936,15 +2923,13 @@ fn add_variant_field_types( "Missing {} for field '{}' in '{}::{}::{}'", verb, f_, m, n, v ); - context - .env - .add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) + context.add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) } } fields.map(|f, (idx, x)| { let fty = match fields_ty.remove(&f) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, ( loc, @@ -2981,7 +2966,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option None, sp!(tloc, T::Anything) => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (*tloc, UNINFERRED_MSG), @@ -2989,7 +2974,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (*tloc, UNINFERRED_MSG), @@ -2999,9 +2984,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option { let index_opt = core::find_index_funs(context, type_name); if index_opt.is_none() { - context - .env - .add_diag(diag!(Declarations::MissingSyntaxMethod, (loc, msg()),)); + context.add_diag(diag!(Declarations::MissingSyntaxMethod, (loc, msg()),)); } index_opt } @@ -3010,7 +2993,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option *base, ty @ sp!(_, Type_::UnresolvedError) => ty, _ => { - context - .env - .add_diag(ice!((dloc, "Index should have failed in naming"))); + context.add_diag(ice!((dloc, "Index should have failed in naming"))); sp(dloc, Type_::UnresolvedError) } }; @@ -3231,9 +3210,7 @@ fn process_exp_dotted( inner } N::ExpDotted_::DotAutocomplete(_loc, ndot) 
=> { - context - .env - .add_diag(ice!((dloc, "Found a dot autocomplete where unsupported"))); + context.add_diag(ice!((dloc, "Found a dot autocomplete where unsupported"))); // Keep going after the ICE. process_exp_dotted_inner(context, constraint_verb, *ndot) } @@ -3331,7 +3308,7 @@ fn resolve_exp_dotted( }, ), TE::Constant(_, _) if edotted.accessors.is_empty() => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMoveOp, (loc, "Invalid 'move'. Cannot 'move' constants") )); @@ -3339,7 +3316,7 @@ fn resolve_exp_dotted( } TE::UnresolvedError => make_exp(edotted.base.ty, TE::UnresolvedError), _ if edotted.accessors.is_empty() => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMoveOp, (loc, "Invalid 'move'. Expected a variable or path.") )); @@ -3355,9 +3332,7 @@ fn resolve_exp_dotted( borrow_exp_dotted(context, error_loc, false, edotted); let msg = "Invalid 'move'. 'move' works only with \ variables, e.g. 'move x'. 'move' on a path access is not supported"; - context - .env - .add_diag(diag!(TypeSafety::InvalidMoveOp, (loc, msg))); + context.add_diag(diag!(TypeSafety::InvalidMoveOp, (loc, msg))); make_error(context) } else { make_error(context) @@ -3386,9 +3361,7 @@ fn resolve_exp_dotted( TE::UnresolvedError => make_exp(edotted.base.ty, TE::UnresolvedError), _ => { let msg = "Invalid 'copy'. 
Expected a variable or path.".to_owned(); - context - .env - .add_diag(diag!(TypeSafety::InvalidCopyOp, (loc, msg))); + context.add_diag(diag!(TypeSafety::InvalidCopyOp, (loc, msg))); make_error(context) } } @@ -3473,7 +3446,7 @@ fn borrow_exp_dotted( }; // lhs is immutable and current borrow is mutable if !cur_mut && expected_mut { - context.env.add_diag(diag!( + context.add_diag(diag!( ReferenceSafety::RefTrans, (loc, "Invalid mutable borrow from an immutable reference"), (tyloc, "Immutable because of this position"), @@ -3539,7 +3512,6 @@ fn borrow_exp_dotted( } else { let msg = "Could not find a mutable index 'syntax' method"; context - .env .add_diag(diag!(Declarations::MissingSyntaxMethod, (index_loc, msg),)); exp = make_error_exp(context, index_loc); break; @@ -3548,9 +3520,7 @@ fn borrow_exp_dotted( index.target_function } else { let msg = "Could not find an immutable index 'syntax' method"; - context - .env - .add_diag(diag!(Declarations::MissingSyntaxMethod, (index_loc, msg),)); + context.add_diag(diag!(Declarations::MissingSyntaxMethod, (index_loc, msg),)); exp = make_error_exp(context, index_loc); break; }; @@ -3566,7 +3536,7 @@ fn borrow_exp_dotted( core::error_format(&ret_ty, &context.subst), core::error_format(&mut_type, &context.subst) ); - context.env.add_diag(ice!((loc, msg))); + context.add_diag(ice!((loc, msg))); exp = make_error_exp(context, index_loc); break; } @@ -3608,7 +3578,7 @@ fn exp_dotted_to_owned( } } } else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( ed.loc, "Attempted to make a dotted path with no dots" ))); @@ -3616,15 +3586,11 @@ fn exp_dotted_to_owned( }; let case = match usage { DottedUsage::Move(_) => { - context - .env - .add_diag(ice!((ed.loc, "Invalid dotted usage 'move' in to_owned"))); + context.add_diag(ice!((ed.loc, "Invalid dotted usage 'move' in to_owned"))); return make_error_exp(context, ed.loc); } DottedUsage::Borrow(_) => { - context - .env - .add_diag(ice!((ed.loc, "Invalid dotted usage 'borrow' in 
to_owned"))); + context.add_diag(ice!((ed.loc, "Invalid dotted usage 'borrow' in to_owned"))); return make_error_exp(context, ed.loc); } DottedUsage::Use => "implicit copy", @@ -3715,9 +3681,7 @@ fn warn_on_constant_borrow(context: &mut Context, loc: Loc, e: &T::Exp) { if matches!(&e.exp.value, TE::Constant(_, _)) { let msg = "This access will make a new copy of the constant. \ Consider binding the value to a variable first to make this copy explicit"; - context - .env - .add_diag(diag!(TypeSafety::ImplicitConstantCopy, (loc, msg))) + context.add_diag(diag!(TypeSafety::ImplicitConstantCopy, (loc, msg))) } } @@ -3864,7 +3828,7 @@ fn type_to_type_name_( return None; } Ty::Ref(_, _) | Ty::Var(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Typing did not unfold type before resolving type name" ))); @@ -3873,7 +3837,7 @@ fn type_to_type_name_( Ty::Apply(_, _, _) => unreachable!(), }; if report_error { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMethodCall, (loc, format!("Invalid {error_msg}")), (ty.loc, msg), @@ -4011,7 +3975,7 @@ fn annotated_error_const(context: &mut Context, e: &mut T::Exp, abort_or_assert_ the '#[error]' attribute is added to them." .to_string(), ); - context.env.add_diag(err); + context.add_diag(err); e.ty = context.error_type(e.ty.loc); e.exp = sp(e.exp.loc, T::UnannotatedExp_::UnresolvedError); @@ -4261,7 +4225,7 @@ fn check_call_target( } else { "Normal (non-'macro') function is declared here" }; - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidCallTarget, (macro_call_loc, call_msg), (decl_loc, decl_msg), @@ -4485,7 +4449,7 @@ fn expand_macro( { None => { if !(context.env.has_errors() || context.env.ide_mode()) { - context.env.add_diag(ice!(( + context.add_diag(ice!(( call_loc, "No macro found, but name resolution passed." 
))); @@ -4614,24 +4578,18 @@ fn unused_module_members(context: &mut Context, mident: &ModuleIdent_, mdef: &T: } let is_sui_mode = context.env.package_config(mdef.package_name).flavor == Flavor::Sui; - context - .env - .add_warning_filter_scope(mdef.warning_filter.clone()); + context.push_warning_filter_scope(mdef.warning_filter.clone()); for (loc, name, c) in &mdef.constants { - context - .env - .add_warning_filter_scope(c.warning_filter.clone()); + context.push_warning_filter_scope(c.warning_filter.clone()); let members = context.used_module_members.get(mident); if members.is_none() || !members.unwrap().contains(name) { let msg = format!("The constant '{name}' is never used. Consider removing it."); - context - .env - .add_diag(diag!(UnusedItem::Constant, (loc, msg))) + context.add_diag(diag!(UnusedItem::Constant, (loc, msg))) } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } for (loc, name, fun) in &mdef.functions { @@ -4647,9 +4605,7 @@ fn unused_module_members(context: &mut Context, mident: &ModuleIdent_, mdef: &T: // a Sui-specific filter to avoid signaling that the init function is unused continue; } - context - .env - .add_warning_filter_scope(fun.warning_filter.clone()); + context.push_warning_filter_scope(fun.warning_filter.clone()); let members = context.used_module_members.get(mident); if fun.entry.is_none() @@ -4662,12 +4618,10 @@ fn unused_module_members(context: &mut Context, mident: &ModuleIdent_, mdef: &T: "The non-'public', non-'entry' function '{name}' is never called. \ Consider removing it." 
); - context - .env - .add_diag(diag!(UnusedItem::Function, (loc, msg))) + context.add_diag(diag!(UnusedItem::Function, (loc, msg))) } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } diff --git a/external-crates/move/crates/move-compiler/src/typing/visitor.rs b/external-crates/move/crates/move-compiler/src/typing/visitor.rs index 5e5aa490931d7..1628d165150af 100644 --- a/external-crates/move/crates/move-compiler/src/typing/visitor.rs +++ b/external-crates/move/crates/move-compiler/src/typing/visitor.rs @@ -3,7 +3,7 @@ use crate::{ command_line::compiler::Visitor, - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::ModuleIdent, naming::ast as N, parser::ast::{ConstantName, DatatypeName, FunctionName, VariantName}, @@ -17,7 +17,7 @@ use move_proc_macros::growing_stack; pub type TypingVisitorObj = Box; pub trait TypingVisitor: Send + Sync { - fn visit(&self, env: &mut CompilationEnv, program: &T::Program); + fn visit(&self, env: &CompilationEnv, program: &T::Program); fn visitor(self) -> Visitor where @@ -30,9 +30,9 @@ pub trait TypingVisitor: Send + Sync { pub trait TypingVisitorConstructor: Send + Sync { type Context<'a>: Sized + TypingVisitorContext; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a>; + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a>; - fn visit(env: &mut CompilationEnv, program: &T::Program) { + fn visit(env: &CompilationEnv, program: &T::Program) { let mut context = Self::context(env, program); context.visit(program); } @@ -44,7 +44,7 @@ pub enum LValueKind { } pub trait TypingVisitorContext { - fn add_warning_filter_scope(&mut self, filter: WarningFilters); + fn push_warning_filter_scope(&mut self, filters: WarningFilters); fn pop_warning_filter_scope(&mut self); /// Indicates if types should be visited during the 
traversal of other forms (struct and enum @@ -75,7 +75,7 @@ pub trait TypingVisitorContext { } fn visit_module(&mut self, ident: ModuleIdent, mdef: &T::ModuleDefinition) { - self.add_warning_filter_scope(mdef.warning_filter.clone()); + self.push_warning_filter_scope(mdef.warning_filter.clone()); if self.visit_module_custom(ident, mdef) { self.pop_warning_filter_scope(); return; @@ -116,7 +116,7 @@ pub trait TypingVisitorContext { struct_name: DatatypeName, sdef: &N::StructDefinition, ) { - self.add_warning_filter_scope(sdef.warning_filter.clone()); + self.push_warning_filter_scope(sdef.warning_filter.clone()); if self.visit_struct_custom(module, struct_name, sdef) { self.pop_warning_filter_scope(); return; @@ -149,7 +149,7 @@ pub trait TypingVisitorContext { enum_name: DatatypeName, edef: &N::EnumDefinition, ) { - self.add_warning_filter_scope(edef.warning_filter.clone()); + self.push_warning_filter_scope(edef.warning_filter.clone()); if self.visit_enum_custom(module, enum_name, edef) { self.pop_warning_filter_scope(); return; @@ -192,6 +192,8 @@ pub trait TypingVisitorContext { } } + // TODO field visitor + fn visit_constant_custom( &mut self, _module: ModuleIdent, @@ -207,7 +209,7 @@ pub trait TypingVisitorContext { constant_name: ConstantName, cdef: &T::Constant, ) { - self.add_warning_filter_scope(cdef.warning_filter.clone()); + self.push_warning_filter_scope(cdef.warning_filter.clone()); if self.visit_constant_custom(module, constant_name, cdef) { self.pop_warning_filter_scope(); return; @@ -231,7 +233,7 @@ pub trait TypingVisitorContext { function_name: FunctionName, fdef: &T::Function, ) { - self.add_warning_filter_scope(fdef.warning_filter.clone()); + self.push_warning_filter_scope(fdef.warning_filter.clone()); if self.visit_function_custom(module, function_name, fdef) { self.pop_warning_filter_scope(); return; @@ -245,7 +247,7 @@ pub trait TypingVisitorContext { self.visit_type(None, &fdef.signature.return_type); } if let T::FunctionBody_::Defined(seq) = 
&fdef.body.value { - self.visit_seq(seq); + self.visit_seq(fdef.body.loc, seq); } self.pop_warning_filter_scope(); } @@ -291,11 +293,19 @@ pub trait TypingVisitorContext { // -- SEQUENCES AND EXPRESSIONS -- - fn visit_seq(&mut self, (use_funs, seq): &T::Sequence) { + /// Custom visit for a sequence. It will skip `visit_seq` if `visit_seq_custom` returns true. + fn visit_seq_custom(&mut self, _loc: Loc, _seq: &T::Sequence) -> bool { + false + } + + fn visit_seq(&mut self, loc: Loc, seq @ (use_funs, seq_): &T::Sequence) { + if self.visit_seq_custom(loc, seq) { + return; + } if Self::VISIT_USE_FUNS { self.visit_use_funs(use_funs); } - for s in seq { + for s in seq_ { self.visit_seq_item(s); } } @@ -431,10 +441,12 @@ pub trait TypingVisitorContext { } self.visit_exp(e); } - E::IfElse(e1, e2, e3) => { + E::IfElse(e1, e2, e3_opt) => { self.visit_exp(e1); self.visit_exp(e2); - self.visit_exp(e3); + if let Some(e3) = e3_opt { + self.visit_exp(e3); + } } E::Match(esubject, arms) => { self.visit_exp(esubject); @@ -456,8 +468,8 @@ pub trait TypingVisitorContext { self.visit_exp(e2); } E::Loop { body, .. } => self.visit_exp(body), - E::NamedBlock(_, seq) => self.visit_seq(seq), - E::Block(seq) => self.visit_seq(seq), + E::NamedBlock(_, seq) => self.visit_seq(exp.exp.loc, seq), + E::Block(seq) => self.visit_seq(exp.exp.loc, seq), E::Assign(lvalues, ty_ann, e) => { // visit the RHS first to better match control flow self.visit_exp(e); @@ -555,32 +567,86 @@ impl From for TypingVisitorObj { } impl TypingVisitor for V { - fn visit(&self, env: &mut CompilationEnv, program: &T::Program) { + fn visit(&self, env: &CompilationEnv, program: &T::Program) { Self::visit(env, program) } } +macro_rules! 
simple_visitor { + ($visitor:ident, $($overrides:item),*) => { + pub struct $visitor; + + pub struct Context<'a> { + env: &'a crate::shared::CompilationEnv, + warning_filters_scope: crate::diagnostics::warning_filters::WarningFiltersScope, + } + + impl crate::typing::visitor::TypingVisitorConstructor for $visitor { + type Context<'a> = Context<'a>; + + fn context<'a>( + env: &'a crate::shared::CompilationEnv, + _program: &crate::typing::ast::Program, + ) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + } + } + } + + impl Context<'_> { + #[allow(unused)] + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + } + + impl crate::typing::visitor::TypingVisitorContext for Context<'_> { + fn push_warning_filter_scope( + &mut self, + filters: crate::diagnostics::warning_filters::WarningFilters, + ) { + self.warning_filters_scope.push(filters) + } + + fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + + $($overrides)* + } + } +} +pub(crate) use simple_visitor; + //************************************************************************************************** // Mut Vistor //************************************************************************************************** pub trait TypingMutVisitor: Send + Sync { - fn visit(&self, env: &mut CompilationEnv, program: &mut T::Program); + fn visit(&self, env: &CompilationEnv, program: &mut T::Program); } pub trait TypingMutVisitorConstructor: Send + Sync { type Context<'a>: Sized + TypingMutVisitorContext; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a>; + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a>; - fn 
visit(env: &mut CompilationEnv, program: &mut T::Program) { + fn visit(env: &CompilationEnv, program: &mut T::Program) { let mut context = Self::context(env, program); context.visit(program); } } pub trait TypingMutVisitorContext { - fn add_warning_filter_scope(&mut self, filter: WarningFilters); + fn push_warning_filter_scope(&mut self, filter: WarningFilters); fn pop_warning_filter_scope(&mut self); /// Indicates if types should be visited during the traversal of other forms (struct and enum @@ -615,7 +681,7 @@ pub trait TypingMutVisitorContext { } fn visit_module(&mut self, ident: ModuleIdent, mdef: &mut T::ModuleDefinition) { - self.add_warning_filter_scope(mdef.warning_filter.clone()); + self.push_warning_filter_scope(mdef.warning_filter.clone()); if self.visit_module_custom(ident, mdef) { self.pop_warning_filter_scope(); return; @@ -656,7 +722,7 @@ pub trait TypingMutVisitorContext { struct_name: DatatypeName, sdef: &mut N::StructDefinition, ) { - self.add_warning_filter_scope(sdef.warning_filter.clone()); + self.push_warning_filter_scope(sdef.warning_filter.clone()); if self.visit_struct_custom(module, struct_name, sdef) { self.pop_warning_filter_scope(); return; @@ -689,7 +755,7 @@ pub trait TypingMutVisitorContext { enum_name: DatatypeName, edef: &mut N::EnumDefinition, ) { - self.add_warning_filter_scope(edef.warning_filter.clone()); + self.push_warning_filter_scope(edef.warning_filter.clone()); if self.visit_enum_custom(module, enum_name, edef) { self.pop_warning_filter_scope(); return; @@ -747,7 +813,7 @@ pub trait TypingMutVisitorContext { constant_name: ConstantName, cdef: &mut T::Constant, ) { - self.add_warning_filter_scope(cdef.warning_filter.clone()); + self.push_warning_filter_scope(cdef.warning_filter.clone()); if self.visit_constant_custom(module, constant_name, cdef) { self.pop_warning_filter_scope(); return; @@ -771,7 +837,7 @@ pub trait TypingMutVisitorContext { function_name: FunctionName, fdef: &mut T::Function, ) { - 
self.add_warning_filter_scope(fdef.warning_filter.clone()); + self.push_warning_filter_scope(fdef.warning_filter.clone()); if self.visit_function_custom(module, function_name, fdef) { self.pop_warning_filter_scope(); return; @@ -973,10 +1039,12 @@ pub trait TypingMutVisitorContext { } self.visit_exp(e); } - E::IfElse(e1, e2, e3) => { + E::IfElse(e1, e2, e3_opt) => { self.visit_exp(e1); self.visit_exp(e2); - self.visit_exp(e3); + if let Some(e3) = e3_opt { + self.visit_exp(e3); + } } E::Match(esubject, arms) => { self.visit_exp(esubject); @@ -1092,7 +1160,7 @@ pub trait TypingMutVisitorContext { } impl TypingMutVisitor for V { - fn visit(&self, env: &mut CompilationEnv, program: &mut T::Program) { + fn visit(&self, env: &CompilationEnv, program: &mut T::Program) { Self::visit(env, program) } } @@ -1158,8 +1226,10 @@ where E::While(_, e1, e2) | E::Mutate(e1, e2) | E::BinopExp(e1, _, _, e2) => { exp_satisfies_(e1, p) || exp_satisfies_(e2, p) } - E::IfElse(e1, e2, e3) => { - exp_satisfies_(e1, p) || exp_satisfies_(e2, p) || exp_satisfies_(e3, p) + E::IfElse(e1, e2, e3_opt) => { + exp_satisfies_(e1, p) + || exp_satisfies_(e2, p) + || e3_opt.iter().any(|e3| exp_satisfies_(e3, p)) } E::ModuleCall(c) => exp_satisfies_(&c.arguments, p), E::Match(esubject, arms) => { diff --git a/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs b/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs index 225b4f91628b5..2fb22b66b4b30 100644 --- a/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs +++ b/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs @@ -18,13 +18,13 @@ use crate::{ use std::sync::Arc; struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, is_source_def: bool, current_package: Option, } impl<'env> Context<'env> { - fn new(env: &'env mut CompilationEnv) -> Self { + fn new(env: &'env CompilationEnv) -> Self { Self { env, is_source_def: 
false, @@ -92,7 +92,7 @@ pub const UNIT_TEST_POISON_FUN_NAME: Symbol = symbol!("unit_test_poison"); // in `compilation_env` is not set. If the test flag is set, no filtering is performed, and instead // a test plan is created for use by the testing framework. pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: P::Program, ) -> P::Program { @@ -127,7 +127,7 @@ fn has_unit_test_module(prog: &P::Program) -> bool { } fn check_has_unit_test_module( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: &P::Program, ) -> bool { @@ -145,7 +145,7 @@ fn check_has_unit_test_module( P::Definition::Module(P::ModuleDefinition { name, .. }) => name.0.loc, P::Definition::Address(P::AddressDefinition { loc, .. }) => *loc, }; - compilation_env.add_diag(diag!( + compilation_env.add_error_diag(diag!( Attributes::InvalidTest, ( loc, diff --git a/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs b/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs index 7aab1a163d71d..30f14b7f7a891 100644 --- a/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs +++ b/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs @@ -5,6 +5,10 @@ use crate::{ cfgir::ast as G, diag, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, expansion::ast::{ self as E, Address, Attribute, AttributeValue, Attributes, ModuleAccess_, ModuleIdent, ModuleIdent_, @@ -33,12 +37,13 @@ use move_symbol_pool::Symbol; use std::collections::BTreeMap; struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, + warning_filters_scope: WarningFiltersScope, constants: UniqueMap, Attributes)>>, } impl<'env> Context<'env> { - fn new(compilation_env: &'env mut CompilationEnv, prog: &G::Program) -> Self { + fn new(compilation_env: &'env CompilationEnv, 
prog: &G::Program) -> Self { let constants = prog.modules.ref_map(|_mident, module| { module.constants.ref_map(|_name, constant| { let v_opt = constant.value.as_ref().and_then(|v| match v { @@ -48,12 +53,31 @@ impl<'env> Context<'env> { (constant.loc, v_opt, constant.attributes.clone()) }) }); + let warning_filters_scope = compilation_env.top_level_warning_filter_scope().clone(); Self { env: compilation_env, + warning_filters_scope, constants, } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + fn resolve_address(&self, addr: &Address) -> NumericalAddress { (*addr).into_addr_bytes() } @@ -72,7 +96,7 @@ impl<'env> Context<'env> { // Constructs a test plan for each module in `prog`. This also validates the structure of the // attributes as the test plan is constructed. 
pub fn construct_test_plan( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, package_filter: Option, prog: &G::Program, ) -> Option> { @@ -85,7 +109,15 @@ pub fn construct_test_plan( prog.modules .key_cloned_iter() .flat_map(|(module_ident, module_def)| { - construct_module_test_plan(&mut context, package_filter, module_ident, module_def) + context.push_warning_filter_scope(module_def.warning_filter.clone()); + let plan = construct_module_test_plan( + &mut context, + package_filter, + module_ident, + module_def, + ); + context.pop_warning_filter_scope(); + plan }) .collect(), ) @@ -104,8 +136,11 @@ fn construct_module_test_plan( .functions .iter() .filter_map(|(loc, fn_name, func)| { - build_test_info(context, loc, fn_name, func) - .map(|test_case| (fn_name.to_string(), test_case)) + context.push_warning_filter_scope(func.warning_filter.clone()); + let info = build_test_info(context, loc, fn_name, func) + .map(|test_case| (fn_name.to_string(), test_case)); + context.pop_warning_filter_scope(); + info }) .collect(); @@ -143,7 +178,7 @@ fn build_test_info<'func>( let fn_msg = "Only functions defined as a test with #[test] can also have an \ #[expected_failure] attribute"; let abort_msg = "Attributed as #[expected_failure] here"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidUsage, (fn_loc, fn_msg), (abort_attribute.loc, abort_msg), @@ -154,7 +189,7 @@ fn build_test_info<'func>( (Some(test_attribute), Some(random_test_attribute)) => { let msg = "Function annotated as both #[test] and #[random_test]. You need to declare \ it as either one or the other"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidUsage, (random_test_attribute.loc, msg), (test_attribute.loc, PREVIOUSLY_ANNOTATED_MSG), @@ -170,7 +205,7 @@ fn build_test_info<'func>( if let Some(test_only_attribute) = test_only_attribute_opt { let msg = "Function annotated as both #[test] and #[test_only]. 
You need to declare \ it as either one or the other"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidUsage, (test_only_attribute.loc, msg), (test_attribute.loc, PREVIOUSLY_ANNOTATED_MSG), @@ -205,7 +240,7 @@ fn build_test_info<'func>( "Supported builti-in types are: bool, u8, u16, u32, u64, \ u128, u256, address, and vector where T is a built-in type", ); - context.env.add_diag(diag); + context.add_diag(diag); return None; } }; @@ -214,7 +249,7 @@ fn build_test_info<'func>( None => { let missing_param_msg = "Missing test parameter assignment in test. Expected a \ parameter to be assigned in this attribute"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (test_attribute.loc, missing_param_msg), (vloc, "Corresponding to this parameter"), @@ -227,7 +262,7 @@ fn build_test_info<'func>( if is_random_test && arguments.is_empty() { let msg = "No parameters to generate for random test. A #[random_test] function must \ have at least one parameter to generate."; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (test_attribute.loc, msg), (fn_loc, IN_THIS_TEST_MSG), @@ -266,7 +301,7 @@ fn parse_test_attribute( match test_attribute { EA::Name(_) | EA::Parameterized(_, _) if depth > 0 => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (*aloc, "Unexpected nested attribute in test declaration"), )); @@ -281,7 +316,7 @@ fn parse_test_attribute( } EA::Assigned(nm, attr_value) => { if depth != 1 { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (*aloc, "Unexpected nested attribute in test declaration"), )); @@ -291,7 +326,7 @@ fn parse_test_attribute( let value = match convert_attribute_value_to_move_value(context, attr_value) { Some(move_value) => move_value, None => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (*assign_loc, "Unsupported attribute value"), (*aloc, "Assigned in this 
attribute"), @@ -338,7 +373,7 @@ fn parse_failure_attribute( let invalid_assignment_msg = "Invalid expected failure code assignment"; let expected_msg = "Expect an #[expected_failure(...)] attribute for error specification"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (assign_loc, invalid_assignment_msg), (*aloc, expected_msg), @@ -369,9 +404,7 @@ fn parse_failure_attribute( expected_failure_kind_vec.len(), TestingAttribute::expected_failure_cases().to_vec().join(", ") ); - context - .env - .add_diag(diag!(Attributes::InvalidValue, (*aloc, invalid_attr_msg))); + context.add_diag(diag!(Attributes::InvalidValue, (*aloc, invalid_attr_msg))); return None; } let (expected_failure_kind, (attr_loc, attr)) = @@ -400,7 +433,7 @@ fn parse_failure_attribute( attribute.", TestingAttribute::ERROR_LOCATION ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::ValueWarning, (attr_loc, BAD_ABORT_VALUE_WARNING), (value_loc, tip) @@ -500,7 +533,7 @@ fn parse_failure_attribute( ); let no_code = format!("No status code associated with value '{move_error_type}'"); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (value_name_loc, bad_value), (major_value_loc, no_code) @@ -541,9 +574,7 @@ fn parse_failure_attribute( "Unused attribute for {}", TestingAttribute::ExpectedFailure.name() ); - context - .env - .add_diag(diag!(UnusedItem::Attribute, (loc, msg))); + context.add_diag(diag!(UnusedItem::Attribute, (loc, msg))); } Some(ExpectedFailure::ExpectedWithError(ExpectedMoveError( status_code, @@ -571,7 +602,7 @@ fn check_attribute_unassigned( "Expected no assigned value, e.g. '{}', for expected failure attribute", kind ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (attr_loc, "Unsupported attribute in this location"), (loc, msg) @@ -598,7 +629,7 @@ fn get_assigned_attribute( "Expected assigned value, e.g. 
'{}=...', for expected failure attribute", kind ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (attr_loc, "Unsupported attribute in this location"), (loc, msg) @@ -615,7 +646,7 @@ fn convert_location(context: &mut Context, attr_loc: Loc, attr: Attribute) -> Op match value { sp!(vloc, EAV::Module(module)) => convert_module_id(context, vloc, &module), sp!(vloc, _) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (loc, INVALID_VALUE), (vloc, "Expected a module identifier, e.g. 'std::vector'") @@ -645,7 +676,7 @@ fn convert_constant_value_u64_constant_or_value( let modules_constants = context.constants().get(module).unwrap(); let constant = match modules_constants.get_(&member.value) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), ( @@ -667,7 +698,7 @@ fn convert_constant_value_u64_constant_or_value( "Constant '{module}::{member}' has a non-u64 value. \ Only 'u64' values are permitted" ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), (*cloc, msg), @@ -680,7 +711,7 @@ fn convert_constant_value_u64_constant_or_value( fn convert_module_id(context: &mut Context, vloc: Loc, module: &ModuleIdent) -> Option { if !context.constants.contains_key(module) { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), (module.loc, format!("Unbound module '{module}'")), @@ -693,7 +724,7 @@ fn convert_module_id(context: &mut Context, vloc: Loc, module: &ModuleIdent) -> value: sp!(_, a), .. 
} => a.into_inner(), Address::NamedUnassigned(addr) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), (*mloc, format!("Unbound address '{addr}'")), @@ -722,7 +753,7 @@ fn convert_attribute_value_u64( | sp!(vloc, EAV::Value(sp!(_, EV::U32(_)))) | sp!(vloc, EAV::Value(sp!(_, EV::U128(_)))) | sp!(vloc, EAV::Value(sp!(_, EV::U256(_)))) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (loc, INVALID_VALUE), (*vloc, "Annotated non-u64 literals are not permitted"), @@ -730,7 +761,7 @@ fn convert_attribute_value_u64( None } sp!(vloc, _) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (loc, INVALID_VALUE), (*vloc, "Unsupported value in this assignment"), @@ -765,9 +796,7 @@ fn check_location( "Expected '{}' following '{attr}'", TestingAttribute::ERROR_LOCATION ); - context - .env - .add_diag(diag!(Attributes::InvalidUsage, (loc, msg))); + context.add_diag(diag!(Attributes::InvalidUsage, (loc, msg))); } location } diff --git a/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move b/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move index 4e676fa7ca518..a70e993da5b7e 100644 --- a/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move +++ b/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move @@ -1,12 +1,14 @@ module a::m { // These very simply could be rewritten but we are overly conservative when it comes to blocks public fun t0(condition: bool) { - if (condition) { (); true } else false; - if (condition) b"" else { (); (); vector[] }; + if (condition) { foo(); true } else false; + if (condition) b"" else { foo(); foo(); vector[] }; } // we don't do this check after constant folding public fun t1(condition: bool) { if (condition) 1 + 1 else 2; } + + fun foo() {} 
} diff --git a/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_unit.move b/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_unit.move new file mode 100644 index 0000000000000..da9533c8d5890 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_unit.move @@ -0,0 +1,12 @@ +// suppress unnecessary_unit lint +module a::m { + + #[allow(lint(unnecessary_unit))] + public fun test_empty_else(x: bool): bool { + if (x) { x = true; } else {}; + if (!x) () else { test_empty_else(x); }; + { (); }; + (); + x + } +} diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_unit.move b/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_unit.move new file mode 100644 index 0000000000000..dac10008319ab --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_unit.move @@ -0,0 +1,12 @@ +// tests unnecessary units. 
These caeses are not errors and should not be reported +module a::unnecessary_unit { + public fun t_if_without_else(cond: bool): u64 { + let x = 0; + if (cond) x = 1; + x + } + + public fun t() { + () // unit here is okay + } +} diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.exp b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.exp new file mode 100644 index 0000000000000..3b81304356f04 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.exp @@ -0,0 +1,151 @@ +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:6:16 + │ +6 │ if (b) () else { x = 1 }; + │ - ^^ Unnecessary unit '()' + │ │ + │ Consider negating the 'if' condition and simplifying + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:8:16 + │ +8 │ if (b) {} else { x = 1 }; + │ - ^^ Unnecessary unit '()' + │ │ + │ Consider negating the 'if' condition and simplifying + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:10:16 + │ +10 │ if (b) { () } else { x = 1 }; + │ - ^^^^^^ Unnecessary unit '()' + │ │ + │ Consider negating the 'if' condition and simplifying + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with 
'#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:12:16 + │ +12 │ if (b) { + │ - Consider negating the 'if' condition and simplifying + │ ╭────────────────^ +13 │ │ // new line and comment does not suppress it +14 │ │ } else { x = 1 }; + │ ╰─────────^ Unnecessary unit '()' + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:21:31 + │ +21 │ if (b) { x = 1 } else (); + │ ----------------------^^ + │ │ │ + │ │ Unnecessary 'else ()'. + │ An 'if' without an 'else' has an implicit 'else ()'. Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:23:31 + │ +23 │ if (b) { x = 1 } else {}; + │ ----------------------^^ + │ │ │ + │ │ Unnecessary 'else ()'. + │ An 'if' without an 'else' has an implicit 'else ()'. 
Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:25:31 + │ +25 │ if (b) { x = 1 } else { () }; + │ ----------------------^^^^^^ + │ │ │ + │ │ Unnecessary 'else ()'. + │ An 'if' without an 'else' has an implicit 'else ()'. Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:27:31 + │ +27 │ ╭ if (b) { x = 1 } else { + │ ╭─────────────────────────────────^ +28 │ │ │ // new line and comment does not suppress it +29 │ │ │ }; + │ ╰─│─────────^ Unnecessary 'else ()'. + │ ╰─────────' An 'if' without an 'else' has an implicit 'else ()'. Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:34:9 + │ +34 │ (); + │ ^^ Unnecessary unit in sequence '();'. 
Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:37:18 + │ +37 │ if (b) { (); () } else { x = 1 }; // doesn't trigger if/else case + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:39:33 + │ +39 │ if (b) { x = 1 } else { (); (); () }; // doesn't trigger if/else case + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:39:37 + │ +39 │ if (b) { x = 1 } else { (); (); () }; // doesn't trigger if/else case + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:41:9 + │ +41 │ {}; + │ ^^ Unnecessary unit in sequence '();'. 
Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:42:9 + │ +42 │ { () }; // inner isn't an error but the outer is + │ ^^^^^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:43:11 + │ +43 │ { (); }; // inner is an error but outer isn't + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.move b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.move new file mode 100644 index 0000000000000..3cd380ffbd1ce --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.move @@ -0,0 +1,52 @@ +// tests unnecessary units in if, else, and block +module a::unnecessary_unit { + public fun t_if(b: bool) { + let x = 0; + x; + if (b) () else { x = 1 }; + x; + if (b) {} else { x = 1 }; + x; + if (b) { () } else { x = 1 }; + x; + if (b) { + // new line and comment does not suppress it + } else { x = 1 }; + x; + } + + public fun t_else(b: bool) { + let x = 0; + x; + if (b) { x = 1 } else (); + x; + if (b) { x = 1 } else {}; + x; + if (b) { x = 1 } else { () }; + x; + if (b) { x = 1 } else { + // new line and comment does not suppress it + }; + x; + } + + public fun t_block(b: bool) { + (); + let x = 0; + x; + if (b) { (); () } 
else { x = 1 }; // doesn't trigger if/else case + x; + if (b) { x = 1 } else { (); (); () }; // doesn't trigger if/else case + x; + {}; + { () }; // inner isn't an error but the outer is + { (); }; // inner is an error but outer isn't + () + } + + // public fun t_if_else_if(b: bool, c: bool) { + // let x = 0; + // x; + // if (b) { x = 1 } else if (c) {}; + // } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_complex.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_complex.move new file mode 100644 index 0000000000000..e27a87cda2d14 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_complex.move @@ -0,0 +1,14 @@ +module a::m; + +fun t0() { + let x = 2 + 5; + match (x) { _ => {} } +} + +fun t1() { + match ({ 2 + 3 + 4}) { _ => {} } +} + +fun t2() { + match ({ let x = 2 + 3; x + 4}) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_mut_ref_type.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_mut_ref_type.move new file mode 100644 index 0000000000000..7294975f79172 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_mut_ref_type.move @@ -0,0 +1,5 @@ +module a::m; + +fun t() { + match (&mut 10) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_ref_type.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_ref_type.move new file mode 100644 index 0000000000000..a3a0eca9c2b9c --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_ref_type.move @@ -0,0 +1,5 @@ +module a::m; + +fun t() { + match (&10) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_subject.move 
b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_subject.move new file mode 100644 index 0000000000000..015242e02bc2f --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_subject.move @@ -0,0 +1,5 @@ +module a::m; + +fun t() { + match (10) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp index a155e9d748798..8d8a57abe2613 100644 --- a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp @@ -4,9 +4,27 @@ error[E04007]: incompatible types 13 │ if (cond) 'a: { s1 }.f else s2.f │ ^^^^^^^^^^^^^^^^^^^^ │ │ │ - │ │ Found: '0x42::M::S'. It is not compatible with the other type. - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ │ Given: '0x42::M::S' + │ Invalid 'if'. 
The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + +error[E04005]: expected a single type + ┌─ tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) 'a: { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^^^^^ + │ │ + │ Invalid dot access + │ Expected a single type, but found expression list type: '()' + +error[E04009]: expected specific type + ┌─ tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) 'a: { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^^^^^^^ + │ │ + │ Unbound field 'f' + │ Expected a struct type in the current module but got: '()' error[E01002]: unexpected token ┌─ tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.move:13:32 diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp index c89ea5aa9dae2..c97e100134719 100644 --- a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp @@ -2,13 +2,13 @@ error[E04007]: incompatible types ┌─ tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.move:12:9 │ 7 │ fun bar(): u64 { 0 } - │ --- Found: 'u64'. It is not compatible with the other type. + │ --- Given: 'u64' · 12 │ if (cond) bar() + 1; │ ^^^^^^^^^^^^^^^^^^^ │ │ - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ Invalid 'if'. 
The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' error[E04003]: built-in operation not supported ┌─ tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.move:15:9 diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parsing/clever_errors_raw_abort.move b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/clever_errors_raw_abort.move new file mode 100644 index 0000000000000..b12ba9f501c91 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/clever_errors_raw_abort.move @@ -0,0 +1,40 @@ +#[allow(dead_code)] +module 0x42::a; + +fun f() { + abort +} + +fun f1(): u64 { + abort; + 1 + 1 +} + +fun f2(): u64 { + 1 + 2; + abort; + 1 + 1 +} + +fun f3(): u64 { + 1 + abort; + 1 + 1 +} + +fun f4(): u64 { + abort abort; + 1 + 1 +} + +#[allow(unused_trailing_semi)] +fun f5() { + abort; +} + +fun f6() { + assert!(abort); +} + +fun f7(v: u64) { + if (v > 100) abort +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parsing/expr_abort_missing_value.move b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/expr_abort_missing_value.move new file mode 100644 index 0000000000000..3edcf78e1e932 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/expr_abort_missing_value.move @@ -0,0 +1,5 @@ +module 0x42::M { + fun f(v: u64) { + if (v > 100) abort + } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp index f8e6e2bde401f..f77e9f1d5e7f7 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp @@ -2,13 +2,31 @@ 
error[E04007]: incompatible types ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:9 │ 7 │ fun t(cond: bool, s1: S, s2: S) { - │ - Found: '0x42::M::S'. It is not compatible with the other type. + │ - Given: '0x42::M::S' · 13 │ if (cond) { s1 }.f else s2.f │ ^^^^^^^^^^^^^^^^ │ │ - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + +error[E04005]: expected a single type + ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^ + │ │ + │ Invalid dot access + │ Expected a single type, but found expression list type: '()' + +error[E04009]: expected specific type + ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^^^ + │ │ + │ Unbound field 'f' + │ Expected a struct type in the current module but got: '()' error[E01002]: unexpected token ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:28 diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp index 789d0a379b1a2..be79cfda6a95f 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp @@ -2,13 +2,13 @@ error[E04007]: incompatible types ┌─ tests/move_check/parser/control_exp_associativity_typing_invalid.move:12:9 │ 7 │ fun bar(): u64 { 0 } - │ --- Found: 'u64'. It is not compatible with the other type. 
+ │ --- Given: 'u64' · 12 │ if (cond) bar() + 1; │ ^^^^^^^^^^^^^^^^^^^ │ │ - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' error[E04003]: built-in operation not supported ┌─ tests/move_check/parser/control_exp_associativity_typing_invalid.move:15:9 diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp index 802e95f26007c..9fd456e62887b 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp @@ -1,9 +1,8 @@ -error[E01002]: unexpected token - ┌─ tests/move_check/parser/expr_abort_missing_value.move:5:5 +error[E13001]: feature is not supported in specified edition + ┌─ tests/move_check/parser/expr_abort_missing_value.move:4:22 │ -5 │ } - │ ^ - │ │ - │ Unexpected '}' - │ Expected an expression term +4 │ if (v > 100) abort + │ ^^^^^ Clever `assert!`, `abort`, and `#[error]` are not supported by current edition 'legacy', only '2024.alpha' and '2024.beta' support this feature + │ + = You can update the edition in the 'Move.toml', or via command line flag if invoking the compiler directly. 
diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move index 2b53c3c6bbe93..b167cba1efde3 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move @@ -1,6 +1,6 @@ module 0x42::M { - fun f(_v: u64) { - // Aborts always require a value + fun f(v: u64) { + // Aborts always require a value if not in Move 2024 if (v > 100) abort } } diff --git a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.exp b/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.exp deleted file mode 100644 index 95782c0d701ec..0000000000000 --- a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.exp +++ /dev/null @@ -1,9 +0,0 @@ -error[E01002]: unexpected token - ┌─ tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move:6:1 - │ -6 │ } - │ ^ - │ │ - │ Unexpected '}' - │ Expected an expression term - diff --git a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move b/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move deleted file mode 100644 index 5ae2e1b36d63a..0000000000000 --- a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move +++ /dev/null @@ -1,7 +0,0 @@ -// check: NEGATIVE_STACK_SIZE_WITHIN_BLOCK -module 0x42::m { - -fun main() { - abort -} -} diff --git a/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.exp 
b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.exp new file mode 100644 index 0000000000000..56264bc013239 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.exp @@ -0,0 +1,34 @@ +error[E04007]: incompatible types + ┌─ tests/move_check/typing/if_no_else.move:4:9 + │ +4 │ if (cond) 0; + │ ^^^^^^^^^^^ + │ │ │ + │ │ Given: integer + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + +error[E04007]: incompatible types + ┌─ tests/move_check/typing/if_no_else.move:5:9 + │ + 5 │ if (cond) foo(); + │ ^^^^^^^^^^^^^^^ + │ │ + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + · +13 │ fun foo(): u64 { 0 } + │ --- Given: 'u64' + +error[E04007]: incompatible types + ┌─ tests/move_check/typing/if_no_else.move:6:9 + │ + 6 │ ╭ ╭ if (cond) { + 7 │ │ │ let x = 0; + 8 │ │ │ let y = 1; + │ │ │ - Given: integer + 9 │ │ │ x * y +10 │ │ │ } + │ ╰─│─────────^ Invalid 'if'. 
The body of an 'if' without an 'else' must have type '()' + │ ╰─────────' Expected: '()' + diff --git a/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.move b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.move new file mode 100644 index 0000000000000..245ac3a26b03e --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.move @@ -0,0 +1,14 @@ + +module a::m { + fun t(cond: bool) { + if (cond) 0; + if (cond) foo(); + if (cond) { + let x = 0; + let y = 1; + x * y + } + } + + fun foo(): u64 { 0 } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs b/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs index 502e5421645bb..1f9066f2a5505 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs +++ b/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs @@ -10,6 +10,7 @@ use move_command_line_common::{ }; use move_compiler::{ command_line::compiler::move_check_for_errors, + diagnostics::warning_filters::WarningFilters, diagnostics::*, editions::{Edition, Flavor}, linters::{self, LintLevel}, diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp index cd2e3896f492e..f28726716acaf 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp @@ -1,22 +1,16 @@ warning[Lint W99003]: sub-optimal 'sui::coin::Coin' field type - ┌─ tests/sui_mode/linter/coin_field.move:11:12 + ┌─ tests/sui_mode/linter/coin_field.move:13:12 │ -11 │ struct S2 has key, store { - │ ^^ The field 'c' of 'S2' has type 'sui::coin::Coin' -12 │ id: UID, 13 │ c: Coin, - │ - Storing 'sui::balance::Balance' in this field will typically be more space-efficient + │ ^^^^^^^^ 
Sub-optimal 'sui::coin::Coin' field type. Using 'sui::balance::Balance' instead will be more space efficient │ = This warning can be suppressed with '#[allow(lint(coin_field))]' applied to the 'module' or module member ('const', 'fun', or 'struct') warning[Lint W99003]: sub-optimal 'sui::coin::Coin' field type - ┌─ tests/sui_mode/linter/coin_field.move:25:12 + ┌─ tests/sui_mode/linter/coin_field.move:27:12 │ -25 │ struct S2 has key, store { - │ ^^ The field 'c' of 'S2' has type 'sui::coin::Coin' -26 │ id: UID, 27 │ c: Balance, - │ - Storing 'sui::balance::Balance' in this field will typically be more space-efficient + │ ^^^^^^^^^^^ Sub-optimal 'sui::coin::Coin' field type. Using 'sui::balance::Balance' instead will be more space efficient │ = This warning can be suppressed with '#[allow(lint(coin_field))]' applied to the 'module' or module member ('const', 'fun', or 'struct') diff --git a/external-crates/move/crates/move-core-types/src/annotated_extractor.rs b/external-crates/move/crates/move-core-types/src/annotated_extractor.rs new file mode 100644 index 0000000000000..72c9a1bc7717d --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/annotated_extractor.rs @@ -0,0 +1,334 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + account_address::AccountAddress, annotated_value as A, annotated_visitor as AV, + language_storage::TypeTag, +}; + +/// Elements are components of paths that select values from the sub-structure of other values. +/// They are split into two categories: +/// +/// - Selectors, which recurse into the sub-structure. +/// - Filters, which check properties of the value at that position in the sub-structure. +#[derive(Debug, Clone)] +pub enum Element<'e> { + // Selectors + /// Select a named field, assuming the value in question is a struct or an enum variant. + Field(&'e str), + + /// Select a positional element. 
This can be the element of a vector, or it can be a positional + /// field in an enum or a struct. + Index(u64), + + // Filters + /// Confirm that the current value has a certain type. + Type(&'e TypeTag), + + /// Confirm that the current value is an enum and its variant has this name. Note that to + /// filter on both the enum type and the variant name, the path must contain the Type first, + /// and then the Variant. Otherwise the type filter will be assumed + Variant(&'e str), +} + +/// An Extractor is an [`AV::Visitor`] that deserializes a sub-structure of the value. The +/// sub-structure is found at the end of a path of [`Element`]s which select fields from structs, +/// indices from vectors, and variants from enums. Deserialization is delegated to another visitor, +/// of type `V`, with the Extractor returning `Option`: +/// +/// - `Some(v)` if the given path exists in the value, or +/// - `None` if the path did not exist, +/// - Or an error if the underlying visitor failed for some reason. +/// +/// At every stage, the path can optionally start with an [`Element::Type`], which restricts the +/// type of the top-level value being deserialized. From there, the elements expected are driven by +/// the layout being deserialized: +/// +/// - When deserializing a vector, the next element must be an [`Element::Index`] which selects the +/// offset into the vector that the extractor recurses into. +/// - When deserializing a struct, the next element may be an [`Element::Field`] which selects the +/// field of the struct that the extractor recurses into by name, or an [`Element::Index`] which +/// selects the field by its offset. +/// - When deserializing a variant, the next elements may optionally be an [`Element::Variant`] +/// which expects a particular variant of the enum, followed by either an [`Element::Field`] or +/// an [`Element::Index`], similar to a struct. 
+pub struct Extractor<'p, 'v, V> { + inner: &'v mut V, + path: &'p [Element<'p>], +} + +impl<'p, 'v, 'b, 'l, V: AV::Visitor<'b, 'l>> Extractor<'p, 'v, V> +where + V::Error: std::error::Error + Send + Sync + 'static, +{ + pub fn new(inner: &'v mut V, path: &'p [Element<'p>]) -> Self { + Self { inner, path } + } + + pub fn deserialize_value( + bytes: &'b [u8], + layout: &'l A::MoveTypeLayout, + inner: &'v mut V, + path: Vec>, + ) -> anyhow::Result> { + let mut extractor = Extractor::new(inner, &path); + A::MoveValue::visit_deserialize(bytes, layout, &mut extractor) + } + + pub fn deserialize_struct( + bytes: &'b [u8], + layout: &'l A::MoveStructLayout, + inner: &'v mut V, + path: Vec>, + ) -> anyhow::Result> { + let mut extractor = Extractor::new(inner, &path); + A::MoveStruct::visit_deserialize(bytes, layout, &mut extractor) + } +} + +impl<'p, 'v, 'b, 'l, V: AV::Visitor<'b, 'l>> AV::Visitor<'b, 'l> for Extractor<'p, 'v, V> { + type Value = Option; + type Error = V::Error; + + fn visit_u8( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u8, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U8)] => Some(self.inner.visit_u8(driver, value)?), + _ => None, + }) + } + + fn visit_u16( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u16, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U16)] => Some(self.inner.visit_u16(driver, value)?), + _ => None, + }) + } + + fn visit_u32( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u32, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U32)] => Some(self.inner.visit_u32(driver, value)?), + _ => None, + }) + } + + fn visit_u64( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u64, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U64)] => Some(self.inner.visit_u64(driver, value)?), + _ => None, + }) + } + + fn visit_u128( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u128, + ) -> 
Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U128)] => Some(self.inner.visit_u128(driver, value)?), + _ => None, + }) + } + + fn visit_u256( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: crate::u256::U256, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U256)] => Some(self.inner.visit_u256(driver, value)?), + _ => None, + }) + } + + fn visit_bool( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: bool, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::Bool)] => Some(self.inner.visit_bool(driver, value)?), + _ => None, + }) + } + + fn visit_address( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::Address)] => { + Some(self.inner.visit_address(driver, value)?) + } + _ => None, + }) + } + + fn visit_signer( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::Signer)] => Some(self.inner.visit_signer(driver, value)?), + _ => None, + }) + } + + fn visit_vector( + &mut self, + driver: &mut AV::VecDriver<'_, 'b, 'l>, + ) -> Result { + use Element as E; + use TypeTag as T; + + // If there is a type element, check that it is a vector type with the correct element + // type, and remove it from the path. + let path = if let [E::Type(t), path @ ..] = self.path { + if !matches!(t, T::Vector(t) if driver.element_layout().is_type(t)) { + return Ok(None); + } + path + } else { + self.path + }; + + // If there are no further path elements, we can delegate to the inner visitor. + let [index, path @ ..] = path else { + return Ok(Some(self.inner.visit_vector(driver)?)); + }; + + // Visiting a vector, the next part of the path must be an index -- anything else is + // guaranteed to fail. 
+ let E::Index(i) = index else { + return Ok(None); + }; + + // Skip all the elements before the index, and then recurse. + while driver.off() < *i && driver.skip_element()? {} + Ok(driver + .next_element(&mut Extractor { + inner: self.inner, + path, + })? + .flatten()) + } + + fn visit_struct( + &mut self, + driver: &mut AV::StructDriver<'_, 'b, 'l>, + ) -> Result { + use Element as E; + use TypeTag as T; + + // If there is a type element, check that it is a struct type with the correct struct tag, + // and remove it from the path. + let path = if let [E::Type(t), path @ ..] = self.path { + if !matches!(t, T::Struct(t) if driver.struct_layout().is_type(t)) { + return Ok(None); + } + path + } else { + self.path + }; + + // If there are no further path elements, we can delegate to the inner visitor. + let [field, path @ ..] = path else { + return Ok(Some(self.inner.visit_struct(driver)?)); + }; + + match field { + // Skip over mismatched fields by name. + E::Field(f) => { + while matches!(driver.peek_field(), Some(l) if l.name.as_str() != *f) { + driver.skip_field()?; + } + } + + // Skip over fields by offset. + E::Index(i) => while driver.off() < *i && driver.skip_field()?.is_some() {}, + + // Any other element is invalid in this position. + _ => return Ok(None), + } + + Ok(driver + .next_field(&mut Extractor { + inner: self.inner, + path, + })? + .and_then(|(_, v)| v)) + } + + fn visit_variant( + &mut self, + driver: &mut AV::VariantDriver<'_, 'b, 'l>, + ) -> Result { + use Element as E; + use TypeTag as T; + + // If there is a type element, check that it is a struct type with the correct struct tag, + // and remove it from the path. + let path = if let [E::Type(t), path @ ..] = self.path { + if !matches!(t, T::Struct(t) if driver.enum_layout().is_type(t)) { + return Ok(None); + } + path + } else { + self.path + }; + + // If there is a variant element, check that it matches and remove it from the path. + let path = if let [E::Variant(v), path @ ..] 
= path { + if driver.variant_name().as_str() != *v { + return Ok(None); + } + path + } else { + path + }; + + // If there are no further path elements, we can delegate to the inner visitor. + let [field, path @ ..] = path else { + return Ok(Some(self.inner.visit_variant(driver)?)); + }; + + match field { + // Skip over mismatched fields by name. + E::Field(f) => { + while matches!(driver.peek_field(), Some(l) if l.name.as_str() != *f) { + driver.skip_field()?; + } + } + + // Skip over fields by offset. + E::Index(i) => while driver.off() < *i && driver.skip_field()?.is_some() {}, + + // Any other element is invalid in this position. + _ => return Ok(None), + } + + Ok(driver + .next_field(&mut Extractor { + inner: self.inner, + path, + })? + .and_then(|(_, v)| v)) + } +} diff --git a/external-crates/move/crates/move-core-types/src/annotated_value.rs b/external-crates/move/crates/move-core-types/src/annotated_value.rs index f5a31cc571941..22718e716f9b1 100644 --- a/external-crates/move/crates/move-core-types/src/annotated_value.rs +++ b/external-crates/move/crates/move-core-types/src/annotated_value.rs @@ -142,6 +142,43 @@ pub enum MoveTypeLayout { Enum(Box), } +impl MoveStructLayout { + /// Returns `true` if and only if the layout is for `type_`. + pub fn is_type(&self, type_: &StructTag) -> bool { + self.type_ == *type_ + } +} + +impl MoveEnumLayout { + /// Returns `true` if and only if the layout is for `type_`. + pub fn is_type(&self, type_: &StructTag) -> bool { + self.type_ == *type_ + } +} + +impl MoveTypeLayout { + /// Returns `true` if and only if the layout is for `type_`. 
+ pub fn is_type(&self, type_: &TypeTag) -> bool { + use MoveTypeLayout as L; + use TypeTag as T; + + match self { + L::Bool => matches!(type_, T::Bool), + L::U8 => matches!(type_, T::U8), + L::U16 => matches!(type_, T::U16), + L::U32 => matches!(type_, T::U32), + L::U64 => matches!(type_, T::U64), + L::U128 => matches!(type_, T::U128), + L::U256 => matches!(type_, T::U256), + L::Address => matches!(type_, T::Address), + L::Signer => matches!(type_, T::Signer), + L::Vector(l) => matches!(type_, T::Vector(t) if l.is_type(t)), + L::Struct(l) => matches!(type_, T::Struct(t) if l.is_type(t)), + L::Enum(l) => matches!(type_, T::Struct(t) if l.is_type(t)), + } + } +} + impl MoveValue { /// TODO (annotated-visitor): Port legacy uses of this method to `BoundedVisitor`. pub fn simple_deserialize(blob: &[u8], ty: &MoveTypeLayout) -> AResult { diff --git a/external-crates/move/crates/move-core-types/src/annotated_visitor.rs b/external-crates/move/crates/move-core-types/src/annotated_visitor.rs index 9160d4565dc27..6e87e8405e61d 100644 --- a/external-crates/move/crates/move-core-types/src/annotated_visitor.rs +++ b/external-crates/move/crates/move-core-types/src/annotated_visitor.rs @@ -333,7 +333,7 @@ pub struct VecDriver<'c, 'b, 'l> { pub struct StructDriver<'c, 'b, 'l> { inner: ValueDriver<'c, 'b, 'l>, layout: &'l MoveStructLayout, - off: usize, + off: u64, } /// Exposes information about a variant being visited (its layout, details about the next field to @@ -345,7 +345,7 @@ pub struct VariantDriver<'c, 'b, 'l> { tag: u16, variant_name: &'l IdentStr, variant_layout: &'l [MoveFieldLayout], - off: usize, + off: u64, } #[derive(thiserror::Error, Debug)] @@ -461,7 +461,12 @@ impl<'c, 'b, 'l> VecDriver<'c, 'b, 'l> { self.layout } - /// The number of elements in this vector + /// The number of elements in this vector that have been visited so far. + pub fn off(&self) -> u64 { + self.off + } + + /// The number of elements in this vector. 
pub fn len(&self) -> u64 { self.len } @@ -532,9 +537,14 @@ impl<'c, 'b, 'l> StructDriver<'c, 'b, 'l> { self.layout } + /// The number of fields in this struct that have been visited so far. + pub fn off(&self) -> u64 { + self.off + } + /// The layout of the next field to be visited (if there is one), or `None` otherwise. pub fn peek_field(&self) -> Option<&'l MoveFieldLayout> { - self.layout.fields.get(self.off) + self.layout.fields.get(self.off as usize) } /// Visit the next field in the struct. The driver accepts a visitor to use for this field, @@ -624,9 +634,14 @@ impl<'c, 'b, 'l> VariantDriver<'c, 'b, 'l> { self.variant_name } + /// The number of elements in this vector that have been visited so far. + pub fn off(&self) -> u64 { + self.off + } + /// The layout of the next field to be visited (if there is one), or `None` otherwise. pub fn peek_field(&self) -> Option<&'l MoveFieldLayout> { - self.variant_layout.get(self.off) + self.variant_layout.get(self.off as usize) } /// Visit the next field in the variant. 
The driver accepts a visitor to use for this field, diff --git a/external-crates/move/crates/move-core-types/src/language_storage.rs b/external-crates/move/crates/move-core-types/src/language_storage.rs index 64f314cfe49ef..bbf597fc5d326 100644 --- a/external-crates/move/crates/move-core-types/src/language_storage.rs +++ b/external-crates/move/crates/move-core-types/src/language_storage.rs @@ -6,7 +6,7 @@ use crate::{ account_address::AccountAddress, gas_algebra::{AbstractMemorySize, BOX_ABSTRACT_SIZE, ENUM_BASE_ABSTRACT_SIZE}, identifier::{IdentStr, Identifier}, - parser::{parse_struct_tag, parse_type_tag}, + parsing::types::{ParsedModuleId, ParsedStructType, ParsedType}, }; use move_proc_macros::test_variant_order; use once_cell::sync::Lazy; @@ -137,7 +137,7 @@ impl FromStr for TypeTag { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - parse_type_tag(s) + ParsedType::parse(s)?.into_type_tag(&|_| None) } } @@ -252,7 +252,7 @@ impl FromStr for StructTag { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - parse_struct_tag(s) + ParsedStructType::parse(s)?.into_struct_tag(&|_| None) } } @@ -327,6 +327,13 @@ impl Display for ModuleId { } } +impl FromStr for ModuleId { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + ParsedModuleId::parse(s)?.into_module_id(&|_| None) + } +} + impl ModuleId { pub fn short_str_lossless(&self) -> String { format!("0x{}::{}", self.address.short_str_lossless(), self.name) diff --git a/external-crates/move/crates/move-core-types/src/lib.rs b/external-crates/move/crates/move-core-types/src/lib.rs index b43bbdca21714..ed3921df2dfdc 100644 --- a/external-crates/move/crates/move-core-types/src/lib.rs +++ b/external-crates/move/crates/move-core-types/src/lib.rs @@ -8,6 +8,7 @@ use std::fmt; pub mod abi; pub mod account_address; +pub mod annotated_extractor; pub mod annotated_value; pub mod annotated_visitor; pub mod effects; @@ -17,7 +18,7 @@ pub mod identifier; pub mod language_storage; pub mod metadata; 
pub mod move_resource; -pub mod parser; +pub mod parsing; #[cfg(any(test, feature = "fuzzing"))] pub mod proptest_types; pub mod resolver; diff --git a/external-crates/move/crates/move-core-types/src/parser.rs b/external-crates/move/crates/move-core-types/src/parser.rs deleted file mode 100644 index 99af0f2dce1b5..0000000000000 --- a/external-crates/move/crates/move-core-types/src/parser.rs +++ /dev/null @@ -1,632 +0,0 @@ -// Copyright (c) The Diem Core Contributors -// Copyright (c) The Move Contributors -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - account_address::AccountAddress, - identifier::{self, Identifier}, - language_storage::{StructTag, TypeTag}, - transaction_argument::TransactionArgument, -}; -use anyhow::{bail, format_err, Result}; -use std::iter::Peekable; - -#[derive(Eq, PartialEq, Debug)] -enum Token { - U8Type, - U16Type, - U32Type, - U64Type, - U128Type, - U256Type, - BoolType, - AddressType, - VectorType, - SignerType, - Whitespace(String), - Name(String), - Address(String), - U8(String), - U16(String), - U32(String), - U64(String), - U128(String), - U256(String), - - Bytes(String), - True, - False, - ColonColon, - Lt, - Gt, - Comma, - EOF, -} - -impl Token { - fn is_whitespace(&self) -> bool { - matches!(self, Self::Whitespace(_)) - } -} - -fn token_as_name(tok: Token) -> Result { - use Token::*; - Ok(match tok { - U8Type => "u8".to_string(), - U16Type => "u16".to_string(), - U32Type => "u32".to_string(), - U64Type => "u64".to_string(), - U128Type => "u128".to_string(), - U256Type => "u256".to_string(), - BoolType => "bool".to_string(), - AddressType => "address".to_string(), - VectorType => "vector".to_string(), - True => "true".to_string(), - False => "false".to_string(), - SignerType => "signer".to_string(), - Name(s) => s, - Whitespace(_) | Address(_) | U8(_) | U16(_) | U32(_) | U64(_) | U128(_) | U256(_) - | Bytes(_) | ColonColon | Lt | Gt | Comma | EOF => { - bail!("Invalid token. 
Expected a name but got {:?}", tok) - } - }) -} - -fn name_token(s: String) -> Token { - match s.as_str() { - "u8" => Token::U8Type, - "u16" => Token::U16Type, - "u32" => Token::U32Type, - "u64" => Token::U64Type, - "u128" => Token::U128Type, - "u256" => Token::U256Type, - "bool" => Token::BoolType, - "address" => Token::AddressType, - "vector" => Token::VectorType, - "true" => Token::True, - "false" => Token::False, - "signer" => Token::SignerType, - _ => Token::Name(s), - } -} - -fn next_number(initial: char, mut it: impl Iterator) -> Result<(Token, usize)> { - let mut num = String::new(); - num.push(initial); - loop { - match it.next() { - Some(c) if c.is_ascii_digit() || c == '_' => num.push(c), - Some(c) if c.is_alphanumeric() => { - let mut suffix = String::new(); - suffix.push(c); - loop { - match it.next() { - Some(c) if c.is_ascii_alphanumeric() => suffix.push(c), - _ => { - let len = num.len() + suffix.len(); - let tok = match suffix.as_str() { - "u8" => Token::U8(num), - "u16" => Token::U16(num), - "u32" => Token::U32(num), - "u64" => Token::U64(num), - "u128" => Token::U128(num), - "u256" => Token::U256(num), - _ => bail!("invalid suffix"), - }; - return Ok((tok, len)); - } - } - } - } - _ => { - let len = num.len(); - return Ok((Token::U64(num), len)); - } - } - } -} - -#[allow(clippy::many_single_char_names)] -fn next_token(s: &str) -> Result> { - let mut it = s.chars().peekable(); - match it.next() { - None => Ok(None), - Some(c) => Ok(Some(match c { - '<' => (Token::Lt, 1), - '>' => (Token::Gt, 1), - ',' => (Token::Comma, 1), - ':' => match it.next() { - Some(':') => (Token::ColonColon, 2), - _ => bail!("unrecognized token"), - }, - '0' if it.peek() == Some(&'x') || it.peek() == Some(&'X') => { - it.next().unwrap(); - match it.next() { - Some(c) if c.is_ascii_hexdigit() => { - let mut r = String::new(); - r.push('0'); - r.push('x'); - r.push(c); - for c in it { - if c.is_ascii_hexdigit() { - r.push(c); - } else { - break; - } - } - let len = 
r.len(); - (Token::Address(r), len) - } - _ => bail!("unrecognized token"), - } - } - c if c.is_ascii_digit() => next_number(c, it)?, - 'b' if it.peek() == Some(&'"') => { - it.next().unwrap(); - let mut r = String::new(); - loop { - match it.next() { - Some('"') => break, - Some(c) if c.is_ascii() => r.push(c), - _ => bail!("unrecognized token"), - } - } - let len = r.len() + 3; - (Token::Bytes(hex::encode(r)), len) - } - 'x' if it.peek() == Some(&'"') => { - it.next().unwrap(); - let mut r = String::new(); - loop { - match it.next() { - Some('"') => break, - Some(c) if c.is_ascii_hexdigit() => r.push(c), - _ => bail!("unrecognized token"), - } - } - let len = r.len() + 3; - (Token::Bytes(r), len) - } - c if c.is_ascii_whitespace() => { - let mut r = String::new(); - r.push(c); - for c in it { - if c.is_ascii_whitespace() { - r.push(c); - } else { - break; - } - } - let len = r.len(); - (Token::Whitespace(r), len) - } - c if c.is_ascii_alphabetic() => { - let mut r = String::new(); - r.push(c); - for c in it { - if identifier::is_valid_identifier_char(c) { - r.push(c); - } else { - break; - } - } - let len = r.len(); - (name_token(r), len) - } - _ => bail!("unrecognized token"), - })), - } -} - -fn tokenize(mut s: &str) -> Result> { - let mut v = vec![]; - while let Some((tok, n)) = next_token(s)? 
{ - v.push(tok); - s = &s[n..]; - } - Ok(v) -} - -struct Parser> { - it: Peekable, -} - -impl> Parser { - fn new>(v: T) -> Self { - Self { - it: v.into_iter().peekable(), - } - } - - fn next(&mut self) -> Result { - match self.it.next() { - Some(tok) => Ok(tok), - None => bail!("out of tokens, this should not happen"), - } - } - - fn peek(&mut self) -> Option<&Token> { - self.it.peek() - } - - fn consume(&mut self, tok: Token) -> Result<()> { - let t = self.next()?; - if t != tok { - bail!("expected token {:?}, got {:?}", tok, t) - } - Ok(()) - } - - fn parse_comma_list( - &mut self, - parse_list_item: F, - end_token: Token, - allow_trailing_comma: bool, - ) -> Result> - where - F: Fn(&mut Self) -> Result, - R: std::fmt::Debug, - { - let mut v = vec![]; - if !(self.peek() == Some(&end_token)) { - loop { - v.push(parse_list_item(self)?); - if self.peek() == Some(&end_token) { - break; - } - self.consume(Token::Comma)?; - if self.peek() == Some(&end_token) && allow_trailing_comma { - break; - } - } - } - Ok(v) - } - - fn parse_type_tag(&mut self) -> Result { - Ok(match self.next()? 
{ - Token::U8Type => TypeTag::U8, - Token::U16Type => TypeTag::U16, - Token::U32Type => TypeTag::U32, - Token::U64Type => TypeTag::U64, - Token::U128Type => TypeTag::U128, - Token::U256Type => TypeTag::U256, - Token::BoolType => TypeTag::Bool, - Token::AddressType => TypeTag::Address, - Token::SignerType => TypeTag::Signer, - Token::VectorType => { - self.consume(Token::Lt)?; - let ty = self.parse_type_tag()?; - self.consume(Token::Gt)?; - TypeTag::Vector(Box::new(ty)) - } - Token::Address(addr) => { - self.consume(Token::ColonColon)?; - let module = self.next().and_then(token_as_name)?; - self.consume(Token::ColonColon)?; - let name = self.next().and_then(token_as_name)?; - let ty_args = if self.peek() == Some(&Token::Lt) { - self.next()?; - let ty_args = - self.parse_comma_list(|parser| parser.parse_type_tag(), Token::Gt, true)?; - self.consume(Token::Gt)?; - ty_args - } else { - vec![] - }; - TypeTag::Struct(Box::new(StructTag { - address: AccountAddress::from_hex_literal(&addr)?, - module: Identifier::new(module)?, - name: Identifier::new(name)?, - type_params: ty_args, - })) - } - tok => bail!("unexpected token {:?}, expected type tag", tok), - }) - } - - fn parse_transaction_argument(&mut self) -> Result { - Ok(match self.next()? { - Token::U8(s) => TransactionArgument::U8(s.replace('_', "").parse()?), - Token::U16(s) => TransactionArgument::U16(s.replace('_', "").parse()?), - Token::U32(s) => TransactionArgument::U32(s.replace('_', "").parse()?), - Token::U64(s) => TransactionArgument::U64(s.replace('_', "").parse()?), - Token::U128(s) => TransactionArgument::U128(s.replace('_', "").parse()?), - Token::U256(s) => TransactionArgument::U256(s.replace('_', "").parse()?), - Token::True => TransactionArgument::Bool(true), - Token::False => TransactionArgument::Bool(false), - Token::Address(addr) => { - TransactionArgument::Address(AccountAddress::from_hex_literal(&addr)?) 
- } - Token::Bytes(s) => TransactionArgument::U8Vector(hex::decode(s)?), - tok => bail!("unexpected token {:?}, expected transaction argument", tok), - }) - } -} - -fn parse(s: &str, f: F) -> Result -where - F: Fn(&mut Parser>) -> Result, -{ - let mut tokens: Vec<_> = tokenize(s)? - .into_iter() - .filter(|tok| !tok.is_whitespace()) - .collect(); - tokens.push(Token::EOF); - let mut parser = Parser::new(tokens); - let res = f(&mut parser)?; - parser.consume(Token::EOF)?; - Ok(res) -} - -pub fn parse_type_tag(s: &str) -> Result { - parse(s, |parser| parser.parse_type_tag()) -} - -pub fn parse_transaction_argument(s: &str) -> Result { - parse(s, |parser| parser.parse_transaction_argument()) -} - -pub fn parse_struct_tag(s: &str) -> Result { - let type_tag = parse(s, |parser| parser.parse_type_tag()) - .map_err(|e| format_err!("invalid struct tag: {}, {}", s, e))?; - if let TypeTag::Struct(struct_tag) = type_tag { - Ok(*struct_tag) - } else { - bail!("invalid struct tag: {}", s) - } -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use crate::{ - account_address::AccountAddress, - parser::{parse_struct_tag, parse_transaction_argument, parse_type_tag}, - transaction_argument::TransactionArgument, - u256, - }; - - #[allow(clippy::unreadable_literal)] - #[test] - fn tests_parse_transaction_argument_positive() { - use TransactionArgument as T; - - for (s, expected) in &[ - (" 0u8", T::U8(0)), - ("0u8", T::U8(0)), - ("255u8", T::U8(255)), - ("0", T::U64(0)), - ("0123", T::U64(123)), - ("0u64", T::U64(0)), - ("18446744073709551615", T::U64(18446744073709551615)), - ("18446744073709551615u64", T::U64(18446744073709551615)), - ("0u128", T::U128(0)), - ("1_0u8", T::U8(1_0)), - ("10_u8", T::U8(10)), - ("10___u8", T::U8(10)), - ("1_000u64", T::U64(1_000)), - ("1_000", T::U64(1_000)), - ("1_0_0_0u64", T::U64(1_000)), - ("1_000_000u128", T::U128(1_000_000)), - ( - "340282366920938463463374607431768211455u128", - T::U128(340282366920938463463374607431768211455), - ), - 
(" 0u16", T::U16(0)), - ("0u16", T::U16(0)), - ("532u16", T::U16(532)), - ("65535u16", T::U16(65535)), - ("0u32", T::U32(0)), - ("01239498u32", T::U32(1239498)), - ("35366u32", T::U32(35366)), - ("4294967295u32", T::U32(4294967295)), - ("0u256", T::U256(u256::U256::from(0u8))), - ("1_0u16", T::U16(1_0)), - ("10_u16", T::U16(10)), - ("10___u16", T::U16(10)), - ("1_000u32", T::U32(1_000)), - ("1_0_00u32", T::U32(1_000)), - ("1_0_0_0u32", T::U32(1_000)), - ("1_000_000u256", T::U256(u256::U256::from(1_000_000u64))), - ( - "1_000_000_000u256", - T::U256(u256::U256::from(1_000_000_000u128)), - ), - ( - "3402823669209384634633746074317682114551234u256", - T::U256( - u256::U256::from_str("3402823669209384634633746074317682114551234").unwrap(), - ), - ), - ("true", T::Bool(true)), - ("false", T::Bool(false)), - ( - "0x0", - T::Address(AccountAddress::from_hex_literal("0x0").unwrap()), - ), - ( - "0x54afa3526", - T::Address(AccountAddress::from_hex_literal("0x54afa3526").unwrap()), - ), - ( - "0X54afa3526", - T::Address(AccountAddress::from_hex_literal("0x54afa3526").unwrap()), - ), - ("x\"7fff\"", T::U8Vector(vec![0x7f, 0xff])), - ("x\"\"", T::U8Vector(vec![])), - ("x\"00\"", T::U8Vector(vec![0x00])), - ("x\"deadbeef\"", T::U8Vector(vec![0xde, 0xad, 0xbe, 0xef])), - ] { - assert_eq!(&parse_transaction_argument(s).unwrap(), expected) - } - } - - #[test] - fn tests_parse_transaction_argument_negative() { - /// Test cases for the parser that should always fail. 
- const PARSE_VALUE_NEGATIVE_TEST_CASES: &[&str] = &[ - "-3", - "0u42", - "0u645", - "0u64x", - "0u6 4", - "0u", - "_10", - "_10_u8", - "_10__u8", - "_1014__u32", - "10_u8__", - "_", - "__", - "__4", - "_u8", - "5_bool", - "256u8", - "18446744073709551616u64", - "340282366920938463463374607431768211456u128", - "340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456u256", - "0xg", - "0x00g0", - "0x", - "0x_", - "", - "@@", - "()", - "x\"ffff", - "x\"a \"", - "x\" \"", - "x\"0g\"", - "x\"0\"", - "garbage", - "true3", - "3false", - "3 false", - "", - ]; - - for s in PARSE_VALUE_NEGATIVE_TEST_CASES { - assert!( - parse_transaction_argument(s).is_err(), - "test case unexpectedly succeeded: {}", - s - ) - } - } - - #[test] - fn test_type_tag() { - for s in &[ - "u64", - "bool", - "vector", - "vector>", - "vector", - "vector>", - "vector", - "vector>", - "vector", - "vector>", - "vector", - "vector>", - "signer", - "0x1::M::S", - "0x2::M::S_", - "0x3::M_::S", - "0x4::M_::S_", - "0x00000000004::M::S", - "0x1::M::S", - "0x1::M::S", - "0x1::M::S", - "0x1::M::S", - "0x1::M::S<0x2::P::Q>", - "vector<0x1::M::S>", - "vector<0x1::M_::S_>", - "vector>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - ] { - assert!(parse_type_tag(s).is_ok(), "Failed to parse tag {}", s); - } - } - - #[test] - fn test_parse_valid_struct_tag() { - let valid = vec![ - "0x1::Diem::Diem", - "0x1::Diem_Type::Diem", - "0x1::Diem_::Diem", - "0x1::X_123::X32_", - "0x1::Diem::Diem_Type", - "0x1::Diem::Diem<0x1::XDX::XDX>", - "0x1::Diem::Diem<0x1::XDX::XDX_Type>", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem
", - "0x1::Diem::Diem", - "0x1::Diem::Diem>", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem,address,signer>", - "0x1::Diem::Diem>>", - "0x1::Diem::Diem<0x1::Diem::Struct, 0x1::Diem::Diem>>>>", - ]; - for text in valid { - let st = parse_struct_tag(text).expect("valid StructTag"); - assert_eq!( - st.to_string().replace(' ', ""), - text.replace(' ', ""), - "text: {:?}, StructTag: {:?}", - text, - st - ); - } - } - - #[test] - fn test_parse_struct_tag_with_type_names() { - let names = vec![ - "address", "vector", "u128", "u256", "u64", "u32", "u16", "u8", "bool", "signer", - ]; - - let mut tests = vec![]; - for name in &names { - for name_type in &names { - tests.push(format!("0x1::{name}::{name_type}")) - } - } - - let mut instantiations = vec![]; - for ty in &tests { - for other_ty in &tests { - instantiations.push(format!("{ty}<{other_ty}>")) - } - } - - for text in tests.iter().chain(instantiations.iter()) { - let st = parse_struct_tag(text).expect("valid StructTag"); - assert_eq!( - st.to_string().replace(' ', ""), - text.replace(' ', ""), - "text: {:?}, StructTag: {:?}", - text, - st - ); - } - } -} diff --git a/external-crates/move/crates/move-command-line-common/src/address.rs b/external-crates/move/crates/move-core-types/src/parsing/address.rs similarity index 93% rename from external-crates/move/crates/move-command-line-common/src/address.rs rename to external-crates/move/crates/move-core-types/src/parsing/address.rs index 0e63a23b8d85d..44ff9810fc613 100644 --- a/external-crates/move/crates/move-command-line-common/src/address.rs +++ b/external-crates/move/crates/move-core-types/src/parsing/address.rs @@ -1,10 +1,10 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::parser::{parse_address_number, NumberFormat}; +use crate::account_address::AccountAddress; +use 
crate::parsing::parser::{parse_address_number, NumberFormat}; +use crate::u256::U256; use anyhow::anyhow; -use move_core_types::account_address::AccountAddress; -use num_bigint::BigUint; use std::{fmt, hash::Hash}; // Parsed Address, either a name or a numerical address @@ -62,10 +62,7 @@ impl NumericalAddress { pub fn parse_str(s: &str) -> Result { match parse_address_number(s) { - Some((n, format)) => Ok(NumericalAddress { - bytes: AccountAddress::new(n), - format, - }), + Some((n, format)) => Ok(NumericalAddress { bytes: n, format }), None => // TODO the kind of error is in an unstable nightly API // But currently the only way this should fail is if the number is too long @@ -90,7 +87,7 @@ impl fmt::Display for NumericalAddress { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.format { NumberFormat::Decimal => { - let n = BigUint::from_bytes_be(self.bytes.as_ref()); + let n = U256::from_be_bytes(&self.bytes); write!(f, "{}", n) } NumberFormat::Hex => write!(f, "{:#X}", self), diff --git a/external-crates/move/crates/move-core-types/src/parsing/mod.rs b/external-crates/move/crates/move-core-types/src/parsing/mod.rs new file mode 100644 index 0000000000000..46c51e639f0a6 --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/parsing/mod.rs @@ -0,0 +1,10 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +#![forbid(unsafe_code)] + +pub mod address; +pub mod parser; +pub mod types; +pub mod values; diff --git a/external-crates/move/crates/move-command-line-common/src/parser.rs b/external-crates/move/crates/move-core-types/src/parsing/parser.rs similarity index 60% rename from external-crates/move/crates/move-command-line-common/src/parser.rs rename to external-crates/move/crates/move-core-types/src/parsing/parser.rs index accd1d1a94653..eba50aef13801 100644 --- a/external-crates/move/crates/move-command-line-common/src/parser.rs +++ 
b/external-crates/move/crates/move-core-types/src/parsing/parser.rs @@ -1,21 +1,22 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::{ +use crate::parsing::{ address::{NumericalAddress, ParsedAddress}, types::{ParsedFqName, ParsedModuleId, ParsedStructType, ParsedType, TypeToken}, values::{ParsableValue, ParsedValue, ValueToken}, }; -use anyhow::{anyhow, bail, Result}; -use move_core_types::{ +use crate::{ account_address::AccountAddress, u256::{U256FromStrError, U256}, }; -use num_bigint::BigUint; +use anyhow::{anyhow, bail, Result}; use std::{fmt::Display, iter::Peekable, num::ParseIntError}; const MAX_TYPE_DEPTH: u64 = 128; const MAX_TYPE_NODE_COUNT: u64 = 256; +// See: https://stackoverflow.com/questions/43787672/the-max-number-of-digits-in-an-int-based-on-number-of-bits +const U256_MAX_DECIMAL_DIGITS: usize = 241 * AccountAddress::LENGTH / 100 + 1; pub trait Token: Display + Copy + Eq { fn is_whitespace(&self) -> bool; @@ -76,7 +77,7 @@ impl ParsedValue { } } -fn parse<'a, Tok: Token, R>( +pub(crate) fn parse<'a, Tok: Token, R>( s: &'a str, f: impl FnOnce(&mut Parser<'a, Tok, std::vec::IntoIter<(Tok, &'a str)>>) -> Result, ) -> Result { @@ -139,8 +140,12 @@ impl<'a, Tok: Token, I: Iterator> Parser<'a, Tok, I> { break; } self.advance(delim)?; - if is_end(self.peek_tok()) && allow_trailing_delim { - break; + if is_end(self.peek_tok()) { + if allow_trailing_delim { + break; + } else { + bail!("Invalid type list: trailing delimiter '{}'", delim) + } } } Ok(v) @@ -225,6 +230,9 @@ impl<'a, I: Iterator> Parser<'a, TypeToken, I> { true, )?; self.advance(TypeToken::Gt)?; + if type_args.is_empty() { + bail!("expected at least one type argument") + } type_args } _ => vec![], @@ -440,306 +448,23 @@ pub fn parse_u256(s: &str) -> Result<(U256, NumberFormat), U256FromStrError> { } // Parse an address from a decimal or hex encoding -pub fn parse_address_number(s: &str) -> Option<([u8; AccountAddress::LENGTH], NumberFormat)> { +pub fn 
parse_address_number(s: &str) -> Option<(AccountAddress, NumberFormat)> { let (txt, base) = determine_num_text_and_base(s); - let parsed = BigUint::parse_bytes( - txt.as_bytes(), + let txt = txt.replace('_', ""); + let max_len = match base { + NumberFormat::Hex => AccountAddress::LENGTH * 2, + NumberFormat::Decimal => U256_MAX_DECIMAL_DIGITS, + }; + if txt.len() > max_len { + return None; + } + let parsed = U256::from_str_radix( + &txt, match base { NumberFormat::Hex => 16, NumberFormat::Decimal => 10, }, - )?; - let bytes = parsed.to_bytes_be(); - if bytes.len() > AccountAddress::LENGTH { - return None; - } - let mut result = [0u8; AccountAddress::LENGTH]; - result[(AccountAddress::LENGTH - bytes.len())..].clone_from_slice(&bytes); - Some((result, base)) -} - -#[cfg(test)] -mod tests { - use crate::{ - address::{NumericalAddress, ParsedAddress}, - types::{ParsedStructType, ParsedType}, - values::ParsedValue, - }; - use move_core_types::{account_address::AccountAddress, identifier::Identifier, u256::U256}; - use proptest::prelude::*; - use proptest::proptest; - - #[allow(clippy::unreadable_literal)] - #[test] - fn tests_parse_value_positive() { - use ParsedValue as V; - let cases: &[(&str, V)] = &[ - (" 0u8", V::U8(0)), - ("0u8", V::U8(0)), - ("0xF_Fu8", V::U8(255)), - ("0xF__FF__Eu16", V::U16(u16::MAX - 1)), - ("0xFFF_FF__FF_Cu32", V::U32(u32::MAX - 3)), - ("255u8", V::U8(255)), - ("255u256", V::U256(U256::from(255u64))), - ("0", V::InferredNum(U256::from(0u64))), - ("0123", V::InferredNum(U256::from(123u64))), - ("0xFF", V::InferredNum(U256::from(0xFFu64))), - ("0xF_F", V::InferredNum(U256::from(0xFFu64))), - ("0xFF__", V::InferredNum(U256::from(0xFFu64))), - ( - "0x12_34__ABCD_FF", - V::InferredNum(U256::from(0x1234ABCDFFu64)), - ), - ("0u64", V::U64(0)), - ("0x0u64", V::U64(0)), - ( - "18446744073709551615", - V::InferredNum(U256::from(18446744073709551615u128)), - ), - ("18446744073709551615u64", V::U64(18446744073709551615)), - ("0u128", V::U128(0)), - 
("1_0u8", V::U8(1_0)), - ("10_u8", V::U8(10)), - ("1_000u64", V::U64(1_000)), - ("1_000", V::InferredNum(U256::from(1_000u32))), - ("1_0_0_0u64", V::U64(1_000)), - ("1_000_000u128", V::U128(1_000_000)), - ( - "340282366920938463463374607431768211455u128", - V::U128(340282366920938463463374607431768211455), - ), - ("true", V::Bool(true)), - ("false", V::Bool(false)), - ( - "@0x0", - V::Address(ParsedAddress::Numerical(NumericalAddress::new( - AccountAddress::from_hex_literal("0x0") - .unwrap() - .into_bytes(), - crate::parser::NumberFormat::Hex, - ))), - ), - ( - "@0", - V::Address(ParsedAddress::Numerical(NumericalAddress::new( - AccountAddress::from_hex_literal("0x0") - .unwrap() - .into_bytes(), - crate::parser::NumberFormat::Hex, - ))), - ), - ( - "@0x54afa3526", - V::Address(ParsedAddress::Numerical(NumericalAddress::new( - AccountAddress::from_hex_literal("0x54afa3526") - .unwrap() - .into_bytes(), - crate::parser::NumberFormat::Hex, - ))), - ), - ( - "b\"hello\"", - V::Vector("hello".as_bytes().iter().copied().map(V::U8).collect()), - ), - ("x\"7fff\"", V::Vector(vec![V::U8(0x7f), V::U8(0xff)])), - ("x\"\"", V::Vector(vec![])), - ("x\"00\"", V::Vector(vec![V::U8(0x00)])), - ( - "x\"deadbeef\"", - V::Vector(vec![V::U8(0xde), V::U8(0xad), V::U8(0xbe), V::U8(0xef)]), - ), - ]; - - for (s, expected) in cases { - assert_eq!(&ParsedValue::parse(s).unwrap(), expected) - } - } - - #[test] - fn tests_parse_value_negative() { - /// Test cases for the parser that should always fail. 
- const PARSE_VALUE_NEGATIVE_TEST_CASES: &[&str] = &[ - "-3", - "0u42", - "0u645", - "0u64x", - "0u6 4", - "0u", - "_10", - "_10_u8", - "_10__u8", - "10_u8__", - "0xFF_u8_", - "0xF_u8__", - "0x_F_u8__", - "_", - "__", - "__4", - "_u8", - "5_bool", - "256u8", - "4294967296u32", - "65536u16", - "18446744073709551616u64", - "340282366920938463463374607431768211456u128", - "340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456u256", - "0xg", - "0x00g0", - "0x", - "0x_", - "", - "@@", - "()", - "x\"ffff", - "x\"a \"", - "x\" \"", - "x\"0g\"", - "x\"0\"", - "garbage", - "true3", - "3false", - "3 false", - "", - "0XFF", - "0X0", - ]; - - for s in PARSE_VALUE_NEGATIVE_TEST_CASES { - assert!( - ParsedValue::<()>::parse(s).is_err(), - "Unexpectedly succeeded in parsing: {}", - s - ) - } - } - - #[test] - fn test_parse_type_negative() { - for s in &[ - "_", - "_::_::_", - "0x1::_", - "0x1::__::_", - "0x1::_::__", - "0x1::_::foo", - "0x1::foo::_", - "0x1::_::_", - "0x1::bar::foo<0x1::_::foo>", - ] { - assert!( - ParsedType::parse(s).is_err(), - "Parsed type {s} but should have failed" - ); - } - } - - #[test] - fn test_parse_struct_negative() { - for s in &[ - "_", - "_::_::_", - "0x1::_", - "0x1::__::_", - "0x1::_::__", - "0x1::_::foo", - "0x1::foo::_", - "0x1::_::_", - "0x1::bar::foo<0x1::_::foo>", - ] { - assert!( - ParsedStructType::parse(s).is_err(), - "Parsed type {s} but should have failed" - ); - } - } - - #[test] - fn test_type_type() { - for s in &[ - "u64", - "bool", - "vector", - "vector>", - "address", - "signer", - "0x1::M::S", - "0x2::M::S_", - "0x3::M_::S", - "0x4::M_::S_", - "0x00000000004::M::S", - "0x1::M::S", - "0x1::M::S<0x2::P::Q>", - "vector<0x1::M::S>", - "vector<0x1::M_::S_>", - "vector>", - "0x1::M::S>", - "0x1::_bar::_BAR", - "0x1::__::__", - "0x1::_bar::_BAR<0x2::_____::______fooo______>", - "0x1::__::__<0x2::_____::______fooo______, 
0xff::Bar____::_______foo>", - ] { - assert!(ParsedType::parse(s).is_ok(), "Failed to parse type {}", s); - } - } - - #[test] - fn test_parse_valid_struct_type() { - let valid = vec![ - "0x1::Foo::Foo", - "0x1::Foo_Type::Foo", - "0x1::Foo_::Foo", - "0x1::X_123::X32_", - "0x1::Foo::Foo_Type", - "0x1::Foo::Foo<0x1::ABC::ABC>", - "0x1::Foo::Foo<0x1::ABC::ABC_Type>", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo
", - "0x1::Foo::Foo", - "0x1::Foo::Foo>", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo,address,signer>", - "0x1::Foo::Foo>>", - "0x1::Foo::Foo<0x1::Foo::Struct, 0x1::Foo::Foo>>>>", - "0x1::_bar::_BAR", - "0x1::__::__", - "0x1::_bar::_BAR<0x2::_____::______fooo______>", - "0x1::__::__<0x2::_____::______fooo______, 0xff::Bar____::_______foo>", - ]; - for s in valid { - assert!( - ParsedStructType::parse(s).is_ok(), - "Failed to parse struct {}", - s - ); - } - } - - fn struct_type_gen() -> impl Strategy { - ( - any::(), - any::(), - any::(), - ) - .prop_map(|(address, module, name)| format!("0x{}::{}::{}", address, module, name)) - } - - proptest! { - #[test] - fn test_parse_valid_struct_type_proptest(s in struct_type_gen()) { - prop_assert!(ParsedStructType::parse(&s).is_ok()); - } - - #[test] - fn test_parse_valid_type_struct_only_proptest(s in struct_type_gen()) { - prop_assert!(ParsedStructType::parse(&s).is_ok()); - } - } + ) + .ok()?; + Some((AccountAddress::new(parsed.to_be_bytes()), base)) } diff --git a/external-crates/move/crates/move-command-line-common/src/types.rs b/external-crates/move/crates/move-core-types/src/parsing/types.rs similarity index 94% rename from external-crates/move/crates/move-command-line-common/src/types.rs rename to external-crates/move/crates/move-core-types/src/parsing/types.rs index 442e0ed691629..acdb789d93156 100644 --- a/external-crates/move/crates/move-command-line-common/src/types.rs +++ b/external-crates/move/crates/move-core-types/src/parsing/types.rs @@ -3,14 +3,14 @@ use std::fmt::{self, Display}; -use anyhow::bail; -use move_core_types::{ +use crate::{ account_address::AccountAddress, identifier::{self, Identifier}, language_storage::{ModuleId, StructTag, TypeTag}, }; +use anyhow::bail; -use crate::{address::ParsedAddress, parser::Token}; +use crate::parsing::{address::ParsedAddress, parser::Token}; #[derive(Eq, PartialEq, Debug, Clone, Copy)] pub enum TypeToken { @@ -91,10 +91,10 @@ impl 
Token for TypeToken { Some(':') => (Self::ColonColon, 2), _ => bail!("unrecognized token: {}", s), }, - '0' if matches!(chars.peek(), Some('x') | Some('X')) => { + '0' if matches!(chars.peek(), Some('x')) => { chars.next().unwrap(); match chars.next() { - Some(c) if c.is_ascii_hexdigit() || c == '_' => { + Some(c) if c.is_ascii_hexdigit() => { // 0x + c + remaining let len = 3 + chars .take_while(|q| char::is_ascii_hexdigit(q) || *q == '_') @@ -106,7 +106,9 @@ impl Token for TypeToken { } c if c.is_ascii_digit() => { // c + remaining - let len = 1 + chars.take_while(char::is_ascii_digit).count(); + let len = 1 + chars + .take_while(|c| c.is_ascii_digit() || *c == '_') + .count(); (Self::AddressIdent, len) } c if c.is_ascii_whitespace() => { diff --git a/external-crates/move/crates/move-command-line-common/src/values.rs b/external-crates/move/crates/move-core-types/src/parsing/values.rs similarity index 99% rename from external-crates/move/crates/move-command-line-common/src/values.rs rename to external-crates/move/crates/move-core-types/src/parsing/values.rs index 03bf0a80ad9f4..951dafe04c11a 100644 --- a/external-crates/move/crates/move-command-line-common/src/values.rs +++ b/external-crates/move/crates/move-core-types/src/parsing/values.rs @@ -1,16 +1,16 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::{ +use crate::parsing::{ address::ParsedAddress, parser::{Parser, Token}, }; -use anyhow::bail; -use move_core_types::{ +use crate::{ account_address::AccountAddress, identifier, runtime_value::{MoveStruct, MoveValue}, }; +use anyhow::bail; use std::fmt::{self, Display}; #[derive(Eq, PartialEq, Debug, Clone, Copy)] @@ -39,13 +39,13 @@ pub enum ValueToken { #[derive(Eq, PartialEq, Debug, Clone)] pub enum ParsedValue { Address(ParsedAddress), - InferredNum(move_core_types::u256::U256), + InferredNum(crate::u256::U256), U8(u8), U16(u16), U32(u32), U64(u64), U128(u128), - U256(move_core_types::u256::U256), + 
U256(crate::u256::U256), Bool(bool), Vector(Vec>), Struct(Vec>), diff --git a/external-crates/move/crates/move-core-types/src/u256.rs b/external-crates/move/crates/move-core-types/src/u256.rs index d47245857df5c..10657683d6eaf 100644 --- a/external-crates/move/crates/move-core-types/src/u256.rs +++ b/external-crates/move/crates/move-core-types/src/u256.rs @@ -308,6 +308,11 @@ impl U256 { Self(PrimitiveU256::from_little_endian(slice)) } + /// U256 from 32 big endian bytes + pub fn from_be_bytes(slice: &[u8; U256_NUM_BYTES]) -> Self { + Self(PrimitiveU256::from_big_endian(slice)) + } + /// U256 to 32 little endian bytes pub fn to_le_bytes(self) -> [u8; U256_NUM_BYTES] { let mut bytes = [0u8; U256_NUM_BYTES]; @@ -315,6 +320,13 @@ impl U256 { bytes } + /// U256 to 32 big endian bytes + pub fn to_be_bytes(self) -> [u8; U256_NUM_BYTES] { + let mut bytes = [0u8; U256_NUM_BYTES]; + self.0.to_big_endian(&mut bytes); + bytes + } + /// Leading zeros of the number pub fn leading_zeros(&self) -> u32 { self.0.leading_zeros() diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/extractor_test.rs b/external-crates/move/crates/move-core-types/src/unit_tests/extractor_test.rs new file mode 100644 index 0000000000000..9d93abf734207 --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/unit_tests/extractor_test.rs @@ -0,0 +1,852 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; + +use crate::{ + account_address::AccountAddress, + annotated_extractor::{Element as E, Extractor}, + annotated_value::{MoveTypeLayout, MoveValue}, + language_storage::TypeTag, + unit_tests::visitor_test::{ + enum_layout_, serialize, struct_layout_, struct_value_, variant_value_, PrintVisitor, + }, +}; + +#[test] +fn struct_() { + let expect = r#" +[0] struct 0x0::foo::Bar { + a: u8, + b: u16, + c: u32, + d: u64, + e: u128, + f: u256, + g: bool, + h: address, + i: signer, + j: vector, + k: struct 0x0::foo::Baz { + l: 
u8, + }, + m: enum 0x0::foo::Qux { + n { + o: u8, + }, + }, + p: vector, +} +[1] 1: u8 +[1] 2: u16 +[1] 3: u32 +[1] 4: u64 +[1] 5: u128 +[1] 6: u256 +[1] true: bool +[1] 0000000000000000000000000000000000000000000000000000000000000000: address +[1] 0000000000000000000000000000000000000000000000000000000000000000: signer +[1] vector +[2] 7: u8 +[2] 8: u8 +[2] 9: u8 +[1] struct 0x0::foo::Baz { + l: u8, +} +[2] 10: u8 +[1] enum 0x0::foo::Qux { + n { + o: u8, + }, +} +[2] 11: u8 +[1] vector +[2] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[3] 12: u8 +[3] true: bool +[2] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[3] 13: u8 +[3] false: bool + "#; + + for path in enumerate_paths(vec![C::Opt(E::Type(&type_("0x0::foo::Bar")))]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_a() { + let expect = r#" +[0] 1: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("a"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_b() { + let expect = r#" +[0] 2: u16 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("b"), E::Index(1)]), + C::Opt(E::Type(&type_("u16"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_c() { + let expect = r#" +[0] 3: u32 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("c"), E::Index(2)]), + C::Opt(E::Type(&type_("u32"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_d() { + let expect = r#" +[0] 4: u64 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("d"), E::Index(3)]), + C::Opt(E::Type(&type_("u64"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_e() { + let expect = r#" +[0] 5: u128 + "#; + + for path 
in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("e"), E::Index(4)]), + C::Opt(E::Type(&type_("u128"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_f() { + let expect = r#" +[0] 6: u256 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("f"), E::Index(5)]), + C::Opt(E::Type(&type_("u256"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_g() { + let expect = r#" +[0] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("g"), E::Index(6)]), + C::Opt(E::Type(&type_("bool"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_h() { + let expect = r#" +[0] 0000000000000000000000000000000000000000000000000000000000000000: address + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("h"), E::Index(7)]), + C::Opt(E::Type(&type_("address"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_i() { + let expect = r#" +[0] 0000000000000000000000000000000000000000000000000000000000000000: signer + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("i"), E::Index(8)]), + C::Opt(E::Type(&type_("signer"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j() { + let expect = r#" +[0] vector +[1] 7: u8 +[1] 8: u8 +[1] 9: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), E::Index(9)]), + C::Opt(E::Type(&type_("vector"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j_0() { + let expect = r#" +[0] 7: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), E::Index(9)]), + 
C::Opt(E::Type(&type_("vector"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j_1() { + let expect = r#" +[0] 8: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), E::Index(9)]), + C::Opt(E::Type(&type_("vector"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j_2() { + let expect = r#" +[0] 9: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), E::Index(9)]), + C::Opt(E::Type(&type_("vector"))), + C::Req(vec![E::Index(2)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_k() { + let expect = r#" +[0] struct 0x0::foo::Baz { + l: u8, +} +[1] 10: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("k"), E::Index(10)]), + C::Opt(E::Type(&type_("0x0::foo::Baz"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_k_l() { + let expect = r#" +[0] 10: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("k"), E::Index(10)]), + C::Opt(E::Type(&type_("0x0::foo::Baz"))), + C::Req(vec![E::Field("l"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_m() { + let expect = r#" +[0] enum 0x0::foo::Qux { + n { + o: u8, + }, +} +[1] 11: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("m"), E::Index(11)]), + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_m_o() { + let expect = r#" +[0] 11: u8 + "#; + 
+ for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("m"), E::Index(11)]), + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + C::Req(vec![E::Field("o"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p() { + let expect = r#" +[0] vector +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 12: u8 +[2] true: bool +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 13: u8 +[2] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_0() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[1] 12: u8 +[1] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_0_q() { + let expect = r#" +[0] 12: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("q"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_0_r() { + let expect = r#" +[0] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + 
C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("r"), E::Index(1)]), + C::Opt(E::Type(&type_("bool"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_1() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[1] 13: u8 +[1] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_1_q() { + let expect = r#" +[0] 13: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("q"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_1_r() { + let expect = r#" +[0] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("r"), E::Index(1)]), + C::Opt(E::Type(&type_("bool"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn vector_() { + let expect = r#" +[0] vector +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 12: u8 +[2] true: bool +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 13: u8 +[2] false: bool + "#; + + for path in enumerate_paths(vec![C::Opt(E::Type(&type_("vector<0x0::foo::Quy>")))]) { + assert_path(test_vector(), path, expect); + } +} + +#[test] +fn vector_0() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: u8, + r: 
bool, +} +[1] 12: u8 +[1] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_vector(), path, expect); + } +} + +#[test] +fn vector_1() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[1] 13: u8 +[1] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_vector(), path, expect); + } +} + +#[test] +fn enum_() { + let expect = r#" +[0] enum 0x0::foo::Qux { + n { + o: u8, + }, +} +[1] 11: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + ]) { + assert_path(test_enum(), path, expect); + } +} + +#[test] +fn enum_o() { + let expect = r#" +[0] 11: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + C::Req(vec![E::Field("o"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_enum(), path, expect); + } +} + +#[test] +fn field_not_found() { + for path in [ + vec![E::Field("z")], + // Trying to access a field on a primitive + vec![E::Field("a"), E::Field("z")], + // Nested field doesn't exist + vec![E::Field("k"), E::Field("z")], + // Nested field on an enum (that doesn't exist) + vec![E::Field("m"), E::Field("z")], + // Trying to access a field on a vector + vec![E::Field("p"), E::Field("z")], + // Nested field on a struct in a vector + vec![E::Field("p"), E::Index(0), E::Field("z")], + ] { + assert_no_path(test_struct(), path); + } +} + +#[test] +fn index_out_of_bounds() { + for path in [ + // Positional access of field, out of bounds + vec![E::Index(1000)], + // Trying to access index on a primitive + vec![E::Field("a"), E::Index(1000)], + // Out of bounds on primitive vector + 
vec![E::Field("j"), E::Index(1000)], + // Out of bounds field on nested struct + vec![E::Field("k"), E::Index(1000)], + // Out of bounds field on nested enum + vec![E::Field("m"), E::Index(1000)], + // Out of bounds field on struct vector + vec![E::Field("p"), E::Index(1000)], + // Out of bounds field on struct in vector + vec![E::Field("p"), E::Index(0), E::Index(1000)], + ] { + assert_no_path(test_struct(), path); + } +} + +#[test] +fn type_mismatch() { + for path in [ + // Wrong root type + vec![E::Type(&type_("0x0::foo::Baz"))], + // Wrong primitive type + vec![E::Field("a"), E::Type(&type_("u16"))], + // Wrong nested struct + vec![E::Field("k"), E::Type(&type_("0x0::foo::Bar"))], + // Wrong type with further nesting + vec![ + E::Field("k"), + E::Type(&type_("0x0::foo::Bar")), + E::Field("l"), + ], + // Wrong primitive vector + vec![E::Field("j"), E::Type(&type_("vector"))], + vec![E::Field("j"), E::Type(&type_("u8"))], + // Wrong enum type + vec![E::Field("m"), E::Type(&type_("0x0::foo::Bar"))], + // Wrong type nested inside enum + vec![E::Field("m"), E::Field("o"), E::Type(&type_("u16"))], + ] { + assert_no_path(test_struct(), path); + } +} + +#[test] +fn variant_not_found() { + assert_no_path(test_enum(), vec![E::Variant("z")]); + assert_no_path(test_struct(), vec![E::Field("m"), E::Variant("z")]); +} + +/// Components are used to generate paths. Each component offers a number of options for the +/// element that goes in the same position in the generated path. +enum C<'p> { + /// This element is optional -- paths are geneated with and without this element at the + /// component's position. + Opt(E<'p>), + + /// This element is required, and is picked from the provided list. + Req(Vec>), +} + +/// Generate a list of paths as a cartesian product of the provided components. 
+fn enumerate_paths(components: Vec>) -> Vec>> { + let mut paths = vec![vec![]]; + + for component in components { + let mut new_paths = vec![]; + + for path in paths { + match &component { + C::Opt(element) => { + new_paths.push(path.clone()); + let mut path = path.clone(); + path.push(element.clone()); + new_paths.push(path); + } + C::Req(elements) => { + new_paths.extend(elements.iter().map(|e| { + let mut path = path.clone(); + path.push(e.clone()); + path + })); + } + } + } + + paths = new_paths; + } + + paths +} + +fn assert_path((value, layout): (MoveValue, MoveTypeLayout), path: Vec>, expect: &str) { + let bytes = serialize(value); + let mut printer = PrintVisitor::default(); + + assert!( + Extractor::deserialize_value(&bytes, &layout, &mut printer, path.clone()) + .unwrap() + .is_some(), + "Failed to extract value {path:?}", + ); + + assert_eq!( + printer.output.trim(), + expect.trim(), + "Failed to match value at {path:?}" + ); +} + +fn assert_no_path((value, layout): (MoveValue, MoveTypeLayout), path: Vec>) { + let bytes = serialize(value); + let mut printer = PrintVisitor::default(); + + assert!( + Extractor::deserialize_value(&bytes, &layout, &mut printer, path.clone()) + .unwrap() + .is_none(), + "Expected not to find something at {path:?}", + ); + + assert!( + printer.output.is_empty(), + "Expected not to delegate to the inner visitor for {path:?}" + ); +} + +fn type_(t: &str) -> TypeTag { + TypeTag::from_str(t).unwrap() +} + +fn test_struct() -> (MoveValue, MoveTypeLayout) { + use MoveTypeLayout as T; + use MoveValue as V; + + let (vector, vector_layout) = test_vector(); + let (variant, enum_layout) = test_enum(); + + let value = struct_value_( + "0x0::foo::Bar", + vec![ + ("a", V::U8(1)), + ("b", V::U16(2)), + ("c", V::U32(3)), + ("d", V::U64(4)), + ("e", V::U128(5)), + ("f", V::U256(6u32.into())), + ("g", V::Bool(true)), + ("h", V::Address(AccountAddress::ZERO)), + ("i", V::Signer(AccountAddress::ZERO)), + ("j", V::Vector(vec![V::U8(7), V::U8(8), 
V::U8(9)])), + ("k", struct_value_("0x0::foo::Baz", vec![("l", V::U8(10))])), + ("m", variant), + ("p", vector), + ], + ); + + let layout = struct_layout_( + "0x0::foo::Bar", + vec![ + ("a", T::U8), + ("b", T::U16), + ("c", T::U32), + ("d", T::U64), + ("e", T::U128), + ("f", T::U256), + ("g", T::Bool), + ("h", T::Address), + ("i", T::Signer), + ("j", T::Vector(Box::new(T::U8))), + ("k", struct_layout_("0x0::foo::Baz", vec![("l", T::U8)])), + ("m", enum_layout), + ("p", vector_layout), + ], + ); + + (value, layout) +} + +fn test_enum() -> (MoveValue, MoveTypeLayout) { + use MoveTypeLayout as T; + use MoveValue as V; + + let value = variant_value_("0x0::foo::Qux", "n", 0, vec![("o", V::U8(11))]); + let layout = enum_layout_("0x0::foo::Qux", vec![("n", vec![("o", T::U8)])]); + + (value, layout) +} + +fn test_vector() -> (MoveValue, MoveTypeLayout) { + use MoveTypeLayout as T; + use MoveValue as V; + + let value = V::Vector(vec![ + struct_value_( + "0x0::foo::Quy", + vec![("q", V::U8(12)), ("r", V::Bool(true))], + ), + struct_value_( + "0x0::foo::Quy", + vec![("q", V::U8(13)), ("r", V::Bool(false))], + ), + ]); + + let layout = T::Vector(Box::new(struct_layout_( + "0x0::foo::Quy", + vec![("q", T::U8), ("r", T::Bool)], + ))); + + (value, layout) +} diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs b/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs index 6788ebac27bde..64e54f71b241d 100644 --- a/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs +++ b/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs @@ -2,7 +2,9 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 +mod extractor_test; mod identifier_test; mod language_storage_test; +mod parsing_test; mod value_test; mod visitor_test; diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/parsing_test.rs b/external-crates/move/crates/move-core-types/src/unit_tests/parsing_test.rs new file mode 100644 
index 0000000000000..2d69de0be29c6 --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/unit_tests/parsing_test.rs @@ -0,0 +1,678 @@ +use crate::{ + account_address::AccountAddress, + identifier::Identifier, + language_storage::{ModuleId, StructTag, TypeTag}, + parsing::{ + address::{NumericalAddress, ParsedAddress}, + parser::parse, + types::{ParsedFqName, ParsedType, TypeToken}, + values::ParsedValue, + }, + u256::U256, +}; +use anyhow::bail; +use num::BigUint; +use proptest::{prelude::*, proptest}; +use std::str::FromStr; + +const VALID_ADDRS: &[&str] = &[ + "0x0", + "0x1", + "1", + "123", + "0x123", + "0x1234567890abcdef", + "100_00_00", + "0x0_0_0_0", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0_00000_0000000000000000000000000000000000000000000000000_000000000", + "000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00_0000000000000000000000000000000000000000000000000000000_00000000000000000_0000", +]; + +const INVALID_ADDRS: &[&str] = &[ + "_x", + "0x", + "_0x0", + "_0", + "0x_", + "0x_00", + "+0x0", + "+0", + "0xg", + "0x0g", + "0X0", + "_0x0", + "_0x0_", + "_0", + "_0_", + "_00_", + "_0_0_", + "0x_00", + "0x00000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000_0000000", + "0x_0_00000_0000000000000000000000000000000000000000000000000_000000000", + "0000000000000000000000000000000000000000000000000000000000000000000000000000000", + "000_0000000000000000000000000000000000000000000000000000000_00000000000000000_0000", +]; + +#[allow(clippy::unreadable_literal)] +#[test] +fn tests_parse_value_positive() { + use ParsedValue as V; + let cases: &[(&str, V)] = &[ + (" 0u8", V::U8(0)), + ("0u8", V::U8(0)), + ("0xF_Fu8", V::U8(255)), + ("0xF__FF__Eu16", V::U16(u16::MAX - 1)), + ("0xFFF_FF__FF_Cu32", V::U32(u32::MAX - 3)), + ("255u8", V::U8(255)), + ("255u256", V::U256(U256::from(255u64))), + ("0", 
V::InferredNum(U256::from(0u64))), + ("0123", V::InferredNum(U256::from(123u64))), + ("0xFF", V::InferredNum(U256::from(0xFFu64))), + ("0xF_F", V::InferredNum(U256::from(0xFFu64))), + ("0xFF__", V::InferredNum(U256::from(0xFFu64))), + ( + "0x12_34__ABCD_FF", + V::InferredNum(U256::from(0x1234ABCDFFu64)), + ), + ("0u64", V::U64(0)), + ("0x0u64", V::U64(0)), + ( + "18446744073709551615", + V::InferredNum(U256::from(18446744073709551615u128)), + ), + ("18446744073709551615u64", V::U64(18446744073709551615)), + ("0u128", V::U128(0)), + ("1_0u8", V::U8(1_0)), + ("10_u8", V::U8(10)), + ("1_000u64", V::U64(1_000)), + ("1_000", V::InferredNum(U256::from(1_000u32))), + ("1_0_0_0u64", V::U64(1_000)), + ("1_000_000u128", V::U128(1_000_000)), + ( + "340282366920938463463374607431768211455u128", + V::U128(340282366920938463463374607431768211455), + ), + ("true", V::Bool(true)), + ("false", V::Bool(false)), + ( + "@0x0", + V::Address(ParsedAddress::Numerical(NumericalAddress::new( + AccountAddress::from_hex_literal("0x0") + .unwrap() + .into_bytes(), + crate::parsing::parser::NumberFormat::Hex, + ))), + ), + ( + "@0", + V::Address(ParsedAddress::Numerical(NumericalAddress::new( + AccountAddress::from_hex_literal("0x0") + .unwrap() + .into_bytes(), + crate::parsing::parser::NumberFormat::Hex, + ))), + ), + ( + "@0x54afa3526", + V::Address(ParsedAddress::Numerical(NumericalAddress::new( + AccountAddress::from_hex_literal("0x54afa3526") + .unwrap() + .into_bytes(), + crate::parsing::parser::NumberFormat::Hex, + ))), + ), + ( + "b\"hello\"", + V::Vector("hello".as_bytes().iter().copied().map(V::U8).collect()), + ), + ("x\"7fff\"", V::Vector(vec![V::U8(0x7f), V::U8(0xff)])), + ("x\"\"", V::Vector(vec![])), + ("x\"00\"", V::Vector(vec![V::U8(0x00)])), + ( + "x\"deadbeef\"", + V::Vector(vec![V::U8(0xde), V::U8(0xad), V::U8(0xbe), V::U8(0xef)]), + ), + ]; + + for (s, expected) in cases { + assert_eq!(&ParsedValue::parse(s).unwrap(), expected) + } +} + +#[test] +fn 
tests_parse_value_negative() { + /// Test cases for the parser that should always fail. + const PARSE_VALUE_NEGATIVE_TEST_CASES: &[&str] = &[ + "-3", + "0u42", + "0u645", + "0u64x", + "0u6 4", + "0u", + "_10", + "_10_u8", + "_10__u8", + "10_u8__", + "0xFF_u8_", + "0xF_u8__", + "0x_F_u8__", + "_", + "__", + "__4", + "_u8", + "5_bool", + "256u8", + "4294967296u32", + "65536u16", + "18446744073709551616u64", + "340282366920938463463374607431768211456u128", + "340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456u256", + "0xg", + "0x00g0", + "0x", + "0x_", + "", + "@@", + "()", + "x\"ffff", + "x\"a \"", + "x\" \"", + "x\"0g\"", + "x\"0\"", + "garbage", + "true3", + "3false", + "3 false", + "", + "0XFF", + "0X0", + ]; + + for s in PARSE_VALUE_NEGATIVE_TEST_CASES { + assert!( + ParsedValue::<()>::parse(s).is_err(), + "Unexpectedly succeeded in parsing: {}", + s + ) + } +} + +#[test] +fn test_parse_struct_negative() { + for s in &[ + "_", + "_::_::_", + "0x1::_", + "0x1::__::_", + "0x1::_::__", + "0x1::_::foo", + "0x1::foo::_", + "0x1::_::_", + "0x1::bar::foo<0x1::_::foo>", + "0x1::bar::bar::foo", + "0x1::Foo::Foo<", + "0x1::Foo::Foo<0x1::ABC::ABC", + "0x1::Foo::Foo<0x1::ABC::ABC::>", + "0x1::Foo::Foo<0x1::ABC::ABC::A>", + "0x1::Foo::Foo<>", + "0x1::Foo::Foo<,>", + "0x1::Foo::Foo<,", + "0x1::Foo::Foo,>", + "0x1::Foo::Foo>", + "0x1::Foo::Foo,", + "_0x0_0::a::a", + "_0x_00::a::a", + "_0_0::a::a", + ] { + assert!( + TypeTag::from_str(s).is_err(), + "Parsed type {s} but should have failed" + ); + } +} + +#[test] +fn test_type_type() { + for s in &[ + "u8", + "u16", + "u32", + "u64", + "u128", + "u256", + "bool", + "vector", + "vector>", + "address", + "signer", + "0x1::M::S", + "0x2::M::S_", + "0x3::M_::S", + "0x4::M_::S_", + "0x00000000004::M::S", + "0x1::M::S", + "0x1::M::S<0x2::P::Q>", + "vector<0x1::M::S>", + "vector<0x1::M_::S_>", + "vector>", + "0x1::M::S>", + 
"0x1::_bar::_BAR", + "0x1::__::__", + "0x1::_bar::_BAR<0x2::_____::______fooo______>", + "0x1::__::__<0x2::_____::______fooo______, 0xff::Bar____::_______foo>", + "0x0_0::a::a", + "0_0::a::a", + ] { + assert!(TypeTag::from_str(s).is_ok(), "Failed to parse type {}", s); + } + + for valid_addr in VALID_ADDRS { + assert!( + TypeTag::from_str(&format!("{valid_addr}::a::a")).is_ok(), + "Failed to parse type {}::a::a", + valid_addr + ); + } + + for invalid_addr in INVALID_ADDRS { + assert!( + TypeTag::from_str(&format!("{invalid_addr}::a::a")).is_err(), + "Parse type {}::a::a but should have failed", + invalid_addr + ); + } +} + +#[test] +fn test_parse_valid_struct_type() { + let valid = vec![ + "0x1::Foo::Foo", + "0x1::Foo_Type::Foo", + "0x1::Foo_::Foo", + "0x1::X_123::X32_", + "0x1::Foo::Foo_Type", + "0x1::Foo::Foo<0x1::ABC::ABC>", + "0x1::Foo::Foo<0x1::ABC::ABC_Type>", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo
", + "0x1::Foo::Foo", + "0x1::Foo::Foo>", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo,address,signer>", + "0x1::Foo::Foo>>", + "0x1::Foo::Foo<0x1::Foo::Struct, 0x1::Foo::Foo>>>>", + "0x1::_bar::_BAR", + "0x1::__::__", + "0x1::_bar::_BAR<0x2::_____::______fooo______>", + "0x1::__::__<0x2::_____::______fooo______, 0xff::Bar____::_______foo>", + ]; + for s in valid { + assert!( + StructTag::from_str(s).is_ok(), + "Failed to parse struct {}", + s + ); + } +} + +#[test] +fn test_parse_type_list() { + let valid_with_trails = &[ + "", + "", + ",>", + ]; + let valid_no_trails = &[ + "", + "", + ">", + ]; + let invalid = &[ + "<>", + "<,>", + "", + "<,u64>", + "<,u64,>", + ",", + "", + "<", + "<<", + "><", + ">,<", + ">,", + ",>", + ",,", + ">>", + "", + "u64,>", + "u64, u64,>", + "u64, u64,", + "u64, u64", + "u64 u64", + "", + "", + "u64 u64,", + "", + ",", + ",,>", + ]; + + for t in valid_no_trails.iter().chain(valid_with_trails.iter()) { + assert!(parse_type_tags(t, true).is_ok()); + } + + for t in valid_no_trails { + assert!(parse_type_tags(t, false).is_ok()); + } + + for t in valid_with_trails { + assert!(parse_type_tags(t, false).is_err()); + } + + for t in invalid { + assert!(parse_type_tags(t, true).is_err(), "parsed type {}", t); + assert!(parse_type_tags(t, false).is_err(), "parsed type {}", t); + } +} + +fn struct_type_gen0() -> impl Strategy { + ( + any::(), + any::(), + any::(), + ) + .prop_map(|(address, module, name)| format!("0x{}::{}::{}", address, module, name)) +} + +fn struct_type_gen1() -> impl Strategy { + (any::(), any::(), any::()) + .prop_map(|(address, module, name)| format!("{}::{}::{}", address, module, name)) +} + +fn module_id_gen0() -> impl Strategy { + (any::(), any::()) + .prop_map(|(address, module)| format!("0x{address}::{module}")) +} + +fn module_id_gen1() -> impl Strategy { + (any::(), any::()) + .prop_map(|(address, module)| format!("{address}::{module}")) +} + +fn fq_id_gen0() -> impl Strategy { + ( + 
any::(), + any::(), + any::(), + ) + .prop_map(|(address, module, name)| format!("0x{address}::{module}::{name}")) +} + +fn fq_id_gen1() -> impl Strategy { + (any::(), any::(), any::()) + .prop_map(|(address, module, name)| format!("{address}::{module}::{name}")) +} + +fn parse_type_tags(s: &str, allow_trailing_delim: bool) -> anyhow::Result> { + parse(s, |parser| { + parser.advance(TypeToken::Lt)?; + let parsed = parser.parse_list( + |parser| parser.parse_type(), + TypeToken::Comma, + TypeToken::Gt, + allow_trailing_delim, + )?; + parser.advance(TypeToken::Gt)?; + if parsed.is_empty() { + bail!("expected at least one type argument") + } + Ok(parsed) + }) +} + +#[test] +fn address_parsing() { + for valid_addr in VALID_ADDRS { + assert!( + ParsedAddress::parse(valid_addr).is_ok(), + "parsed address {}", + valid_addr + ); + } + + for invalid_addr in INVALID_ADDRS { + assert!(ParsedAddress::parse(invalid_addr).is_err()); + } +} + +proptest! { + #[test] + fn parse_type_tag_list(t in struct_type_gen0(), args in proptest::collection::vec(struct_type_gen0(), 1..=100)) { + let s_no_trail = format!("<{}>", args.join(",")); + let s_with_trail = format!("<{},>", args.join(",")); + let s_no_trail_no_trail = parse_type_tags(&s_no_trail, false); + let s_no_trail_allow_trail = parse_type_tags(&s_no_trail, true); + let s_with_trail_no_trail = parse_type_tags(&s_with_trail, false); + let s_with_trail_allow_trail = parse_type_tags(&s_with_trail, true); + prop_assert!(s_no_trail_no_trail.is_ok()); + prop_assert!(s_no_trail_allow_trail.is_ok()); + prop_assert!(s_with_trail_no_trail.is_err()); + prop_assert!(s_with_trail_allow_trail.is_ok()); + let t_with_trail = format!("{t}{s_no_trail}"); + let t_no_trail = format!("{t}{s_with_trail}"); + let t_with_trail = TypeTag::from_str(&t_with_trail); + let t_no_trail = TypeTag::from_str(&t_no_trail); + prop_assert!(t_with_trail.is_ok()); + prop_assert!(t_no_trail.is_ok()); + prop_assert_eq!(t_with_trail.unwrap(), t_no_trail.unwrap()); + } + + 
#[test] + fn test_parse_valid_struct_type_proptest0(s in struct_type_gen0(), x in r#"(::foo)[^a-zA-Z0-9_\s]+"#) { + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + + // Add remainder string + let s = s + &x; + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + + } + + #[test] + fn test_parse_valid_struct_type_proptest1(s in struct_type_gen1(), x in r#"(::foo)[^a-zA-Z0-9_\s]+"#) { + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn test_parse_valid_module_id_proptest0(s in module_id_gen0(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ModuleId::from_str(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn 
test_parse_valid_module_id_proptest1(s in module_id_gen1(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ModuleId::from_str(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder String + let s = s + &x; + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + + } + + #[test] + fn test_parse_valid_fq_id_proptest0(s in fq_id_gen0(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn test_parse_valid_fq_id_proptest1(s in fq_id_gen1(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + let s = s + &x; + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn test_parse_valid_numeric_address(s in "[0-9]{64}", x in r#"[^a-zA-Z0-9_\s]+"#) { + 
prop_assert!(AccountAddress::from_str(&s).is_ok()); + prop_assert!(ParsedAddress::parse(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } + + #[test] + fn decimal_parse_parity(s in "[0-9]{64}") { + let bigint_parsed = { + let bytes = BigUint::parse_bytes(s.as_bytes(), 10).unwrap().to_bytes_be(); + let mut result = [0u8; AccountAddress::LENGTH]; + result[(AccountAddress::LENGTH - bytes.len())..].clone_from_slice(&bytes); + result + }; + let u256_parsed = U256::from_str(&s).unwrap(); + prop_assert_eq!(bigint_parsed, u256_parsed.to_be_bytes(), "Parsed addresses do not match: {}", s); + } + + #[test] + fn hex_parse_parity(s in "0x[0-9a-fA-F]{1,64}") { + let bigint_parsed = { + let bytes = BigUint::parse_bytes(s[2..].as_bytes(), 16).unwrap().to_bytes_be(); + let mut result = [0u8; AccountAddress::LENGTH]; + result[(AccountAddress::LENGTH - bytes.len())..].clone_from_slice(&bytes); + result + }; + let addr_parsed = AccountAddress::from_hex_literal(&s).unwrap().into_bytes(); + let u256_parsed = AccountAddress::new(U256::from_str_radix(&s[2..], 16).unwrap().to_be_bytes()).into_bytes(); + prop_assert_eq!(bigint_parsed, addr_parsed, "Parsed addresses do not match: {}", s); + prop_assert_eq!(addr_parsed, u256_parsed, "Parsed addresses do not match: {}", s); + } + + #[test] + fn test_parse_different_length_numeric_addresses(s in "[0-9]{1,63}", x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(AccountAddress::from_str(&s).is_err()); + 
prop_assert!(ParsedAddress::parse(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } + + #[test] + fn test_parse_valid_hex_address(s in "0x[0-9a-fA-F]{64}", x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(AccountAddress::from_str(&s).is_ok()); + prop_assert!(ParsedAddress::parse(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } + + #[test] + fn test_parse_invalid_hex_address(s in "[0-9]{63}[a-fA-F]{1}", x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(AccountAddress::from_str(&s).is_ok()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + 
prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } +} diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs b/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs index 7d0c06b8696af..3d64ecb1d598e 100644 --- a/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs +++ b/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs @@ -19,125 +19,284 @@ use crate::{ VARIANT_COUNT_MAX, }; -#[test] -fn traversal() { - use MoveTypeLayout as T; - use MoveValue as V; +#[derive(Default)] +pub(crate) struct CountingTraversal(usize); - #[derive(Default)] - struct CountingTraversal(usize); +#[derive(Default)] +pub(crate) struct PrintVisitor { + depth: usize, + pub output: String, +} - impl<'b, 'l> Traversal<'b, 'l> for CountingTraversal { - type Error = annotated_visitor::Error; +impl<'b, 'l> Traversal<'b, 'l> for CountingTraversal { + type Error = annotated_visitor::Error; - fn traverse_u8( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u8, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u8( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u8, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u16( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u16, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u16( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u16, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u32( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u32, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u32( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u32, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u64( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, 
- _value: u64, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u64( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u64, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u128( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u128, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u128( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u128, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u256( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: U256, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u256( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: U256, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_bool( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: bool, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_bool( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: bool, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_address( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: AccountAddress, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_address( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: AccountAddress, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_signer( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: AccountAddress, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_signer( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: AccountAddress, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_vector( - &mut self, - driver: &mut VecDriver<'_, 'b, 'l>, - ) -> Result<(), Self::Error> { - self.0 += 1; - while driver.next_element(self)?.is_some() {} - Ok(()) + fn traverse_vector(&mut self, driver: &mut VecDriver<'_, 'b, 'l>) -> 
Result<(), Self::Error> { + self.0 += 1; + while driver.next_element(self)?.is_some() {} + Ok(()) + } + + fn traverse_struct( + &mut self, + driver: &mut StructDriver<'_, 'b, 'l>, + ) -> Result<(), Self::Error> { + self.0 += 1; + while driver.next_field(self)?.is_some() {} + Ok(()) + } + + fn traverse_variant( + &mut self, + driver: &mut VariantDriver<'_, 'b, 'l>, + ) -> Result<(), Self::Error> { + self.0 += 1; + while driver.next_field(self)?.is_some() {} + Ok(()) + } +} + +impl<'b, 'l> Visitor<'b, 'l> for PrintVisitor { + type Value = MoveValue; + type Error = annotated_visitor::Error; + + fn visit_u8( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u8, + ) -> Result { + write!(self.output, "\n[{}] {value}: u8", self.depth).unwrap(); + Ok(MoveValue::U8(value)) + } + + fn visit_u16( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u16, + ) -> Result { + write!(self.output, "\n[{}] {value}: u16", self.depth).unwrap(); + Ok(MoveValue::U16(value)) + } + + fn visit_u32( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u32, + ) -> Result { + write!(self.output, "\n[{}] {value}: u32", self.depth).unwrap(); + Ok(MoveValue::U32(value)) + } + + fn visit_u64( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u64, + ) -> Result { + write!(self.output, "\n[{}] {value}: u64", self.depth).unwrap(); + Ok(MoveValue::U64(value)) + } + + fn visit_u128( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u128, + ) -> Result { + write!(self.output, "\n[{}] {value}: u128", self.depth).unwrap(); + Ok(MoveValue::U128(value)) + } + + fn visit_u256( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: U256, + ) -> Result { + write!(self.output, "\n[{}] {value}: u256", self.depth).unwrap(); + Ok(MoveValue::U256(value)) + } + + fn visit_bool( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: bool, + ) -> Result { + write!(self.output, "\n[{}] {value}: bool", self.depth).unwrap(); + Ok(MoveValue::Bool(value)) + } + + fn 
visit_address( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + write!(self.output, "\n[{}] {value}: address", self.depth).unwrap(); + Ok(MoveValue::Address(value)) + } + + fn visit_signer( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + write!(self.output, "\n[{}] {value}: signer", self.depth).unwrap(); + Ok(MoveValue::Signer(value)) + } + + fn visit_vector( + &mut self, + driver: &mut VecDriver<'_, 'b, 'l>, + ) -> Result { + let layout = driver.element_layout(); + write!(self.output, "\n[{}] vector<{layout:#}>", self.depth).unwrap(); + + let mut elems = vec![]; + let mut elem_visitor = Self { + depth: self.depth + 1, + output: std::mem::take(&mut self.output), + }; + + while let Some(elem) = driver.next_element(&mut elem_visitor)? { + elems.push(elem) } - fn traverse_struct( - &mut self, - driver: &mut StructDriver<'_, 'b, 'l>, - ) -> Result<(), Self::Error> { - self.0 += 1; - while driver.next_field(self)?.is_some() {} - Ok(()) + self.output = elem_visitor.output; + Ok(MoveValue::Vector(elems)) + } + + fn visit_struct( + &mut self, + driver: &mut StructDriver<'_, 'b, 'l>, + ) -> Result { + let layout = driver.struct_layout(); + write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); + + let mut fields = vec![]; + let mut field_visitor = Self { + depth: self.depth + 1, + output: std::mem::take(&mut self.output), + }; + + while let Some((field, value)) = driver.next_field(&mut field_visitor)? 
{ + fields.push((field.name.clone(), value)); } - fn traverse_variant( - &mut self, - driver: &mut VariantDriver<'_, 'b, 'l>, - ) -> Result<(), Self::Error> { - self.0 += 1; - while driver.next_field(self)?.is_some() {} - Ok(()) + self.output = field_visitor.output; + let type_ = driver.struct_layout().type_.clone(); + Ok(MoveValue::Struct(MoveStruct { type_, fields })) + } + + fn visit_variant( + &mut self, + driver: &mut VariantDriver<'_, 'b, 'l>, + ) -> Result { + let layout = driver.enum_layout(); + write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); + + let mut fields = vec![]; + let mut field_visitor = Self { + depth: self.depth + 1, + output: std::mem::take(&mut self.output), + }; + + while let Some((field, value)) = driver.next_field(&mut field_visitor)? { + fields.push((field.name.clone(), value)); } + + self.output = field_visitor.output; + let type_ = driver.enum_layout().type_.clone(); + Ok(MoveValue::Variant(MoveVariant { + type_, + variant_name: driver.variant_name().to_owned(), + tag: driver.tag(), + fields, + })) } +} + +#[test] +fn traversal() { + use MoveTypeLayout as T; + use MoveValue as V; let type_layout = struct_layout_( "0x0::foo::Bar", @@ -334,168 +493,6 @@ fn nested_datatype_visit() { use MoveTypeLayout as T; use MoveValue as V; - #[derive(Default)] - struct PrintVisitor { - depth: usize, - output: String, - } - - impl<'b, 'l> Visitor<'b, 'l> for PrintVisitor { - type Value = MoveValue; - type Error = annotated_visitor::Error; - - fn visit_u8( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u8, - ) -> Result { - write!(self.output, "\n[{}] {value}: u8", self.depth).unwrap(); - Ok(V::U8(value)) - } - - fn visit_u16( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u16, - ) -> Result { - write!(self.output, "\n[{}] {value}: u16", self.depth).unwrap(); - Ok(V::U16(value)) - } - - fn visit_u32( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u32, - ) -> Result { - write!(self.output, "\n[{}] {value}: 
u32", self.depth).unwrap(); - Ok(V::U32(value)) - } - - fn visit_u64( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u64, - ) -> Result { - write!(self.output, "\n[{}] {value}: u64", self.depth).unwrap(); - Ok(V::U64(value)) - } - - fn visit_u128( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u128, - ) -> Result { - write!(self.output, "\n[{}] {value}: u128", self.depth).unwrap(); - Ok(V::U128(value)) - } - - fn visit_u256( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: U256, - ) -> Result { - write!(self.output, "\n[{}] {value}: u256", self.depth).unwrap(); - Ok(V::U256(value)) - } - - fn visit_bool( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: bool, - ) -> Result { - write!(self.output, "\n[{}] {value}: bool", self.depth).unwrap(); - Ok(V::Bool(value)) - } - - fn visit_address( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: AccountAddress, - ) -> Result { - write!(self.output, "\n[{}] {value}: address", self.depth).unwrap(); - Ok(V::Address(value)) - } - - fn visit_signer( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: AccountAddress, - ) -> Result { - write!(self.output, "\n[{}] {value}: signer", self.depth).unwrap(); - Ok(V::Signer(value)) - } - - fn visit_vector( - &mut self, - driver: &mut VecDriver<'_, 'b, 'l>, - ) -> Result { - let layout = driver.element_layout(); - write!(self.output, "\n[{}] vector<{layout:#}>", self.depth).unwrap(); - - let mut elems = vec![]; - let mut elem_visitor = Self { - depth: self.depth + 1, - output: std::mem::take(&mut self.output), - }; - - while let Some(elem) = driver.next_element(&mut elem_visitor)? 
{ - elems.push(elem) - } - - self.output = elem_visitor.output; - Ok(V::Vector(elems)) - } - - fn visit_struct( - &mut self, - driver: &mut StructDriver<'_, 'b, 'l>, - ) -> Result { - let layout = driver.struct_layout(); - write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); - - let mut fields = vec![]; - let mut field_visitor = Self { - depth: self.depth + 1, - output: std::mem::take(&mut self.output), - }; - - while let Some((field, value)) = driver.next_field(&mut field_visitor)? { - fields.push((field.name.clone(), value)); - } - - self.output = field_visitor.output; - let type_ = driver.struct_layout().type_.clone(); - Ok(V::Struct(MoveStruct { type_, fields })) - } - - fn visit_variant( - &mut self, - driver: &mut VariantDriver<'_, 'b, 'l>, - ) -> Result { - let layout = driver.enum_layout(); - write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); - - let mut fields = vec![]; - let mut field_visitor = Self { - depth: self.depth + 1, - output: std::mem::take(&mut self.output), - }; - - while let Some((field, value)) = driver.next_field(&mut field_visitor)? { - fields.push((field.name.clone(), value)); - } - - self.output = field_visitor.output; - let type_ = driver.enum_layout().type_.clone(); - Ok(V::Variant(MoveVariant { - type_, - variant_name: driver.variant_name().to_owned(), - tag: driver.tag(), - fields, - })) - } - } - let type_layout = struct_layout_( "0x0::foo::Bar", vec![ @@ -1058,7 +1055,7 @@ fn byte_offset_test() { } /// Create a struct value for test purposes. -fn struct_value_(rep: &str, fields: Vec<(&str, MoveValue)>) -> MoveValue { +pub(crate) fn struct_value_(rep: &str, fields: Vec<(&str, MoveValue)>) -> MoveValue { let type_ = StructTag::from_str(rep).unwrap(); let fields = fields .into_iter() @@ -1069,7 +1066,7 @@ fn struct_value_(rep: &str, fields: Vec<(&str, MoveValue)>) -> MoveValue { } /// Create a struct layout for test purposes. 
-fn struct_layout_(rep: &str, fields: Vec<(&str, MoveTypeLayout)>) -> MoveTypeLayout { +pub(crate) fn struct_layout_(rep: &str, fields: Vec<(&str, MoveTypeLayout)>) -> MoveTypeLayout { let type_ = StructTag::from_str(rep).unwrap(); let fields = fields .into_iter() @@ -1083,7 +1080,12 @@ fn struct_layout_(rep: &str, fields: Vec<(&str, MoveTypeLayout)>) -> MoveTypeLay } /// Create a variant value for test purposes. -fn variant_value_(rep: &str, name: &str, tag: u16, fields: Vec<(&str, MoveValue)>) -> MoveValue { +pub(crate) fn variant_value_( + rep: &str, + name: &str, + tag: u16, + fields: Vec<(&str, MoveValue)>, +) -> MoveValue { let type_ = StructTag::from_str(rep).unwrap(); let fields = fields .into_iter() @@ -1099,7 +1101,10 @@ fn variant_value_(rep: &str, name: &str, tag: u16, fields: Vec<(&str, MoveValue) } /// Create an enum layout for test purposes. -fn enum_layout_(rep: &str, variants: Vec<(&str, Vec<(&str, MoveTypeLayout)>)>) -> MoveTypeLayout { +pub(crate) fn enum_layout_( + rep: &str, + variants: Vec<(&str, Vec<(&str, MoveTypeLayout)>)>, +) -> MoveTypeLayout { let type_ = StructTag::from_str(rep).unwrap(); let variants = variants .into_iter() @@ -1117,6 +1122,6 @@ fn enum_layout_(rep: &str, variants: Vec<(&str, Vec<(&str, MoveTypeLayout)>)>) - } /// BCS encode Move value. 
-fn serialize(value: MoveValue) -> Vec { +pub(crate) fn serialize(value: MoveValue) -> Vec { value.clone().undecorate().simple_serialize().unwrap() } diff --git a/external-crates/move/crates/move-model/src/lib.rs b/external-crates/move/crates/move-model/src/lib.rs index a81e9e5557e42..58b9b7cb5888a 100644 --- a/external-crates/move/crates/move-model/src/lib.rs +++ b/external-crates/move/crates/move-model/src/lib.rs @@ -23,7 +23,7 @@ use move_binary_format::file_format::{ use move_compiler::{ self, compiled_unit::{self, AnnotatedCompiledUnit}, - diagnostics::{Diagnostics, WarningFilters}, + diagnostics::{warning_filters::WarningFilters, Diagnostics}, expansion::ast::{self as E, ModuleIdent, ModuleIdent_, TargetKind}, parser::ast as P, shared::{parse_named_address, unique_map::UniqueMap, NumericalAddress, PackagePaths}, diff --git a/external-crates/move/crates/move-model/src/model.rs b/external-crates/move/crates/move-model/src/model.rs index f80c4c15b9ba1..40d0391f2a456 100644 --- a/external-crates/move/crates/move-model/src/model.rs +++ b/external-crates/move/crates/move-model/src/model.rs @@ -47,7 +47,8 @@ use move_binary_format::{ CompiledModule, }; use move_bytecode_source_map::{mapping::SourceMapping, source_map::SourceMap}; -use move_command_line_common::{address::NumericalAddress, files::FileHash}; +use move_command_line_common::files::FileHash; +use move_core_types::parsing::address::NumericalAddress; use move_core_types::{ account_address::AccountAddress, identifier::{IdentStr, Identifier}, diff --git a/external-crates/move/crates/move-model/tests/testsuite.rs b/external-crates/move/crates/move-model/tests/testsuite.rs index bf66d7215179a..965080f519ca1 100644 --- a/external-crates/move/crates/move-model/tests/testsuite.rs +++ b/external-crates/move/crates/move-model/tests/testsuite.rs @@ -5,7 +5,7 @@ use codespan_reporting::{diagnostic::Severity, term::termcolor::Buffer}; use move_binary_format::file_format::{FunctionDefinitionIndex, 
StructDefinitionIndex}; use move_command_line_common::testing::EXP_EXT; -use move_compiler::{diagnostics::WarningFilters, shared::PackagePaths}; +use move_compiler::{diagnostics::warning_filters::WarningFilters, shared::PackagePaths}; use move_model::{run_bytecode_model_builder, run_model_builder}; use move_prover_test_utils::baseline_test::verify_or_update_baseline; use std::path::Path; diff --git a/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs b/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs index d6aa0ed8b2f4f..1d45ef772f0e2 100644 --- a/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs +++ b/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs @@ -8,7 +8,7 @@ use move_command_line_common::files::{ }; use move_compiler::command_line::DEFAULT_OUTPUT_DIR; use move_compiler::editions::Edition; -use move_compiler::{diagnostics::WarningFilters, shared::PackageConfig}; +use move_compiler::{diagnostics::warning_filters::WarningFilters, shared::PackageConfig}; use move_core_types::account_address::AccountAddress; use move_symbol_pool::Symbol; use std::fs::File; diff --git a/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs b/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs index a992a3726479c..9465f65b7f5ef 100644 --- a/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs +++ b/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs @@ -5,7 +5,7 @@ use anyhow::anyhow; use codespan_reporting::{diagnostic::Severity, term::termcolor::Buffer}; use move_command_line_common::testing::EXP_EXT; -use move_compiler::{diagnostics::WarningFilters, shared::PackagePaths}; +use move_compiler::{diagnostics::warning_filters::WarningFilters, shared::PackagePaths}; use move_model::{model::GlobalEnv, options::ModelBuilderOptions, run_model_builder_with_options}; use 
move_prover_test_utils::{baseline_test::verify_or_update_baseline, extract_test_directives}; use move_stackless_bytecode::{ diff --git a/external-crates/move/crates/move-stdlib/src/lib.rs b/external-crates/move/crates/move-stdlib/src/lib.rs index 5d714e3d3acd1..a823dca9fefe4 100644 --- a/external-crates/move/crates/move-stdlib/src/lib.rs +++ b/external-crates/move/crates/move-stdlib/src/lib.rs @@ -3,10 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 use log::LevelFilter; -use move_command_line_common::{ - address::NumericalAddress, - files::{extension_equals, find_filenames, MOVE_EXTENSION}, -}; +use move_command_line_common::files::{extension_equals, find_filenames, MOVE_EXTENSION}; +use move_core_types::parsing::address::NumericalAddress; use std::{collections::BTreeMap, path::PathBuf}; #[cfg(test)] diff --git a/external-crates/move/crates/move-transactional-test-runner/src/framework.rs b/external-crates/move/crates/move-transactional-test-runner/src/framework.rs index 1294d1760b6a9..631870c045224 100644 --- a/external-crates/move/crates/move-transactional-test-runner/src/framework.rs +++ b/external-crates/move/crates/move-transactional-test-runner/src/framework.rs @@ -14,20 +14,22 @@ use clap::Parser; use move_binary_format::file_format::CompiledModule; use move_bytecode_source_map::{mapping::SourceMapping, source_map::SourceMap}; use move_command_line_common::{ - address::ParsedAddress, env::read_bool_env_var, files::{MOVE_EXTENSION, MOVE_IR_EXTENSION}, testing::{add_update_baseline_fix, format_diff, read_env_update_baseline, EXP_EXT}, - types::ParsedType, - values::{ParsableValue, ParsedValue}, }; use move_compiler::{ compiled_unit::AnnotatedCompiledUnit, - diagnostics::{Diagnostics, WarningFilters}, + diagnostics::{warning_filters::WarningFilters, Diagnostics}, editions::{Edition, Flavor}, shared::{files::MappedFiles, NumericalAddress, PackageConfig}, FullyCompiledProgram, }; +use move_core_types::parsing::{ + address::ParsedAddress, + types::ParsedType, + 
values::{ParsableValue, ParsedValue}, +}; use move_core_types::{ account_address::AccountAddress, identifier::{IdentStr, Identifier}, diff --git a/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs b/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs index c1e62c544f60b..f5247de2c86a7 100644 --- a/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs +++ b/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs @@ -6,14 +6,14 @@ use anyhow::{anyhow, bail, Result}; use clap::*; -use move_command_line_common::{ +use move_command_line_common::files::{MOVE_EXTENSION, MOVE_IR_EXTENSION}; +use move_compiler::shared::NumericalAddress; +use move_core_types::identifier::Identifier; +use move_core_types::parsing::{ address::ParsedAddress, - files::{MOVE_EXTENSION, MOVE_IR_EXTENSION}, types::ParsedType, values::{ParsableValue, ParsedValue}, }; -use move_compiler::shared::NumericalAddress; -use move_core_types::identifier::Identifier; use std::{convert::TryInto, fmt::Debug, path::Path, str::FromStr}; use tempfile::NamedTempFile; diff --git a/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs b/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs index f04769afce091..05b3b5d281926 100644 --- a/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs +++ b/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs @@ -15,10 +15,9 @@ use move_binary_format::{ errors::{Location, VMError, VMResult}, CompiledModule, }; -use move_command_line_common::{ - address::ParsedAddress, files::verify_and_create_named_address_mapping, -}; +use move_command_line_common::files::verify_and_create_named_address_mapping; use move_compiler::{editions::Edition, shared::PackagePaths, FullyCompiledProgram}; +use move_core_types::parsing::address::ParsedAddress; use move_core_types::{ account_address::AccountAddress, 
identifier::IdentStr, diff --git a/external-crates/move/crates/move-unit-test/Cargo.toml b/external-crates/move/crates/move-unit-test/Cargo.toml index d6a3d543ffcf5..e485019a2083a 100644 --- a/external-crates/move/crates/move-unit-test/Cargo.toml +++ b/external-crates/move/crates/move-unit-test/Cargo.toml @@ -51,4 +51,4 @@ name = "move_unit_test_testsuite" harness = false [features] -gas-profiler = [] +tracing = [] diff --git a/external-crates/move/crates/move-unit-test/src/lib.rs b/external-crates/move/crates/move-unit-test/src/lib.rs index 00d42c0219885..496782ee916b9 100644 --- a/external-crates/move/crates/move-unit-test/src/lib.rs +++ b/external-crates/move/crates/move-unit-test/src/lib.rs @@ -187,7 +187,7 @@ impl UnitTestingConfig { let (_, compiler) = diagnostics::unwrap_or_report_pass_diagnostics(&files, comments_and_compiler_res); - let (mut compiler, cfgir) = compiler.into_ast(); + let (compiler, cfgir) = compiler.into_ast(); let compilation_env = compiler.compilation_env(); let test_plan = unit_test::plan_builder::construct_test_plan(compilation_env, None, &cfgir); let mapped_files = compilation_env.mapped_files().clone(); diff --git a/external-crates/move/crates/move-unit-test/src/test_runner.rs b/external-crates/move/crates/move-unit-test/src/test_runner.rs index 42310d40cece8..1dae13f18203f 100644 --- a/external-crates/move/crates/move-unit-test/src/test_runner.rs +++ b/external-crates/move/crates/move-unit-test/src/test_runner.rs @@ -262,7 +262,7 @@ impl SharedTestingConfig { let mut session = move_vm.new_session_with_extensions(&self.starting_storage_state, extensions); let mut gas_meter = GasStatus::new(&self.cost_table, Gas::new(self.execution_bound)); - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/crates/move-vm-config/Cargo.toml b/external-crates/move/crates/move-vm-config/Cargo.toml index e0447de95b2c4..50eb97a427134 100644 --- a/external-crates/move/crates/move-vm-config/Cargo.toml +++ b/external-crates/move/crates/move-vm-config/Cargo.toml @@ -12,4 +12,4 @@ move-binary-format.workspace = true once_cell.workspace = true [features] -gas-profiler = [] +tracing = [] diff --git a/external-crates/move/crates/move-vm-config/src/runtime.rs b/external-crates/move/crates/move-vm-config/src/runtime.rs index a3c763e760ed3..6743c7f9d298a 100644 --- a/external-crates/move/crates/move-vm-config/src/runtime.rs +++ b/external-crates/move/crates/move-vm-config/src/runtime.rs @@ -4,13 +4,13 @@ use crate::verifier::{VerifierConfig, DEFAULT_MAX_CONSTANT_VECTOR_LEN}; use move_binary_format::binary_config::BinaryConfig; use move_binary_format::file_format_common::VERSION_MAX; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use once_cell::sync::Lazy; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] const MOVE_VM_PROFILER_ENV_VAR_NAME: &str = "MOVE_VM_PROFILE"; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] static PROFILER_ENABLED: Lazy = Lazy::new(|| std::env::var(MOVE_VM_PROFILER_ENV_VAR_NAME).is_ok()); @@ -88,7 +88,7 @@ pub struct VMProfilerConfig { pub use_long_function_name: bool, } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl std::default::Default for VMProfilerConfig { fn default() -> Self { Self { @@ -99,7 +99,7 @@ impl std::default::Default for VMProfilerConfig { } } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl VMProfilerConfig { pub fn get_default_config_if_enabled() -> Option { if *PROFILER_ENABLED { diff --git a/external-crates/move/crates/move-vm-integration-tests/Cargo.toml 
b/external-crates/move/crates/move-vm-integration-tests/Cargo.toml index 2a74255a839f7..37a6ebfd42bbc 100644 --- a/external-crates/move/crates/move-vm-integration-tests/Cargo.toml +++ b/external-crates/move/crates/move-vm-integration-tests/Cargo.toml @@ -13,7 +13,6 @@ edition = "2021" [dependencies] anyhow.workspace = true -expect-test = "1.4.0" fail = { workspace = true, features = ["failpoints"] } tempfile.workspace = true memory-stats = "1.0.0" @@ -33,11 +32,11 @@ move-ir-to-bytecode.workspace = true [features] default = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-test-utils/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-test-utils/tracing", ] [[bin]] diff --git a/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs b/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs index 29974b219d5f9..8892eef2a70b5 100644 --- a/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs +++ b/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs @@ -23,7 +23,7 @@ use move_core_types::{ language_storage::{ModuleId, StructTag, TypeTag}, vm_status::StatusCode, }; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use move_vm_profiler::GasProfiler; use move_vm_runtime::{ move_vm::MoveVM, @@ -33,7 +33,7 @@ use move_vm_test_utils::{ gas_schedule::{Gas, GasStatus, INITIAL_COST_SCHEDULE}, InMemoryStorage, }; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use move_vm_types::gas::GasMeter; use std::time::Instant; @@ -555,7 +555,7 @@ fn run_with_module( .into_iter() .map(|tag| session.load_type(&tag)) .collect::>>(); - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ gas.set_profiler(GasProfiler::init( &session.vm_config().profiler_config, entry_name.to_string(), diff --git a/external-crates/move/crates/move-vm-profiler/Cargo.toml b/external-crates/move/crates/move-vm-profiler/Cargo.toml index 89bdd1396c016..05cf0baadc301 100644 --- a/external-crates/move/crates/move-vm-profiler/Cargo.toml +++ b/external-crates/move/crates/move-vm-profiler/Cargo.toml @@ -15,4 +15,4 @@ tracing.workspace = true move-vm-config.workspace = true [features] -gas-profiler = ["move-vm-config/gas-profiler"] +tracing = ["move-vm-config/tracing"] diff --git a/external-crates/move/crates/move-vm-profiler/src/lib.rs b/external-crates/move/crates/move-vm-profiler/src/lib.rs index 3312383e1b052..3479e1d059223 100644 --- a/external-crates/move/crates/move-vm-profiler/src/lib.rs +++ b/external-crates/move/crates/move-vm-profiler/src/lib.rs @@ -4,7 +4,7 @@ use move_vm_config::runtime::VMProfilerConfig; use serde::Serialize; use std::collections::BTreeMap; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use tracing::info; #[derive(Debug, Clone, Serialize)] @@ -62,7 +62,7 @@ pub struct GasProfiler { finished: bool, } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl GasProfiler { // Used by profiler viz tool const OPEN_FRAME_IDENT: &'static str = "O"; @@ -70,7 +70,7 @@ impl GasProfiler { const TOP_LEVEL_FRAME_NAME: &'static str = "root"; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn init(config: &Option, name: String, start_gas: u64) -> Self { let mut prof = GasProfiler { exporter: "speedscope@1.15.2".to_string(), @@ -101,7 +101,7 @@ impl GasProfiler { prof } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn init_default_cfg(name: String, start_gas: u64) -> Self { Self::init( &VMProfilerConfig::get_default_config_if_enabled(), @@ -110,22 +110,22 @@ impl GasProfiler { ) } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn short_name(s: &String) -> String { 
s.split("::").last().unwrap_or(s).to_string() } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] fn is_metered(&self) -> bool { (self.profiles[0].end_value != 0) && (self.start_gas != 0) } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] fn start_gas(&self) -> u64 { self.start_gas } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] fn add_frame( &mut self, frame_name: String, @@ -146,7 +146,7 @@ impl GasProfiler { } } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn open_frame(&mut self, frame_name: String, metadata: String, gas_start: u64) { if self.config.is_none() || self.start_gas == 0 { return; @@ -162,7 +162,7 @@ impl GasProfiler { }); } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn close_frame(&mut self, frame_name: String, metadata: String, gas_end: u64) { if self.config.is_none() || self.start_gas == 0 { return; @@ -178,7 +178,7 @@ impl GasProfiler { self.profiles[0].end_value = start - gas_end; } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn to_file(&self) { use std::ffi::{OsStr, OsString}; use std::fs::File; @@ -218,7 +218,7 @@ impl GasProfiler { info!("Gas profile written to file: {}", p.display()); } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn finish(&mut self) { if self.finished { return; @@ -231,7 +231,7 @@ impl GasProfiler { } } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl Drop for GasProfiler { fn drop(&mut self) { self.finish(); @@ -241,7 +241,7 @@ impl Drop for GasProfiler { #[macro_export] macro_rules! profile_open_frame { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); move_vm_profiler::profile_open_frame_impl!( @@ -256,7 +256,7 @@ macro_rules! profile_open_frame { #[macro_export] macro_rules! 
profile_open_frame_impl { ($profiler:expr, $frame_name:expr, $gas_rem:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { if let Some(profiler) = $profiler { if let Some(config) = &profiler.config { @@ -275,7 +275,7 @@ macro_rules! profile_open_frame_impl { #[macro_export] macro_rules! profile_close_frame { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); move_vm_profiler::profile_close_frame_impl!( @@ -290,7 +290,7 @@ macro_rules! profile_close_frame { #[macro_export] macro_rules! profile_close_frame_impl { ($profiler:expr, $frame_name:expr, $gas_rem:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { if let Some(profiler) = $profiler { if let Some(config) = &profiler.config { @@ -309,7 +309,7 @@ macro_rules! profile_close_frame_impl { #[macro_export] macro_rules! profile_open_instr { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); if let Some(profiler) = $gas_meter.get_profiler_mut() { @@ -326,7 +326,7 @@ macro_rules! profile_open_instr { #[macro_export] macro_rules! profile_close_instr { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); if let Some(profiler) = $gas_meter.get_profiler_mut() { @@ -343,39 +343,39 @@ macro_rules! profile_close_instr { #[macro_export] macro_rules! profile_dump_file { ($profiler:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] $profiler.to_file() }; } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[macro_export] -macro_rules! gas_profiler_feature_enabled { +macro_rules! 
tracing_feature_enabled { ($($tt:tt)*) => { - if cfg!(feature = "gas-profiler") { + if cfg!(feature = "tracing") { $($tt)* } }; } -#[cfg(not(feature = "gas-profiler"))] +#[cfg(not(feature = "tracing"))] #[macro_export] -macro_rules! gas_profiler_feature_enabled { +macro_rules! tracing_feature_enabled { ( $( $tt:tt )* ) => {}; } -#[cfg(not(feature = "gas-profiler"))] +#[cfg(not(feature = "tracing"))] #[macro_export] -macro_rules! gas_profiler_feature_disabled { +macro_rules! tracing_feature_disabled { ($($tt:tt)*) => { - if !cfg!(feature = "gas-profiler") { + if !cfg!(feature = "tracing") { $($tt)* } }; } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[macro_export] -macro_rules! gas_profiler_feature_disabled { +macro_rules! tracing_feature_disabled { ( $( $tt:tt )* ) => {}; } diff --git a/external-crates/move/crates/move-vm-runtime/Cargo.toml b/external-crates/move/crates/move-vm-runtime/Cargo.toml index 17e49de4c0a46..3089a93ce3d6d 100644 --- a/external-crates/move/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/crates/move-vm-runtime/Cargo.toml @@ -39,11 +39,9 @@ move-compiler.workspace = true default = [] fuzzing = ["move-vm-types/fuzzing"] failpoints = ["fail/failpoints"] -# Enable tracing and debugging also for release builds. By default, it is only enabled for debug builds. 
-debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/crates/move-vm-runtime/src/lib.rs b/external-crates/move/crates/move-vm-runtime/src/lib.rs index c69f0d150582d..c982a51ce1209 100644 --- a/external-crates/move/crates/move-vm-runtime/src/lib.rs +++ b/external-crates/move/crates/move-vm-runtime/src/lib.rs @@ -24,7 +24,7 @@ mod tracing; mod tracing2; // Only include debugging functionality in debug builds -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] mod debug; #[cfg(test)] diff --git a/external-crates/move/crates/move-vm-runtime/src/loader.rs b/external-crates/move/crates/move-vm-runtime/src/loader.rs index bffc555dbbaba..6f843fd75aec6 100644 --- a/external-crates/move/crates/move-vm-runtime/src/loader.rs +++ b/external-crates/move/crates/move-vm-runtime/src/loader.rs @@ -2207,7 +2207,7 @@ impl Function { ) } - #[cfg(any(debug_assertions, feature = "debugging"))] + #[cfg(any(debug_assertions, feature = "tracing"))] pub(crate) fn pretty_short_string(&self) -> String { let id = &self.module; format!( diff --git a/external-crates/move/crates/move-vm-runtime/src/runtime.rs b/external-crates/move/crates/move-vm-runtime/src/runtime.rs index b3656f597a3f2..6801254fbe246 100644 --- a/external-crates/move/crates/move-vm-runtime/src/runtime.rs +++ b/external-crates/move/crates/move-vm-runtime/src/runtime.rs @@ -498,7 +498,7 @@ impl VMRuntime { gas_meter: &mut impl GasMeter, extensions: &mut NativeContextExtensions, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/crates/move-vm-runtime/src/session.rs b/external-crates/move/crates/move-vm-runtime/src/session.rs index b20694120a5b2..7dc5549a4f062 100644 --- a/external-crates/move/crates/move-vm-runtime/src/session.rs +++ b/external-crates/move/crates/move-vm-runtime/src/session.rs @@ -104,7 +104,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { args: Vec>, gas_meter: &mut impl GasMeter, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( @@ -137,7 +137,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { gas_meter: &mut impl GasMeter, tracer: Option<&mut MoveTraceBuilder>, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( @@ -147,7 +147,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { } } - let tracer = if cfg!(feature = "gas-profiler") { + let tracer = if cfg!(feature = "tracing") { tracer } else { None diff --git a/external-crates/move/crates/move-vm-runtime/src/tracing.rs b/external-crates/move/crates/move-vm-runtime/src/tracing.rs index a4c984c62a537..d2a9662f12b6f 100644 --- a/external-crates/move/crates/move-vm-runtime/src/tracing.rs +++ b/external-crates/move/crates/move-vm-runtime/src/tracing.rs @@ -2,10 +2,10 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] use crate::debug::DebugContext; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] use ::{ move_binary_format::file_format::Bytecode, move_vm_types::values::Locals, @@ -20,31 +20,31 @@ use ::{ }, }; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] use crate::{ interpreter::Interpreter, loader::{Function, Loader}, }; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE"; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] const MOVE_VM_STEPPING_ENV_VAR_NAME: &str = "MOVE_VM_STEP"; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static FILE_PATH: Lazy = Lazy::new(|| { env::var(MOVE_VM_TRACING_ENV_VAR_NAME).unwrap_or_else(|_| "move_vm_trace.trace".to_string()) }); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static TRACING_ENABLED: Lazy = Lazy::new(|| 
env::var(MOVE_VM_TRACING_ENV_VAR_NAME).is_ok()); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static DEBUGGING_ENABLED: Lazy = Lazy::new(|| env::var(MOVE_VM_STEPPING_ENV_VAR_NAME).is_ok()); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static LOGGING_FILE: Lazy> = Lazy::new(|| { Mutex::new( OpenOptions::new() @@ -55,11 +55,11 @@ static LOGGING_FILE: Lazy> = Lazy::new(|| { ) }); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static DEBUG_CONTEXT: Lazy> = Lazy::new(|| Mutex::new(DebugContext::new())); // Only include in debug builds -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] pub(crate) fn trace( function_desc: &Function, locals: &Locals, @@ -93,7 +93,7 @@ pub(crate) fn trace( macro_rules! trace { ($function_desc:expr, $locals:expr, $pc:expr, $instr:tt, $resolver:expr, $interp:expr) => { // Only include this code in debug releases - #[cfg(any(debug_assertions, feature = "debugging"))] + #[cfg(any(debug_assertions, feature = "tracing"))] $crate::tracing::trace( &$function_desc, $locals, diff --git a/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs b/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs index 5a29145c8ba4f..fe06f916abf2e 100644 --- a/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs +++ b/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs @@ -1,9 +1,9 @@ pub(crate) mod tracer; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] pub(crate) const TRACING_ENABLED: bool = true; -#[cfg(not(feature = "gas-profiler"))] +#[cfg(not(feature = "tracing"))] pub(crate) const TRACING_ENABLED: bool = false; #[macro_export] diff --git a/external-crates/move/crates/move-vm-test-utils/Cargo.toml b/external-crates/move/crates/move-vm-test-utils/Cargo.toml index 
a354e6dcc26ad..6b90a1df11b39 100644 --- a/external-crates/move/crates/move-vm-test-utils/Cargo.toml +++ b/external-crates/move/crates/move-vm-test-utils/Cargo.toml @@ -24,4 +24,4 @@ move-vm-profiler.workspace = true [features] default = [ ] tiered-gas = [] -gas-profiler = [] +tracing = [] diff --git a/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml b/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml index 4ef1818e16839..e47f97d101eef 100644 --- a/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml @@ -41,7 +41,7 @@ failpoints = ["fail/failpoints"] debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs index 077ee45c803cd..86bc799896c51 100644 --- a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs +++ b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs @@ -20,8 +20,6 @@ use move_core_types::{ vm_status::{StatusCode, StatusType}, }; use move_vm_config::runtime::VMRuntimeLimitsConfig; -#[cfg(feature = "gas-profiler")] -use move_vm_profiler::GasProfiler; use move_vm_profiler::{ profile_close_frame, profile_close_instr, profile_open_frame, profile_open_instr, }; @@ -213,7 +211,7 @@ impl Interpreter { } ExitCode::Call(fh_idx) => { let func = resolver.function_from_handle(fh_idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); @@ -255,7 +253,7 @@ impl Interpreter { .instantiate_generic_function(idx, current_frame.ty_args()) .map_err(|e| 
set_err_info!(current_frame, e))?; let func = resolver.function_from_instantiation(idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml b/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml index 5fa8584679fb3..683e7c3229244 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml @@ -41,7 +41,7 @@ failpoints = ["fail/failpoints"] debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs index 0317a9216e4ff..6a6a4a263c347 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs @@ -20,8 +20,6 @@ use move_core_types::{ vm_status::{StatusCode, StatusType}, }; use move_vm_config::runtime::VMRuntimeLimitsConfig; -#[cfg(feature = "gas-profiler")] -use move_vm_profiler::GasProfiler; use move_vm_profiler::{ profile_close_frame, profile_close_instr, profile_open_frame, profile_open_instr, }; @@ -273,7 +271,7 @@ impl Interpreter { } ExitCode::Call(fh_idx) => { let func = resolver.function_from_handle(fh_idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); @@ -314,7 +312,7 @@ impl Interpreter { .instantiate_generic_function(idx, current_frame.ty_args()) .map_err(|e| set_err_info!(current_frame, e))?; let func = 
resolver.function_from_instantiation(idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs index 172628e30d6ae..2d4ff01b7308b 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs @@ -493,7 +493,7 @@ impl VMRuntime { gas_meter: &mut impl GasMeter, extensions: &mut NativeContextExtensions, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs index c2a8dbf905dd5..37b2a49e41d0e 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs @@ -102,7 +102,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { args: Vec>, gas_meter: &mut impl GasMeter, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml b/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml index b7c15e1bba673..cf4a79153a2da 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml @@ -41,7 +41,7 @@ failpoints = ["fail/failpoints"] debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs index 29cc3dc4e9bad..e116037ba8b64 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs @@ -21,8 +21,6 @@ use move_core_types::{ vm_status::{StatusCode, StatusType}, }; use move_vm_config::runtime::VMRuntimeLimitsConfig; -#[cfg(feature = "gas-profiler")] -use move_vm_profiler::GasProfiler; use move_vm_profiler::{ profile_close_frame, profile_close_instr, profile_open_frame, profile_open_instr, }; @@ -275,7 +273,7 @@ impl Interpreter { } ExitCode::Call(fh_idx) => { let func = resolver.function_from_handle(fh_idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); @@ -318,7 +316,7 @@ impl Interpreter { .instantiate_generic_function(idx, current_frame.ty_args()) .map_err(|e| set_err_info!(current_frame, e))?; let func = resolver.function_from_instantiation(idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let 
func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs index db7df944ec7ce..c21d9cff9dcc1 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs @@ -483,7 +483,7 @@ impl VMRuntime { gas_meter: &mut impl GasMeter, extensions: &mut NativeContextExtensions, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs index c2a8dbf905dd5..37b2a49e41d0e 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs @@ -102,7 +102,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { args: Vec>, gas_meter: &mut impl GasMeter, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/tests.sh b/external-crates/tests.sh index 7fa5a0531ff9a..5586c35becfea 100755 --- a/external-crates/tests.sh +++ b/external-crates/tests.sh @@ -4,4 +4,4 @@ cd move echo "Excluding prover Move tests" cargo nextest run -E '!package(move-prover) and !test(prove) and !test(run_all::simple_build_with_docs/args.txt) and !test(run_test::nested_deps_bad_parent/Move.toml)' --workspace --no-fail-fast echo "Running tracing-specific tests" -cargo nextest run -p move-cli --features gas-profiler +cargo nextest run -p move-cli --features tracing diff --git a/narwhal/executor/tests/consensus_integration_tests.rs b/narwhal/executor/tests/consensus_integration_tests.rs deleted file mode 100644 index 9bebf74c0e3c7..0000000000000 --- a/narwhal/executor/tests/consensus_integration_tests.rs +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 -use bytes::Bytes; -use fastcrypto::hash::Hash; -use narwhal_executor::get_restored_consensus_output; -use narwhal_executor::MockExecutionState; -use primary::consensus::{ - Bullshark, Consensus, ConsensusMetrics, ConsensusRound, LeaderSchedule, LeaderSwapTable, -}; -use primary::NUM_SHUTDOWN_RECEIVERS; -use prometheus::Registry; -use std::collections::BTreeSet; -use std::sync::Arc; -use storage::NodeStorage; -use telemetry_subscribers::TelemetryGuards; -use test_utils::latest_protocol_version; -use test_utils::{cluster::Cluster, temp_dir, CommitteeFixture}; -use tokio::sync::watch; - -use types::{Certificate, PreSubscribedBroadcastSender, Round, TransactionProto}; - -#[tokio::test] -async fn test_recovery() { - // Create storage - let storage = NodeStorage::reopen(temp_dir(), None); - - let consensus_store = storage.consensus_store; - let certificate_store = storage.certificate_store; - - // Setup consensus - let fixture = CommitteeFixture::builder().build(); - let committee = fixture.committee(); - - // Make certificates for rounds 1 and 2. - let ids: Vec<_> = fixture.authorities().map(|a| a.id()).collect(); - let genesis = Certificate::genesis(&latest_protocol_version(), &committee) - .iter() - .map(|x| x.digest()) - .collect::>(); - let (mut certificates, next_parents) = test_utils::make_optimal_certificates( - &committee, - &latest_protocol_version(), - 1..=2, - &genesis, - &ids, - ); - - // Make two certificate (f+1) with round 3 to trigger the commits. - let (_, certificate) = test_utils::mock_certificate( - &committee, - &latest_protocol_version(), - ids[0], - 3, - next_parents.clone(), - ); - certificates.push_back(certificate); - let (_, certificate) = test_utils::mock_certificate( - &committee, - &latest_protocol_version(), - ids[1], - 3, - next_parents, - ); - certificates.push_back(certificate); - - // Spawn the consensus engine and sink the primary channel. 
- let (tx_waiter, rx_waiter) = test_utils::test_channel!(1); - let (tx_primary, mut rx_primary) = test_utils::test_channel!(1); - let (tx_output, mut rx_output) = test_utils::test_channel!(1); - let (tx_consensus_round_updates, _rx_consensus_round_updates) = - watch::channel(ConsensusRound::default()); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - const GC_DEPTH: Round = 50; - const NUM_SUB_DAGS_PER_SCHEDULE: u64 = 100; - let metrics = Arc::new(ConsensusMetrics::new(&Registry::new())); - let bullshark = Bullshark::new( - committee.clone(), - consensus_store.clone(), - latest_protocol_version(), - metrics.clone(), - NUM_SUB_DAGS_PER_SCHEDULE, - LeaderSchedule::new(committee.clone(), LeaderSwapTable::default()), - ); - - let _consensus_handle = Consensus::spawn( - committee, - GC_DEPTH, - consensus_store.clone(), - certificate_store.clone(), - tx_shutdown.subscribe(), - rx_waiter, - tx_primary, - tx_consensus_round_updates, - tx_output, - bullshark, - metrics, - ); - tokio::spawn(async move { while rx_primary.recv().await.is_some() {} }); - - // Feed all certificates to the consensus. Only the last certificate should trigger - // commits, so the task should not block. - while let Some(certificate) = certificates.pop_front() { - // we store the certificates so we can enable the recovery - // mechanism later. - certificate_store.write(certificate.clone()).unwrap(); - tx_waiter.send(certificate).await.unwrap(); - } - - // Ensure the first 4 ordered certificates are from round 1 (they are the parents of the committed - // leader); then the leader's certificate should be committed. 
- let consensus_index_counter = 4; - let num_of_committed_certificates = 5; - - let committed_sub_dag = rx_output.recv().await.unwrap(); - let mut sequence = committed_sub_dag.certificates.into_iter(); - for i in 1..=num_of_committed_certificates { - let output = sequence.next().unwrap(); - - if i < 5 { - assert_eq!(output.round(), 1); - } else { - assert_eq!(output.round(), 2); - } - } - - // Now assume that we want to recover from a crash. We are testing all the recovery cases - // from having executed no certificates at all (or certificate with index = 0), up to - // have executed the last committed certificate - for last_executed_certificate_index in 0..consensus_index_counter { - let mut execution_state = MockExecutionState::new(); - execution_state - .expect_last_executed_sub_dag_index() - .times(1) - .returning(|| 1); - - let consensus_output = get_restored_consensus_output( - consensus_store.clone(), - certificate_store.clone(), - &execution_state, - ) - .await - .unwrap(); - - // we expect to have recovered all the certificates from the last commit. The Sui executor engine - // will not execute twice the same certificate. - assert_eq!(consensus_output.len(), 1); - assert!( - consensus_output[0].len() - >= (num_of_committed_certificates - last_executed_certificate_index) as usize - ); - } -} - -#[tokio::test] -async fn test_internal_consensus_output() { - // Enabled debug tracing so we can easily observe the - // nodes logs. 
- let _guard = setup_tracing(); - - let mut cluster = Cluster::new(None); - - // start the cluster - cluster.start(Some(4), Some(1), None).await; - - // get a client to send transactions - let worker_id = 0; - - let authority = cluster.authority(0); - let mut client = authority.new_transactions_client(&worker_id).await; - - // Subscribe to the transaction confirmation channel - let mut receiver = authority - .primary() - .await - .tx_transaction_confirmation - .subscribe(); - - // Create arbitrary transactions - let mut transactions = Vec::new(); - - const NUM_OF_TRANSACTIONS: u32 = 10; - for i in 0..NUM_OF_TRANSACTIONS { - let tx = string_transaction(i); - - // serialise and send - let tr = bcs::to_bytes(&tx).unwrap(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tr)], - }; - client.submit_transaction(txn).await.unwrap(); - - transactions.push(tx); - } - - // wait for transactions to complete - loop { - let result = receiver.recv().await.unwrap(); - - // deserialise transaction - let output_transaction = bcs::from_bytes::(&result).unwrap(); - - // we always remove the first transaction and check with the one - // sequenced. We want the transactions to be sequenced in the - // same order as we post them. - let expected_transaction = transactions.remove(0); - - assert_eq!( - expected_transaction, output_transaction, - "Expected to have received transaction with same id. 
Ordering is important" - ); - - if transactions.is_empty() { - break; - } - } -} - -fn string_transaction(id: u32) -> String { - format!("test transaction:{id}") -} - -fn setup_tracing() -> TelemetryGuards { - // Setup tracing - let tracing_level = "debug"; - let network_tracing_level = "info"; - - let log_filter = format!("{tracing_level},h2={network_tracing_level},tower={network_tracing_level},hyper={network_tracing_level},tonic::transport={network_tracing_level}"); - - telemetry_subscribers::TelemetryConfig::new() - // load env variables - .with_env() - // load special log filter - .with_log_level(&log_filter) - .init() - .0 -} diff --git a/narwhal/primary/tests/causal_completion_tests.rs b/narwhal/primary/tests/causal_completion_tests.rs deleted file mode 100644 index d564f9ea39f4d..0000000000000 --- a/narwhal/primary/tests/causal_completion_tests.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 -use bytes::Bytes; -use std::time::Duration; -use test_utils::cluster::{setup_tracing, Cluster}; -use tracing::info; -use types::TransactionProto; - -type StringTransaction = String; - -#[ignore] -#[tokio::test] -async fn test_restore_from_disk() { - // Enabled debug tracing so we can easily observe the - // nodes logs. 
- let _guard = setup_tracing(); - - let mut cluster = Cluster::new(None); - - // start the cluster - cluster.start(Some(4), Some(1), None).await; - - let id = 0; - let client = cluster.authority(0).new_transactions_client(&id).await; - - // Subscribe to the transaction confirmation channel - let mut receiver = cluster - .authority(0) - .primary() - .await - .tx_transaction_confirmation - .subscribe(); - - // Create arbitrary transactions - let mut total_tx = 3; - for tx in [ - string_transaction(), - string_transaction(), - string_transaction(), - ] { - let mut c = client.clone(); - tokio::spawn(async move { - let tr = bcs::to_bytes(&tx).unwrap(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tr)], - }; - - c.submit_transaction(txn).await.unwrap(); - }); - } - - // wait for transactions to complete - loop { - if let Ok(_result) = receiver.recv().await { - total_tx -= 1; - if total_tx < 1 { - break; - } - } - } - - // Now stop node 0 - cluster.stop_node(0).await; - - // Let other primaries advance and primary 0 releases its port. 
- tokio::time::sleep(Duration::from_secs(10)).await; - - // Now start the node 0 again - cluster.start_node(0, true, Some(1)).await; - - // Let the node recover - tokio::time::sleep(Duration::from_secs(2)).await; - - let node = cluster.authority(0); - - // Check the metrics to ensure the node was recovered from disk - let primary = node.primary().await; - - let node_recovered_state = - if let Some(metric) = primary.metric("recovered_consensus_state").await { - let value = metric.get_counter().get_value(); - info!("Found metric for recovered consensus state."); - - value > 0.0 - } else { - false - }; - - assert!(node_recovered_state, "Node did not recover state from disk"); -} - -fn string_transaction() -> StringTransaction { - StringTransaction::from("test transaction") -} - -#[ignore] -#[tokio::test] -async fn test_read_causal_signed_certificates() { - const CURRENT_ROUND_METRIC: &str = "current_round"; - - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let mut cluster = Cluster::new(None); - - // start the cluster - cluster.start(Some(4), Some(1), None).await; - - // Let primaries advance little bit - tokio::time::sleep(Duration::from_secs(10)).await; - - // Ensure all nodes advanced - for authority in cluster.authorities().await { - if let Some(metric) = authority.primary().await.metric(CURRENT_ROUND_METRIC).await { - let value = metric.get_gauge().get_value(); - - info!("Metric -> {:?}", value); - - // If the current round is increasing then it means that the - // node starts catching up and is proposing. - assert!(value > 1.0, "Node didn't progress further than the round 1"); - } - } - - // Now stop node 0 - cluster.stop_node(0).await; - - // Let other primaries advance and primary 0 releases its port. - tokio::time::sleep(Duration::from_secs(10)).await; - - // Now start the validator 0 again - cluster.start_node(0, true, Some(1)).await; - - // Now check that the current round advances. 
Give the opportunity with a few - // iterations. If metric hasn't picked up then we know that node can't make - // progress. - let mut node_made_progress = false; - let node = cluster.authority(0).primary().await; - - for _ in 0..10 { - tokio::time::sleep(Duration::from_secs(1)).await; - - if let Some(metric) = node.metric(CURRENT_ROUND_METRIC).await { - let value = metric.get_gauge().get_value(); - info!("Metric -> {:?}", value); - - // If the current round is increasing then it means that the - // node starts catching up and is proposing. - if value > 1.0 { - node_made_progress = true; - break; - } - } - } - - assert!( - node_made_progress, - "Node 0 didn't make progress - causal completion didn't succeed" - ); -} diff --git a/narwhal/primary/tests/nodes_bootstrapping_tests.rs b/narwhal/primary/tests/nodes_bootstrapping_tests.rs deleted file mode 100644 index 373676bfe7cb1..0000000000000 --- a/narwhal/primary/tests/nodes_bootstrapping_tests.rs +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 -use bytes::Bytes; -use std::time::Duration; -use test_utils::cluster::{setup_tracing, Cluster}; -use types::TransactionProto; - -#[tokio::test(flavor = "current_thread", start_paused = true)] -async fn test_response_error_after_shutdown_internal_consensus() { - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let delay = Duration::from_secs(10); // 10 seconds - - // A cluster of 4 nodes will be created, with internal consensus. 
- let cluster = Cluster::new(None); - - // ==== Start first authority ==== - let authority = cluster.authority(0); - authority.start(false, Some(1)).await; - - tokio::time::sleep(delay).await; - - authority.stop_all().await; - - tokio::time::sleep(delay).await; - - let worker_id = 0; - let mut client = authority.new_transactions_client(&worker_id).await; - - // Create a fake transaction - let tx_str = "test transaction".to_string(); - let tx = bcs::to_bytes(&tx_str).unwrap(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tx)], - }; - - // Should fail submitting to consensus. - let Err(e) = client.submit_transaction(txn).await else { - panic!("Submitting transactions after Narwhal shutdown should fail!"); - }; - assert!(e.message().contains("tcp connect error:"), "Actual: {}", e); -} - -/// Nodes will be started in a staggered fashion. This is simulating -/// a real world scenario where nodes across validators will not start -/// in the same time. -#[ignore] -#[tokio::test] -async fn test_node_staggered_starts() { - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let node_staggered_delay = Duration::from_secs(60 * 2); // 2 minutes - - // A cluster of 4 nodes will be created - let cluster = Cluster::new(None); - - // ==== Start first authority ==== - cluster.authority(0).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // No node should be able to commit, no reported round was expected - cluster.assert_progress(0, 0).await; - - // ==== Start second authority ==== - cluster.authority(1).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // No node should be able to commit, no reported round was expected - cluster.assert_progress(0, 0).await; - - // ==== Start third authority ==== - // Now 2f + 1 nodes are becoming available and we expect all the nodes to - // start making progress (advance in rounds). 
- cluster.authority(2).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // We have only (f) unavailable nodes, so all should have made progress and committed at least after the first round - cluster.assert_progress(3, 2).await; - - // ==== Start fourth authority ==== - // Now 3f + 1 nodes are becoming available (the whole network) and all the nodes - // should make progress - cluster.authority(3).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // All nodes are available so all should have made progress and committed at least after the first round - cluster.assert_progress(4, 2).await; -} - -/// All the nodes have an outage at the same time, when they recover, the rounds begin to advance. -#[ignore] -#[tokio::test] -async fn test_full_outage_and_recovery() { - let _guard = setup_tracing(); - - let stop_and_start_delay = Duration::from_secs(12); - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Stop all the nodes - cluster.authority(0).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(1).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(2).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(3).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - // Start all the nodes - cluster.authority(0).start(true, Some(1)).await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(1).start(true, Some(1)).await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(2).start(true, Some(1)).await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(3).start(true, 
Some(1)).await; - - // now wait a bit to give the opportunity to recover - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; -} - -#[ignore] -#[tokio::test] -async fn test_second_node_restart() { - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let restart_delay = Duration::from_secs(120); - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Now restart node 2 with some delay between - cluster.authority(2).restart(true, restart_delay).await; - - // now wait a bit to give the opportunity to recover - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; - - // Now restart node 3 with some delay between - cluster.authority(3).restart(true, restart_delay).await; - - // now wait a bit to give the opportunity to recover - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; -} - -#[ignore] -#[tokio::test] -/// We are testing the loss of liveness of a healthy cluster. While 3f+1 nodes run -/// we are shutting down f+1 nodes. Then we are bringing the f+1 nodes back again -/// We expect the restarted nodes to be able to make new proposals, and all the nodes -/// should be able to propose from where they left of at last round, and the rounds should -/// all advance. -async fn test_loss_of_liveness_without_recovery() { - // Enabled debug tracing so we can easily observe the - // nodes logs. 
- let _guard = setup_tracing(); - - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; - - // Now stop node 2 & 3 - cluster.authority(2).stop_all().await; - cluster.authority(3).stop_all().await; - - // wait and fetch the latest commit round - tokio::time::sleep(node_advance_delay).await; - let rounds_1 = cluster.assert_progress(2, 0).await; - - // wait and fetch again the rounds - tokio::time::sleep(node_advance_delay).await; - let rounds_2 = cluster.assert_progress(2, 0).await; - - // We assert that nodes haven't advanced at all - assert_eq!(rounds_1, rounds_2); - - // Now bring up nodes - cluster.authority(2).start(true, Some(1)).await; - cluster.authority(3).start(true, Some(1)).await; - - // wait and fetch the latest commit round. All of them should have advanced and we allow a small - // threshold in case some node is faster than the others - tokio::time::sleep(node_advance_delay).await; - let rounds_3 = cluster.assert_progress(4, 2).await; - - // we test that nodes 0 & 1 have actually advanced in rounds compared to before. - assert!(rounds_3.get(&0) > rounds_2.get(&0)); - assert!(rounds_3.get(&1) > rounds_2.get(&1)); -} - -#[ignore] -#[tokio::test] -/// We are testing the loss of liveness of a healthy cluster. While 3f+1 nodes run -/// we are shutting down f+1 nodes one by one with some delay between them. -/// Then we are bringing the f+1 nodes back again. We expect the cluster to -/// recover and effectively make progress. -async fn test_loss_of_liveness_with_recovery() { - // Enabled debug tracing so we can easily observe the - // nodes logs. 
- let _guard = setup_tracing(); - - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; - - // Now stop node 2 - cluster.authority(2).stop_all().await; - - // allow other nodes to advance - tokio::time::sleep(node_advance_delay).await; - - // Now stop node 3 - cluster.authority(3).stop_all().await; - - // wait and fetch the latest commit round - tokio::time::sleep(node_advance_delay).await; - let rounds_1 = cluster.assert_progress(2, 0).await; - - // wait and fetch again the rounds - tokio::time::sleep(node_advance_delay).await; - let rounds_2 = cluster.assert_progress(2, 0).await; - - // We assert that nodes haven't advanced at all - assert_eq!(rounds_1, rounds_2); - - // Now bring up nodes - cluster.authority(2).start(true, Some(1)).await; - cluster.authority(3).start(true, Some(1)).await; - - // wait and fetch the latest commit round - tokio::time::sleep(node_advance_delay).await; - let rounds_3 = cluster.assert_progress(4, 2).await; - - let round_2_max = rounds_2.values().max().unwrap(); - assert!( - rounds_3.values().all(|v| v > round_2_max), - "All the nodes should have advanced more from the previous round" - ); -} diff --git a/narwhal/test-utils/src/cluster.rs b/narwhal/test-utils/src/cluster.rs index cd0e426f2a90d..45202094800e4 100644 --- a/narwhal/test-utils/src/cluster.rs +++ b/narwhal/test-utils/src/cluster.rs @@ -754,6 +754,7 @@ impl AuthorityDetails { .get(worker_id) .unwrap() .transactions_address, + None, ) .unwrap(); diff --git a/narwhal/worker/src/lib.rs b/narwhal/worker/src/lib.rs index cb453b3bc12b9..f74fa44aca4fc 100644 --- a/narwhal/worker/src/lib.rs +++ b/narwhal/worker/src/lib.rs @@ -13,7 +13,6 @@ mod batch_maker; 
mod client; mod handlers; mod quorum_waiter; -mod transactions_server; mod tx_validator; mod worker; diff --git a/narwhal/worker/src/tests/worker_tests.rs b/narwhal/worker/src/tests/worker_tests.rs deleted file mode 100644 index 99421109124da..0000000000000 --- a/narwhal/worker/src/tests/worker_tests.rs +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright (c) 2021, Facebook, Inc. and its affiliates -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 -use super::*; -use crate::LocalNarwhalClient; -use crate::{metrics::initialise_metrics, TrivialTransactionValidator}; -use async_trait::async_trait; -use bytes::Bytes; -use fastcrypto::hash::Hash; -use futures::stream::FuturesOrdered; -use futures::StreamExt; -use primary::{CHANNEL_CAPACITY, NUM_SHUTDOWN_RECEIVERS}; -use prometheus::Registry; -use store::rocks; -use store::rocks::MetricConf; -use store::rocks::ReadWriteOptions; -use test_utils::{ - batch, latest_protocol_version, temp_dir, test_network, transaction, CommitteeFixture, -}; -use types::{ - BatchAPI, MockWorkerToPrimary, MockWorkerToWorker, PreSubscribedBroadcastSender, - TransactionProto, TransactionsClient, WorkerBatchMessage, WorkerToWorkerClient, -}; - -// A test validator that rejects every transaction / batch -#[derive(Clone)] -struct NilTxValidator; -#[async_trait] -impl TransactionValidator for NilTxValidator { - type Error = eyre::Report; - - fn validate(&self, _tx: &[u8]) -> Result<(), Self::Error> { - eyre::bail!("Invalid transaction"); - } - fn validate_batch( - &self, - _txs: &Batch, - _protocol_config: &ProtocolConfig, - ) -> Result<(), Self::Error> { - eyre::bail!("Invalid batch"); - } -} - -#[tokio::test] -async fn reject_invalid_clients_transactions() { - let fixture = CommitteeFixture::builder().randomize_ports(true).build(); - let committee = fixture.committee(); - let worker_cache = fixture.worker_cache(); - - let worker_id = 0; - let my_primary = fixture.authorities().next().unwrap(); - let myself = 
my_primary.worker(worker_id); - let public_key = my_primary.public_key(); - let client = NetworkClient::new_from_keypair(&my_primary.network_keypair()); - - let parameters = Parameters { - batch_size: 200, // Two transactions. - ..Parameters::default() - }; - - // Create a new test store. - let batch_store = rocks::DBMap::::open( - temp_dir(), - MetricConf::default(), - None, - Some("batches"), - &ReadWriteOptions::default(), - ) - .unwrap(); - - let registry = Registry::new(); - let metrics = initialise_metrics(®istry); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - // Spawn a `Worker` instance with a reject-all validator. - Worker::spawn( - my_primary.authority().clone(), - myself.keypair(), - worker_id, - committee.clone(), - worker_cache.clone(), - latest_protocol_version(), - parameters, - NilTxValidator, - client, - batch_store, - metrics, - &mut tx_shutdown, - ); - - // Wait till other services have been able to start up - tokio::task::yield_now().await; - // Send enough transactions to create a batch. 
- let address = worker_cache - .worker(&public_key, &worker_id) - .unwrap() - .transactions; - let config = mysten_network::config::Config::new(); - let channel = config.connect_lazy(&address).unwrap(); - let mut client = TransactionsClient::new(channel); - let tx = transaction(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tx.clone())], - }; - - // Check invalid transactions are rejected - let res = client.submit_transaction(txn).await; - assert!(res.is_err()); - - let worker_pk = worker_cache.worker(&public_key, &worker_id).unwrap().name; - - let batch = batch(&latest_protocol_version()); - let batch_message = WorkerBatchMessage { - batch: batch.clone(), - }; - - // setup network : impersonate a send from another worker - let another_primary = fixture.authorities().nth(2).unwrap(); - let another_worker = another_primary.worker(worker_id); - let network = test_network( - another_worker.keypair(), - &another_worker.info().worker_address, - ); - // ensure that the networks are connected - network - .connect(myself.info().worker_address.to_anemo_address().unwrap()) - .await - .unwrap(); - let peer = network.peer(PeerId(worker_pk.0.to_bytes())).unwrap(); - - // Check invalid batches are rejected - let res = WorkerToWorkerClient::new(peer) - .report_batch(batch_message) - .await; - assert!(res.is_err()); -} - -/// TODO: test both RemoteNarwhalClient and LocalNarwhalClient in the same test case. -#[tokio::test] -async fn handle_remote_clients_transactions() { - let fixture = CommitteeFixture::builder().randomize_ports(true).build(); - let committee = fixture.committee(); - let worker_cache = fixture.worker_cache(); - - let worker_id = 0; - let my_primary = fixture.authorities().next().unwrap(); - let myself = my_primary.worker(worker_id); - let authority_public_key = my_primary.public_key(); - let client = NetworkClient::new_from_keypair(&my_primary.network_keypair()); - - let parameters = Parameters { - batch_size: 200, // Two transactions. 
- ..Parameters::default() - }; - - // Create a new test store. - let batch_store = rocks::DBMap::::open( - temp_dir(), - MetricConf::default(), - None, - Some("batches"), - &ReadWriteOptions::default(), - ) - .unwrap(); - - let registry = Registry::new(); - let metrics = initialise_metrics(®istry); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - // Spawn a `Worker` instance. - Worker::spawn( - my_primary.authority().clone(), - myself.keypair(), - worker_id, - committee.clone(), - worker_cache.clone(), - latest_protocol_version(), - parameters, - TrivialTransactionValidator, - client.clone(), - batch_store, - metrics, - &mut tx_shutdown, - ); - - // Spawn a network listener to receive our batch's digest. - let mut peer_networks = Vec::new(); - - // Create batches - let batch = batch(&latest_protocol_version()); - let batch_digest = batch.digest(); - - let (tx_await_batch, mut rx_await_batch) = test_utils::test_channel!(CHANNEL_CAPACITY); - let mut mock_primary_server = MockWorkerToPrimary::new(); - mock_primary_server - .expect_report_own_batch() - .withf(move |request| { - let message = request.body(); - - message.digest == batch_digest && message.worker_id == worker_id - }) - .times(1) - .returning(move |_| { - tx_await_batch.try_send(()).unwrap(); - Ok(anemo::Response::new(())) - }); - client.set_worker_to_primary_local_handler(Arc::new(mock_primary_server)); - - // Spawn enough workers' listeners to acknowledge our batches. - for worker in fixture.authorities().skip(1).map(|a| a.worker(worker_id)) { - let mut mock_server = MockWorkerToWorker::new(); - mock_server - .expect_report_batch() - .returning(|_| Ok(anemo::Response::new(()))); - let routes = anemo::Router::new().add_rpc_service(WorkerToWorkerServer::new(mock_server)); - peer_networks.push(worker.new_network(routes)); - } - - // Wait till other services have been able to start up - tokio::task::yield_now().await; - // Send enough transactions to create a batch. 
- let address = worker_cache - .worker(&authority_public_key, &worker_id) - .unwrap() - .transactions; - let config = mysten_network::config::Config::new(); - let channel = config.connect_lazy(&address).unwrap(); - let client = TransactionsClient::new(channel); - - let join_handle = tokio::task::spawn(async move { - let mut fut_list = FuturesOrdered::new(); - for tx in batch.transactions() { - let txn = TransactionProto { - transactions: vec![Bytes::from(tx.clone())], - }; - - // Calls to submit_transaction are now blocking, so we need to drive them - // all at the same time, rather than sequentially. - let mut inner_client = client.clone(); - fut_list.push_back(async move { - inner_client.submit_transaction(txn).await.unwrap(); - }); - } - - // Drive all sending in parallel. - while fut_list.next().await.is_some() {} - }); - - // Ensure the primary received the batch's digest (ie. it did not panic). - rx_await_batch.recv().await.unwrap(); - - // Ensure sending ended. - assert!(join_handle.await.is_ok()); -} - -/// TODO: test both RemoteNarwhalClient and LocalNarwhalClient in the same test case. -#[tokio::test] -async fn handle_local_clients_transactions() { - let fixture = CommitteeFixture::builder().randomize_ports(true).build(); - let committee = fixture.committee(); - let worker_cache = fixture.worker_cache(); - - let worker_id = 0; - let my_primary = fixture.authorities().next().unwrap(); - let myself = my_primary.worker(worker_id); - let authority_public_key = my_primary.public_key(); - let client = NetworkClient::new_from_keypair(&my_primary.network_keypair()); - - let parameters = Parameters { - batch_size: 200, // Two transactions. - ..Parameters::default() - }; - - // Create a new test store. 
- let batch_store = rocks::DBMap::::open( - temp_dir(), - MetricConf::default(), - None, - Some("batches"), - &ReadWriteOptions::default(), - ) - .unwrap(); - - let registry = Registry::new(); - let metrics = initialise_metrics(®istry); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - // Spawn a `Worker` instance. - Worker::spawn( - my_primary.authority().clone(), - myself.keypair(), - worker_id, - committee.clone(), - worker_cache.clone(), - latest_protocol_version(), - parameters, - TrivialTransactionValidator, - client.clone(), - batch_store, - metrics, - &mut tx_shutdown, - ); - - // Spawn a network listener to receive our batch's digest. - let mut peer_networks = Vec::new(); - - // Create batches - let batch = batch(&latest_protocol_version()); - let batch_digest = batch.digest(); - - let (tx_await_batch, mut rx_await_batch) = test_utils::test_channel!(CHANNEL_CAPACITY); - let mut mock_primary_server = MockWorkerToPrimary::new(); - mock_primary_server - .expect_report_own_batch() - .withf(move |request| { - let message = request.body(); - message.digest == batch_digest && message.worker_id == worker_id - }) - .times(1) - .returning(move |_| { - tx_await_batch.try_send(()).unwrap(); - Ok(anemo::Response::new(())) - }); - client.set_worker_to_primary_local_handler(Arc::new(mock_primary_server)); - - // Spawn enough workers' listeners to acknowledge our batches. - for worker in fixture.authorities().skip(1).map(|a| a.worker(worker_id)) { - let mut mock_server = MockWorkerToWorker::new(); - mock_server - .expect_report_batch() - .returning(|_| Ok(anemo::Response::new(()))); - let routes = anemo::Router::new().add_rpc_service(WorkerToWorkerServer::new(mock_server)); - peer_networks.push(worker.new_network(routes)); - } - - // Wait till other services have been able to start up - tokio::task::yield_now().await; - // Send enough transactions to create a batch. 
- let address = worker_cache - .worker(&authority_public_key, &worker_id) - .unwrap() - .transactions; - let client = LocalNarwhalClient::get_global(&address).unwrap().load(); - - let join_handle = tokio::task::spawn(async move { - let mut fut_list = FuturesOrdered::new(); - for txn in batch.transactions() { - // Calls to submit_transaction are now blocking, so we need to drive them - // all at the same time, rather than sequentially. - let inner_client = client.clone(); - fut_list.push_back(async move { - inner_client - .submit_transactions(vec![txn.clone()]) - .await - .unwrap(); - }); - } - - // Drive all sending in parallel. - while fut_list.next().await.is_some() {} - }); - - // Ensure the primary received the batch's digest (ie. it did not panic). - rx_await_batch.recv().await.unwrap(); - - // Ensure sending ended. - assert!(join_handle.await.is_ok()); -} diff --git a/narwhal/worker/src/transactions_server.rs b/narwhal/worker/src/transactions_server.rs deleted file mode 100644 index 0790083001c44..0000000000000 --- a/narwhal/worker/src/transactions_server.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -use crate::client::LocalNarwhalClient; -use crate::metrics::WorkerEndpointMetrics; -use crate::TransactionValidator; -use async_trait::async_trait; -use futures::stream::FuturesUnordered; -use futures::StreamExt; -use mysten_metrics::metered_channel::Sender; -use mysten_metrics::{monitored_scope, spawn_logged_monitored_task}; -use mysten_network::server::Server; -use mysten_network::Multiaddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::task::JoinHandle; -use tokio::time::{sleep, timeout}; -use tonic::{Request, Response, Status}; -use tracing::{error, info, warn}; -use types::{ - ConditionalBroadcastReceiver, Empty, Transaction, TransactionProto, Transactions, - TransactionsServer, TxResponse, -}; - -pub struct TxServer { - address: Multiaddr, - rx_shutdown: ConditionalBroadcastReceiver, - endpoint_metrics: WorkerEndpointMetrics, - local_client: Arc, - validator: V, -} - -impl TxServer { - #[must_use] - pub fn spawn( - address: Multiaddr, - rx_shutdown: ConditionalBroadcastReceiver, - endpoint_metrics: WorkerEndpointMetrics, - tx_batch_maker: Sender<(Vec, TxResponse)>, - validator: V, - ) -> JoinHandle<()> { - // create and initialize local Narwhal client. 
- let local_client = LocalNarwhalClient::new(tx_batch_maker); - LocalNarwhalClient::set_global(address.clone(), local_client.clone()); - - spawn_logged_monitored_task!( - Self { - address, - rx_shutdown, - endpoint_metrics, - local_client, - validator, - } - .run(), - "TxServer" - ) - } - - async fn run(mut self) { - const MAX_RETRIES: usize = 10; - const RETRY_BACKOFF: Duration = Duration::from_millis(1_000); - const GRACEFUL_SHUTDOWN_DURATION: Duration = Duration::from_millis(2_000); - - // create the handler - let tx_handler = TxReceiverHandler { - local_client: self.local_client.clone(), - validator: self.validator, - }; - - // now create the server - let mut retries = MAX_RETRIES; - let mut server: Server; - - loop { - match mysten_network::config::Config::new() - .server_builder_with_metrics(self.endpoint_metrics.clone()) - .add_service(TransactionsServer::new(tx_handler.clone())) - .bind(&self.address) - .await - { - Ok(s) => { - server = s; - break; - } - Err(err) => { - retries -= 1; - if retries == 0 { - panic!( - "Couldn't boot transactions server, permanently failed: {}", - err - ); - } - - error!( - "Couldn't boot transactions server at try {}, will wait {}s and retry: {}", - retries, - RETRY_BACKOFF.as_secs_f64(), - err - ); - - sleep(RETRY_BACKOFF).await; - } - } - } - - let shutdown_handle = server.take_cancel_handle().unwrap(); - - let server_handle = spawn_logged_monitored_task!(server.serve()); - - // wait to receive a shutdown signal - let _ = self.rx_shutdown.receiver.recv().await; - - // once do just gracefully signal the node to shutdown - shutdown_handle.send(()).unwrap(); - - // now wait until the handle completes or timeout if it takes long time - match timeout(GRACEFUL_SHUTDOWN_DURATION, server_handle).await { - Ok(_) => { - info!("Successfully shutting down gracefully transactions server"); - } - Err(err) => { - warn!( - "Time out while waiting to gracefully shutdown transactions server: {}", - err - ) - } - } - } -} - -/// Defines how 
the network receiver handles incoming transactions. -#[derive(Clone)] -pub(crate) struct TxReceiverHandler { - pub(crate) local_client: Arc, - pub(crate) validator: V, -} - -#[async_trait] -impl Transactions for TxReceiverHandler { - async fn submit_transaction( - &self, - request: Request, - ) -> Result, Status> { - let _scope = monitored_scope("SubmitTransaction"); - let transactions = request.into_inner().transactions; - - let validate_scope = monitored_scope("SubmitTransaction_ValidateTx"); - for transaction in &transactions { - if self.validator.validate(transaction.as_ref()).is_err() { - return Err(Status::invalid_argument("Invalid transaction")); - } - } - drop(validate_scope); - - // Send the transaction to Narwhal via the local client. - let submit_scope = monitored_scope("SubmitTransaction_SubmitTx"); - self.local_client - .submit_transactions(transactions.iter().map(|x| x.to_vec()).collect()) - .await - .map_err(|e| Status::internal(e.to_string()))?; - drop(submit_scope); - Ok(Response::new(Empty {})) - } - - async fn submit_transaction_stream( - &self, - request: Request>, - ) -> Result, Status> { - let mut transactions = request.into_inner(); - let mut requests = FuturesUnordered::new(); - - let _scope = monitored_scope("SubmitTransactionStream"); - while let Some(Ok(request)) = transactions.next().await { - let num_txns = request.transactions.len(); - if num_txns != 1 { - return Err(Status::invalid_argument(format!( - "Stream contains an invalid number of transactions: {num_txns}" - ))); - } - let txn = &request.transactions[0]; - let validate_scope = monitored_scope("SubmitTransactionStream_ValidateTx"); - if let Err(err) = self.validator.validate(txn.as_ref()) { - // If the transaction is invalid (often cryptographically), better to drop the client - return Err(Status::invalid_argument(format!( - "Stream contains an invalid transaction {err}" - ))); - } - drop(validate_scope); - // Send the transaction to Narwhal via the local client. 
- // Note that here we do not wait for a response because this would - // mean that we process only a single message from this stream at a - // time. Instead we gather them and resolve them once the stream is over. - let submit_scope = monitored_scope("SubmitTransactionStream_SubmitTx"); - requests.push(self.local_client.submit_transactions(vec![txn.to_vec()])); - drop(submit_scope); - } - - while let Some(result) = requests.next().await { - if let Err(e) = result { - return Err(Status::internal(e.to_string())); - } - } - - Ok(Response::new(Empty {})) - } -} diff --git a/narwhal/worker/src/worker.rs b/narwhal/worker/src/worker.rs index 63064a1b6c925..b38b80a755623 100644 --- a/narwhal/worker/src/worker.rs +++ b/narwhal/worker/src/worker.rs @@ -41,15 +41,10 @@ use types::{ PrimaryToWorkerServer, WorkerToWorkerServer, }; -#[cfg(test)] -#[path = "tests/worker_tests.rs"] -pub mod worker_tests; - /// The default channel capacity for each channel of the worker. pub const CHANNEL_CAPACITY: usize = 1_000; use crate::metrics::{Metrics, WorkerEndpointMetrics, WorkerMetrics}; -use crate::transactions_server::TxServer; pub struct Worker { /// This authority. @@ -440,19 +435,20 @@ impl Worker { } /// Spawn all tasks responsible to handle clients transactions. + // TODO: finish deleting this. It's partially deleted already and may not work right. 
fn handle_clients_transactions( &self, mut shutdown_receivers: Vec, node_metrics: Arc, channel_metrics: Arc, - endpoint_metrics: WorkerEndpointMetrics, - validator: impl TransactionValidator, + _endpoint_metrics: WorkerEndpointMetrics, + _validator: impl TransactionValidator, client: NetworkClient, network: anemo::Network, ) -> Vec> { info!("Starting handler for transactions"); - let (tx_batch_maker, rx_batch_maker) = channel_with_total( + let (_tx_batch_maker, rx_batch_maker) = channel_with_total( CHANNEL_CAPACITY, &channel_metrics.tx_batch_maker, &channel_metrics.tx_batch_maker_total, @@ -476,14 +472,6 @@ impl Worker { }) .unwrap_or(address); - let tx_server_handle = TxServer::spawn( - address.clone(), - shutdown_receivers.pop().unwrap(), - endpoint_metrics, - tx_batch_maker, - validator, - ); - // The transactions are sent to the `BatchMaker` that assembles them into batches. It then broadcasts // (in a reliable manner) the batches to all other workers that share the same `id` as us. Finally, it // gathers the 'cancel handlers' of the messages and send them to the `QuorumWaiter`. 
@@ -518,6 +506,6 @@ impl Worker { self.id, address ); - vec![batch_maker_handle, quorum_waiter_handle, tx_server_handle] + vec![batch_maker_handle, quorum_waiter_handle] } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 04a14ddddf926..2755915c29141 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -668,6 +668,9 @@ importers: '@mysten/sui': specifier: workspace:* version: link:../../sdk/typescript + '@noble/hashes': + specifier: ^1.4.0 + version: 1.4.0 '@radix-ui/react-dialog': specifier: ^1.1.1 version: 1.1.1(@types/react-dom@18.3.0)(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -729,6 +732,9 @@ importers: '@tsconfig/docusaurus': specifier: ^2.0.3 version: 2.0.3 + '@types/node': + specifier: ^20.14.10 + version: 20.14.10 '@types/react': specifier: ^18.3.3 version: 18.3.3 @@ -1009,7 +1015,7 @@ importers: dependencies: '@mysten/dapp-kit': specifier: ^0.14.25 - version: 0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3) + version: 0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(babel-plugin-macros@3.1.0)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3) '@mysten/sui': specifier: ^1.12.0 version: 1.12.0(typescript@5.5.3) @@ -1579,6 +1585,37 @@ importers: specifier: ^7.2.0 version: 7.2.0 + sdk/kms: + dependencies: + '@mysten/sui': + specifier: workspace:* + version: link:../typescript + '@noble/curves': + specifier: ^1.4.2 + version: 1.6.0 + '@noble/hashes': + specifier: ^1.4.0 + version: 1.5.0 + asn1-ts: + specifier: ^8.0.2 + version: 8.0.2 + aws4fetch: + specifier: ^1.0.20 + version: 1.0.20 + devDependencies: + '@mysten/build-scripts': + specifier: workspace:* + version: link:../build-scripts + '@types/node': + specifier: ^20.14.10 + version: 20.14.10 + typescript: + specifier: ^5.5.3 + version: 5.5.3 + vitest: + specifier: ^2.0.1 + 
version: 2.0.1(@types/node@20.14.10)(happy-dom@14.12.3)(jsdom@24.1.0)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1) + sdk/ledgerjs-hw-app-sui: dependencies: '@ledgerhq/hw-transport': @@ -4300,74 +4337,92 @@ packages: nanostores: ^0.9.0 || ^0.10.0 react: '>=18.0.0' - '@napi-rs/simple-git-android-arm-eabi@0.1.16': - resolution: {integrity: sha512-dbrCL0Pl5KZG7x7tXdtVsA5CO6At5ohDX3myf5xIYn9kN4jDFxsocl8bNt6Vb/hZQoJd8fI+k5VlJt+rFhbdVw==} + '@napi-rs/simple-git-android-arm-eabi@0.1.19': + resolution: {integrity: sha512-XryEH/hadZ4Duk/HS/HC/cA1j0RHmqUGey3MsCf65ZS0VrWMqChXM/xlTPWuY5jfCc/rPubHaqI7DZlbexnX/g==} engines: {node: '>= 10'} cpu: [arm] os: [android] - '@napi-rs/simple-git-android-arm64@0.1.16': - resolution: {integrity: sha512-xYz+TW5J09iK8SuTAKK2D5MMIsBUXVSs8nYp7HcMi8q6FCRO7yJj96YfP9PvKsc/k64hOyqGmL5DhCzY9Cu1FQ==} + '@napi-rs/simple-git-android-arm64@0.1.19': + resolution: {integrity: sha512-ZQ0cPvY6nV9p7zrR9ZPo7hQBkDAcY/CHj3BjYNhykeUCiSNCrhvwX+WEeg5on8M1j4d5jcI/cwVG2FslfiByUg==} engines: {node: '>= 10'} cpu: [arm64] os: [android] - '@napi-rs/simple-git-darwin-arm64@0.1.16': - resolution: {integrity: sha512-XfgsYqxhUE022MJobeiX563TJqyQyX4FmYCnqrtJwAfivESVeAJiH6bQIum8dDEYMHXCsG7nL8Ok0Dp8k2m42g==} + '@napi-rs/simple-git-darwin-arm64@0.1.19': + resolution: {integrity: sha512-viZB5TYgjA1vH+QluhxZo0WKro3xBA+1xSzYx8mcxUMO5gnAoUMwXn0ZO/6Zy6pai+aGae+cj6XihGnrBRu3Pg==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@napi-rs/simple-git-darwin-x64@0.1.16': - resolution: {integrity: sha512-tkEVBhD6vgRCbeWsaAQqM3bTfpIVGeitamPPRVSbsq8qgzJ5Dx6ZedH27R7KSsA/uao7mZ3dsrNLXbu1Wy5MzA==} + '@napi-rs/simple-git-darwin-x64@0.1.19': + resolution: {integrity: sha512-6dNkzSNUV5X9rsVYQbpZLyJu4Gtkl2vNJ3abBXHX/Etk0ILG5ZasO3ncznIANZQpqcbn/QPHr49J2QYAXGoKJA==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.16': - resolution: {integrity: sha512-R6VAyNnp/yRaT7DV1Ao3r67SqTWDa+fNq2LrNy0Z8gXk2wB9ZKlrxFtLPE1WSpWknWtyRDLpRlsorh7Evk7+7w==} + 
'@napi-rs/simple-git-freebsd-x64@0.1.19': + resolution: {integrity: sha512-sB9krVIchzd20FjI2ZZ8FDsTSsXLBdnwJ6CpeVyrhXHnoszfcqxt49ocZHujAS9lMpXq7i2Nv1EXJmCy4KdhwA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.19': + resolution: {integrity: sha512-6HPn09lr9N1n5/XKfP8Np53g4fEXVxOFqNkS6rTH3Rm1lZHdazTRH62RggXLTguZwjcE+MvOLvoTIoR5kAS8+g==} engines: {node: '>= 10'} cpu: [arm] os: [linux] - '@napi-rs/simple-git-linux-arm64-gnu@0.1.16': - resolution: {integrity: sha512-LAGI0opFKw/HBMCV2qIBK3uWSEW9h4xd2ireZKLJy8DBPymX6NrWIamuxYNyCuACnFdPRxR4LaRFy4J5ZwuMdw==} + '@napi-rs/simple-git-linux-arm64-gnu@0.1.19': + resolution: {integrity: sha512-G0gISckt4cVDp3oh5Z6PV3GHJrJO6Z8bIS+9xA7vTtKdqB1i5y0n3cSFLlzQciLzhr+CajFD27doW4lEyErQ/Q==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/simple-git-linux-arm64-musl@0.1.16': - resolution: {integrity: sha512-I57Ph0F0Yn2KW93ep+V1EzKhACqX0x49vvSiapqIsdDA2PifdEWLc1LJarBolmK7NKoPqKmf6lAKKO9lhiZzkg==} + '@napi-rs/simple-git-linux-arm64-musl@0.1.19': + resolution: {integrity: sha512-OwTRF+H4IZYxmDFRi1IrLMfqbdIpvHeYbJl2X94NVsLVOY+3NUHvEzL3fYaVx5urBaMnIK0DD3wZLbcueWvxbA==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/simple-git-linux-x64-gnu@0.1.16': - resolution: {integrity: sha512-AZYYFY2V7hlcQASPEOWyOa3e1skzTct9QPzz0LiDM3f/hCFY/wBaU2M6NC5iG3d2Kr38heuyFS/+JqxLm5WaKA==} + '@napi-rs/simple-git-linux-powerpc64le-gnu@0.1.19': + resolution: {integrity: sha512-p7zuNNVyzpRvkCt2RIGv9FX/WPcPbZ6/FRUgUTZkA2WU33mrbvNqSi4AOqCCl6mBvEd+EOw5NU4lS9ORRJvAEg==} + engines: {node: '>= 10'} + cpu: [powerpc64le] + os: [linux] + + '@napi-rs/simple-git-linux-s390x-gnu@0.1.19': + resolution: {integrity: sha512-6N2vwJUPLiak8GLrS0a3is0gSb0UwI2CHOOqtvQxPmv+JVI8kn3vKiUscsktdDb0wGEPeZ8PvZs0y8UWix7K4g==} + engines: {node: '>= 10'} + cpu: [s390x] + os: [linux] + + '@napi-rs/simple-git-linux-x64-gnu@0.1.19': + resolution: {integrity: 
sha512-61YfeO1J13WK7MalLgP3QlV6of2rWnVw1aqxWkAgy/lGxoOFSJ4Wid6ANVCEZk4tJpPX/XNeneqkUz5xpeb2Cw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/simple-git-linux-x64-musl@0.1.16': - resolution: {integrity: sha512-9TyMcYSBJwjT8jwjY9m24BZbu7ozyWTjsmYBYNtK3B0Um1Ov6jthSNneLVvouQ6x+k3Ow+00TiFh6bvmT00r8g==} + '@napi-rs/simple-git-linux-x64-musl@0.1.19': + resolution: {integrity: sha512-cCTWNpMJnN3PrUBItWcs3dQKCydsIasbrS3laMzq8k7OzF93Zrp2LWDTPlLCO9brbBVpBzy2Qk5Xg9uAfe/Ukw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/simple-git-win32-arm64-msvc@0.1.16': - resolution: {integrity: sha512-uslJ1WuAHCYJWui6xjsyT47SjX6KOHDtClmNO8hqKz1pmDSNY7AjyUY8HxvD1lK9bDnWwc4JYhikS9cxCqHybw==} + '@napi-rs/simple-git-win32-arm64-msvc@0.1.19': + resolution: {integrity: sha512-sWavb1BjeLKKBA+PbTsRSSzVNfb7V/dOpaJvkgR5d2kWFn/AHmCZHSSj/3nyZdYf0BdDC+DIvqk3daAEZ6QMVw==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@napi-rs/simple-git-win32-x64-msvc@0.1.16': - resolution: {integrity: sha512-SoEaVeCZCDF1MP+M9bMSXsZWgEjk4On9GWADO5JOulvzR1bKjk0s9PMHwe/YztR9F0sJzrCxwtvBZowhSJsQPg==} + '@napi-rs/simple-git-win32-x64-msvc@0.1.19': + resolution: {integrity: sha512-FmNuPoK4+qwaSCkp8lm3sJlrxk374enW+zCE5ZksXlZzj/9BDJAULJb5QUJ7o9Y8A/G+d8LkdQLPBE2Jaxe5XA==} engines: {node: '>= 10'} cpu: [x64] os: [win32] - '@napi-rs/simple-git@0.1.16': - resolution: {integrity: sha512-C5wRPw9waqL2jk3jEDeJv+f7ScuO3N0a39HVdyFLkwKxHH4Sya4ZbzZsu2JLi6eEqe7RuHipHL6mC7B2OfYZZw==} + '@napi-rs/simple-git@0.1.19': + resolution: {integrity: sha512-jMxvwzkKzd3cXo2EB9GM2ic0eYo2rP/BS6gJt6HnWbsDO1O8GSD4k7o2Cpr2YERtMpGF/MGcDfsfj2EbQPtrXw==} engines: {node: '>= 10'} '@ndelangen/get-tarball@3.0.9': @@ -4433,10 +4488,18 @@ packages: '@noble/curves@1.4.2': resolution: {integrity: sha512-TavHr8qycMChk8UwMld0ZDRvatedkzWfH8IiaeGCfymOP5i0hSCozz9vHOL0nkwk7HRMlFnAiKpS2jrUmSybcw==} + '@noble/curves@1.6.0': + resolution: {integrity: 
sha512-TlaHRXDehJuRNR9TfZDNQ45mMEd5dwUwmicsafcIX4SsNiqnCHKjE/1alYPd/lDRVhxdhUAlv8uEhMCI5zjIJQ==} + engines: {node: ^14.21.3 || >=16} + '@noble/hashes@1.4.0': resolution: {integrity: sha512-V1JJ1WTRUqHHrOSh597hURcMqVKVGL/ea3kv0gSnEdsEZ0/+VyPghM1lMNGc00z7CIQorSvbKpuJkxvuHbvdbg==} engines: {node: '>= 16'} + '@noble/hashes@1.5.0': + resolution: {integrity: sha512-1j6kQFb7QRru7eKN3ZDvRcP13rugwdxZqCjbiAVZfIJwgj2A65UmT4TgARXGlXgnRkORLTDTrO19ZErt7+QXgA==} + engines: {node: ^14.21.3 || >=16} + '@nodelib/fs.scandir@2.1.5': resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} engines: {node: '>= 8'} @@ -6597,14 +6660,14 @@ packages: peerDependencies: react: ^18 || ^19 - '@tanstack/react-virtual@3.5.0': - resolution: {integrity: sha512-rtvo7KwuIvqK9zb0VZ5IL7fiJAEnG+0EiFZz8FUOs+2mhGqdGmjKIaT1XU7Zq0eFqL0jonLlhbayJI/J2SA/Bw==} + '@tanstack/react-virtual@3.10.8': + resolution: {integrity: sha512-VbzbVGSsZlQktyLrP5nxE+vE1ZR+U0NFAWPbJLoG2+DKPwd2D7dVICTVIIaYlJqX1ZCEnYDbaOpmMwbsyhBoIA==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 - '@tanstack/virtual-core@3.5.0': - resolution: {integrity: sha512-KnPRCkQTyqhanNC0K63GBG3wA8I+D1fQuVnAvcBF8f13akOKeQp1gSbu6f77zCxhEk727iV5oQnbHLYzHrECLg==} + '@tanstack/virtual-core@3.10.8': + resolution: {integrity: sha512-PBu00mtt95jbKFi6Llk9aik8bnR3tR/oQP1o3TSi+iG//+Q2RTIzCEgKkHG8BB86kxMNW6O8wku+Lmi+QFR6jA==} '@testing-library/dom@10.3.1': resolution: {integrity: sha512-q/WL+vlXMpC0uXDyfsMtc1rmotzLV8Y0gq6q1gfrrDjQeHoeLrqHbxdPvPNAh1i+xuJl7+BezywcXArz7vLqKQ==} @@ -6835,8 +6898,8 @@ packages: '@types/mdast@3.0.15': resolution: {integrity: sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==} - '@types/mdast@4.0.3': - resolution: {integrity: sha512-LsjtqsyF+d2/yFOYaN22dHZI1Cpwkrj+g06G8+qtUKlhovPW89YhqSnfKtMbkgmEtYpH2gydRNULd6y8mciAFg==} + '@types/mdast@4.0.4': + resolution: {integrity: 
sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==} '@types/mdx@2.0.13': resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==} @@ -7708,6 +7771,9 @@ packages: asap@2.0.6: resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} + asn1-ts@8.0.2: + resolution: {integrity: sha512-M9btvRJRhMhPsUFzAfuqkmQPaLLw1KZNl8xtIBpC5fvbAmlpgJcsLKMP/hxKMAUcH52UUTViEQ/cm6/whkYb+Q==} + asn1@0.2.6: resolution: {integrity: sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==} @@ -7741,8 +7807,8 @@ packages: resolution: {integrity: sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==} engines: {node: '>=8'} - astring@1.8.6: - resolution: {integrity: sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==} + astring@1.9.0: + resolution: {integrity: sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==} hasBin: true async-limiter@1.0.1: @@ -7787,6 +7853,9 @@ packages: aws4@1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} + aws4fetch@1.0.20: + resolution: {integrity: sha512-/djoAN709iY65ETD6LKCtyyEI04XIBP5xVvfmNxsEP0uJB5tyaGBztSryRr4HqMStr9R06PisQE7m9zDTXKu6g==} + axe-core@4.9.1: resolution: {integrity: sha512-QbUdXJVTpvUTHU7871ppZkdOLBeGUKBQWHkHrvN2V9IQWGMt61zf3B45BtzjxEJzYuj0JBjBZP/hmYS/R9pmAw==} engines: {node: '>=4'} @@ -8592,8 +8661,8 @@ packages: peerDependencies: cytoscape: ^3.2.0 - cytoscape@3.29.2: - resolution: {integrity: sha512-2G1ycU28Nh7OHT9rkXRLpCDP30MKH1dXJORZuBhtEhEW7pKwgPi77ImqlCWinouyE1PNepIOGZBOrE84DG7LyQ==} + cytoscape@3.30.2: + resolution: {integrity: sha512-oICxQsjW8uSaRmn4UK/jkczKOqTrVqt5/1WL0POiJUT2EKNc9STM4hYFHv917yu55aTBMFNRzymlJhVAiWPCxw==} 
engines: {node: '>=0.10'} d3-array@2.12.1: @@ -8776,8 +8845,8 @@ packages: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} - dayjs@1.11.11: - resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==} + dayjs@1.11.13: + resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==} de-indent@1.0.2: resolution: {integrity: sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==} @@ -9049,8 +9118,8 @@ packages: resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} engines: {node: '>= 4'} - dompurify@3.1.6: - resolution: {integrity: sha512-cTOAhc36AalkjtBpfG6O8JimdTMWNXjiePT2xQH/ppBGi/4uIpmj8eKyIkMJErXWARyINV/sB38yf8JCLF5pbQ==} + dompurify@3.1.7: + resolution: {integrity: sha512-VaTstWtsneJY8xzy7DekmYWEOZcmzIe3Qb3zPd4STve1OBTa+e+WmS1ITQec1fZYXI3HCsOZZiSMpG6oxoWMWQ==} domutils@2.8.0: resolution: {integrity: sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==} @@ -9800,8 +9869,8 @@ packages: resolution: {integrity: sha512-45eNySEs7n692jLN+eHQ6zvC9e1cqu9Dq1PpDHTcWRri2HFEs8is8Anmp1RcIhYxA5TZYD6RuESG2jdj6nkDJQ==} engines: {node: '>=0.4.0'} - focus-visible@5.2.0: - resolution: {integrity: sha512-Rwix9pBtC1Nuy5wysTmKy+UjbDJpIfg8eHjw0rjZ1mX4GNLz1Bmd16uDpI3Gk1i70Fgcs8Csg2lPm8HULFg9DQ==} + focus-visible@5.2.1: + resolution: {integrity: sha512-8Bx950VD1bWTQJEH/AM6SpEk+SU55aVnp4Ujhuuxy3eMEBCRwBnTBnVXr9YAPvZL3/CNjCa8u4IWfNmEO53whA==} follow-redirects@1.15.6: resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==} @@ -10285,8 +10354,8 @@ packages: hast-util-from-html-isomorphic@2.0.0: resolution: {integrity: 
sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==} - hast-util-from-html@2.0.1: - resolution: {integrity: sha512-RXQBLMl9kjKVNkJTIO6bZyb2n+cUH8LFaSSzo82jiLT6Tfc+Pt7VQCS+/h3YwG4jaNE2TA2sdJisGWR+aJrp0g==} + hast-util-from-html@2.0.3: + resolution: {integrity: sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==} hast-util-from-parse5@8.0.1: resolution: {integrity: sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==} @@ -10297,8 +10366,8 @@ packages: hast-util-parse-selector@4.0.0: resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==} - hast-util-raw@9.0.3: - resolution: {integrity: sha512-ICWvVOF2fq4+7CMmtCPD5CM4QKjPbHpPotE6+8tDooV0ZuyJVUzHsrNX+O5NaRbieTf0F7FfeBOMAwi6Td0+yQ==} + hast-util-raw@9.0.4: + resolution: {integrity: sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==} hast-util-to-estree@2.3.3: resolution: {integrity: sha512-ihhPIUPxN0v0w6M5+IiAZZrn0LH2uZomeWwhn7uP7avZC6TE7lIiEh2yBMPr5+zi1aUCXq6VoYRgs2Bw9xmycQ==} @@ -11071,8 +11140,8 @@ packages: engines: {node: '>=6'} hasBin: true - jsonc-parser@3.2.1: - resolution: {integrity: sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==} + jsonc-parser@3.3.1: + resolution: {integrity: sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==} jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} @@ -11104,8 +11173,8 @@ packages: jws@3.2.2: resolution: {integrity: sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==} - katex@0.16.10: - resolution: {integrity: sha512-ZiqaC04tp2O5utMsl2TEZTXxa6WSC4yo0fv5ML++D3QZv/vx2Mct0mTlRx3O+uUkjfuAgOkzsCmq5MiUEsDDdA==} + katex@0.16.11: + 
resolution: {integrity: sha512-RQrI8rlHY92OLf3rho/Ts8i/XvjgguEjOkO1BEXcU3N8BqPpSzBNwV/G0Ukr+P/l3ivvJUE/Fa/CwbS6HesGNQ==} hasBin: true keyv@4.5.3: @@ -11408,8 +11477,8 @@ packages: resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} hasBin: true - markdown-table@3.0.3: - resolution: {integrity: sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==} + markdown-table@3.0.4: + resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} markdown-to-jsx@7.3.2: resolution: {integrity: sha512-B+28F5ucp83aQm+OxNrPkS8z0tMKaeHiy0lHJs3LqCyDQFtWuenaIrkaVTgAm1pf1AU85LXltva86hlaT17i8Q==} @@ -11474,8 +11543,8 @@ packages: mdast-util-to-hast@12.3.0: resolution: {integrity: sha512-pits93r8PhnIoU4Vy9bjW39M2jJ6/tdHyja9rrot9uujkN7UTU9SDnE6WNJz/IGyQk3XHX6yNNtrBH6cQzm8Hw==} - mdast-util-to-hast@13.1.0: - resolution: {integrity: sha512-/e2l/6+OdGp/FB+ctrJ9Avz71AN/GRH3oi/3KAx/kMnoUsD6q0woXlDT8lLEeViVKE7oZxE7RXzvO3T8kF2/sA==} + mdast-util-to-hast@13.2.0: + resolution: {integrity: sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==} mdast-util-to-markdown@1.5.0: resolution: {integrity: sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==} @@ -11526,8 +11595,8 @@ packages: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} - mermaid@10.9.0: - resolution: {integrity: sha512-swZju0hFox/B/qoLKK0rOxxgh8Cf7rJSfAUc1u8fezVihYMvrJAS45GzAxTVf4Q+xn9uMgitBcmWk7nWGXOs/g==} + mermaid@10.9.3: + resolution: {integrity: sha512-V80X1isSEvAewIL3xhmz/rVmc27CVljcsbWxkxlWJWY/1kQa4XOABqpDl2qQLGKzpKm6WbTfUEKImBlUfFYArw==} meros@1.3.0: resolution: {integrity: sha512-2BNGOimxEz5hmjUG2FwoxCt5HN7BXdaWyFqEwxPTrJzVdABtrL4TiHTcsWSFAxPQ/tOnEaQEJh3qWq71QRMY+w==} @@ -11888,8 
+11957,8 @@ packages: react: '>=16.x <=18.x' react-dom: '>=16.x <=18.x' - next-seo@6.5.0: - resolution: {integrity: sha512-MfzUeWTN/x/rsKp/1n0213eojO97lIl0unxqbeCY+6pAucViHDA8GSLRRcXpgjsSmBxfCFdfpu7LXbt4ANQoNQ==} + next-seo@6.6.0: + resolution: {integrity: sha512-0VSted/W6XNtgAtH3D+BZrMLLudqfm0D5DYNJRXHcDgan/1ZF1tDFIsWrmvQlYngALyphPfZ3ZdOqlKpKdvG6w==} peerDependencies: next: ^8.1.1-canary.54 || >=9.0.0 react: '>=16.0.0' @@ -13314,8 +13383,8 @@ packages: resolution: {integrity: sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==} hasBin: true - rehype-katex@7.0.0: - resolution: {integrity: sha512-h8FPkGE00r2XKU+/acgqwWUlyzve1IiOKwsEkg4pDL3k48PiE0Pt+/uLtVHDVkN1yA4iurZN6UES8ivHVEQV6Q==} + rehype-katex@7.0.1: + resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} rehype-pretty-code@0.9.11: resolution: {integrity: sha512-Eq90eCYXQJISktfRZ8PPtwc5SUyH6fJcxS8XOMnHPUQZBtC6RYo67gGlley9X2nR8vlniPj0/7oCDEYHKQa/oA==} @@ -13745,8 +13814,8 @@ packages: sonic-boom@3.3.0: resolution: {integrity: sha512-LYxp34KlZ1a2Jb8ZQgFCK3niIHzibdwtwNUWKg0qQRzsDoJ3Gfgkf8KdBTFU3SkejDEIlWwnSnpVdOZIhFMl/g==} - sort-keys@5.0.0: - resolution: {integrity: sha512-Pdz01AvCAottHTPQGzndktFNdbRA75BgOfeT1hH+AMnJFv8lynkPi42rfeEhpx1saTEI3YNMWxfqu0sFD1G8pw==} + sort-keys@5.1.0: + resolution: {integrity: sha512-aSbHV0DaBcr7u0PVHXzM6NbZNAtrr9sF6+Qfs9UUVG7Ll3jQ6hHi8F/xqIIcn2rvIVbr0v/2zyjSdwSV47AgLQ==} engines: {node: '>=12'} source-map-js@1.2.0: @@ -14006,8 +14075,8 @@ packages: babel-plugin-macros: optional: true - stylis@4.3.2: - resolution: {integrity: sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==} + stylis@4.3.4: + resolution: {integrity: sha512-osIBl6BGUmSfDkyH2mB7EFvCJntXDrLhKjHTRj/rK6xLH0yuPrHULDRQzKokSOD4VoorhtKpfcfW1GAntu8now==} sucrase@3.35.0: resolution: {integrity: 
sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==} @@ -14840,8 +14909,8 @@ packages: resolution: {integrity: sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==} engines: {'0': node >=0.6.0} - vfile-location@5.0.2: - resolution: {integrity: sha512-NXPYyxyBSH7zB5U6+3uDdd6Nybz6o6/od9rk8bp9H8GR3L+cm/fC0uUTbqBmUTnMCUDslAGBOIKNfvvb+gGlDg==} + vfile-location@5.0.3: + resolution: {integrity: sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==} vfile-matter@3.0.1: resolution: {integrity: sha512-CAAIDwnh6ZdtrqAuxdElUqQRQDQgbbIrYtDYI8gCjXS1qQ+1XdLoK8FIZWxJwn0/I+BkSSZpar3SOgjemQz4fg==} @@ -14855,8 +14924,8 @@ packages: vfile@5.3.7: resolution: {integrity: sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==} - vfile@6.0.1: - resolution: {integrity: sha512-1bYqc7pt6NIADBJ98UiG0Bn/CHIVOoZ/IyEkqIruLg0mE1BKzkOXY2D6CSqQIcKqgadppE5lrxgWXJmXd7zZJw==} + vfile@6.0.3: + resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} vite-node@1.6.0: resolution: {integrity: sha512-de6HJgzC+TFzOu0NTC4RAIsyf/DY/ibWDYQUcuEA84EMHhcefTUGkjFHKKEJhQN4A+6I0u++kr3l36ZF2d7XRw==} @@ -15422,26 +15491,26 @@ snapshots: dependencies: '@amplitude/types': 1.10.2 '@amplitude/utils': 1.10.2 - tslib: 2.6.3 + tslib: 2.7.0 '@amplitude/node@1.10.2': dependencies: '@amplitude/identify': 1.10.2 '@amplitude/types': 1.10.2 '@amplitude/utils': 1.10.2 - tslib: 2.6.3 + tslib: 2.7.0 '@amplitude/plugin-page-view-tracking-browser@0.8.0': dependencies: '@amplitude/analytics-client-common': 0.7.0 '@amplitude/analytics-types': 0.20.0 - tslib: 2.6.0 + tslib: 2.7.0 '@amplitude/plugin-web-attribution-browser@0.7.0': dependencies: '@amplitude/analytics-client-common': 0.7.0 '@amplitude/analytics-types': 0.20.0 - tslib: 2.6.0 + tslib: 2.7.0 '@amplitude/types@1.10.2': {} @@ -15595,7 +15664,7 @@ 
snapshots: '@babel/core': 7.24.7 '@babel/helper-compilation-targets': 7.24.7 '@babel/helper-plugin-utils': 7.24.7 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) lodash.debounce: 4.0.8 resolve: 1.22.8 semver: 7.6.2 @@ -15607,7 +15676,7 @@ snapshots: '@babel/core': 7.24.7 '@babel/helper-compilation-targets': 7.24.7 '@babel/helper-plugin-utils': 7.24.7 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) lodash.debounce: 4.0.8 resolve: 1.22.8 transitivePeerDependencies: @@ -16660,7 +16729,7 @@ snapshots: '@babel/helper-split-export-declaration': 7.24.7 '@babel/parser': 7.24.7 '@babel/types': 7.24.7 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globals: 11.12.0 transitivePeerDependencies: - supports-color @@ -17119,7 +17188,7 @@ snapshots: '@devicefarmer/adbkit-monkey': 1.2.1 bluebird: 3.7.2 commander: 9.5.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) node-forge: 1.3.1 split: 1.0.1 transitivePeerDependencies: @@ -17559,7 +17628,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.6.3 transitivePeerDependencies: - encoding - supports-color @@ -17570,7 +17639,7 @@ snapshots: '@graphql-tools/schema': 10.0.2(graphql@16.9.0) '@graphql-tools/utils': 10.0.13(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.6.3 '@graphql-codegen/gql-tag-operations@4.0.4(graphql@16.9.0)': dependencies: @@ -17592,7 +17661,7 @@ snapshots: graphql: 16.9.0 import-from: 4.0.0 lodash: 4.17.21 - tslib: 2.6.2 + tslib: 2.6.3 '@graphql-codegen/plugin-helpers@5.0.4(graphql@16.9.0)': dependencies: @@ -17698,7 +17767,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) '@whatwg-node/fetch': 0.9.16 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 transitivePeerDependencies: - encoding @@ -17716,7 +17785,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 
transitivePeerDependencies: - supports-color @@ -17791,7 +17860,7 @@ snapshots: graphql: 16.9.0 is-glob: 4.0.3 micromatch: 4.0.7 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 transitivePeerDependencies: - supports-color @@ -17804,7 +17873,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) '@whatwg-node/fetch': 0.9.16 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 value-or-promise: 1.0.12 transitivePeerDependencies: - '@types/node' @@ -17817,7 +17886,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 '@graphql-tools/graphql-tag-pluck@8.2.0(graphql@16.9.0)': @@ -17845,7 +17914,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 '@graphql-tools/load@8.0.1(graphql@16.9.0)': @@ -17854,13 +17923,13 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) graphql: 16.9.0 p-limit: 3.1.0 - tslib: 2.6.2 + tslib: 2.7.0 '@graphql-tools/merge@9.0.1(graphql@16.9.0)': dependencies: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 '@graphql-tools/optimize@2.0.0(graphql@16.9.0)': dependencies: @@ -17875,7 +17944,7 @@ snapshots: '@types/json-stable-stringify': 1.0.36 '@whatwg-node/fetch': 0.9.16 chalk: 4.1.2 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) dotenv: 16.4.1 graphql: 16.9.0 graphql-request: 6.1.0(graphql@16.9.0) @@ -17886,7 +17955,7 @@ snapshots: json-stable-stringify: 1.1.1 lodash: 4.17.21 scuid: 1.1.0 - tslib: 2.6.2 + tslib: 2.7.0 yaml-ast-parser: 0.0.43 transitivePeerDependencies: - '@types/node' @@ -17936,7 +18005,7 @@ snapshots: '@whatwg-node/fetch': 0.9.16 graphql: 16.9.0 isomorphic-ws: 5.0.0(ws@8.18.0) - tslib: 2.6.2 + tslib: 2.7.0 value-or-promise: 1.0.12 ws: 8.18.0 transitivePeerDependencies: @@ -17951,7 +18020,7 @@ snapshots: cross-inspect: 1.0.0 dset: 3.1.3 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 
'@graphql-tools/utils@10.3.1(graphql@16.9.0)': dependencies: @@ -17997,7 +18066,7 @@ snapshots: '@headlessui/react@1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@tanstack/react-virtual': 3.5.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@tanstack/react-virtual': 3.10.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) client-only: 0.0.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -18363,7 +18432,7 @@ snapshots: dependencies: bs58: 6.0.0 - '@mysten/dapp-kit@0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3)': + '@mysten/dapp-kit@0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(babel-plugin-macros@3.1.0)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3)': dependencies: '@mysten/sui': 1.12.0(typescript@5.5.3) '@mysten/wallet-standard': 0.13.7(typescript@5.5.3) @@ -18429,52 +18498,64 @@ snapshots: nanostores: 0.10.3 react: 18.3.1 - '@napi-rs/simple-git-android-arm-eabi@0.1.16': + '@napi-rs/simple-git-android-arm-eabi@0.1.19': + optional: true + + '@napi-rs/simple-git-android-arm64@0.1.19': optional: true - '@napi-rs/simple-git-android-arm64@0.1.16': + '@napi-rs/simple-git-darwin-arm64@0.1.19': optional: true - '@napi-rs/simple-git-darwin-arm64@0.1.16': + '@napi-rs/simple-git-darwin-x64@0.1.19': optional: true - '@napi-rs/simple-git-darwin-x64@0.1.16': + '@napi-rs/simple-git-freebsd-x64@0.1.19': optional: true - '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.16': + '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.19': optional: true - '@napi-rs/simple-git-linux-arm64-gnu@0.1.16': + '@napi-rs/simple-git-linux-arm64-gnu@0.1.19': optional: true - '@napi-rs/simple-git-linux-arm64-musl@0.1.16': + '@napi-rs/simple-git-linux-arm64-musl@0.1.19': optional: true - '@napi-rs/simple-git-linux-x64-gnu@0.1.16': + 
'@napi-rs/simple-git-linux-powerpc64le-gnu@0.1.19': optional: true - '@napi-rs/simple-git-linux-x64-musl@0.1.16': + '@napi-rs/simple-git-linux-s390x-gnu@0.1.19': optional: true - '@napi-rs/simple-git-win32-arm64-msvc@0.1.16': + '@napi-rs/simple-git-linux-x64-gnu@0.1.19': optional: true - '@napi-rs/simple-git-win32-x64-msvc@0.1.16': + '@napi-rs/simple-git-linux-x64-musl@0.1.19': optional: true - '@napi-rs/simple-git@0.1.16': + '@napi-rs/simple-git-win32-arm64-msvc@0.1.19': + optional: true + + '@napi-rs/simple-git-win32-x64-msvc@0.1.19': + optional: true + + '@napi-rs/simple-git@0.1.19': optionalDependencies: - '@napi-rs/simple-git-android-arm-eabi': 0.1.16 - '@napi-rs/simple-git-android-arm64': 0.1.16 - '@napi-rs/simple-git-darwin-arm64': 0.1.16 - '@napi-rs/simple-git-darwin-x64': 0.1.16 - '@napi-rs/simple-git-linux-arm-gnueabihf': 0.1.16 - '@napi-rs/simple-git-linux-arm64-gnu': 0.1.16 - '@napi-rs/simple-git-linux-arm64-musl': 0.1.16 - '@napi-rs/simple-git-linux-x64-gnu': 0.1.16 - '@napi-rs/simple-git-linux-x64-musl': 0.1.16 - '@napi-rs/simple-git-win32-arm64-msvc': 0.1.16 - '@napi-rs/simple-git-win32-x64-msvc': 0.1.16 + '@napi-rs/simple-git-android-arm-eabi': 0.1.19 + '@napi-rs/simple-git-android-arm64': 0.1.19 + '@napi-rs/simple-git-darwin-arm64': 0.1.19 + '@napi-rs/simple-git-darwin-x64': 0.1.19 + '@napi-rs/simple-git-freebsd-x64': 0.1.19 + '@napi-rs/simple-git-linux-arm-gnueabihf': 0.1.19 + '@napi-rs/simple-git-linux-arm64-gnu': 0.1.19 + '@napi-rs/simple-git-linux-arm64-musl': 0.1.19 + '@napi-rs/simple-git-linux-powerpc64le-gnu': 0.1.19 + '@napi-rs/simple-git-linux-s390x-gnu': 0.1.19 + '@napi-rs/simple-git-linux-x64-gnu': 0.1.19 + '@napi-rs/simple-git-linux-x64-musl': 0.1.19 + '@napi-rs/simple-git-win32-arm64-msvc': 0.1.19 + '@napi-rs/simple-git-win32-x64-msvc': 0.1.19 '@ndelangen/get-tarball@3.0.9': dependencies: @@ -18515,8 +18596,14 @@ snapshots: dependencies: '@noble/hashes': 1.4.0 + '@noble/curves@1.6.0': + dependencies: + '@noble/hashes': 1.5.0 + 
'@noble/hashes@1.4.0': {} + '@noble/hashes@1.5.0': {} + '@nodelib/fs.scandir@2.1.5': dependencies: '@nodelib/fs.stat': 2.0.5 @@ -18552,7 +18639,7 @@ snapshots: dependencies: '@oclif/errors': 1.3.6 '@oclif/parser': 3.8.17 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-wsl: 2.2.0 tslib: 2.7.0 @@ -18566,7 +18653,7 @@ snapshots: debug: 4.3.5(supports-color@8.1.1) globby: 11.1.0 is-wsl: 2.2.0 - tslib: 2.6.3 + tslib: 2.7.0 transitivePeerDependencies: - supports-color @@ -18574,7 +18661,7 @@ snapshots: dependencies: '@oclif/errors': 1.3.6 '@oclif/parser': 3.8.17 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-wsl: 2.2.0 tslib: 2.7.0 @@ -18618,7 +18705,7 @@ snapshots: '@oclif/errors': 1.3.6 '@oclif/linewrap': 1.0.0 chalk: 4.1.2 - tslib: 2.6.3 + tslib: 2.7.0 '@oclif/plugin-autocomplete@0.3.0': dependencies: @@ -18693,69 +18780,73 @@ snapshots: '@open-draft/until@2.1.0': {} - '@parcel/bundler-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/bundler-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 '@parcel/graph': 3.2.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/utils': 2.12.0 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/cache@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/cache@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) '@parcel/fs': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/logger': 2.12.0 '@parcel/utils': 2.12.0 lmdb: 2.8.5 + transitivePeerDependencies: + - '@swc/helpers' '@parcel/codeframe@2.12.0': dependencies: chalk: 4.1.2 - 
'@parcel/compressor-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/compressor-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/config-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3)': dependencies: - '@parcel/bundler-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/compressor-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/bundler-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/compressor-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/core': 2.12.0(@swc/helpers@0.5.5) - '@parcel/namer-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/optimizer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/optimizer-htmlnano': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3) - '@parcel/optimizer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/optimizer-svgo': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/namer-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/optimizer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/optimizer-htmlnano': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3) + '@parcel/optimizer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/optimizer-svgo': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/optimizer-swc': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) - '@parcel/packager-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-svg': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-wasm': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/reporter-dev-server': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/resolver-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-browser-hmr': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-react-refresh': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-service-worker': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-babel': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/packager-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-svg': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-wasm': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/reporter-dev-server': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + 
'@parcel/resolver-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-browser-hmr': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-react-refresh': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-service-worker': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-babel': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/transformer-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-json': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-postcss': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-posthtml': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-react-refresh-wrap': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-svg': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/transformer-json': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-postcss': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-posthtml': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-react-refresh-wrap': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-svg': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@swc/helpers' - cssnano @@ -18770,14 +18861,14 @@ snapshots: '@parcel/core@2.12.0(@swc/helpers@0.5.5)': dependencies: '@mischnic/json-sourcemap': 0.1.1 - '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 '@parcel/events': 2.12.0 '@parcel/fs': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/graph': 3.2.0 '@parcel/logger': 2.12.0 '@parcel/package-manager': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/profiler': 2.12.0 '@parcel/rust': 2.12.0 '@parcel/source-map': 2.1.1 @@ -18828,13 +18919,14 @@ snapshots: dependencies: chalk: 4.1.2 - '@parcel/namer-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/namer-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/node-resolver-core@3.3.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': dependencies: @@ -18848,10 +18940,10 @@ snapshots: transitivePeerDependencies: - '@parcel/core' - '@parcel/optimizer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/optimizer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 
2.1.1 '@parcel/utils': 2.12.0 browserslist: 4.23.1 @@ -18859,16 +18951,18 @@ snapshots: nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/optimizer-htmlnano@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3)': + '@parcel/optimizer-htmlnano@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) htmlnano: 2.1.1(postcss@8.4.39)(relateurl@0.2.7)(svgo@2.8.0)(terser@5.31.1)(typescript@5.5.3) nullthrows: 1.1.1 posthtml: 0.16.6 svgo: 2.8.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - cssnano - postcss - purgecss @@ -18878,28 +18972,31 @@ snapshots: - typescript - uncss - '@parcel/optimizer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/optimizer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/utils': 2.12.0 '@parcel/workers': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + transitivePeerDependencies: + - '@swc/helpers' - '@parcel/optimizer-svgo@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/optimizer-svgo@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 svgo: 2.8.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' 
'@parcel/optimizer-swc@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 '@swc/core': 1.6.13(@swc/helpers@0.5.5) @@ -18923,31 +19020,33 @@ snapshots: transitivePeerDependencies: - '@swc/helpers' - '@parcel/packager-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 lightningcss: 1.27.0 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/source-map': 2.1.1 '@parcel/types': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) @@ -18956,33 +19055,38 @@ snapshots: nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 posthtml: 0.16.6 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-wasm@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-wasm@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/plugin@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/plugin@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/profiler@2.12.0': dependencies: @@ -18990,71 +19094,79 @@ snapshots: '@parcel/events': 2.12.0 chrome-trace-event: 1.0.4 - 
'@parcel/reporter-cli@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/reporter-cli@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 chalk: 4.1.2 term-size: 2.2.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/reporter-dev-server@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/reporter-dev-server@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/reporter-tracer@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/reporter-tracer@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 chrome-trace-event: 1.0.4 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/resolver-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/resolver-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/node-resolver-core': 3.3.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - 
'@parcel/runtime-browser-hmr@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-browser-hmr@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/runtime-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/runtime-react-refresh@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-react-refresh@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 react-error-overlay: 6.0.9 react-refresh: 0.9.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/runtime-service-worker@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-service-worker@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/rust@2.12.0': {} @@ -19062,10 +19174,10 @@ snapshots: dependencies: detect-libc: 1.0.3 - 
'@parcel/transformer-babel@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-babel@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 browserslist: 4.23.1 @@ -19074,11 +19186,12 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 browserslist: 4.23.1 @@ -19086,11 +19199,12 @@ snapshots: nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 @@ -19100,20 +19214,23 @@ snapshots: srcset: 4.0.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 '@parcel/workers': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) nullthrows: 1.1.1 + transitivePeerDependencies: + - '@swc/helpers' '@parcel/transformer-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 @@ -19124,17 +19241,18 @@ snapshots: regenerator-runtime: 0.13.11 semver: 7.6.2 - '@parcel/transformer-json@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-json@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) json5: 2.2.3 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-postcss@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-postcss@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/utils': 2.12.0 clone: 2.1.2 @@ -19143,10 +19261,11 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-posthtml@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-posthtml@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 @@ -19155,25 +19274,28 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-react-refresh-wrap@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-react-refresh-wrap@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 react-refresh: 0.9.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 @@ -19182,10 +19304,11 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/types@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 '@parcel/fs': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/package-manager': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) @@ -20461,7 +20584,7 @@ snapshots: '@sentry/core': 7.61.0 '@sentry/types': 7.61.0 '@sentry/utils': 7.61.0 - tslib: 2.6.0 + tslib: 2.7.0 '@sentry/browser@7.59.2': dependencies: @@ -20512,7 +20635,7 @@ snapshots: dependencies: '@sentry/types': 7.61.0 '@sentry/utils': 7.61.0 - tslib: 2.6.0 + tslib: 2.7.0 '@sentry/hub@6.19.7': dependencies: @@ -20579,7 +20702,7 @@ snapshots: '@sentry/utils@7.61.0': dependencies: '@sentry/types': 7.61.0 - tslib: 2.6.0 + tslib: 2.7.0 '@sentry/webpack-plugin@1.20.0': dependencies: @@ -21328,7 +21451,7 @@ snapshots: '@storybook/react-docgen-typescript-plugin@1.0.6--canary.9.0c3f3b7.0(typescript@5.5.3)(webpack@5.92.1(@swc/core@1.6.13(@swc/helpers@0.5.5))(webpack-cli@5.1.4(webpack@5.92.1)))': dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) endent: 2.1.0 find-cache-dir: 3.3.2 flat-cache: 3.2.0 @@ -21694,13 +21817,13 @@ snapshots: '@tanstack/query-core': 5.59.0 react: 18.3.1 - '@tanstack/react-virtual@3.5.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@tanstack/react-virtual@3.10.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@tanstack/virtual-core': 3.5.0 + '@tanstack/virtual-core': 3.10.8 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@tanstack/virtual-core@3.5.0': {} + '@tanstack/virtual-core@3.10.8': {} '@testing-library/dom@10.3.1': dependencies: @@ -21742,7 +21865,7 @@ snapshots: '@theguild/remark-mermaid@0.0.5(react@18.3.1)': dependencies: - mermaid: 10.9.0 + mermaid: 10.9.3 react: 18.3.1 unist-util-visit: 5.0.0 transitivePeerDependencies: @@ -21948,7 +22071,7 @@ snapshots: dependencies: '@types/unist': 2.0.10 - '@types/mdast@4.0.3': + '@types/mdast@4.0.4': dependencies: '@types/unist': 3.0.2 @@ -22093,7 +22216,7 @@ snapshots: '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/type-utils': 
5.62.0(eslint@8.45.0)(typescript@5.5.3) '@typescript-eslint/utils': 5.62.0(eslint@8.45.0)(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 8.45.0 graphemer: 1.4.0 ignore: 5.3.1 @@ -22157,7 +22280,7 @@ snapshots: '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/types': 5.62.0 '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 8.45.0 optionalDependencies: typescript: 5.5.3 @@ -22219,7 +22342,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.5.3) '@typescript-eslint/utils': 5.62.0(eslint@8.45.0)(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 8.45.0 tsutils: 3.21.0(typescript@5.5.3) optionalDependencies: @@ -22243,7 +22366,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 7.16.0(typescript@5.5.3) '@typescript-eslint/utils': 7.16.0(eslint@9.6.0)(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 9.6.0 ts-api-utils: 1.3.0(typescript@5.5.3) optionalDependencies: @@ -22265,7 +22388,7 @@ snapshots: dependencies: '@typescript-eslint/types': 5.33.1 '@typescript-eslint/visitor-keys': 5.33.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-glob: 4.0.3 semver: 7.6.2 @@ -22279,7 +22402,7 @@ snapshots: dependencies: '@typescript-eslint/types': 5.62.0 '@typescript-eslint/visitor-keys': 5.62.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-glob: 4.0.3 semver: 7.6.2 @@ -22322,7 +22445,7 @@ snapshots: dependencies: '@typescript-eslint/types': 8.0.0-alpha.30 '@typescript-eslint/visitor-keys': 8.0.0-alpha.30 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-glob: 4.0.3 minimatch: 9.0.5 @@ -22851,19 +22974,19 @@ snapshots: agent-base@6.0.2: dependencies: - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color agent-base@7.1.0: 
dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color agent-base@7.1.1: dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -22978,11 +23101,11 @@ snapshots: aria-hidden@1.2.3: dependencies: - tslib: 2.6.0 + tslib: 2.7.0 aria-hidden@1.2.4: dependencies: - tslib: 2.6.3 + tslib: 2.7.0 aria-query@4.2.2: dependencies: @@ -23079,6 +23202,10 @@ snapshots: asap@2.0.6: {} + asn1-ts@8.0.2: + dependencies: + tslib: 2.7.0 + asn1@0.2.6: dependencies: safer-buffer: 2.1.2 @@ -23113,7 +23240,7 @@ snapshots: astral-regex@2.0.0: {} - astring@1.8.6: {} + astring@1.9.0: {} async-limiter@1.0.1: {} @@ -23147,6 +23274,8 @@ snapshots: aws4@1.12.0: {} + aws4fetch@1.0.20: {} + axe-core@4.9.1: {} axios@0.26.1: @@ -24113,12 +24242,12 @@ snapshots: csstype@3.1.3: {} - cytoscape-cose-bilkent@4.1.0(cytoscape@3.29.2): + cytoscape-cose-bilkent@4.1.0(cytoscape@3.30.2): dependencies: cose-base: 1.0.3 - cytoscape: 3.29.2 + cytoscape: 3.30.2 - cytoscape@3.29.2: {} + cytoscape@3.30.2: {} d3-array@2.12.1: dependencies: @@ -24331,7 +24460,7 @@ snapshots: dependencies: '@babel/runtime': 7.24.7 - dayjs@1.11.11: {} + dayjs@1.11.13: {} de-indent@1.0.2: {} @@ -24359,9 +24488,11 @@ snapshots: optionalDependencies: supports-color: 8.1.1 - debug@4.3.7: + debug@4.3.7(supports-color@8.1.1): dependencies: ms: 2.1.3 + optionalDependencies: + supports-color: 8.1.1 decamelize@1.2.0: {} @@ -24482,7 +24613,7 @@ snapshots: detect-port@1.5.1: dependencies: address: 1.2.2 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -24550,7 +24681,7 @@ snapshots: dependencies: domelementtype: 2.3.0 - dompurify@3.1.6: {} + dompurify@3.1.7: {} domutils@2.8.0: dependencies: @@ -24819,14 +24950,14 @@ snapshots: esbuild-register@3.4.2(esbuild@0.18.20): dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) esbuild: 0.18.20 transitivePeerDependencies: - 
supports-color esbuild-register@3.5.0(esbuild@0.18.20): dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) esbuild: 0.18.20 transitivePeerDependencies: - supports-color @@ -25227,7 +25358,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.3 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) doctrine: 3.0.0 escape-string-regexp: 4.0.0 eslint-scope: 7.2.2 @@ -25388,7 +25519,7 @@ snapshots: estree-util-to-js@1.2.0: dependencies: '@types/estree-jsx': 1.0.5 - astring: 1.8.6 + astring: 1.9.0 source-map: 0.7.4 estree-util-value-to-estree@1.3.0: @@ -25708,7 +25839,7 @@ snapshots: flow-parser@0.212.0: {} - focus-visible@5.2.0: {} + focus-visible@5.2.1: {} follow-redirects@1.15.6: {} @@ -26175,12 +26306,12 @@ snapshots: graphql-tag@2.12.6(graphql@15.9.0): dependencies: graphql: 15.9.0 - tslib: 2.6.0 + tslib: 2.7.0 graphql-tag@2.12.6(graphql@16.9.0): dependencies: graphql: 16.9.0 - tslib: 2.6.0 + tslib: 2.7.0 graphql-ws@5.14.3(graphql@16.9.0): dependencies: @@ -26278,7 +26409,7 @@ snapshots: hash-obj@4.0.0: dependencies: is-obj: 3.0.0 - sort-keys: 5.0.0 + sort-keys: 5.1.0 type-fest: 1.4.0 hasown@2.0.2: @@ -26295,16 +26426,16 @@ snapshots: dependencies: '@types/hast': 3.0.4 hast-util-from-dom: 5.0.0 - hast-util-from-html: 2.0.1 + hast-util-from-html: 2.0.3 unist-util-remove-position: 5.0.0 - hast-util-from-html@2.0.1: + hast-util-from-html@2.0.3: dependencies: '@types/hast': 3.0.4 devlop: 1.1.0 hast-util-from-parse5: 8.0.1 parse5: 7.1.2 - vfile: 6.0.1 + vfile: 6.0.3 vfile-message: 4.0.2 hast-util-from-parse5@8.0.1: @@ -26314,8 +26445,8 @@ snapshots: devlop: 1.1.0 hastscript: 8.0.0 property-information: 6.5.0 - vfile: 6.0.1 - vfile-location: 5.0.2 + vfile: 6.0.3 + vfile-location: 5.0.3 web-namespaces: 2.0.1 hast-util-is-element@3.0.0: @@ -26326,7 +26457,7 @@ snapshots: dependencies: '@types/hast': 3.0.4 - hast-util-raw@9.0.3: + hast-util-raw@9.0.4: dependencies: '@types/hast': 3.0.4 '@types/unist': 3.0.2 @@ -26334,11 +26465,11 @@ snapshots: 
hast-util-from-parse5: 8.0.1 hast-util-to-parse5: 8.0.0 html-void-elements: 3.0.0 - mdast-util-to-hast: 13.1.0 + mdast-util-to-hast: 13.2.0 parse5: 7.1.2 unist-util-position: 5.0.0 unist-util-visit: 5.0.0 - vfile: 6.0.1 + vfile: 6.0.3 web-namespaces: 2.0.1 zwitch: 2.0.4 @@ -26475,7 +26606,7 @@ snapshots: http-call@5.3.0: dependencies: content-type: 1.0.5 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) is-retry-allowed: 1.2.0 is-stream: 2.0.1 parse-json: 4.0.0 @@ -26494,14 +26625,14 @@ snapshots: http-proxy-agent@7.0.0: dependencies: agent-base: 7.1.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -26519,7 +26650,7 @@ snapshots: https-proxy-agent@4.0.0: dependencies: agent-base: 5.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -26533,14 +26664,14 @@ snapshots: https-proxy-agent@7.0.2: dependencies: agent-base: 7.1.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color https-proxy-agent@7.0.5: dependencies: agent-base: 7.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -27138,7 +27269,7 @@ snapshots: json5@2.2.3: {} - jsonc-parser@3.2.1: {} + jsonc-parser@3.3.1: {} jsonfile@4.0.0: optionalDependencies: @@ -27189,7 +27320,7 @@ snapshots: jwa: 1.4.1 safe-buffer: 5.2.1 - katex@0.16.10: + katex@0.16.11: dependencies: commander: 8.3.0 @@ -27483,7 +27614,7 @@ snapshots: punycode.js: 2.3.1 uc.micro: 2.1.0 - markdown-table@3.0.3: {} + markdown-table@3.0.4: {} markdown-to-jsx@7.3.2(react@18.3.1): dependencies: @@ -27551,7 +27682,7 @@ snapshots: mdast-util-gfm-table@1.0.7: dependencies: '@types/mdast': 3.0.15 - markdown-table: 3.0.3 + markdown-table: 3.0.4 mdast-util-from-markdown: 1.3.1 
mdast-util-to-markdown: 1.5.0 transitivePeerDependencies: @@ -27643,17 +27774,17 @@ snapshots: unist-util-position: 4.0.4 unist-util-visit: 4.1.2 - mdast-util-to-hast@13.1.0: + mdast-util-to-hast@13.2.0: dependencies: '@types/hast': 3.0.4 - '@types/mdast': 4.0.3 + '@types/mdast': 4.0.4 '@ungap/structured-clone': 1.2.0 devlop: 1.1.0 micromark-util-sanitize-uri: 2.0.0 trim-lines: 3.0.1 unist-util-position: 5.0.0 unist-util-visit: 5.0.0 - vfile: 6.0.1 + vfile: 6.0.3 mdast-util-to-markdown@1.5.0: dependencies: @@ -27706,25 +27837,25 @@ snapshots: merge2@1.4.1: {} - mermaid@10.9.0: + mermaid@10.9.3: dependencies: '@braintree/sanitize-url': 6.0.4 '@types/d3-scale': 4.0.8 '@types/d3-scale-chromatic': 3.0.3 - cytoscape: 3.29.2 - cytoscape-cose-bilkent: 4.1.0(cytoscape@3.29.2) + cytoscape: 3.30.2 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.30.2) d3: 7.9.0 d3-sankey: 0.12.3 dagre-d3-es: 7.0.10 - dayjs: 1.11.11 - dompurify: 3.1.6 + dayjs: 1.11.13 + dompurify: 3.1.7 elkjs: 0.9.3 - katex: 0.16.10 + katex: 0.16.11 khroma: 2.1.0 lodash-es: 4.17.21 mdast-util-from-markdown: 1.3.1 non-layered-tidy-tree-layout: 2.0.2 - stylis: 4.3.2 + stylis: 4.3.4 ts-dedent: 2.2.0 uuid: 9.0.1 web-worker: 1.3.0 @@ -27817,7 +27948,7 @@ snapshots: micromark-extension-math@2.1.2: dependencies: '@types/katex': 0.16.7 - katex: 0.16.10 + katex: 0.16.11 micromark-factory-space: 1.1.0 micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 @@ -28009,7 +28140,7 @@ snapshots: micromark@3.2.0: dependencies: '@types/debug': 4.1.12 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) decode-named-character-reference: 1.0.2 micromark-core-commonmark: 1.1.0 micromark-factory-space: 1.1.0 @@ -28235,7 +28366,7 @@ snapshots: transitivePeerDependencies: - supports-color - next-seo@6.5.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + 
next-seo@6.6.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: next: 14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6) react: 18.3.1 @@ -28281,12 +28412,12 @@ snapshots: clsx: 2.1.1 escape-string-regexp: 5.0.0 flexsearch: 0.7.43 - focus-visible: 5.2.0 + focus-visible: 5.2.1 git-url-parse: 13.1.1 intersection-observer: 0.12.2 match-sorter: 6.3.4 next: 14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6) - next-seo: 6.5.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next-seo: 6.6.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-themes: 0.2.1(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nextra: 2.13.4(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 @@ -28299,21 +28430,21 @@ snapshots: '@headlessui/react': 1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mdx-js/mdx': 2.3.0 '@mdx-js/react': 2.3.0(react@18.3.1) - '@napi-rs/simple-git': 0.1.16 + '@napi-rs/simple-git': 0.1.19 '@theguild/remark-mermaid': 0.0.5(react@18.3.1) '@theguild/remark-npm2yarn': 0.2.1 clsx: 2.1.1 github-slugger: 2.0.0 graceful-fs: 4.2.11 gray-matter: 4.0.3 - katex: 0.16.10 + katex: 0.16.11 lodash.get: 4.4.2 next: 14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6) next-mdx-remote: 
4.4.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) p-limit: 3.1.0 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rehype-katex: 7.0.0 + rehype-katex: 7.0.1 rehype-pretty-code: 0.9.11(shiki@0.14.7) rehype-raw: 7.0.0 remark-gfm: 3.0.1 @@ -28680,9 +28811,9 @@ snapshots: '@parcel/fs': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/logger': 2.12.0 '@parcel/package-manager': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) - '@parcel/reporter-cli': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/reporter-dev-server': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/reporter-tracer': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/reporter-cli': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/reporter-dev-server': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/reporter-tracer': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 chalk: 4.1.2 commander: 7.2.0 @@ -29381,7 +29512,7 @@ snapshots: puppeteer-core@2.1.1: dependencies: '@types/mime-types': 2.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) extract-zip: 1.7.0 https-proxy-agent: 4.0.0 mime: 2.6.0 @@ -29553,7 +29684,7 @@ snapshots: dependencies: react: 18.3.1 react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1) - tslib: 2.6.3 + tslib: 2.7.0 optionalDependencies: '@types/react': 18.3.3 @@ -29570,7 +29701,7 @@ snapshots: react: 18.3.1 react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1) react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1) - tslib: 2.6.3 + tslib: 2.7.0 use-callback-ref: 1.3.0(@types/react@18.3.3)(react@18.3.1) use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1) optionalDependencies: @@ -29581,7 +29712,7 @@ snapshots: react: 18.3.1 react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1) react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1) - tslib: 2.6.3 + 
tslib: 2.7.0 use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1) use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1) optionalDependencies: @@ -29775,15 +29906,15 @@ snapshots: dependencies: jsesc: 0.5.0 - rehype-katex@7.0.0: + rehype-katex@7.0.1: dependencies: '@types/hast': 3.0.4 '@types/katex': 0.16.7 hast-util-from-html-isomorphic: 2.0.0 hast-util-to-text: 4.0.2 - katex: 0.16.10 + katex: 0.16.11 unist-util-visit-parents: 6.0.1 - vfile: 6.0.1 + vfile: 6.0.3 rehype-pretty-code@0.9.11(shiki@0.14.7): dependencies: @@ -29795,8 +29926,8 @@ snapshots: rehype-raw@7.0.0: dependencies: '@types/hast': 3.0.4 - hast-util-raw: 9.0.3 - vfile: 6.0.1 + hast-util-raw: 9.0.4 + vfile: 6.0.3 relateurl@0.2.7: {} @@ -30256,7 +30387,7 @@ snapshots: shiki@0.14.7: dependencies: ansi-sequence-parser: 1.1.1 - jsonc-parser: 3.2.1 + jsonc-parser: 3.3.1 vscode-oniguruma: 1.7.0 vscode-textmate: 8.0.0 @@ -30336,7 +30467,7 @@ snapshots: dependencies: atomic-sleep: 1.0.0 - sort-keys@5.0.0: + sort-keys@5.1.0: dependencies: is-plain-obj: 4.1.0 @@ -30602,7 +30733,7 @@ snapshots: optionalDependencies: '@babel/core': 7.24.7 - stylis@4.3.2: {} + stylis@4.3.4: {} sucrase@3.35.0: dependencies: @@ -31500,10 +31631,10 @@ snapshots: core-util-is: 1.0.2 extsprintf: 1.3.0 - vfile-location@5.0.2: + vfile-location@5.0.3: dependencies: '@types/unist': 3.0.2 - vfile: 6.0.1 + vfile: 6.0.3 vfile-matter@3.0.1: dependencies: @@ -31528,16 +31659,15 @@ snapshots: unist-util-stringify-position: 3.0.3 vfile-message: 3.1.4 - vfile@6.0.1: + vfile@6.0.3: dependencies: '@types/unist': 3.0.2 - unist-util-stringify-position: 4.0.0 vfile-message: 4.0.2 vite-node@1.6.0(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1): dependencies: cac: 6.7.14 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) pathe: 1.1.2 picocolors: 1.0.1 vite: 5.3.3(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1) @@ -31554,7 +31684,7 @@ snapshots: 
vite-node@2.0.1(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1): dependencies: cac: 6.7.14 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) pathe: 1.1.2 picocolors: 1.0.1 vite: 5.3.3(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1) @@ -31600,7 +31730,7 @@ snapshots: '@vitest/spy': 2.0.1 '@vitest/utils': 2.0.1 chai: 5.1.1 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) execa: 8.0.1 magic-string: 0.30.10 pathe: 1.1.2 diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index 908c7ee00ae94..9bb3667fe64c2 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -26,3 +26,4 @@ packages: - '!sdk/typescript/keypairs/secp256r1' - '!sdk/typescript/graphql/schemas/2024.1' - '!sdk/typescript/graphql/schemas/2024.4' + - '!sdk/kms/aws' diff --git a/sdk/build-scripts/src/utils/buildPackage.ts b/sdk/build-scripts/src/utils/buildPackage.ts index fec294a1d1176..2e578dc729aa4 100755 --- a/sdk/build-scripts/src/utils/buildPackage.ts +++ b/sdk/build-scripts/src/utils/buildPackage.ts @@ -60,6 +60,7 @@ async function buildCJS( entryPoints, outdir: 'dist/cjs', sourcemap: true, + outbase: 'src', ...buildOptions, }); await buildTypes('tsconfig.json'); @@ -90,6 +91,7 @@ async function buildESM( target: 'es2020', entryPoints, outdir: 'dist/esm', + outbase: 'src', sourcemap: true, ...buildOptions, diff --git a/sdk/create-dapp/CHANGELOG.md b/sdk/create-dapp/CHANGELOG.md index c3362f697c758..f3358068f6cfb 100644 --- a/sdk/create-dapp/CHANGELOG.md +++ b/sdk/create-dapp/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/create-dapp +## 0.3.28 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/dapp-kit@0.14.28 + ## 0.3.27 ### Patch Changes diff --git a/sdk/create-dapp/package.json b/sdk/create-dapp/package.json index 7737c2db3379a..353c4f2e4b7bc 100644 --- a/sdk/create-dapp/package.json +++ b/sdk/create-dapp/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", 
"description": "A CLI for creating new Sui dApps", "homepage": "https://sdk.mystenlabs.com", - "version": "0.3.27", + "version": "0.3.28", "license": "Apache-2.0", "files": [ "CHANGELOG.md", diff --git a/sdk/dapp-kit/CHANGELOG.md b/sdk/dapp-kit/CHANGELOG.md index ca385c4a3fc06..9fcd1d3708f6c 100644 --- a/sdk/dapp-kit/CHANGELOG.md +++ b/sdk/dapp-kit/CHANGELOG.md @@ -1,5 +1,14 @@ # @mysten/dapp-kit +## 0.14.28 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/wallet-standard@0.13.9 + - @mysten/zksend@0.11.9 + ## 0.14.27 ### Patch Changes diff --git a/sdk/dapp-kit/package.json b/sdk/dapp-kit/package.json index d9481732ea330..86327f7f2e732 100644 --- a/sdk/dapp-kit/package.json +++ b/sdk/dapp-kit/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "A collection of React hooks and components for interacting with the Sui blockchain and wallets.", "homepage": "https://sdk.mystenlabs.com/typescript", - "version": "0.14.27", + "version": "0.14.28", "license": "Apache-2.0", "files": [ "CHANGELOG.md", diff --git a/sdk/deepbook-v3/CHANGELOG.md b/sdk/deepbook-v3/CHANGELOG.md index 3aee7f84ea116..50b3bdf8fcc33 100644 --- a/sdk/deepbook-v3/CHANGELOG.md +++ b/sdk/deepbook-v3/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/deepbook-v3 +## 0.12.2 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.12.1 ### Patch Changes diff --git a/sdk/deepbook-v3/package.json b/sdk/deepbook-v3/package.json index d4002e7ac6e76..99054fc7780a1 100644 --- a/sdk/deepbook-v3/package.json +++ b/sdk/deepbook-v3/package.json @@ -2,7 +2,7 @@ "name": "@mysten/deepbook-v3", "author": "Mysten Labs ", "description": "Sui Deepbook SDK", - "version": "0.12.1", + "version": "0.12.2", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/deepbook/CHANGELOG.md b/sdk/deepbook/CHANGELOG.md index 73608ddc7d768..a900fd1e38c4c 100644 --- a/sdk/deepbook/CHANGELOG.md +++ b/sdk/deepbook/CHANGELOG.md 
@@ -1,5 +1,12 @@ # @mysten/deepbook +## 0.8.23 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.8.22 ### Patch Changes diff --git a/sdk/deepbook/package.json b/sdk/deepbook/package.json index 711d7b93b1fbf..10d4847acdfe0 100644 --- a/sdk/deepbook/package.json +++ b/sdk/deepbook/package.json @@ -2,7 +2,7 @@ "name": "@mysten/deepbook", "author": "Mysten Labs ", "description": "Sui Deepbook SDK", - "version": "0.8.22", + "version": "0.8.23", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/enoki/CHANGELOG.md b/sdk/enoki/CHANGELOG.md index b4176c6e2d489..6bffef310266c 100644 --- a/sdk/enoki/CHANGELOG.md +++ b/sdk/enoki/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/enoki +## 0.4.7 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/zklogin@0.7.24 + ## 0.4.6 ### Patch Changes diff --git a/sdk/enoki/package.json b/sdk/enoki/package.json index 5a72fedfc8f35..d6d4eb4c7e1a4 100644 --- a/sdk/enoki/package.json +++ b/sdk/enoki/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/enoki", - "version": "0.4.6", + "version": "0.4.7", "description": "TODO: Description", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/graphql-transport/CHANGELOG.md b/sdk/graphql-transport/CHANGELOG.md index d1746220f232a..f6d5e13a6d984 100644 --- a/sdk/graphql-transport/CHANGELOG.md +++ b/sdk/graphql-transport/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/graphql-transport +## 0.2.25 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.2.24 ### Patch Changes diff --git a/sdk/graphql-transport/package.json b/sdk/graphql-transport/package.json index 481d20b2dc86e..1518ad56194ce 100644 --- a/sdk/graphql-transport/package.json +++ b/sdk/graphql-transport/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/graphql-transport", - "version": "0.2.24", + "version": "0.2.25", "description": "A GraphQL transport to allow SuiClient to work with RPC 2.0", 
"license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/kiosk/CHANGELOG.md b/sdk/kiosk/CHANGELOG.md index 975703c2a7aa6..24d4204792caa 100644 --- a/sdk/kiosk/CHANGELOG.md +++ b/sdk/kiosk/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/kiosk +## 0.9.23 + +### Patch Changes + +- 4166d71: Fix doc comment on `getKiosk` command +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.9.22 ### Patch Changes diff --git a/sdk/kiosk/package.json b/sdk/kiosk/package.json index 98e442b40e885..b9246f27aa1ed 100644 --- a/sdk/kiosk/package.json +++ b/sdk/kiosk/package.json @@ -2,7 +2,7 @@ "name": "@mysten/kiosk", "author": "Mysten Labs ", "description": "Sui Kiosk library", - "version": "0.9.22", + "version": "0.9.23", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/kiosk/src/client/kiosk-client.ts b/sdk/kiosk/src/client/kiosk-client.ts index 23fe8312896af..0c740e05524a7 100644 --- a/sdk/kiosk/src/client/kiosk-client.ts +++ b/sdk/kiosk/src/client/kiosk-client.ts @@ -77,8 +77,8 @@ export class KioskClient { /** * Fetches the kiosk contents. - * @param kioskId The ID of the kiosk to fetch. - * @param options Optioal + * @param id The ID of the kiosk to fetch. + * @param options Optional to control the fetch behavior. 
* @returns */ async getKiosk({ id, options }: { id: string; options?: FetchKioskOptions }): Promise { diff --git a/sdk/kms/.env.example b/sdk/kms/.env.example new file mode 100644 index 0000000000000..f369a86efb003 --- /dev/null +++ b/sdk/kms/.env.example @@ -0,0 +1,4 @@ +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +export AWS_REGION="" +export AWS_KMS_KEY_ID="" diff --git a/sdk/kms/CHANGELOG.md b/sdk/kms/CHANGELOG.md new file mode 100644 index 0000000000000..dbbe361c4ce9c --- /dev/null +++ b/sdk/kms/CHANGELOG.md @@ -0,0 +1,15 @@ +# @mysten/kms + +## 0.0.3 + +### Patch Changes + +- 02c9e46: Fix exports on the bundled package + +## 0.0.2 + +### Patch Changes + +- b3f3925: Introduces @mysten/kms which initially exposes a Sui AWS KMS signer +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 diff --git a/sdk/kms/README.md b/sdk/kms/README.md new file mode 100644 index 0000000000000..ec6fda431acff --- /dev/null +++ b/sdk/kms/README.md @@ -0,0 +1,75 @@ +# Sui KMS Signers + +The Sui KMS Signers package provides a set of tools for securely signing transactions using Key +Management Services (KMS) like AWS KMS. + +## Table of Contents + +- [AWS KMS Signer](#aws-kms-signer) + - [Usage](#usage) + - [API](#api) + - [fromKeyId](#fromkeyid) + - [Parameters](#parameters) + - [Examples](#examples) + +## AWS KMS Signer + +The AWS KMS Signer allows you to leverage AWS's Key Management Service to sign Sui transactions. + +### Usage + +```typescript +import { AwsKmsSigner } from '@mysten/kms/aws'; + +const prepareSigner = async () => { + const { AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_KMS_KEY_ID } = process.env; + + return AwsKmsSigner.fromKeyId(AWS_KMS_KEY_ID, { + region: AWS_REGION, + accessKeyId: AWS_ACCESS_KEY_ID, + secretAccessKey: AWS_SECRET_ACCESS_KEY, + }); +}; +``` + +### API + +#### fromKeyId + +Create an AWS KMS signer from AWS Key ID and AWS credentials. 
This method initializes the signer +with the necessary AWS credentials and region information, allowing it to interact with AWS KMS to +perform cryptographic operations. + +##### Parameters + +- `keyId` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS KMS key ID. +- `options` + **[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)** An + object containing AWS credentials and region. + - `region` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS region. + - `accessKeyId` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS access key ID. + - `secretAccessKey` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS secret access key. + +##### Examples + +```typescript +const signer = await AwsKmsSigner.fromKeyId('your-kms-key-id', { + region: 'us-west-2', + accessKeyId: 'your-access-key-id', + secretAccessKey: 'your-secret-access-key', +}); +``` + +Returns +**[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[AwsKmsSigner](./src/aws/aws-kms-signer.ts)>** +An instance of AwsKmsSigner. 
+ +**Notice**: AWS Signer requires Node >=20 due to dependency on `crypto` diff --git a/sdk/kms/aws/package.json b/sdk/kms/aws/package.json new file mode 100644 index 0000000000000..31e61fb81abe7 --- /dev/null +++ b/sdk/kms/aws/package.json @@ -0,0 +1,6 @@ +{ + "private": true, + "import": "../dist/esm/aws/index.js", + "main": "../dist/cjs/aws/index.js", + "sideEffects": false +} diff --git a/sdk/kms/package.json b/sdk/kms/package.json new file mode 100644 index 0000000000000..07e19974dd924 --- /dev/null +++ b/sdk/kms/package.json @@ -0,0 +1,59 @@ +{ + "name": "@mysten/kms", + "version": "0.0.3", + "description": "A collection of KMS signers for various cloud providers", + "license": "Apache-2.0", + "author": "Mysten Labs ", + "type": "commonjs", + "exports": { + "./aws": { + "import": "./dist/esm/aws/index.js", + "require": "./dist/cjs/aws/index.js" + } + }, + "sideEffects": false, + "files": [ + "CHANGELOG.md", + "LICENSE", + "README.md", + "aws", + "dist", + "src" + ], + "scripts": { + "clean": "rm -rf tsconfig.tsbuildinfo ./dist", + "build": "build-package", + "prepublishOnly": "pnpm build", + "prettier:check": "prettier -c --ignore-unknown .", + "prettier:fix": "prettier -w --ignore-unknown .", + "eslint:check": "eslint --max-warnings=0 .", + "eslint:fix": "pnpm run eslint:check --fix", + "lint": "pnpm run eslint:check && pnpm run prettier:check", + "lint:fix": "pnpm run eslint:fix && pnpm run prettier:fix", + "test": "vitest" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/mystenlabs/sui.git" + }, + "bugs": { + "url": "https://github.com/mystenlabs/sui/issues" + }, + "homepage": "https://github.com/mystenlabs/sui#readme", + "devDependencies": { + "@mysten/build-scripts": "workspace:*", + "@types/node": "^20.14.10", + "typescript": "^5.5.3", + "vitest": "^2.0.1" + }, + "dependencies": { + "@mysten/sui": "workspace:*", + "@noble/curves": "^1.4.2", + "@noble/hashes": "^1.4.0", + "asn1-ts": "^8.0.2", + "aws4fetch": "^1.0.20" + }, + 
"engines": { + "node": ">=20" + } +} diff --git a/sdk/kms/src/aws/aws-client.ts b/sdk/kms/src/aws/aws-client.ts new file mode 100644 index 0000000000000..9da87d090582d --- /dev/null +++ b/sdk/kms/src/aws/aws-client.ts @@ -0,0 +1,131 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { Secp256k1PublicKey } from '@mysten/sui/keypairs/secp256k1'; +import { Secp256r1PublicKey } from '@mysten/sui/keypairs/secp256r1'; +import { fromBase64 } from '@mysten/sui/utils'; +import { ASN1Construction, ASN1TagClass, DERElement } from 'asn1-ts'; +import { AwsClient } from 'aws4fetch'; + +import { compressPublicKeyClamped } from './utils.js'; + +interface KmsCommands { + Sign: { + request: { + KeyId: string; + Message: string; + MessageType: 'RAW' | 'DIGEST'; + SigningAlgorithm: 'ECDSA_SHA_256'; + }; + response: { + KeyId: string; + KeyOrigin: string; + Signature: string; + SigningAlgorithm: string; + }; + }; + GetPublicKey: { + request: { KeyId: string }; + response: { + CustomerMasterKeySpec: string; + KeyId: string; + KeyOrigin: string; + KeySpec: string; + KeyUsage: string; + PublicKey: string; + SigningAlgorithms: string[]; + }; + }; +} + +export interface AwsClientOptions extends Partial[0]> {} + +export class AwsKmsClient extends AwsClient { + constructor(options: AwsClientOptions = {}) { + if (!options.accessKeyId || !options.secretAccessKey) { + throw new Error('AWS Access Key ID and Secret Access Key are required'); + } + + if (!options.region) { + throw new Error('Region is required'); + } + + super({ + region: options.region, + accessKeyId: options.accessKeyId, + secretAccessKey: options.secretAccessKey, + service: 'kms', + ...options, + }); + } + + async getPublicKey(keyId: string) { + const publicKeyResponse = await this.runCommand('GetPublicKey', { KeyId: keyId }); + + if (!publicKeyResponse.PublicKey) { + throw new Error('Public Key not found for the supplied `keyId`'); + } + + const publicKey = 
fromBase64(publicKeyResponse.PublicKey); + + const encodedData: Uint8Array = publicKey; + const derElement = new DERElement(); + derElement.fromBytes(encodedData); + + // Validate the ASN.1 structure of the public key + if ( + !( + derElement.tagClass === ASN1TagClass.universal && + derElement.construction === ASN1Construction.constructed + ) + ) { + throw new Error('Unexpected ASN.1 structure'); + } + + const components = derElement.components; + const publicKeyElement = components[1]; + + if (!publicKeyElement) { + throw new Error('Public Key not found in the DER structure'); + } + + const compressedKey = compressPublicKeyClamped(publicKeyElement.bitString); + + switch (publicKeyResponse.KeySpec) { + case 'ECC_NIST_P256': + return new Secp256r1PublicKey(compressedKey); + case 'ECC_SECG_P256K1': + return new Secp256k1PublicKey(compressedKey); + default: + throw new Error('Unsupported key spec: ' + publicKeyResponse.KeySpec); + } + } + + async runCommand( + command: T, + body: KmsCommands[T]['request'], + { + region = this.region!, + }: { + region?: string; + } = {}, + ): Promise { + if (!region) { + throw new Error('Region is required'); + } + + const res = await this.fetch(`https://kms.${region}.amazonaws.com/`, { + headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': `TrentService.${command}`, + }, + body: JSON.stringify(body), + }); + + if (!res.ok) { + throw new Error(await res.text()); + } + + return res.json(); + } +} diff --git a/sdk/kms/src/aws/aws-kms-signer.ts b/sdk/kms/src/aws/aws-kms-signer.ts new file mode 100644 index 0000000000000..8480bee7861c2 --- /dev/null +++ b/sdk/kms/src/aws/aws-kms-signer.ts @@ -0,0 +1,147 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +import type { PublicKey, SignatureFlag } from '@mysten/sui/cryptography'; +import { SIGNATURE_FLAG_TO_SCHEME, Signer } from '@mysten/sui/cryptography'; +import { fromBase64, toBase64 } from '@mysten/sui/utils'; +import { secp256r1 } from '@noble/curves/p256'; +import { secp256k1 } from '@noble/curves/secp256k1'; +import { DERElement } from 'asn1-ts'; + +import type { AwsClientOptions } from './aws-client.js'; +import { AwsKmsClient } from './aws-client.js'; + +/** + * Configuration options for initializing the AwsKmsSigner. + */ +export interface AwsKmsSignerOptions { + /** AWS KMS Key ID used for signing */ + kmsKeyId: string; + /** Options for setting up the AWS KMS client */ + client: AwsKmsClient; + /** Public key */ + publicKey: PublicKey; +} + +/** + * Aws KMS Signer integrates AWS Key Management Service (KMS) with the Sui blockchain + * to provide signing capabilities using AWS-managed cryptographic keys. + */ +export class AwsKmsSigner extends Signer { + #publicKey: PublicKey; + /** AWS KMS client instance */ + #client: AwsKmsClient; + /** AWS KMS Key ID used for signing */ + #kmsKeyId: string; + + /** + * Creates an instance of AwsKmsSigner. It's expected to call the static `fromKeyId` method to create an instance. + * For example: + * ``` + * const signer = await AwsKmsSigner.fromKeyId(keyId, options); + * ``` + * @throws Will throw an error if required AWS credentials or region are not provided. + */ + constructor({ kmsKeyId, client, publicKey }: AwsKmsSignerOptions) { + super(); + if (!kmsKeyId) throw new Error('KMS Key ID is required'); + + this.#client = client; + this.#kmsKeyId = kmsKeyId; + this.#publicKey = publicKey; + } + + /** + * Retrieves the key scheme used by this signer. + * @returns AWS supports only Secp256k1 and Secp256r1 schemes. + */ + getKeyScheme() { + return SIGNATURE_FLAG_TO_SCHEME[this.#publicKey.flag() as SignatureFlag]; + } + + /** + * Retrieves the public key associated with this signer. 
+ * @returns The Secp256k1PublicKey instance. + * @throws Will throw an error if the public key has not been initialized. + */ + getPublicKey() { + return this.#publicKey; + } + + /** + * Signs the given data using AWS KMS. + * @param bytes - The data to be signed as a Uint8Array. + * @returns A promise that resolves to the signature as a Uint8Array. + * @throws Will throw an error if the public key is not initialized or if signing fails. + */ + async sign(bytes: Uint8Array): Promise { + const signResponse = await this.#client.runCommand('Sign', { + KeyId: this.#kmsKeyId, + Message: toBase64(bytes), + MessageType: 'RAW', + SigningAlgorithm: 'ECDSA_SHA_256', + }); + + // Concatenate the signature components into a compact form + return this.#getConcatenatedSignature(fromBase64(signResponse.Signature)); + } + + /** + * Synchronous signing is not supported by AWS KMS. + * @throws Always throws an error indicating synchronous signing is unsupported. + */ + signData(): never { + throw new Error('KMS Signer does not support sync signing'); + } + + /** + * Generates a concatenated signature from a DER-encoded signature. + * + * This signature format is consumable by Sui's `toSerializedSignature` method. + * + * @param signature - A `Uint8Array` representing the DER-encoded signature. + * @returns A `Uint8Array` containing the concatenated signature in compact form. + * + * @throws {Error} If the input signature is invalid or cannot be processed. 
+ */ + #getConcatenatedSignature(signature: Uint8Array): Uint8Array { + if (!signature || signature.length === 0) { + throw new Error('Invalid signature'); + } + + // Initialize a DERElement to parse the DER-encoded signature + const derElement = new DERElement(); + derElement.fromBytes(signature); + + const [r, s] = derElement.toJSON() as [string, string]; + + switch (this.getKeyScheme()) { + case 'Secp256k1': + return new secp256k1.Signature(BigInt(r), BigInt(s)).normalizeS().toCompactRawBytes(); + case 'Secp256r1': + return new secp256r1.Signature(BigInt(r), BigInt(s)).normalizeS().toCompactRawBytes(); + } + + // Create a Secp256k1Signature using the extracted r and s values + const secp256k1Signature = new secp256k1.Signature(BigInt(r), BigInt(s)); + + // Normalize the signature and convert it to compact raw bytes + return secp256k1Signature.normalizeS().toCompactRawBytes(); + } + + /** + * Prepares the signer by fetching and setting the public key from AWS KMS. + * It is recommended to initialize an `AwsKmsSigner` instance using this function. + * @returns A promise that resolves once a `AwsKmsSigner` instance is prepared (public key is set). + */ + static async fromKeyId(keyId: string, options: AwsClientOptions) { + const client = new AwsKmsClient(options); + + const pubKey = await client.getPublicKey(keyId); + + return new AwsKmsSigner({ + kmsKeyId: keyId, + client, + publicKey: pubKey, + }); + } +} diff --git a/sdk/kms/src/aws/index.ts b/sdk/kms/src/aws/index.ts new file mode 100644 index 0000000000000..8902adb226c14 --- /dev/null +++ b/sdk/kms/src/aws/index.ts @@ -0,0 +1,9 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +import type { AwsClientOptions } from './aws-client.js'; +import type { AwsKmsSignerOptions } from './aws-kms-signer.js'; +import { AwsKmsSigner } from './aws-kms-signer.js'; + +export { AwsKmsSigner }; + +export type { AwsKmsSignerOptions, AwsClientOptions }; diff --git a/sdk/kms/src/aws/utils.ts b/sdk/kms/src/aws/utils.ts new file mode 100644 index 0000000000000..43a2afcc7e0b0 --- /dev/null +++ b/sdk/kms/src/aws/utils.ts @@ -0,0 +1,65 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/** The total number of bits in the DER bit string for the uncompressed public key. */ +export const DER_BIT_STRING_LENGTH = 520; + +/** The total number of bytes corresponding to the DER bit string length. */ +export const DER_BYTES_LENGTH = DER_BIT_STRING_LENGTH / 8; + +// Reference Specifications: +// https://datatracker.ietf.org/doc/html/rfc5480#section-2.2 +// https://www.secg.org/sec1-v2.pdf + +/** + * Converts an array of bits into a byte array. + * + * @param bitsArray - A `Uint8ClampedArray` representing the bits to convert. + * @returns A `Uint8Array` containing the corresponding bytes. + * + * @throws {Error} If the input array does not have the expected length. + */ +function bitsToBytes(bitsArray: Uint8ClampedArray): Uint8Array { + const bytes = new Uint8Array(DER_BYTES_LENGTH); + for (let i = 0; i < DER_BIT_STRING_LENGTH; i++) { + if (bitsArray[i] === 1) { + bytes[Math.floor(i / 8)] |= 1 << (7 - (i % 8)); + } + } + return bytes; +} + +/** + * Compresses an uncompressed public key into its compressed form. + * + * The uncompressed key must follow the DER bit string format as specified in [RFC 5480](https://datatracker.ietf.org/doc/html/rfc5480#section-2.2) + * and [SEC 1: Elliptic Curve Cryptography](https://www.secg.org/sec1-v2.pdf). + * + * @param uncompressedKey - A `Uint8ClampedArray` representing the uncompressed public key bits. + * @returns A `Uint8Array` containing the compressed public key. 
+ * + * @throws {Error} If the uncompressed key has an unexpected length or does not start with the expected prefix. + */ +export function compressPublicKeyClamped(uncompressedKey: Uint8ClampedArray): Uint8Array { + if (uncompressedKey.length !== DER_BIT_STRING_LENGTH) { + throw new Error('Unexpected length for an uncompressed public key'); + } + + // Convert bits to bytes + const uncompressedBytes = bitsToBytes(uncompressedKey); + + // Ensure the public key starts with the standard uncompressed prefix 0x04 + if (uncompressedBytes[0] !== 0x04) { + throw new Error('Public key does not start with 0x04'); + } + + // Extract X-Coordinate (skip the first byte, which is the prefix 0x04) + const xCoord = uncompressedBytes.slice(1, 33); + + // Determine parity byte for Y coordinate based on the last byte + const yCoordLastByte = uncompressedBytes[64]; + const parityByte = yCoordLastByte % 2 === 0 ? 0x02 : 0x03; + + // Return the compressed public key consisting of the parity byte and X-coordinate + return new Uint8Array([parityByte, ...xCoord]); +} diff --git a/sdk/kms/tests/e2e-aws-kms.test.ts b/sdk/kms/tests/e2e-aws-kms.test.ts new file mode 100644 index 0000000000000..5eddbd57120df --- /dev/null +++ b/sdk/kms/tests/e2e-aws-kms.test.ts @@ -0,0 +1,46 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +import { beforeAll, describe, expect, it } from 'vitest'; + +import { AwsKmsSigner } from '../src/aws/aws-kms-signer'; + +const { E2E_AWS_KMS_TEST_ENABLE } = process.env; + +describe.runIf(E2E_AWS_KMS_TEST_ENABLE)('Aws KMS signer E2E testing', () => { + let signer: AwsKmsSigner; + beforeAll(async () => { + const { AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_KMS_KEY_ID } = process.env; + + if (!AWS_ACCESS_KEY_ID || !AWS_SECRET_ACCESS_KEY || !AWS_REGION || !AWS_KMS_KEY_ID) { + throw new Error('Missing one or more required environment variables.'); + } + + signer = await AwsKmsSigner.fromKeyId(AWS_KMS_KEY_ID, { + region: AWS_REGION, + accessKeyId: AWS_ACCESS_KEY_ID, + secretAccessKey: AWS_SECRET_ACCESS_KEY, + }); + }); + + it('should retrieve the correct sui address', async () => { + // Get the public key + const publicKey = signer.getPublicKey(); + expect(publicKey.toSuiAddress()).toEqual( + '0x2bfc782b6bf66f305fdeb19a203386efee3e62bce3ceb9d3d53eafbe0b14a035', + ); + }); + + it('should sign a message and verify against pubkey', async () => { + // Define a test message + const testMessage = 'Hello, AWS KMS Signer!'; + const messageBytes = new TextEncoder().encode(testMessage); + + // Sign the test message + const { signature } = await signer.signPersonalMessage(messageBytes); + + // verify signature against pubkey + const publicKey = signer.getPublicKey(); + const isValid = await publicKey.verifyPersonalMessage(messageBytes, signature); + expect(isValid).toBe(true); + }); +}); diff --git a/sdk/kms/tsconfig.esm.json b/sdk/kms/tsconfig.esm.json new file mode 100644 index 0000000000000..5048bdf8ffc62 --- /dev/null +++ b/sdk/kms/tsconfig.esm.json @@ -0,0 +1,7 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "module": "ESNext", + "outDir": "dist/esm" + } +} diff --git a/sdk/kms/tsconfig.json b/sdk/kms/tsconfig.json new file mode 100644 index 0000000000000..b4ed52cfe8a5e --- /dev/null +++ 
b/sdk/kms/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "../build-scripts/tsconfig.shared.json", + "include": ["src"], + "compilerOptions": { + "module": "CommonJS", + "outDir": "dist/cjs", + "isolatedModules": true, + "rootDir": "src" + }, + "references": [{ "path": "../typescript" }] +} diff --git a/sdk/kms/vitest.config.ts b/sdk/kms/vitest.config.ts new file mode 100644 index 0000000000000..07452fe1219c2 --- /dev/null +++ b/sdk/kms/vitest.config.ts @@ -0,0 +1,19 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + minWorkers: 1, + maxWorkers: 4, + hookTimeout: 1000000, + testTimeout: 1000000, + env: { + NODE_ENV: 'test', + }, + }, + resolve: { + alias: {}, + }, +}); diff --git a/sdk/suins-toolkit/CHANGELOG.md b/sdk/suins-toolkit/CHANGELOG.md index 62172a2cb5e31..e4e196552a66b 100644 --- a/sdk/suins-toolkit/CHANGELOG.md +++ b/sdk/suins-toolkit/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/suins-toolkit +## 0.5.23 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.5.22 ### Patch Changes diff --git a/sdk/suins-toolkit/package.json b/sdk/suins-toolkit/package.json index 60d0d8024ee98..6474d6ba6b32e 100644 --- a/sdk/suins-toolkit/package.json +++ b/sdk/suins-toolkit/package.json @@ -2,7 +2,7 @@ "name": "@mysten/suins-toolkit", "author": "Mysten Labs ", "description": "SuiNS TypeScript SDK", - "version": "0.5.22", + "version": "0.5.23", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/typescript/CHANGELOG.md b/sdk/typescript/CHANGELOG.md index cc7a666518210..0dfddaa1fa2b5 100644 --- a/sdk/typescript/CHANGELOG.md +++ b/sdk/typescript/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/sui.js +## 1.14.0 + +### Minor Changes + +- c24814b: Adds a custom header; 'Client-Request-Method' which will contain the method name used in + each outgoing jsonrpc request + ## 1.13.0 ### Minor Changes diff 
--git a/sdk/typescript/package.json b/sdk/typescript/package.json index 27dcf7f82960f..ee6af72ea0b2b 100644 --- a/sdk/typescript/package.json +++ b/sdk/typescript/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "Sui TypeScript API(Work in Progress)", "homepage": "https://sdk.mystenlabs.com", - "version": "1.13.0", + "version": "1.14.0", "license": "Apache-2.0", "sideEffects": false, "files": [ diff --git a/sdk/typescript/src/client/http-transport.ts b/sdk/typescript/src/client/http-transport.ts index 54309646ea1a4..b690316dbcfbe 100644 --- a/sdk/typescript/src/client/http-transport.ts +++ b/sdk/typescript/src/client/http-transport.ts @@ -95,6 +95,7 @@ export class SuiHTTPTransport implements SuiTransport { 'Client-Sdk-Type': 'typescript', 'Client-Sdk-Version': PACKAGE_VERSION, 'Client-Target-Api-Version': TARGETED_RPC_VERSION, + 'Client-Request-Method': input.method, ...this.#options.rpc?.headers, }, body: JSON.stringify({ diff --git a/sdk/typescript/src/version.ts b/sdk/typescript/src/version.ts index 204307cfc05e4..0181845015457 100644 --- a/sdk/typescript/src/version.ts +++ b/sdk/typescript/src/version.ts @@ -3,5 +3,5 @@ // This file is generated by genversion.mjs. Do not edit it directly. 
-export const PACKAGE_VERSION = '1.13.0'; -export const TARGETED_RPC_VERSION = '1.36.0'; +export const PACKAGE_VERSION = '1.14.0'; +export const TARGETED_RPC_VERSION = '1.37.0'; diff --git a/sdk/typescript/test/unit/client/http-transport.test.ts b/sdk/typescript/test/unit/client/http-transport.test.ts index 654a2756af5dd..cd3e54692be70 100644 --- a/sdk/typescript/test/unit/client/http-transport.test.ts +++ b/sdk/typescript/test/unit/client/http-transport.test.ts @@ -62,6 +62,7 @@ describe('SuiHTTPTransport', () => { 'Client-Sdk-Type': 'typescript', 'Client-Sdk-Version': PACKAGE_VERSION, 'Client-Target-Api-Version': TARGETED_RPC_VERSION, + 'Client-Request-Method': 'getAllBalances', }, method: 'POST', }); diff --git a/sdk/wallet-standard/CHANGELOG.md b/sdk/wallet-standard/CHANGELOG.md index 0e4aea6b9f07f..e80ab97c666ab 100644 --- a/sdk/wallet-standard/CHANGELOG.md +++ b/sdk/wallet-standard/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/wallet-standard +## 0.13.9 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.13.8 ### Patch Changes diff --git a/sdk/wallet-standard/package.json b/sdk/wallet-standard/package.json index f3dfcb12cc5b1..3e22c3d53a556 100644 --- a/sdk/wallet-standard/package.json +++ b/sdk/wallet-standard/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/wallet-standard", - "version": "0.13.8", + "version": "0.13.9", "description": "A suite of standard utilities for implementing wallets based on the Wallet Standard.", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/zklogin/CHANGELOG.md b/sdk/zklogin/CHANGELOG.md index 94e9b9c9fb2fe..32a37002033fa 100644 --- a/sdk/zklogin/CHANGELOG.md +++ b/sdk/zklogin/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/zklogin +## 0.7.24 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.7.23 ### Patch Changes diff --git a/sdk/zklogin/package.json b/sdk/zklogin/package.json index 381fb1eef207e..f7729c21718ff 100644 --- a/sdk/zklogin/package.json +++ 
b/sdk/zklogin/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/zklogin", - "version": "0.7.23", + "version": "0.7.24", "description": "Utilities for interacting with zkLogin in Sui", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/zksend/CHANGELOG.md b/sdk/zksend/CHANGELOG.md index 678a1a2129fde..301e41a6e75ee 100644 --- a/sdk/zksend/CHANGELOG.md +++ b/sdk/zksend/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/zksend +## 0.11.9 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/wallet-standard@0.13.9 + ## 0.11.8 ### Patch Changes diff --git a/sdk/zksend/package.json b/sdk/zksend/package.json index c7becb82c936f..e4c5e88609d2d 100644 --- a/sdk/zksend/package.json +++ b/sdk/zksend/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/zksend", - "version": "0.11.8", + "version": "0.11.9", "description": "TODO: Write Description", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sui-execution/Cargo.toml b/sui-execution/Cargo.toml index 6da2eba7cbf57..785a0e47929f2 100644 --- a/sui-execution/Cargo.toml +++ b/sui-execution/Cargo.toml @@ -51,16 +51,16 @@ petgraph = "0.5.1" [features] default = [] -gas-profiler = [ - "sui-adapter-latest/gas-profiler", - "sui-adapter-v0/gas-profiler", - "sui-adapter-v1/gas-profiler", - "sui-adapter-v2/gas-profiler", -# "sui-adapter-$CUT/gas-profiler", - "move-vm-runtime-v0/gas-profiler", - "move-vm-runtime-v1/gas-profiler", - "move-vm-runtime-latest/gas-profiler", - "move-vm-runtime-v2/gas-profiler", -# "move-vm-runtime-$CUT/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-adapter-latest/tracing", + "sui-adapter-v0/tracing", + "sui-adapter-v1/tracing", + "sui-adapter-v2/tracing", +# "sui-adapter-$CUT/tracing", + "move-vm-runtime-v0/tracing", + "move-vm-runtime-v1/tracing", + "move-vm-runtime-latest/tracing", + "move-vm-runtime-v2/tracing", +# "move-vm-runtime-$CUT/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/latest/sui-adapter/Cargo.toml 
b/sui-execution/latest/sui-adapter/Cargo.toml index 89507f02d64d3..5da6ec1bd45be 100644 --- a/sui-execution/latest/sui-adapter/Cargo.toml +++ b/sui-execution/latest/sui-adapter/Cargo.toml @@ -40,9 +40,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/latest/sui-adapter/src/adapter.rs b/sui-execution/latest/sui-adapter/src/adapter.rs index c1856234639c7..f7fca14dcf6a2 100644 --- a/sui-execution/latest/sui-adapter/src/adapter.rs +++ b/sui-execution/latest/sui-adapter/src/adapter.rs @@ -4,7 +4,7 @@ pub use checked::*; #[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -44,9 +44,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option, ) -> Result { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs index 0b8d483463a45..e41b3c09f7ea4 100644 --- a/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs +++ b/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs @@ -194,7 +194,7 @@ mod checked { // Set the profiler if in CLI #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! 
{ + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs b/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs index 23a29dbc747a5..a43078c1cd283 100644 --- a/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs +++ b/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs @@ -642,10 +642,10 @@ mod checked { )]) } - fn check_compatibility<'a>( + fn check_compatibility( context: &ExecutionContext, existing_package: &MovePackage, - upgrading_modules: impl IntoIterator, + upgrading_modules: &[CompiledModule], policy: u8, ) -> Result<(), ExecutionError> { // Make sure this is a known upgrade policy. @@ -662,7 +662,26 @@ mod checked { invariant_violation!("Tried to normalize modules in existing package but failed") }; - let mut new_normalized = normalize_deserialized_modules(upgrading_modules.into_iter()); + let existing_modules_len = current_normalized.len(); + let upgrading_modules_len = upgrading_modules.len(); + let disallow_new_modules = context + .protocol_config + .disallow_new_modules_in_deps_only_packages() + && policy as u8 == UpgradePolicy::DEP_ONLY; + + if disallow_new_modules && existing_modules_len != upgrading_modules_len { + return Err(ExecutionError::new_with_source( + ExecutionErrorKind::PackageUpgradeError { + upgrade_error: PackageUpgradeError::IncompatibleUpgrade, + }, + format!( + "Existing package has {existing_modules_len} modules, but new package has \ + {upgrading_modules_len}. Adding or removing a module to a deps only package is not allowed." 
+ ), + )); + } + + let mut new_normalized = normalize_deserialized_modules(upgrading_modules.iter()); for (name, cur_module) in current_normalized { let Some(new_module) = new_normalized.remove(&name) else { return Err(ExecutionError::new_with_source( @@ -676,6 +695,9 @@ mod checked { check_module_compatibility(&policy, &cur_module, &new_module)?; } + // If we disallow new modules double check that there are no modules left in `new_normalized`. + debug_assert!(!disallow_new_modules || new_normalized.is_empty()); + Ok(()) } diff --git a/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs b/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs index 93535741f7567..c94c3ed052fb5 100644 --- a/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs +++ b/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs @@ -42,6 +42,14 @@ fn is_msm_supported(context: &NativeContext) -> bool { .enable_group_ops_native_function_msm() } +fn is_uncompressed_g1_supported(context: &NativeContext) -> bool { + context + .extensions() + .get::() + .protocol_config + .uncompressed_g1_group_elements() +} + // Gas related structs and functions. #[derive(Clone)] @@ -86,6 +94,14 @@ pub struct GroupOpsCostParams { pub bls12381_msm_max_len: Option, // costs for decode, pairing, and encode output pub bls12381_pairing_cost: Option, + // costs for conversion to and from uncompressed form + pub bls12381_g1_to_uncompressed_g1_cost: Option, + pub bls12381_uncompressed_g1_to_g1_cost: Option, + // costs for sum of elements uncompressed form + pub bls12381_uncompressed_g1_sum_base_cost: Option, + pub bls12381_uncompressed_g1_sum_cost_per_term: Option, + // limit the number of terms in a sum + pub bls12381_uncompressed_g1_sum_max_terms: Option, } macro_rules! 
native_charge_gas_early_exit_option { @@ -109,6 +125,7 @@ enum Groups { BLS12381G1 = 1, BLS12381G2 = 2, BLS12381GT = 3, + BLS12381UncompressedG1 = 4, } impl Groups { @@ -118,6 +135,7 @@ impl Groups { 1 => Some(Groups::BLS12381G1), 2 => Some(Groups::BLS12381G2), 3 => Some(Groups::BLS12381GT), + 4 => Some(Groups::BLS12381UncompressedG1), _ => None, } } @@ -751,3 +769,149 @@ pub fn internal_pairing( Err(_) => Ok(NativeResult::err(cost, INVALID_INPUT_ERROR)), } } + +/*************************************************************************************************** + * native fun internal_convert + * Implementation of the Move native function `internal_convert(from_type:u8, to_type: u8, e: &vector): vector` + * gas cost: group_ops_bls12381_g1_from_uncompressed_cost / group_ops_bls12381_g1_from_compressed_cost + **************************************************************************************************/ +pub fn internal_convert( + context: &mut NativeContext, + ty_args: Vec, + mut args: VecDeque, +) -> PartialVMResult { + debug_assert!(ty_args.is_empty()); + debug_assert!(args.len() == 3); + + let cost = context.gas_used(); + + if !(is_uncompressed_g1_supported(context)) { + return Ok(NativeResult::err(cost, NOT_SUPPORTED_ERROR)); + } + + let e_ref = pop_arg!(args, VectorRef); + let e = e_ref.as_bytes_ref(); + let to_type = pop_arg!(args, u8); + let from_type = pop_arg!(args, u8); + + let cost_params = &context + .extensions() + .get::() + .group_ops_cost_params + .clone(); + + let result = match (Groups::from_u8(from_type), Groups::from_u8(to_type)) { + (Some(Groups::BLS12381UncompressedG1), Some(Groups::BLS12381G1)) => { + native_charge_gas_early_exit_option!( + context, + cost_params.bls12381_uncompressed_g1_to_g1_cost + ); + e.to_vec() + .try_into() + .map_err(|_| FastCryptoError::InvalidInput) + .map(bls::G1ElementUncompressed::from_trusted_byte_array) + .and_then(|e| bls::G1Element::try_from(&e)) + .map(|e| e.to_byte_array().to_vec()) + } + 
(Some(Groups::BLS12381G1), Some(Groups::BLS12381UncompressedG1)) => { + native_charge_gas_early_exit_option!( + context, + cost_params.bls12381_g1_to_uncompressed_g1_cost + ); + parse_trusted::(&e) + .map(|e| bls::G1ElementUncompressed::from(&e)) + .map(|e| e.into_byte_array().to_vec()) + } + _ => Err(FastCryptoError::InvalidInput), + }; + + match result { + Ok(bytes) => Ok(NativeResult::ok(cost, smallvec![Value::vector_u8(bytes)])), + // Since all Element are validated on construction, this error should never happen unless the requested type is wrong. + Err(_) => Ok(NativeResult::err(cost, INVALID_INPUT_ERROR)), + } +} + +/*************************************************************************************************** + * native fun internal_sum + * Implementation of the Move native function `internal_sum(type:u8, terms: &vector>): vector` + * gas cost: group_ops_bls12381_g1_sum_of_uncompressed_base_cost + len(terms) * group_ops_bls12381_g1_sum_of_uncompressed_cost_per_term + **************************************************************************************************/ +pub fn internal_sum( + context: &mut NativeContext, + ty_args: Vec, + mut args: VecDeque, +) -> PartialVMResult { + debug_assert!(ty_args.is_empty()); + debug_assert!(args.len() == 2); + + let cost = context.gas_used(); + + if !(is_uncompressed_g1_supported(context)) { + return Ok(NativeResult::err(cost, NOT_SUPPORTED_ERROR)); + } + + let cost_params = &context + .extensions() + .get::() + .group_ops_cost_params + .clone(); + + // The input is a reference to a vector of vector's + let inputs = pop_arg!(args, VectorRef); + let group_type = pop_arg!(args, u8); + + let length = inputs + .len(&Type::Vector(Box::new(Type::U8)))? 
+ .value_as::()?; + + let result = match Groups::from_u8(group_type) { + Some(Groups::BLS12381UncompressedG1) => { + let max_terms = cost_params + .bls12381_uncompressed_g1_sum_max_terms + .ok_or_else(|| { + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message("Max number of terms is not set".to_string()) + })?; + + if length > max_terms { + return Ok(NativeResult::err(cost, INPUT_TOO_LONG_ERROR)); + } + + native_charge_gas_early_exit_option!( + context, + cost_params + .bls12381_uncompressed_g1_sum_base_cost + .and_then(|base| cost_params + .bls12381_uncompressed_g1_sum_cost_per_term + .map(|per_term| base + per_term * length.into())) + ); + + // Read the input vector + (0..length) + .map(|i| { + inputs + .borrow_elem(i as usize, &Type::Vector(Box::new(Type::U8))) + .and_then(Value::value_as::) + .map_err(|_| FastCryptoError::InvalidInput) + .and_then(|v| { + v.as_bytes_ref() + .to_vec() + .try_into() + .map_err(|_| FastCryptoError::InvalidInput) + }) + .map(bls::G1ElementUncompressed::from_trusted_byte_array) + }) + .collect::>>() + .and_then(|e| bls::G1ElementUncompressed::sum(&e)) + .map(|e| bls::G1ElementUncompressed::from(&e)) + .map(|e| e.into_byte_array().to_vec()) + } + _ => Err(FastCryptoError::InvalidInput), + }; + + match result { + Ok(bytes) => Ok(NativeResult::ok(cost, smallvec![Value::vector_u8(bytes)])), + Err(_) => Ok(NativeResult::err(cost, INVALID_INPUT_ERROR)), + } +} diff --git a/sui-execution/latest/sui-move-natives/src/lib.rs b/sui-execution/latest/sui-move-natives/src/lib.rs index e2f50dfe1b86a..193d2f1d51935 100644 --- a/sui-execution/latest/sui-move-natives/src/lib.rs +++ b/sui-execution/latest/sui-move-natives/src/lib.rs @@ -627,6 +627,21 @@ impl NativesCostTable { bls12381_pairing_cost: protocol_config .group_ops_bls12381_pairing_cost_as_option() .map(Into::into), + bls12381_g1_to_uncompressed_g1_cost: protocol_config + .group_ops_bls12381_g1_to_uncompressed_g1_cost_as_option() + .map(Into::into), + 
bls12381_uncompressed_g1_to_g1_cost: protocol_config + .group_ops_bls12381_uncompressed_g1_to_g1_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_base_cost: protocol_config + .group_ops_bls12381_uncompressed_g1_sum_base_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_cost_per_term: protocol_config + .group_ops_bls12381_uncompressed_g1_sum_cost_per_term_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_max_terms: protocol_config + .group_ops_bls12381_uncompressed_g1_sum_max_terms_as_option() + .map(Into::into), }, vdf_cost_params: VDFCostParams { vdf_verify_cost: protocol_config @@ -890,6 +905,16 @@ pub fn all_natives(silent: bool, protocol_config: &ProtocolConfig) -> NativeFunc "internal_pairing", make_native!(group_ops::internal_pairing), ), + ( + "group_ops", + "internal_convert", + make_native!(group_ops::internal_convert), + ), + ( + "group_ops", + "internal_sum", + make_native!(group_ops::internal_sum), + ), ("object", "delete_impl", make_native!(object::delete_impl)), ("object", "borrow_uid", make_native!(object::borrow_uid)), ( diff --git a/sui-execution/v0/sui-adapter/Cargo.toml b/sui-execution/v0/sui-adapter/Cargo.toml index 2fff322014716..aec84a77b2d6d 100644 --- a/sui-execution/v0/sui-adapter/Cargo.toml +++ b/sui-execution/v0/sui-adapter/Cargo.toml @@ -40,9 +40,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/v0/sui-adapter/src/adapter.rs b/sui-execution/v0/sui-adapter/src/adapter.rs index d95c13b547eb3..1874dca8f53e9 100644 --- a/sui-execution/v0/sui-adapter/src/adapter.rs +++ b/sui-execution/v0/sui-adapter/src/adapter.rs @@ -5,7 +5,7 @@ pub use checked::*; 
#[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -45,9 +45,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option, ) -> Result { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs index 1e7f02c9babb2..92667370b5fea 100644 --- a/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs +++ b/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs @@ -199,7 +199,7 @@ mod checked { // Set the profiler if in CLI #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/sui-execution/v1/sui-adapter/Cargo.toml b/sui-execution/v1/sui-adapter/Cargo.toml index 44a1409c61e5e..1ccf3b753f31f 100644 --- a/sui-execution/v1/sui-adapter/Cargo.toml +++ b/sui-execution/v1/sui-adapter/Cargo.toml @@ -39,9 +39,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/v1/sui-adapter/src/adapter.rs b/sui-execution/v1/sui-adapter/src/adapter.rs index 8b72d30244853..68251237d683d 100644 --- a/sui-execution/v1/sui-adapter/src/adapter.rs +++ b/sui-execution/v1/sui-adapter/src/adapter.rs @@ -5,7 +5,7 @@ pub use checked::*; #[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -45,9 +45,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option, ) -> Result { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs index 434d7a68d7fbd..d95bf2ba907db 100644 --- a/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs +++ b/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs @@ -190,7 +190,7 @@ mod checked { // Set the profiler if feature 
is enabled #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/sui-execution/v2/sui-adapter/Cargo.toml b/sui-execution/v2/sui-adapter/Cargo.toml index 3e3c2eb9f79b0..2b1288a29ab72 100644 --- a/sui-execution/v2/sui-adapter/Cargo.toml +++ b/sui-execution/v2/sui-adapter/Cargo.toml @@ -39,9 +39,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/v2/sui-adapter/src/adapter.rs b/sui-execution/v2/sui-adapter/src/adapter.rs index 687494b9d57af..5c89316384f6a 100644 --- a/sui-execution/v2/sui-adapter/src/adapter.rs +++ b/sui-execution/v2/sui-adapter/src/adapter.rs @@ -4,7 +4,7 @@ pub use checked::*; #[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -44,9 +44,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option, ) -> Result { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs index c749d0eb8b078..7d92727099435 100644 --- a/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs +++ 
b/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs @@ -194,7 +194,7 @@ mod checked { // Set the profiler if in CLI #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/turbo.json b/turbo.json index 93d745bb34ec5..4c9570f0756ce 100644 --- a/turbo.json +++ b/turbo.json @@ -4,7 +4,8 @@ "lint": {}, "test": { "dependsOn": ["^build", "build"], - "outputs": ["coverage/**"] + "outputs": ["coverage/**"], + "env": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_KMS_KEY_ID"] }, "dev": { "dependsOn": ["^build"],