From 7ffe93bde01af86f0cc630e73bcb36a3434a7157 Mon Sep 17 00:00:00 2001 From: dharit-tan Date: Sat, 2 Jul 2022 01:36:54 +0000 Subject: [PATCH 01/12] [PAY-385] Added First Playlist Challenge Adds challenge for creating your first playlist and adding a track to it. --- .../test_first_playlist_challenge.py | 72 +++++++++++++++++++ .../src/challenges/challenge_event.py | 1 + .../src/challenges/challenge_event_bus.py | 4 ++ .../src/challenges/challenges.dev.json | 7 ++ .../src/challenges/challenges.json | 7 ++ .../src/challenges/challenges.stage.json | 7 ++ .../challenges/first_playlist_challenge.py | 28 ++++++++ discovery-provider/src/tasks/playlists.py | 12 ++++ 8 files changed, 138 insertions(+) create mode 100644 discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py create mode 100644 discovery-provider/src/challenges/first_playlist_challenge.py diff --git a/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py new file mode 100644 index 00000000000..13abc8deee2 --- /dev/null +++ b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py @@ -0,0 +1,72 @@ +import logging +from datetime import datetime + +import redis +from src.challenges.challenge_event_bus import ChallengeEvent, ChallengeEventBus +from src.challenges.first_playlist_challenge import first_playlist_challenge_manager +from src.models.indexing.block import Block +from src.models.rewards.challenge import Challenge +from src.models.users.user import User +from src.utils.config import shared_config +from src.utils.db_session import get_db + +REDIS_URL = shared_config["redis"]["url"] +BLOCK_NUMBER = 10 +logger = logging.getLogger(__name__) + + +def test_first_playlist_challenge(app): + redis_conn = redis.Redis.from_url(url=REDIS_URL) + + with app.app_context(): + db = get_db() + + block = Block(blockhash="0x1", number=BLOCK_NUMBER) + user = User( + blockhash="0x1", + blocknumber=BLOCK_NUMBER, + txhash="xyz", + user_id=1, + is_current=True, + handle="TestHandle", + handle_lc="testhandle", + wallet="0x1", + is_creator=False, + is_verified=False, + name="test_name", + created_at=datetime.now(), + updated_at=datetime.now(), + ) + + with db.scoped_session() as session: + bus = ChallengeEventBus(redis_conn) + session.query(Challenge).filter(Challenge.id == "first-playlist").update( + {"active": True, "starting_block": BLOCK_NUMBER} + ) + + # Register events with the bus + bus.register_listener( + ChallengeEvent.first_playlist, first_playlist_challenge_manager + ) + + session.add(block) + session.flush() + session.add(user) + session.flush() + + bus.dispatch( + ChallengeEvent.first_playlist, + BLOCK_NUMBER, + 1, # user_id + {}, + ) + + bus.flush() + bus.process_events(session) + session.flush() + + state = first_playlist_challenge_manager.get_user_challenge_state( + session, ["1"] + )[0] + + assert state.is_complete diff --git a/discovery-provider/src/challenges/challenge_event.py b/discovery-provider/src/challenges/challenge_event.py index 8caaac463f4..5e7f1d94f2d 100644 --- a/discovery-provider/src/challenges/challenge_event.py +++ b/discovery-provider/src/challenges/challenge_event.py @@ -19,3 +19,4 @@ class ChallengeEvent(str, enum.Enum): trending_underground = "trending_underground" trending_playlist = "trending_playlist" send_tip = "send_tip" # Fired for sender + first_playlist = "first_playlist" diff --git a/discovery-provider/src/challenges/challenge_event_bus.py 
b/discovery-provider/src/challenges/challenge_event_bus.py index 876ca8d2978..60a2d866d61 100644 --- a/discovery-provider/src/challenges/challenge_event_bus.py +++ b/discovery-provider/src/challenges/challenge_event_bus.py @@ -8,6 +8,7 @@ from src.challenges.challenge import ChallengeManager, EventMetadata from src.challenges.challenge_event import ChallengeEvent from src.challenges.connect_verified_challenge import connect_verified_challenge_manager +from src.challenges.first_playlist_challenge import first_playlist_challenge_manager from src.challenges.listen_streak_challenge import listen_streak_challenge_manager from src.challenges.mobile_install_challenge import mobile_install_challenge_manager from src.challenges.profile_challenge import profile_challenge_manager @@ -235,5 +236,8 @@ def setup_challenge_bus(): ChallengeEvent.trending_playlist, trending_playlist_challenge_manager ) bus.register_listener(ChallengeEvent.send_tip, send_first_tip_challenge_manager) + bus.register_listener( + ChallengeEvent.first_playlist, first_playlist_challenge_manager + ) return bus diff --git a/discovery-provider/src/challenges/challenges.dev.json b/discovery-provider/src/challenges/challenges.dev.json index 8d6952d2cb0..201f7b0bb9e 100644 --- a/discovery-provider/src/challenges/challenges.dev.json +++ b/discovery-provider/src/challenges/challenges.dev.json @@ -55,5 +55,12 @@ "amount": 2, "active": true, "starting_block": 0 + }, + { + "id": "first-playlist", + "type": "boolean", + "amount": 2, + "active": true, + "starting_block": 0 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/challenges.json b/discovery-provider/src/challenges/challenges.json index 3d00b7e82fb..bb57bccee35 100644 --- a/discovery-provider/src/challenges/challenges.json +++ b/discovery-provider/src/challenges/challenges.json @@ -87,5 +87,12 @@ "amount": 2, "active": true, "starting_block": 0 + }, + { + "id": "first-playlist", + "type": "boolean", + "amount": 2, + "active": true, + "starting_block": 0 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/challenges.stage.json b/discovery-provider/src/challenges/challenges.stage.json index 8d6952d2cb0..201f7b0bb9e 100644 --- a/discovery-provider/src/challenges/challenges.stage.json +++ b/discovery-provider/src/challenges/challenges.stage.json @@ -55,5 +55,12 @@ "amount": 2, "active": true, "starting_block": 0 + }, + { + "id": "first-playlist", + "type": "boolean", + "amount": 2, + "active": true, + "starting_block": 0 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/first_playlist_challenge.py b/discovery-provider/src/challenges/first_playlist_challenge.py new file mode 100644 index 00000000000..a06cf943a26 --- /dev/null +++ b/discovery-provider/src/challenges/first_playlist_challenge.py @@ -0,0 +1,28 @@ +from typing import List, Optional + +from sqlalchemy.orm.session import Session +from src.challenges.challenge import ( + ChallengeManager, + ChallengeUpdater, + FullEventMetadata, +) +from src.models.rewards.user_challenge import UserChallenge + + +class FirstPlaylistChallengeUpdater(ChallengeUpdater): + def update_user_challenges( + self, + session: Session, + event: str, + user_challenges: List[UserChallenge], + step_cout: Optional[int], + event_metadatas: List[FullEventMetadata], + starting_block: Optional[int], + ): + for user_challenge in user_challenges: + user_challenge.is_complete = True + + +first_playlist_challenge_manager = ChallengeManager( + "first-playlist", 
FirstPlaylistChallengeUpdater() +) diff --git a/discovery-provider/src/tasks/playlists.py b/discovery-provider/src/tasks/playlists.py index babb899bfea..ea99ac6155e 100644 --- a/discovery-provider/src/tasks/playlists.py +++ b/discovery-provider/src/tasks/playlists.py @@ -3,6 +3,7 @@ from typing import Any, Dict, Set, Tuple from sqlalchemy.orm.session import Session, make_transient +from src.challenges.challenge_event import ChallengeEvent from src.database_task import DatabaseTask from src.models.playlists.playlist import Playlist from src.queries.skipped_transactions import add_node_level_skipped_transaction @@ -36,6 +37,8 @@ def playlist_state_update( # This stores the playlist_ids created or updated in the set of transactions playlist_ids: Set[int] = set() + challenge_bus = update_task.challenge_event_bus + if not playlist_factory_txs: return num_total_changes, playlist_ids @@ -109,6 +112,15 @@ def playlist_state_update( if value_obj["events"]: invalidate_old_playlist(session, playlist_id) session.add(value_obj["playlist"]) + if ( + playlist_event_types_lookup["playlist_track_added"] + in value_obj["events"] + ): + challenge_bus.dispatch( + ChallengeEvent.first_playlist, + value_obj["playlist"].blocknumber, + value_obj["playlist"].playlist_owner_id, + ) return num_total_changes, playlist_ids From 88ad135220f17d7cbb9de34ce8c1ef18236828a8 Mon Sep 17 00:00:00 2001 From: dharit-tan Date: Tue, 5 Jul 2022 14:01:24 +0000 Subject: [PATCH 02/12] [PAY-385] CR Comments --- .../challenges/test_first_playlist_challenge.py | 2 +- discovery-provider/src/challenges/challenges.json | 2 +- discovery-provider/src/challenges/first_playlist_challenge.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py index 13abc8deee2..d9672273b40 100644 --- a/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py +++ b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py @@ -57,7 +57,7 @@ def test_first_playlist_challenge(app): bus.dispatch( ChallengeEvent.first_playlist, BLOCK_NUMBER, - 1, # user_id + user.user_id, {}, ) diff --git a/discovery-provider/src/challenges/challenges.json b/discovery-provider/src/challenges/challenges.json index bb57bccee35..d65733f6d40 100644 --- a/discovery-provider/src/challenges/challenges.json +++ b/discovery-provider/src/challenges/challenges.json @@ -93,6 +93,6 @@ "type": "boolean", "amount": 2, "active": true, - "starting_block": 0 + "starting_block": 25346436 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/first_playlist_challenge.py b/discovery-provider/src/challenges/first_playlist_challenge.py index a06cf943a26..3b5e6b4e3ed 100644 --- a/discovery-provider/src/challenges/first_playlist_challenge.py +++ b/discovery-provider/src/challenges/first_playlist_challenge.py @@ -15,7 +15,7 @@ def update_user_challenges( session: Session, event: str, user_challenges: List[UserChallenge], - step_cout: Optional[int], + step_count: Optional[int], event_metadatas: List[FullEventMetadata], starting_block: Optional[int], ): From 7a32c26bce8461fe7ac502971b45286a03b4ba95 Mon Sep 17 00:00:00 2001 From: Sid Sethi <3323835+SidSethi@users.noreply.github.com> Date: Tue, 5 Jul 2022 11:53:10 -0400 Subject: [PATCH 03/12] CON-38 - Divergent State - PR #3 V2 - Consume filesHash logic in stateMachine (#3310) --- creator-node/package-lock.json | 67 
+- creator-node/package.json | 3 +- ...119-add-index-cnodeuser-multihash-clock.js | 21 + creator-node/src/dbManager.js | 112 ++- creator-node/src/routes/users.js | 179 +++-- .../stateMachineConstants.js | 49 +- .../stateMachineManager/stateMachineUtils.js | 52 +- .../findReplicaSetUpdates.jobProcessor.js | 47 +- .../findSyncRequests.jobProcessor.js | 112 ++- .../monitorState.jobProcessor.js | 31 +- .../stateMonitoring/stateMonitoringUtils.js | 319 +++++++- .../updateReplicaSet.jobProcessor.js | 29 +- creator-node/src/utils.js | 2 +- creator-node/test/dbManager.test.js | 169 +++-- ...findReplicaSetUpdates.jobProcessor.test.js | 27 +- .../findSyncRequests.jobProcessor.test.js | 692 ++++++++++++------ creator-node/test/lib/helpers.js | 13 +- .../test/monitorState.jobProcessor.test.js | 63 +- creator-node/test/pollingTracks.test.js | 53 +- creator-node/test/snapbackSM.test.js | 2 + creator-node/test/stateMachineUtils.test.js | 57 +- .../test/stateMonitoringUtils.test.js | 636 +++++++++++++++- .../updateReplicaSet.jobProcessor.test.js | 37 +- 23 files changed, 2091 insertions(+), 681 deletions(-) create mode 100644 creator-node/sequelize/migrations/20220622152119-add-index-cnodeuser-multihash-clock.js diff --git a/creator-node/package-lock.json b/creator-node/package-lock.json index 46e85aae98e..00f5a04daaa 100644 --- a/creator-node/package-lock.json +++ b/creator-node/package-lock.json @@ -161,6 +161,14 @@ "uri-js": "^4.2.2" } }, + "async-retry": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/async-retry/-/async-retry-1.3.1.tgz", + "integrity": "sha512-aiieFW/7h3hY0Bq5d+ktDBejxuwR78vRu9hDUdR8rNhSaQ29VzPL4AoIRG7D/c7tdenwOcKvgPM6tIxB3cB6HA==", + "requires": { + "retry": "0.12.0" + } + }, "bignumber.js": { "version": "9.0.2", "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.2.tgz", @@ -252,6 +260,11 @@ "http-https": "^1.0.0" } }, + "retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=" + }, "uuid": { "version": "8.3.2", "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", @@ -2217,7 +2230,7 @@ "@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" }, "@protobufjs/base64": { "version": "1.1.2", @@ -2232,12 +2245,12 @@ "@protobufjs/eventemitter": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" }, "@protobufjs/fetch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", "requires": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" @@ -2246,27 +2259,27 @@ "@protobufjs/float": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" }, "@protobufjs/inquire": 
{ "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" }, "@protobufjs/path": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" }, "@protobufjs/pool": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" }, "@protobufjs/utf8": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" }, "@sindresorhus/is": { "version": "0.14.0", @@ -3099,7 +3112,7 @@ "strict-uri-encode": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-2.0.0.tgz", - "integrity": "sha1-ucczDHBChi9rFC3CdLvMWGbONUY=" + "integrity": "sha512-QwiXZgpRcKkhTj2Scnn++4PKtWsH0kpzZ62L2R6c/LUVYv7hVnZqcg2+sMuT6R7Jusu1vviK/MFsu6kNJfWlEQ==" } } }, @@ -3597,11 +3610,11 @@ "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" }, "async-retry": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/async-retry/-/async-retry-1.3.1.tgz", - "integrity": "sha512-aiieFW/7h3hY0Bq5d+ktDBejxuwR78vRu9hDUdR8rNhSaQ29VzPL4AoIRG7D/c7tdenwOcKvgPM6tIxB3cB6HA==", + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/async-retry/-/async-retry-1.3.3.tgz", + "integrity": "sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw==", "requires": { - "retry": "0.12.0" + "retry": "0.13.1" } }, "asynckit": { @@ -3741,7 +3754,7 @@ "babel-plugin-syntax-jsx": { "version": "6.18.0", "resolved": "https://registry.npmjs.org/babel-plugin-syntax-jsx/-/babel-plugin-syntax-jsx-6.18.0.tgz", - "integrity": "sha1-CvMqmm4Tyno/1QaeYtew9Y0NiUY=" + "integrity": "sha512-qrPaCSo9c8RHNRHIotaufGbuOBN8rtdC4QrrFFc43vyWCCz7Kl7GL1PGaXtMGQZUXrkCjNEgxDfmAuAabr/rlw==" }, "balanced-match": { "version": "1.0.2", @@ -3939,7 +3952,7 @@ "bmp-js": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/bmp-js/-/bmp-js-0.1.0.tgz", - "integrity": "sha1-4Fpj95amwf8l9Hcex62twUjAcjM=" + "integrity": "sha512-vHdS19CnY3hwiNdkaqk93DvjVLfbEcI8mys4UjuWrlX1haDmroo8o4xCzh4wD6DGV6HxRCyauwhHRqMTfERtjw==" }, "bn.js": { "version": "4.12.0", @@ -4197,7 +4210,7 @@ "buffer-equal": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-0.0.1.tgz", - "integrity": "sha1-kbx0sR6kBbyRa8aqkI+q+ltKrEs=" + "integrity": "sha512-RgSV6InVQ9ODPdLWJ5UAqBqJBOg370Nz6ZQtRzpt6nUjc8v0St97uJ4PYC6NztqIScrAXafKM3mZPMygSe1ggA==" }, "buffer-layout": { "version": "1.2.2", @@ -4468,7 +4481,7 @@ "camelize": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/camelize/-/camelize-1.0.0.tgz", - "integrity": "sha1-FkpUg+Yw+kMh5a8HAg5TGDGyYJs=" + "integrity": "sha512-W2lPwkBkMZwFlPCXhIlYgxu+7gC/NUlCtdK652DAJ1JdgV0sTrvuPFshNPrFa1TY2JOkLhgdeEBplB4ezEa+xg==" }, "caniuse-lite": { "version": "1.0.30001241", @@ -5168,7 
+5181,7 @@ "css-color-keywords": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/css-color-keywords/-/css-color-keywords-1.0.0.tgz", - "integrity": "sha1-/qJhbcZ2spYmhrOvjb2+GAskTgU=" + "integrity": "sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg==" }, "css-to-react-native": { "version": "3.0.0", @@ -7108,7 +7121,7 @@ "fast-stable-stringify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fast-stable-stringify/-/fast-stable-stringify-1.0.0.tgz", - "integrity": "sha1-XFVDRisiru79NtBbNOUceMuG0xM=" + "integrity": "sha512-wpYMUmFu5f00Sm0cj2pfivpmawLZ0NKdviQ4w9zJeR8JVtOpOxHmLaJuj0vxvGqMJQWyP/COUkF75/57OKyRag==" }, "fastq": { "version": "1.13.0", @@ -11500,7 +11513,7 @@ "qr.js": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/qr.js/-/qr.js-0.0.0.tgz", - "integrity": "sha1-ys6GOG9ZoNuAUPqQ2baw6IoeNk8=" + "integrity": "sha512-c4iYnWb+k2E+vYpRimHqSu575b1/wKl4XFeJGpFmrJQz5I88v9aY2czh7s0w36srfCM1sXgC/xpoJz5dJfq+OQ==" }, "qrcode.react": { "version": "1.0.1", @@ -12043,9 +12056,9 @@ "dev": true }, "retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=" + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==" }, "retry-as-promised": { "version": "2.3.2", @@ -12666,7 +12679,7 @@ "slide": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/slide/-/slide-1.1.6.tgz", - "integrity": "sha1-VusCfWW00tzmyy4tMsTUr8nh1wc=" + "integrity": "sha512-NwrtjCg+lZoqhFU8fOwl4ay2ei8PaqCBOUV3/ektPY9trO1yQ1oXEfmHAhKArUVUr/hOHvy5f6AdP17dCM0zMw==" }, "snake-case": { "version": "3.0.4", @@ -15018,7 +15031,7 @@ "wif": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/wif/-/wif-2.0.6.tgz", - "integrity": "sha1-CNP1IFbGZnkplyb63g1DKudLRwQ=", + "integrity": "sha512-HIanZn1zmduSF+BQhkE+YXIbEiH0xPr1012QbFEGB0xsKqJii0/SqJjyn8dFv6y36kOznMgMB+LGcbZTJ1xACQ==", "requires": { "bs58check": "<3.0.0" } @@ -15134,7 +15147,7 @@ "write-file-atomic": { "version": "1.3.4", "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-1.3.4.tgz", - "integrity": "sha1-+Aek8LHZ6ROuekgRLmzDrxmRtF8=", + "integrity": "sha512-SdrHoC/yVBPpV0Xq/mUZQIpW2sWXAShb/V4pomcJXh92RuaO+f3UTWItiR3Px+pLnV2PvC2/bfn5cwr5X6Vfxw==", "requires": { "graceful-fs": "^4.1.11", "imurmurhash": "^0.1.4", @@ -15220,7 +15233,7 @@ "xmlhttprequest": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/xmlhttprequest/-/xmlhttprequest-1.8.0.tgz", - "integrity": "sha1-Z/4HXFwk/vOfnWX197f+dRcZaPw=" + "integrity": "sha512-58Im/U0mlVBLM38NdZjHyhuMtCqa61469k2YP/AaPbvCoV9aQGUpbJBj1QRm2ytRiVQBD/fsw7L2bJGDVQswBA==" }, "xtend": { "version": "4.0.2", diff --git a/creator-node/package.json b/creator-node/package.json index 9648fe0ca43..d756875b361 100644 --- a/creator-node/package.json +++ b/creator-node/package.json @@ -11,7 +11,7 @@ "test:teardown": "./scripts/run-tests.sh teardown", "test:unit": "./scripts/run-tests.sh unit_test", "test:coverage": "nyc --reporter=lcov --reporter=text npm run test", - "test:coverage:ci": "nyc --reporter=lcov npm run test:ci && nyc report --reporter=text-lcov | coveralls", + "test:coverage:ci": "nyc --reporter=lcov --reporter=text npm run test:ci && nyc report --reporter=text-lcov | coveralls", "lint:fix": "eslint --fix --ext=js,ts src", "lint": "eslint --ext=js,ts src" }, @@ -23,6 
+23,7 @@ "@bull-board/express": "3.11.0", "@solana/web3.js": "1.31.0", "JSONStream": "^1.3.5", + "async-retry": "^1.3.3", "axios": "^0.19.2", "base64-url": "^2.3.3", "bl": "^4.1.0", diff --git a/creator-node/sequelize/migrations/20220622152119-add-index-cnodeuser-multihash-clock.js b/creator-node/sequelize/migrations/20220622152119-add-index-cnodeuser-multihash-clock.js new file mode 100644 index 00000000000..d7c3de50f9f --- /dev/null +++ b/creator-node/sequelize/migrations/20220622152119-add-index-cnodeuser-multihash-clock.js @@ -0,0 +1,21 @@ +'use strict'; + +/** + * Create index on "Files" ("cnodeUserUUID", "multihash", "clock") + * Used to speed up DBManager.fetchFilesHashFromDB() and DBManager.fetchFilesHashesFromDB() + */ + +module.exports = { + up: async (queryInterface, Sequelize) => { + await queryInterface.sequelize.query(` + CREATE INDEX IF NOT EXISTS "Files_cnodeUserUUID_multihash_clock_idx" + ON public."Files" + USING btree + ("cnodeUserUUID", "multihash", "clock") + `) + }, + + down: async (queryInterface, Sequelize) => { + await queryInterface.removeColumn('CNodeUsers', 'filesHash') + } +}; diff --git a/creator-node/src/dbManager.js b/creator-node/src/dbManager.js index 5c85f49a273..ee09f8f452c 100644 --- a/creator-node/src/dbManager.js +++ b/creator-node/src/dbManager.js @@ -56,11 +56,11 @@ class DBManager { /** * Deletes all data for a cnodeUser from DB (every table, including CNodeUsers) * - * @param {Object} CNodeUserLookupObj specifies either `lookupCnodeUserUUID` or `lookupWallet` properties + * @param {Object} CNodeUserLookupObj specifies either `lookupCNodeUserUUID` or `lookupWallet` properties * @param {?Transaction} externalTransaction sequelize transaction object */ static async deleteAllCNodeUserDataFromDB( - { lookupCnodeUserUUID, lookupWallet }, + { lookupCNodeUserUUID, lookupWallet }, externalTransaction = null ) { const transaction = @@ -73,7 +73,7 @@ class DBManager { try { const cnodeUserWhereFilter = lookupWallet ? 
{ walletPublicKey: lookupWallet } - : { cnodeUserUUID: lookupCnodeUserUUID } + : { cnodeUserUUID: lookupCNodeUserUUID } const cnodeUser = await models.CNodeUser.findOne({ where: cnodeUserWhereFilter, transaction @@ -202,66 +202,108 @@ class DBManager { } /** - * Retrieves md5 hash of all File multihashes for user ordered by clock asc, optionally by clock range + * Computes and returns filesHash for user, optionally by clock range + * filesHash = md5 hash of all user's File multihashes, ordered by clock asc * * @param {Object} lookupKey lookup user by either cnodeUserUUID or walletPublicKey * @param {Number?} clockMin if provided, consider only Files with clock >= clockMin (inclusive) * @param {Number?} clockMax if provided, consider only Files with clock < clockMax (exclusive) - * @returns {Number} filesHash + * @returns {string|null} filesHash */ static async fetchFilesHashFromDB({ lookupKey: { lookupCNodeUserUUID, lookupWallet }, clockMin = null, clockMax = null }) { - let subquery = 'select multihash from "Files"' - - if (lookupWallet) { - subquery += ` where "cnodeUserUUID" = ( - select "cnodeUserUUID" from "CNodeUsers" where "walletPublicKey" = :lookupWallet - )` - } else if (lookupCNodeUserUUID) { - subquery += ` where "cnodeUserUUID" = :lookupCNodeUserUUID` + let query = ` + select + md5(string_agg("multihash", ',' order by "clock" asc)) + from "Files" + ` + + if (lookupCNodeUserUUID) { + query += ' where "cnodeUserUUID" = :lookupCNodeUserUUID' + } else if (lookupWallet) { + query += + ' where "cnodeUserUUID" = (select "cnodeUserUUID" from "CNodeUsers" where "walletPublicKey" = :lookupWallet)' } else { - throw new Error( - '[fetchFilesHashFromDB] Error: Must provide lookupCNodeUserUUID or lookupWallet' - ) + throw new Error('Error: Must provide lookupCNodeUserUUID or lookupWallet') } if (clockMin) { clockMin = parseInt(clockMin) // inclusive - subquery += ` and clock >= :clockMin` + query += ` and "clock" >= :clockMin` } if (clockMax) { clockMax = parseInt(clockMax) // exclusive - subquery += ` and clock < :clockMax` + query += ` and "clock" < :clockMax` } - subquery += ` order by "clock" asc` - try { - const filesHashResp = await sequelize.query( - ` - select - md5(cast(array_agg(sorted_hashes.multihash) as text)) - from (${subquery}) as sorted_hashes; - `, - { - replacements: { - lookupWallet, - lookupCNodeUserUUID, - clockMin, - clockMax - } + const filesHashResp = await sequelize.query(query, { + replacements: { + lookupWallet, + lookupCNodeUserUUID, + clockMin, + clockMax } - ) + }) const filesHash = filesHashResp[0][0].md5 return filesHash } catch (e) { - throw new Error(e.message) + throw new Error(`[fetchFilesHashFromDB] ${e.message}`) + } + } + + /** + * Computes and returns filesHashes for all users + * filesHash will be null if user not found or if no files exist for user + * filesHash = md5 hash of all user's File multihashes, ordered by clock asc + * + * Similar to fetchFilesHashFromDB() above, but for multiple users + * Makes single DB query to compute filesHash for all users + * + * @param {Array} cnodeUserUUIDs cnodeUserUUID array + * @returns {Object} filesHashesByUUIDMap = map(cnodeUserUUID => filesHash) + */ + static async fetchFilesHashesFromDB({ cnodeUserUUIDs }) { + try { + // Initialize filesHashesByUUIDMap with null values + const filesHashesByUUIDMap = {} + cnodeUserUUIDs.forEach((cnodeUserUUID) => { + filesHashesByUUIDMap[cnodeUserUUID] = null + }) + if (cnodeUserUUIDs.length === 0) { + return filesHashesByUUIDMap + } + + const query = ` + select + 
"cnodeUserUUID", + md5(string_agg("multihash", ',' order by "clock" asc)) as "filesHash" + from ( + select "cnodeUserUUID", "multihash", "clock" + from "Files" + where "cnodeUserUUID" in (:cnodeUserUUIDs) + ) as subquery + group by "cnodeUserUUID" + ` + // Returns [{ cnodeUserUUID, filesHash }] + const queryResp = await sequelize.query(query, { + replacements: { cnodeUserUUIDs } + }) + + // Populate filesHashesByUUIDMap + queryResp[0].forEach((resp) => { + filesHashesByUUIDMap[resp.cnodeUserUUID] = resp.filesHash + }) + + return filesHashesByUUIDMap + } catch (e) { + throw new Error(`[fetchFilesHashesFromDB] ${e.message}`) } } } diff --git a/creator-node/src/routes/users.js b/creator-node/src/routes/users.js index 787ad71e2d1..1261f109660 100644 --- a/creator-node/src/routes/users.js +++ b/creator-node/src/routes/users.js @@ -173,8 +173,12 @@ module.exports = function (app) { const redisClient = req.app.get('redisClient') const walletPublicKey = req.params.walletPublicKey.toLowerCase() - const returnSkipInfo = !!req.query.returnSkipInfo // default false - const returnFilesHash = !!req.query.returnFilesHash // default false + const returnSkipInfo = req.query.returnSkipInfo === 'true' // default false + const returnFilesHash = req.query.returnFilesHash === 'true' // default false + const filesHashClockRangeMin = + parseInt(req.query.filesHashClockRangeMin) || null + const filesHashClockRangeMax = + parseInt(req.query.filesHashClockRangeMax) || null const response = {} @@ -188,24 +192,29 @@ module.exports = function (app) { response.clockValue = clockValue async function fetchCIDSkipInfoIfRequested() { - if (returnSkipInfo && cnodeUserUUID) { - const countsQuery = ( - await sequelize.query( - ` - select - count(*) as "numCIDs", - count(case when "skipped" = true then 1 else null end) as "numSkippedCIDs" - from "Files" - where "cnodeUserUUID" = :cnodeUserUUID - `, - { replacements: { cnodeUserUUID } } - ) - )[0][0] - - const numCIDs = parseInt(countsQuery.numCIDs) - const numSkippedCIDs = parseInt(countsQuery.numSkippedCIDs) - - response.CIDSkipInfo = { numCIDs, numSkippedCIDs } + if (returnSkipInfo) { + // Set response to default values + response.CIDSkipInfo = { numCIDs: 0, numSkippedCIDs: 0 } + + if (cnodeUserUUID) { + const countsQuery = ( + await sequelize.query( + ` + select + count(*) as "numCIDs", + count(case when "skipped" = true then 1 else null end) as "numSkippedCIDs" + from "Files" + where "cnodeUserUUID" = :cnodeUserUUID + `, + { replacements: { cnodeUserUUID } } + ) + )[0][0] + + const numCIDs = parseInt(countsQuery.numCIDs) + const numSkippedCIDs = parseInt(countsQuery.numSkippedCIDs) + + response.CIDSkipInfo = { numCIDs, numSkippedCIDs } + } } } @@ -225,30 +234,29 @@ module.exports = function (app) { } async function fetchFilesHashIfRequested() { - if (returnFilesHash && cnodeUserUUID) { - const filesHash = await DBManager.fetchFilesHashFromDB({ - lookupKey: { lookupCNodeUserUUID: cnodeUserUUID } - }) - response.filesHash = filesHash - - const filesHashClockRangeMin = - req.query.filesHashClockRangeMin || null - const filesHashClockRangeMax = - req.query.filesHashClockRangeMax || null - + if (returnFilesHash) { + // Set response to default values + response.filesHash = null if (filesHashClockRangeMin || filesHashClockRangeMax) { - const filesHashForClockRange = await DBManager.fetchFilesHashFromDB( - { - lookupKey: { lookupCNodeUserUUID: cnodeUserUUID }, - clockMin: filesHashClockRangeMin, - clockMax: filesHashClockRangeMax - } - ) - response.filesHashForClockRange = 
filesHashForClockRange + response.filesHashForClockRange = null + } + + if (cnodeUserUUID) { + const filesHash = await DBManager.fetchFilesHashFromDB({ + lookupKey: { lookupCNodeUserUUID: cnodeUserUUID } + }) + response.filesHash = filesHash + + if (filesHashClockRangeMin || filesHashClockRangeMax) { + const filesHashForClockRange = + await DBManager.fetchFilesHashFromDB({ + lookupKey: { lookupCNodeUserUUID: cnodeUserUUID }, + clockMin: filesHashClockRangeMin, + clockMax: filesHashClockRangeMax + }) + response.filesHashForClockRange = filesHashForClockRange + } } - } - if (returnFilesHash && !cnodeUserUUID) { - response.filesHash = null } } @@ -263,12 +271,20 @@ module.exports = function (app) { ) /** - * Returns latest clock value stored in CNodeUsers entry given wallet, or -1 if no entry found + * Returns latest clock value for CNodeUser, or -1 if no entry found + * Optionally returns user filesHash, or null if no files found */ app.post( '/users/batch_clock_status', handleResponse(async (req, res) => { - const { walletPublicKeys } = req.body + const { walletPublicKeys /* [walletPublicKey] */ } = req.body + + if (walletPublicKeys == null) { + return errorResponseBadRequest( + 'Must provide valid walletPublicKeys field in request body' + ) + } + const walletPublicKeysSet = new Set(walletPublicKeys) // Enforce max # of wallets to prevent high db query time @@ -279,8 +295,22 @@ module.exports = function (app) { ) } - const returnFilesHash = !!req.query.returnFilesHash // default false + const returnFilesHash = req.query.returnFilesHash === 'true' // default false + + // Initialize users response object with default values + const users = {} + walletPublicKeys.forEach((wallet) => { + const user = { + walletPublicKey: wallet, + clock: -1 + } + if (returnFilesHash) { + user.filesHash = null + } + users[wallet] = user + }) + // Fetch all cnodeUsers for wallets const cnodeUsers = await models.CNodeUser.findAll({ where: { walletPublicKey: { @@ -289,40 +319,39 @@ module.exports = function (app) { } }) - const users = await Promise.all( - cnodeUsers.map(async (cnodeUser) => { - walletPublicKeysSet.delete(cnodeUser.walletPublicKey) - - const user = { - walletPublicKey: cnodeUser.walletPublicKey, - clock: cnodeUser.clock - } - - if (returnFilesHash) { - const filesHash = await DBManager.fetchFilesHashFromDB({ - lookupKey: { lookupCNodeUserUUID: cnodeUser.cnodeUserUUID } - }) - user.filesHash = filesHash - } + // Populate users response object with cnodeUsers data + cnodeUsers.forEach(({ walletPublicKey, clock }) => { + users[walletPublicKey].walletPublicKey = walletPublicKey + users[walletPublicKey].clock = clock + }) - return user + // Fetch filesHashes if requested + if (returnFilesHash && cnodeUsers.length > 0) { + // Fetch filesHashes + const cnodeUserUUIDs = cnodeUsers.map( + (cnodeUser) => cnodeUser.cnodeUserUUID + ) + const filesHashesByCNodeUserUUID = + await DBManager.fetchFilesHashesFromDB({ cnodeUserUUIDs }) + + // Populate users response object with filesHash data + const cnodeUserUUIDToWalletMap = {} + cnodeUsers.forEach((cnodeUser) => { + cnodeUserUUIDToWalletMap[cnodeUser.cnodeUserUUID] = + cnodeUser.walletPublicKey }) - ) + Object.entries(filesHashesByCNodeUserUUID).forEach( + ([cnodeUserUUID, filesHash]) => { + const wallet = cnodeUserUUIDToWalletMap[cnodeUserUUID] + users[wallet].filesHash = filesHash + } + ) + } - // Set default values for remaining users - const remainingWalletPublicKeys = Array.from(walletPublicKeysSet) - remainingWalletPublicKeys.forEach((wallet) => { - const 
user = { - walletPublicKey: wallet, - clock: -1 - } - if (returnFilesHash) { - user.filesHash = null - } - users.push(user) - }) + // Convert response object from map(wallet => { info }) to [{ info }] + const usersResp = Object.values(users) - return successResponse({ users }) + return successResponse({ users: usersResp }) }) ) } diff --git a/creator-node/src/services/stateMachineManager/stateMachineConstants.js b/creator-node/src/services/stateMachineManager/stateMachineConstants.js index 7ed282b4f23..7f1ac572f24 100644 --- a/creator-node/src/services/stateMachineManager/stateMachineConstants.js +++ b/creator-node/src/services/stateMachineManager/stateMachineConstants.js @@ -1,32 +1,52 @@ module.exports = { + // Max number of completed/failed jobs to keep in redis for the state monitoring queue + MONITORING_QUEUE_HISTORY: 20, + + // Max number of completed/failed jobs to keep in redis for the state monitoring queue + RECONCILIATION_QUEUE_HISTORY: 300, + // Max millis to run a fetch cNodeEndpoint->spId mapping job for before marking it as stalled (1 minute) C_NODE_ENDPOINT_TO_SP_ID_MAP_QUEUE_MAX_JOB_RUNTIME_MS: 1000 * 60, + // Max millis to run a StateMonitoringQueue job for before marking it as stalled (1 hour) STATE_MONITORING_QUEUE_MAX_JOB_RUNTIME_MS: 1000 * 60 * 60, + // Millis to delay starting the first job in the StateMonitoringQueue (30 seconds) STATE_MONITORING_QUEUE_INIT_DELAY_MS: 1000 * 30, + // Max millis to run a StateReconciliationQueue job for before marking it as stalled (1 hour) STATE_RECONCILIATION_QUEUE_MAX_JOB_RUNTIME_MS: 1000 * 60 * 60, + // Millis to timeout request for getting users who have a node as their primary/secondary (60 seconds) GET_NODE_USERS_TIMEOUT_MS: 1000 * 60, + // Millis to forcibly cancel getNodeUsers request if axios timeout doesn't work (70 seconds) GET_NODE_USERS_CANCEL_TOKEN_MS: 1000 * 70, + // Max number of users to fetch if no maximum is given GET_NODE_USERS_DEFAULT_PAGE_SIZE: 100_000, + // Timeout for fetching a clock value for a single user (2 seconds) CLOCK_STATUS_REQUEST_TIMEOUT_MS: 2000, + // Timeout for fetching batch clock values (10 seconds) BATCH_CLOCK_STATUS_REQUEST_TIMEOUT: 1000 * 10, + // Max number of attempts to fetch clock statuses from /users/batch_clock_status MAX_USER_BATCH_CLOCK_FETCH_RETRIES: 5, + // Number of users to process in each batch when calculating reconfigs FIND_REPLICA_SET_UPDATES_BATCH_SIZE: 500, + // Number of users to process in each batch when calculating reconfigs and syncs AGGREGATE_RECONFIG_AND_POTENTIAL_SYNC_OPS_BATCH_SIZE: 500, + // Retry delay (in millis) between requests while monitoring a sync SYNC_MONITORING_RETRY_DELAY_MS: 15_000, + // Max number of attempts to select new replica set in reconfig MAX_SELECT_NEW_REPLICA_SET_ATTEMPTS: 5, + QUEUE_HISTORY: Object.freeze({ // Max number of completed/failed jobs to keep in redis for the state monitoring queue STATE_MONITORING: 20, @@ -35,6 +55,7 @@ module.exports = { // Max number of completed/failed jobs to keep in redis for the state monitoring queue STATE_RECONCILIATION: 300 }), + QUEUE_NAMES: Object.freeze({ // Name of StateMonitoringQueue STATE_MONITORING: 'state-monitoring-queue', @@ -43,6 +64,7 @@ module.exports = { // Name of StateReconciliationQueue STATE_RECONCILIATION: 'state-reconciliation-queue' }), + JOB_NAMES: Object.freeze({ // Name of job in monitoring queue that takes a slice of users and gathers data for them MONITOR_STATE: 'monitor-state', @@ -59,10 +81,12 @@ module.exports = { // Name of job in reconciliation queue that executes a 
reconfiguration of a user's replica set when it's unhealthy UPDATE_REPLICA_SET: 'update-replica-set' }), - // Modes used in issuing a reconfig. Each successive mode is a superset of the mode prior. - // The `key` of the reconfig states is used to identify the current reconfig mode. - // The `value` of the reconfig states is used in the superset logic of determining which type of - // reconfig is enabled. + + /** + * Modes used in issuing a reconfig. Each successive mode is a superset of the mode prior. + * The `key` of the reconfig states is used to identify the current reconfig mode. + * The `value` of the reconfig states is used in the superset logic of determining which type of reconfig is enabled. + */ RECONFIG_MODES: Object.freeze({ // Reconfiguration is entirely disabled RECONFIG_DISABLED: { @@ -92,9 +116,24 @@ module.exports = { value: 4 } }), + // Describes the type of sync operation SyncType: Object.freeze({ Recurring: 'RECURRING', // Scheduled background sync to keep secondaries up to date Manual: 'MANUAL' // Triggered by a user data write to primary - }) + }), + + // Sync mode for a (primary, secondary) pair for a user + SYNC_MODES: Object.freeze({ + // Replicas already in sync - no further sync needed + None: 'NONE', + + // Base case - secondary should sync its local state to primary's state + SyncSecondaryFromPrimary: 'SYNC_SECONDARY_FROM_PRIMARY', + + // Edge case - secondary has state that primary needs: primary should merge its local state with secondary's state, and have secondary re-sync its entire local state + MergePrimaryAndSecondary: 'MERGE_PRIMARY_AND_SECONDARY' + }), + + FETCH_FILES_HASH_NUM_RETRIES: 3 } diff --git a/creator-node/src/services/stateMachineManager/stateMachineUtils.js b/creator-node/src/services/stateMachineManager/stateMachineUtils.js index 136fcd67523..6b4eb268533 100644 --- a/creator-node/src/services/stateMachineManager/stateMachineUtils.js +++ b/creator-node/src/services/stateMachineManager/stateMachineUtils.js @@ -4,7 +4,6 @@ const axios = require('axios') const retry = require('async-retry') const { - MetricTypes, MetricNames, MetricLabels } = require('../../services/prometheusMonitoring/prometheus.constants') @@ -23,29 +22,30 @@ const MAX_BATCH_CLOCK_STATUS_BATCH_SIZE = config.get( const DELEGATE_PRIVATE_KEY = config.get('delegatePrivateKey') /** - * Given map(replica node => userWallets[]), retrieves clock values for every (node, userWallet) pair. - * Also returns a set of any nodes that were unhealthy when queried for clock values. 
- * @param {Object} replicasToWalletsMap map of + * Given map(replica set node => userWallets[]), retrieves user info for every (node, userWallet) pair + * Also updates unhealthyPeers param with nodes that were unhealthy when queried * - * @returns {Object} { replicasToUserClockStatusMap: map(replica node => map(wallet => clockValue)), unhealthyPeers: Set } + * @param {Object} replicaSetNodesToUserWalletsMap map of + * + * @returns {Object} response + * @returns {Object} response.replicaToUserInfoMap map(replica => map(wallet => { clock, filesHash })) + * @returns {Set} response.unhealthyPeers unhealthy peer endpoints */ -const retrieveClockStatusesForUsersAcrossReplicaSet = async ( - replicasToWalletsMap -) => { - const replicasToUserClockStatusMap = {} +const retrieveUserInfoFromReplicaSet = async (replicaToWalletMap) => { + const replicaToUserInfoMap = {} const unhealthyPeers = new Set() const spID = config.get('spID') /** In parallel for every replica, fetch clock status for all users on that replica */ - const replicas = Object.keys(replicasToWalletsMap) + const replicas = Object.keys(replicaToWalletMap) await Promise.all( replicas.map(async (replica) => { - replicasToUserClockStatusMap[replica] = {} + replicaToUserInfoMap[replica] = {} - const walletsOnReplica = replicasToWalletsMap[replica] + const walletsOnReplica = replicaToWalletMap[replica] - // Make requests in batches, sequentially, to ensure POST request body does not exceed max size + // Make requests in batches, sequentially, since this is an expensive query for ( let i = 0; i < walletsOnReplica.length; @@ -58,13 +58,13 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async ( const axiosReqParams = { baseURL: replica, - url: '/users/batch_clock_status', + url: '/users/batch_clock_status?returnFilesHash=true', method: 'post', data: { walletPublicKeys: walletsOnReplicaSlice }, timeout: BATCH_CLOCK_STATUS_REQUEST_TIMEOUT } - // Sign request to other CN to bypass rate limiting + // Generate and attach SP signature to bypass route rate limits const { timestamp, signature } = generateTimestampAndSignature( { spID: spID }, DELEGATE_PRIVATE_KEY @@ -83,25 +83,31 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async ( errorMsg = e } - // If failed to get response after all attempts, add replica to `unhealthyPeers` set for reconfig + // If failed to get response after all attempts, add replica to `unhealthyPeers` list for reconfig if (errorMsg) { logger.error( - `retrieveClockStatusesForUsersAcrossReplicaSet() Could not fetch clock values for wallets=${walletsOnReplica} on replica=${replica} ${errorMsg.toString()}` + `[retrieveUserInfoFromReplicaSet] Could not fetch clock values from replica ${replica}: ${errorMsg.toString()}` ) unhealthyPeers.add(replica) } - // Add batch response data to aggregate output map - batchClockStatusResp.forEach((userClockValueResp) => { - const { walletPublicKey, clock } = userClockValueResp - replicasToUserClockStatusMap[replica][walletPublicKey] = clock + // Add response data to output aggregate map + batchClockStatusResp.forEach((clockStatusResp) => { + /** + * @notice `filesHash` will be null if node has no files for user. 
This can happen even if clock > 0 if user has AudiusUser or Track table records without any File table records + */ + const { walletPublicKey, clock, filesHash } = clockStatusResp + replicaToUserInfoMap[replica][walletPublicKey] = { + clock, + filesHash + } }) } }) ) return { - replicasToUserClockStatusMap, + replicaToUserInfoMap, unhealthyPeers } } @@ -168,7 +174,7 @@ const makeHistogramToRecord = (metricName, metricValue, metricLabels = {}) => { } module.exports = { - retrieveClockStatusesForUsersAcrossReplicaSet, retrieveClockValueForUserFromReplica, + retrieveUserInfoFromReplicaSet, makeHistogramToRecord } diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/findReplicaSetUpdates.jobProcessor.js b/creator-node/src/services/stateMachineManager/stateMonitoring/findReplicaSetUpdates.jobProcessor.js index 5574ad5a8d3..d92968bd62a 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/findReplicaSetUpdates.jobProcessor.js +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/findReplicaSetUpdates.jobProcessor.js @@ -24,21 +24,21 @@ const minFailedSyncRequestsBeforeReconfig = config.get( * @param {Object} param.logger a logger that can be filtered by jobName and jobId * @param {Object[]} param.users array of { primary, secondary1, secondary2, primarySpID, secondary1SpID, secondary2SpID, user_id, wallet } * @param {Set} param.unhealthyPeers set of unhealthy peers - * @param {Object} param.replicaSetNodesToUserClockStatusesMap map of secondary endpoint strings to (map of user wallet strings to clock value of secondary for user) + * @param {Object} param.replicaToUserInfoMap map(secondary endpoint => map(user wallet => { clock, filesHash })) * @param {string (wallet): Object{ string (secondary endpoint): Object{ successRate: number (0-1), successCount: number, failureCount: number }}} param.userSecondarySyncMetricsMap mapping of nodeUser's wallet (string) to metrics for their sync success to secondaries */ module.exports = async function ({ logger, users, unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap }) { _validateJobData( logger, users, unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap ) @@ -106,11 +106,10 @@ module.exports = async function ({ secondary1: updateReplicaSetOp.secondary1, secondary2: updateReplicaSetOp.secondary2, unhealthyReplicas: Array.from(updateReplicaSetOp.unhealthyReplicas), - replicaSetNodesToUserClockStatusesMap: - _transformAndFilterNodeToClockValuesMapping( - replicaSetNodesToUserClockStatusesMap, - wallet - ) + replicaToUserInfoMap: _transformAndFilterReplicaToUserInfoMap( + replicaToUserInfoMap, + wallet + ) } }) } @@ -130,7 +129,7 @@ const _validateJobData = ( logger, users, unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap ) => { if (typeof logger !== 'object') { @@ -149,11 +148,11 @@ const _validateJobData = ( ) } if ( - typeof replicaSetNodesToUserClockStatusesMap !== 'object' || - replicaSetNodesToUserClockStatusesMap instanceof Array + typeof replicaToUserInfoMap !== 'object' || + replicaToUserInfoMap instanceof Array ) { throw new Error( - `Invalid type ("${typeof replicaSetNodesToUserClockStatusesMap}") or value ("${replicaSetNodesToUserClockStatusesMap}") of replicaSetNodesToUserClockStatusesMap` + `Invalid type ("${typeof replicaToUserInfoMap}") or value ("${replicaToUserInfoMap}") of replicaToUserInfoMap` ) } if ( @@ 
-312,23 +311,25 @@ const _findReplicaSetUpdatesForUser = async ( } /** - * Transforms data type from ((K1,V1) => (K2,V2)) to (K1,V1[K2]), where K1 is the node endpoint, - * V1 is the mapping, and K2 is the wallet to filter by. Also filters out nodes that don't have a value for the wallet. - * @param {Object} replicaSetNodesToUserClockStatusesMap map of secondary endpoint strings to (map of user wallet strings to clock value of secondary for user) - * @param {string} wallet the wallet to filter clock values for (other wallets will be excluded from the output) - * @returns mapping of node endpoint (string) to clock value (number) on that node for the given wallet + * Filters input map to only user info for provided wallet, also filtering out nodes that have no clock value for provided wallet + * @param {Object} replicaToUserInfoMap map(secondary endpoint => map(user wallet => { clock, filesHash })) + * @param {string} wallet the wallet to filter for (other wallets will be excluded from the output) + * @returns map(replica (string) => { clock (number), filesHash (string) } ) mapping of node endpoint to user info on that node for the given wallet */ -const _transformAndFilterNodeToClockValuesMapping = ( - replicaSetNodesToUserClockStatusesMap, +const _transformAndFilterReplicaToUserInfoMap = ( + replicaToUserInfoMap, wallet ) => { return Object.fromEntries( - Object.entries(replicaSetNodesToUserClockStatusesMap) - .map(([node, clockValueMapping]) => [ + Object.entries(replicaToUserInfoMap) // [[replica, map(wallet => { clock, filesHash })]] + .map(([node, userInfoMap]) => [ node, - clockValueMapping[wallet] || -1 + { + ...userInfoMap[wallet], + clock: userInfoMap[wallet]?.clock || -1 // default clock to -1 where not present + } ]) // Only include nodes that have clock values -- this means only the nodes in the user's replica set - .filter(([, clockValue]) => clockValue !== -1) + .filter(([, userInfoMap]) => userInfoMap.clock !== -1) ) } diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.js b/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.js index 3d6e1f34e05..e52bc632d4f 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.js +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.js @@ -2,10 +2,15 @@ const _ = require('lodash') const config = require('../../../config') const CNodeToSpIdMapManager = require('../CNodeToSpIdMapManager') -const { SyncType, QUEUE_NAMES } = require('../stateMachineConstants') +const { + SyncType, + QUEUE_NAMES, + SYNC_MODES +} = require('../stateMachineConstants') const { getNewOrExistingSyncReq } = require('../stateReconciliation/stateReconciliationUtils') +const { computeSyncModeForUserAndReplica } = require('./stateMonitoringUtils') const thisContentNodeEndpoint = config.get('creatorNodeEndpoint') const minSecondaryUserSyncSuccessPercent = @@ -21,21 +26,21 @@ const minFailedSyncRequestsBeforeReconfig = config.get( * @param {Object} param.logger the logger that can be filtered by jobName and jobId * @param {Object[]} param.users array of { primary, secondary1, secondary2, primarySpID, secondary1SpID, secondary2SpID, user_id, wallet} * @param {string[]} param.unhealthyPeers array of unhealthy peers - * @param {Object} param.replicaSetNodesToUserClockStatusesMap map of secondary endpoint strings to (map of user wallet strings to clock value of secondary for user) + * @param 
{Object} param.replicaToUserInfoMap map(secondary endpoint => map(user wallet => { clock, filesHash })) * @param {string (secondary endpoint): Object{ successRate: number (0-1), successCount: number, failureCount: number }} param.userSecondarySyncMetricsMap mapping of each secondary to the success metrics the nodeUser has had syncing to it */ -module.exports = function ({ - logger, +module.exports = async function ({ users, unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap + replicaToUserInfoMap, + userSecondarySyncMetricsMap, + logger }) { _validateJobData( logger, users, unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap ) @@ -46,22 +51,27 @@ module.exports = function ({ let duplicateSyncReqs = [] let errors = [] for (const user of users) { + const userSecondarySyncMetrics = userSecondarySyncMetricsMap[ + user.wallet + ] || { + [user.secondary1]: { successRate: 1, failureCount: 0 }, + [user.secondary2]: { successRate: 1, failureCount: 0 } + } + const { syncReqsToEnqueue: userSyncReqsToEnqueue, duplicateSyncReqs: userDuplicateSyncReqs, errors: userErrors - } = _findSyncsForUser( + } = await _findSyncsForUser( user, - thisContentNodeEndpoint, unhealthyPeersSet, - userSecondarySyncMetricsMap[user.wallet] || { - [user.secondary1]: { successRate: 1, failureCount: 0 }, - [user.secondary2]: { successRate: 1, failureCount: 0 } - }, + userSecondarySyncMetrics, minSecondaryUserSyncSuccessPercent, minFailedSyncRequestsBeforeReconfig, - replicaSetNodesToUserClockStatusesMap + replicaToUserInfoMap, + logger ) + if (userSyncReqsToEnqueue?.length) { syncReqsToEnqueue = syncReqsToEnqueue.concat(userSyncReqsToEnqueue) } else if (userDuplicateSyncReqs?.length) { @@ -84,7 +94,7 @@ const _validateJobData = ( logger, users, unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap ) => { if (typeof logger !== 'object') { @@ -103,11 +113,11 @@ const _validateJobData = ( ) } if ( - typeof replicaSetNodesToUserClockStatusesMap !== 'object' || - replicaSetNodesToUserClockStatusesMap instanceof Array + typeof replicaToUserInfoMap !== 'object' || + replicaToUserInfoMap instanceof Array ) { throw new Error( - `Invalid type ("${typeof replicaSetNodesToUserClockStatusesMap}") or value ("${replicaSetNodesToUserClockStatusesMap}") of replicaSetNodesToUserClockStatusesMap` + `Invalid type ("${typeof replicaToUserInfoMap}") or value ("${replicaToUserInfoMap}") of replicaToUserInfoMap` ) } if ( @@ -124,31 +134,38 @@ const _validateJobData = ( * Determines which sync requests should be sent for a given user to any of their secondaries. 
* * @param {Object} user { primary, secondary1, secondary2, primarySpID, secondary1SpID, secondary2SpID, user_id, wallet} - * @param {string} thisContentNodeEndpoint URL or IP address of this Content Node * @param {Set} unhealthyPeers set of unhealthy peers * @param {string (secondary endpoint): Object{ successRate: number (0-1), successCount: number, failureCount: number }} userSecondarySyncMetricsMap mapping of each secondary to the success metrics the user has had syncing to it * @param {number} minSecondaryUserSyncSuccessPercent 0-1 minimum sync success rate a secondary must have to perform a sync to it * @param {number} minFailedSyncRequestsBeforeReconfig minimum number of failed sync requests to a secondary before the user's replica set gets updated to not include the secondary - * @param {Object} replicaSetNodesToUserClockStatusesMap map of secondary endpoint strings to (map of user wallet strings to clock value of secondary for user) + * @param {Object} replicaToUserInfoMap map(secondary endpoint => map(user wallet => { clock value, filesHash })) */ -const _findSyncsForUser = ( +const _findSyncsForUser = async ( user, - thisContentNodeEndpoint, unhealthyPeers, userSecondarySyncMetricsMap, minSecondaryUserSyncSuccessPercent, minFailedSyncRequestsBeforeReconfig, - replicaSetNodesToUserClockStatusesMap + replicaToUserInfoMap, + logger ) => { const syncReqsToEnqueue = [] const duplicateSyncReqs = [] const errors = [] - const { primary, secondary1, secondary2, secondary1SpID, secondary2SpID } = - user + const { + wallet, + primary, + secondary1, + secondary2, + secondary1SpID, + secondary2SpID + } = user // Only sync from this node to other nodes if this node is the user's primary - if (primary !== thisContentNodeEndpoint) return {} + if (primary !== thisContentNodeEndpoint) { + return {} + } const replicaSetNodesToObserve = [ { endpoint: secondary1, spId: secondary1SpID }, @@ -185,22 +202,37 @@ const _findSyncsForUser = ( continue } - // Determine if secondary requires a sync by comparing clock values against primary (this node) - const { wallet } = user - const userPrimaryClockVal = - replicaSetNodesToUserClockStatusesMap[primary][wallet] - const userSecondaryClockVal = - replicaSetNodesToUserClockStatusesMap[secondary][wallet] + // Determine if secondary requires a sync by comparing its user data against primary (this node) + const { clock: primaryClock, filesHash: primaryFilesHash } = + replicaToUserInfoMap[primary][wallet] + const { clock: secondaryClock, filesHash: secondaryFilesHash } = + replicaToUserInfoMap[secondary][wallet] + + let syncMode + try { + syncMode = await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + } catch (e) { + errors.push( + `Error computing sync mode for user ${wallet} and secondary ${secondary} - ${e.message}` + ) + continue + } - // Secondary is healthy and has lower clock value, so we want to sync the user's data to it from this primary - if (userPrimaryClockVal > userSecondaryClockVal) { + if (syncMode === SYNC_MODES.SyncSecondaryFromPrimary) { try { const { duplicateSyncReq, syncReqToEnqueue } = getNewOrExistingSyncReq({ userWallet: wallet, - secondaryEndpoint: secondary, primaryEndpoint: thisContentNodeEndpoint, + secondaryEndpoint: secondary, syncType: SyncType.Recurring }) + if (!_.isEmpty(syncReqToEnqueue)) { syncReqsToEnqueue.push(syncReqToEnqueue) } else if (!_.isEmpty(duplicateSyncReq)) { @@ -210,7 +242,17 @@ const _findSyncsForUser = ( errors.push( `Error getting 
new or existing sync request for user ${wallet} and secondary ${secondary} - ${e.message}` ) + continue } + } else if (syncMode === SYNC_MODES.MergePrimaryAndSecondary) { + /** + * TODO - currently just logs as placeholder + * 1. Primary will sync all content from secondary + * 2. Primary will force secondary to wipe its local state and resync all content + */ + logger.info( + `[findSyncRequests][_findSyncsForUser][MergePrimaryAndSecondary = true][SyncType = ${SyncType.Recurring}] wallet ${wallet} secondary ${secondary} Clocks: [${primaryClock},${secondaryClock}] Files hashes: [${primaryFilesHash},${secondaryFilesHash}]` + ) } } diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js b/creator-node/src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js index 361f86079f5..8fe80be6a5f 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js @@ -5,9 +5,7 @@ const { buildReplicaSetNodesToUserWalletsMap, computeUserSecondarySyncSuccessRatesMap } = require('./stateMonitoringUtils') -const { - retrieveClockStatusesForUsersAcrossReplicaSet -} = require('../stateMachineUtils') +const { retrieveUserInfoFromReplicaSet } = require('../stateMachineUtils') const { QUEUE_NAMES, JOB_NAMES } = require('../stateMachineConstants') // Number of users to process each time monitor-state job processor is called @@ -47,7 +45,7 @@ module.exports = async function ({ let users = [] let unhealthyPeers = new Set() - let replicaSetNodesToUserClockStatusesMap = {} + let replicaToUserInfoMap = {} let userSecondarySyncMetricsMap = {} try { try { @@ -103,36 +101,33 @@ module.exports = async function ({ } ) - // Retrieve clock statuses for all users and their current replica sets + // Retrieve user info for all users and their current replica sets try { - // Set mapping of replica endpoint to (mapping of wallet to clock value) - const clockStatusResp = - await retrieveClockStatusesForUsersAcrossReplicaSet( - replicaSetNodesToUserWalletsMap - ) - replicaSetNodesToUserClockStatusesMap = - clockStatusResp.replicasToUserClockStatusMap + const retrieveUserInfoResp = await retrieveUserInfoFromReplicaSet( + replicaSetNodesToUserWalletsMap + ) + replicaToUserInfoMap = retrieveUserInfoResp.replicaToUserInfoMap // Mark peers as unhealthy if they were healthy before but failed to return a clock value unhealthyPeers = new Set([ ...unhealthyPeers, - ...clockStatusResp.unhealthyPeers + ...retrieveUserInfoResp.unhealthyPeers ]) _addToDecisionTree( decisionTree, - 'retrieveClockStatusesForUsersAcrossReplicaSet Success', + 'retrieveUserInfoFromReplicaSet Success', logger ) } catch (e) { _addToDecisionTree( decisionTree, - 'retrieveClockStatusesForUsersAcrossReplicaSet Error', + 'retrieveUserInfoFromReplicaSet Error', logger, { error: e.message } ) throw new Error( - 'monitor-state job processor retrieveClockStatusesForUsersAcrossReplicaSet Error' + 'monitor-state job processor retrieveUserInfoFromReplicaSet Error' ) } @@ -184,7 +179,7 @@ module.exports = async function ({ jobData: { users, unhealthyPeers: Array.from(unhealthyPeers), // Bull messes up passing a Set - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap } }, @@ -193,7 +188,7 @@ module.exports = async function ({ jobData: { users, unhealthyPeers: Array.from(unhealthyPeers), // Bull messes up passing a Set - 
replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap } }, diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils.js b/creator-node/src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils.js index a9e6e300d38..5a32b6e7c46 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils.js +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils.js @@ -1,17 +1,23 @@ const _ = require('lodash') const axios = require('axios') const { CancelToken } = axios +const retry = require('async-retry') const config = require('../../../config') const Utils = require('../../../utils') -const { isPrimaryHealthy } = require('../CNodeHealthManager') const { logger } = require('../../../logging') + +const { isPrimaryHealthy } = require('../CNodeHealthManager') const SecondarySyncHealthTracker = require('../../../snapbackSM/secondarySyncHealthTracker') +const DBManager = require('../../../dbManager') + const { AGGREGATE_RECONFIG_AND_POTENTIAL_SYNC_OPS_BATCH_SIZE, GET_NODE_USERS_TIMEOUT_MS, GET_NODE_USERS_CANCEL_TOKEN_MS, - GET_NODE_USERS_DEFAULT_PAGE_SIZE + GET_NODE_USERS_DEFAULT_PAGE_SIZE, + SYNC_MODES, + FETCH_FILES_HASH_NUM_RETRIES } = require('../stateMachineConstants') const MIN_FAILED_SYNC_REQUESTS_BEFORE_RECONFIG = config.get( @@ -181,9 +187,316 @@ const computeUserSecondarySyncSuccessRatesMap = async (nodeUsers) => { return userSecondarySyncMetricsMap } +/** + * For every node user, record sync requests to issue to secondaries if this node is primary + * and record replica set updates to issue for any unhealthy replicas + * + * @param {Object} nodeUser { primary, secondary1, secondary2, primarySpID, secondary1SpID, secondary2SpID, user_id, wallet } + * @param {Set} unhealthyPeers set of unhealthy peers + * @param {string (wallet): Object{ string (secondary endpoint): Object{ successRate: number (0-1), successCount: number, failureCount: number }}} userSecondarySyncMetricsMap mapping of nodeUser's wallet (string) to metrics for their sync success to secondaries + * @param {Object} endpointToSPIdMap + * @returns + * { + * requiredUpdateReplicaSetOps: {Object[]} array of {...nodeUsers, unhealthyReplicas: {string[]} endpoints of unhealthy rset nodes } + * potentialSyncRequests: {Object[]} array of {...nodeUsers, endpoint: {string} endpoint to sync to } + * } + * @notice this will issue sync to healthy secondary and update replica set away from unhealthy secondary + */ +const aggregateReconfigAndPotentialSyncOps = async ( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint +) => { + // Parallelize calling _aggregateOps on chunks of 500 nodeUsers at a time + const nodeUserBatches = _.chunk( + nodeUsers, + AGGREGATE_RECONFIG_AND_POTENTIAL_SYNC_OPS_BATCH_SIZE + ) + const results = [] + for (const nodeUserBatch of nodeUserBatches) { + const resultBatch = await Promise.allSettled( + nodeUserBatch.map((nodeUser) => + _aggregateOps( + nodeUser, + unhealthyPeers, + userSecondarySyncMetricsMap[nodeUser.wallet], + endpointToSPIdMap, + thisContentNodeEndpoint + ) + ) + ) + results.push(...resultBatch) + } + + // Combine each batch's requiredUpdateReplicaSetOps and potentialSyncRequests + let requiredUpdateReplicaSetOps = [] + let potentialSyncRequests = [] + for (const promiseResult of results) { + // Skip and log failed promises + const { + status: promiseStatus, + value: reconfigAndSyncOps, + reason: 
promiseError + } = promiseResult + if (promiseStatus !== 'fulfilled') { + logger.error( + `aggregateReconfigAndPotentialSyncOps encountered unexpected failure: ${ + promiseError.message || promiseError + }` + ) + continue + } + + // Combine each promise's requiredUpdateReplicaSetOps and potentialSyncRequests + const { + requiredUpdateReplicaSetOps: requiredUpdateReplicaSetOpsFromPromise, + potentialSyncRequests: potentialSyncRequestsFromPromise + } = reconfigAndSyncOps + requiredUpdateReplicaSetOps = requiredUpdateReplicaSetOps.concat( + requiredUpdateReplicaSetOpsFromPromise + ) + potentialSyncRequests = potentialSyncRequests.concat( + potentialSyncRequestsFromPromise + ) + } + + return { requiredUpdateReplicaSetOps, potentialSyncRequests } +} + +/** + * Used to determine the `requiredUpdateReplicaSetOps` and `potentialSyncRequests` for a given nodeUser. + * @param {Object} nodeUser { primary, secondary1, secondary2, primarySpID, secondary1SpID, secondary2SpID, user_id, wallet} + * @param {Set} unhealthyPeers set of unhealthy peers + * @param {string (secondary endpoint): Object{ successRate: number (0-1), successCount: number, failureCount: number }} userSecondarySyncMetrics mapping of each secondary to the success metrics the nodeUser has had syncing to it + * @param {Object} endpointToSPIdMap + */ +const _aggregateOps = async ( + nodeUser, + unhealthyPeers, + userSecondarySyncMetrics, + endpointToSPIdMap, + thisContentNodeEndpoint +) => { + const requiredUpdateReplicaSetOps = [] + const potentialSyncRequests = [] + const unhealthyReplicas = new Set() + + const { + wallet, + primary, + secondary1, + secondary2, + primarySpID, + secondary1SpID, + secondary2SpID + } = nodeUser + + /** + * If this node is primary for user, check both secondaries for health + * Enqueue SyncRequests against healthy secondaries, and enqueue UpdateReplicaSetOps against unhealthy secondaries + */ + let replicaSetNodesToObserve = [ + { endpoint: secondary1, spId: secondary1SpID }, + { endpoint: secondary2, spId: secondary2SpID } + ] + + if (primary === thisContentNodeEndpoint) { + // filter out false-y values to account for incomplete replica sets + const secondariesInfo = replicaSetNodesToObserve.filter( + (entry) => entry.endpoint + ) + + /** + * For each secondary, enqueue `potentialSyncRequest` if healthy else add to `unhealthyReplicas` + */ + for (const secondaryInfo of secondariesInfo) { + const secondary = secondaryInfo.endpoint + + const { successRate, successCount, failureCount } = + userSecondarySyncMetrics[secondary] + + // Error case 1 - mismatched spID + if (endpointToSPIdMap[secondary] !== secondaryInfo.spId) { + logger.error( + `processStateMachineOperation Secondary ${secondary} for user ${wallet} mismatched spID. Expected ${secondaryInfo.spId}, found ${endpointToSPIdMap[secondary]}. Marking replica as unhealthy.` + ) + unhealthyReplicas.add(secondary) + + // Error case 2 - already marked unhealthy + } else if (unhealthyPeers.has(secondary)) { + logger.error( + `processStateMachineOperation Secondary ${secondary} for user ${wallet} in unhealthy peer set. 
Marking replica as unhealthy.` + ) + unhealthyReplicas.add(secondary) + + // Error case 3 - low user sync success rate + } else if ( + failureCount >= MIN_FAILED_SYNC_REQUESTS_BEFORE_RECONFIG && + successRate < MIN_SECONDARY_USER_SYNC_SUCCESS_PERCENT + ) { + logger.error( + `processStateMachineOperation Secondary ${secondary} for user ${wallet} has userSyncSuccessRate of ${successRate}, which is below threshold of ${MIN_SECONDARY_USER_SYNC_SUCCESS_PERCENT}. ${successCount} Successful syncs vs ${failureCount} Failed syncs. Marking replica as unhealthy.` + ) + unhealthyReplicas.add(secondary) + + // Success case + } else { + potentialSyncRequests.push({ ...nodeUser, endpoint: secondary }) + } + } + + /** + * If any unhealthy replicas found for user, enqueue an updateReplicaSetOp for later processing + */ + if (unhealthyReplicas.size > 0) { + requiredUpdateReplicaSetOps.push({ ...nodeUser, unhealthyReplicas }) + } + + /** + * If this node is secondary for user, check both secondaries for health and enqueue SyncRequests against healthy secondaries + * Ignore unhealthy secondaries for now + */ + } else { + // filter out false-y values to account for incomplete replica sets and filter out the + // the self node + replicaSetNodesToObserve = [ + { endpoint: primary, spId: primarySpID }, + ...replicaSetNodesToObserve + ] + replicaSetNodesToObserve = replicaSetNodesToObserve.filter((entry) => { + return entry.endpoint && entry.endpoint !== thisContentNodeEndpoint + }) + + for (const replica of replicaSetNodesToObserve) { + // If the map's spId does not match the query's spId, then regardless + // of the relationship of the node to the user, issue a reconfig for that node + if (endpointToSPIdMap[replica.endpoint] !== replica.spId) { + unhealthyReplicas.add(replica.endpoint) + } else if (unhealthyPeers.has(replica.endpoint)) { + // Else, continue with conducting extra health check if the current observed node is a primary, and + // add to `unhealthyReplicas` if observed node is a secondary + let addToUnhealthyReplicas = true + + if (replica.endpoint === primary) { + addToUnhealthyReplicas = !(await isPrimaryHealthy(primary)) + } + + if (addToUnhealthyReplicas) { + unhealthyReplicas.add(replica.endpoint) + } + } + } + + if (unhealthyReplicas.size > 0) { + requiredUpdateReplicaSetOps.push({ ...nodeUser, unhealthyReplicas }) + } + } + + return { requiredUpdateReplicaSetOps, potentialSyncRequests } +} + +/** + * Given user state info, determines required sync mode for user and replica. This fn is called for each (primary, secondary) pair + * + * It is possible for filesHashes to diverge despite clock equality because clock equality can happen if different content with same number of clockRecords is uploaded to different replicas. + * This is an error condition and needs to be identified and rectified. 
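For reference, the branch logic implemented below maps (clock, filesHash) comparisons to sync modes as illustrated by the following sketch of expected return values (illustrative only, not lines from this patch; SYNC_MODES names come from stateMachineConstants):

// Equal clocks, equal filesHashes -> replicas are in sync, no action needed
await computeSyncModeForUserAndReplica({ wallet: '0x123456789', primaryClock: 10, secondaryClock: 10, primaryFilesHash: '0xabc', secondaryFilesHash: '0xabc' }) // SYNC_MODES.None

// Equal clocks, divergent filesHashes -> states diverged; primary must merge, then force secondary to resync
await computeSyncModeForUserAndReplica({ wallet: '0x123456789', primaryClock: 10, secondaryClock: 10, primaryFilesHash: '0xabc', secondaryFilesHash: '0xdef' }) // SYNC_MODES.MergePrimaryAndSecondary

// Secondary ahead of primary -> primary must sync from secondary
await computeSyncModeForUserAndReplica({ wallet: '0x123456789', primaryClock: 9, secondaryClock: 10, primaryFilesHash: '0xabc', secondaryFilesHash: '0xdef' }) // SYNC_MODES.MergePrimaryAndSecondary

// Primary ahead, secondary has no clockRecords (null filesHash) -> standard sync from primary
await computeSyncModeForUserAndReplica({ wallet: '0x123456789', primaryClock: 10, secondaryClock: 0, primaryFilesHash: '0xabc', secondaryFilesHash: null }) // SYNC_MODES.SyncSecondaryFromPrimary

// Primary ahead, secondary has data -> result depends on whether primary's filesHash computed over the
// secondary's clock range matches secondaryFilesHash (requires the DB lookup shown below)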
+ * + * @param {Object} param + * @param {string} param.wallet user wallet + * @param {number} param.primaryClock clock value on user's primary + * @param {number} param.secondaryClock clock value on user's secondary + * @param {string} param.primaryFilesHash filesHash on user's primary + * @param {string} param.secondaryFilesHash filesHash on user's secondary + * @returns {SYNC_MODES} syncMode one of None, SyncSecondaryFromPrimary, MergePrimaryAndSecondary + */ +const computeSyncModeForUserAndReplica = async ({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash +}) => { + if ( + !Number.isInteger(primaryClock) || + !Number.isInteger(secondaryClock) || + // `null` is a valid filesHash value; `undefined` is not + primaryFilesHash === undefined || + secondaryFilesHash === undefined + ) { + throw new Error( + '[computeSyncModeForUserAndReplica()] Error: Missing or invalid params' + ) + } + + if ( + primaryClock === secondaryClock && + primaryFilesHash !== secondaryFilesHash + ) { + /** + * This is an error condition, indicating that primary and secondary states for user have diverged. + * To fix this issue, primary should sync content from secondary and then force secondary to resync its entire state from primary. + */ + return SYNC_MODES.MergePrimaryAndSecondary + } else if (primaryClock < secondaryClock) { + /** + * Secondary has more data than primary -> primary must sync from secondary + */ + + return SYNC_MODES.MergePrimaryAndSecondary + } else if (primaryClock > secondaryClock && secondaryFilesHash === null) { + /** + * secondaryFilesHash will be null if secondary has no clockRecords for user -> secondary must sync from primary + */ + + return SYNC_MODES.SyncSecondaryFromPrimary + } else if (primaryClock > secondaryClock && secondaryFilesHash !== null) { + /** + * If primaryClock > secondaryClock, need to check that nodes have same content for each clock value. To do this, we compute filesHash from primary matching clock range from secondary. 
+ */ + + let primaryFilesHashForRange + try { + // Throws error if fails after all retries + primaryFilesHashForRange = await Utils.asyncRetry({ + asyncFn: async () => + DBManager.fetchFilesHashFromDB({ + lookupKey: { lookupWallet: wallet }, + clockMin: 0, + clockMax: secondaryClock + 1 + }), + options: { retries: FETCH_FILES_HASH_NUM_RETRIES }, + logger, + logLabel: + '[computeSyncModeForUserAndReplica()] [DBManager.fetchFilesHashFromDB()]' + }) + } catch (e) { + const errorMsg = `[computeSyncModeForUserAndReplica()] [DBManager.fetchFilesHashFromDB()] Error - ${e.message}` + logger.error(errorMsg) + throw new Error(errorMsg) + } + + if (primaryFilesHashForRange === secondaryFilesHash) { + return SYNC_MODES.SyncSecondaryFromPrimary + } else { + return SYNC_MODES.MergePrimaryAndSecondary + } + } else { + /** + * primaryClock === secondaryClock && primaryFilesHash === secondaryFilesHash + * Nodes have identical data = healthy state -> no sync needed + */ + + return SYNC_MODES.None + } +} + module.exports = { getLatestUserIdFromDiscovery, getNodeUsers, buildReplicaSetNodesToUserWalletsMap, - computeUserSecondarySyncSuccessRatesMap + computeUserSecondarySyncSuccessRatesMap, + aggregateReconfigAndPotentialSyncOps, + computeSyncModeForUserAndReplica } diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.js b/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.js index 7ddf1437402..243f9975dfe 100644 --- a/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.js +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.js @@ -27,7 +27,7 @@ const reconfigNodeWhitelist = config.get('reconfigNodeWhitelist') * @param {number} param.secondary1 the current secondary1 endpoint of the user whose replica set will be reconfigured * @param {number} param.secondary2 the current secondary2 endpoint of the user whose replica set will be reconfigured * @param {string[]} param.unhealthyReplicas the endpoints of the user's replica set that are currently unhealthy - * @param {Object} param.replicaSetNodesToUserClockStatusesMap map of user's node endpoint strings to clock value of node for user whose replica set should be updated + * @param {Object} param.replicaToUserInfoMap map(secondary endpoint => { clock, filesHash }) map of user's node endpoint strings to user info on node for user whose replica set should be updated * @param {string[]} param.enabledReconfigModes array of which reconfig modes are enabled */ module.exports = async function ({ @@ -38,7 +38,7 @@ module.exports = async function ({ secondary1, secondary2, unhealthyReplicas, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes }) { /** @@ -70,7 +70,7 @@ module.exports = async function ({ wallet, unhealthyReplicas, healthyNodes, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes ) @@ -87,7 +87,7 @@ module.exports = async function ({ secondary2, unhealthyReplicasSet: new Set(unhealthyReplicas || []), healthyNodes, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes }) ;({ errorMsg, issuedReconfig, syncJobsToEnqueue } = @@ -145,6 +145,7 @@ module.exports = async function ({ * @param {Set} param.unhealthyReplicasSet a set of endpoints of unhealthy replica set nodes * @param {string[]} param.healthyNodes array of healthy Content Node endpoints used for selecting new 
replica set * @param {Object} param.replicaSetNodesToUserClockStatusesMap map of secondary endpoint strings to clock value of secondary for user whose replica set should be updated + * @param {Object} param.replicaToUserInfoMap map(secondary endpoint => { clock, filesHash }) map of user's node endpoint strings to user info on node for user whose replica set should be updated * @param {string[]} param.enabledReconfigModes array of which reconfig modes are enabled * @returns {Object} * { @@ -162,7 +163,7 @@ const _determineNewReplicaSet = async ({ wallet, unhealthyReplicasSet = new Set(), healthyNodes, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes }) => { const currentReplicaSet = [primary, secondary1, secondary2] @@ -183,7 +184,7 @@ const _determineNewReplicaSet = async ({ secondary1, secondary2, unhealthyReplicasSet, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, newReplicaNodes[0], enabledReconfigModes ) @@ -216,7 +217,7 @@ const _validateJobData = ( wallet, unhealthyReplicas, healthyNodes, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes ) => { if (typeof logger !== 'object') { @@ -255,11 +256,11 @@ const _validateJobData = ( ) } if ( - typeof replicaSetNodesToUserClockStatusesMap !== 'object' || - replicaSetNodesToUserClockStatusesMap instanceof Array + typeof replicaToUserInfoMap !== 'object' || + replicaToUserInfoMap instanceof Array ) { throw new Error( - `Invalid type ("${typeof replicaSetNodesToUserClockStatusesMap}") or value ("${replicaSetNodesToUserClockStatusesMap}") of replicaSetNodesToUserClockStatusesMap` + `Invalid type ("${typeof replicaToUserInfoMap}") or value ("${replicaToUserInfoMap}") of replicaToUserInfoMap` ) } if (!(enabledReconfigModes instanceof Array)) { @@ -275,7 +276,7 @@ const _validateJobData = ( * @param {*} secondary1 user's current first secondary endpoint * @param {*} secondary2 user's current second secondary endpoint * @param {*} unhealthyReplicasSet a set of endpoints of unhealthy replica set nodes - * @param {*} replicaSetNodesToUserClockStatusesMap map of secondary endpoint strings to clock value of secondary for user whose replica set should be updated + * @param {Object} param.replicaToUserInfoMap map(secondary endpoint => { clock, filesHash }) map of user's node endpoint strings to user info on node for user whose replica set should be updated * @param {*} newReplicaNode endpoint of node that will replace the unhealthy node * @returns reconfig info to update the user's replica set to replace the 1 unhealthy node */ @@ -284,7 +285,7 @@ const _determineNewReplicaSetWhenOneNodeIsUnhealthy = ( secondary1, secondary2, unhealthyReplicasSet, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, newReplicaNode, enabledReconfigModes ) => { @@ -292,8 +293,8 @@ const _determineNewReplicaSetWhenOneNodeIsUnhealthy = ( // value of the two secondaries as the new primary, leave the other as the first secondary, and select a new second secondary if (unhealthyReplicasSet.has(primary)) { const [newPrimary, currentHealthySecondary] = - replicaSetNodesToUserClockStatusesMap[secondary1] >= - replicaSetNodesToUserClockStatusesMap[secondary2] + replicaToUserInfoMap[secondary1].clock >= + replicaToUserInfoMap[secondary2].clock ? 
[secondary1, secondary2] : [secondary2, secondary1] return { diff --git a/creator-node/src/utils.js b/creator-node/src/utils.js index d4000926974..02973c20796 100644 --- a/creator-node/src/utils.js +++ b/creator-node/src/utils.js @@ -384,7 +384,7 @@ function asyncRetry({ if (err && log) { const logPrefix = (logLabel ? `[${logLabel}] ` : '') + `[asyncRetry] [attempt #${i}]` - logger.warn(`${logPrefix}: `, err) + logger.warn(`${logPrefix}: `, err.message || err) } }, ...options diff --git a/creator-node/test/dbManager.test.js b/creator-node/test/dbManager.test.js index f76682bc5bc..ff46e4d7251 100644 --- a/creator-node/test/dbManager.test.js +++ b/creator-node/test/dbManager.test.js @@ -2,7 +2,7 @@ const assert = require('assert') const proxyquire = require('proxyquire') const _ = require('lodash') const getUuid = require('uuid/v4') -const crypto = require('crypto') + const request = require('supertest') const path = require('path') const sinon = require('sinon') @@ -19,11 +19,12 @@ const { createStarterCNodeUser, getCNodeUser, destroyUsers, - createSession + createSession, + createStarterCNodeUserWithKey } = require('./lib/dataSeeds') const { getApp } = require('./lib/app') const { getLibsMock } = require('./lib/libsMock') -const { saveFileToStorage } = require('./lib/helpers') +const { saveFileToStorage, computeFilesHash } = require('./lib/helpers') const TestAudioFilePath = path.resolve(__dirname, 'testTrack.mp3') @@ -77,7 +78,7 @@ describe('Test createNewDataRecord()', async function () { await server.close() }) - it('Sequential createNewDataRecord - create 2 records', async () => { + it('Sequential createNewDataRecord - create 2 records', async function () { const sequelizeTableInstance = models.File /** @@ -165,7 +166,7 @@ describe('Test createNewDataRecord()', async function () { assert.strictEqual(file.clock, initialClockVal + 2) }) - it('Concurrent createNewDataRecord - successfully makes concurrent calls in separate transactions', async () => { + it('Concurrent createNewDataRecord - successfully makes concurrent calls in separate transactions', async function () { const sequelizeTableInstance = models.File const numEntries = 5 @@ -186,7 +187,7 @@ describe('Test createNewDataRecord()', async function () { // Make multiple concurrent calls - create a transaction for each call const arr = _.range(1, numEntries + 1) // [1, 2, ..., numEntries] let createdFiles = await Promise.all( - arr.map(async () => { + arr.map(async function () { const transaction = await models.sequelize.transaction() const createdFile = await DBManager.createNewDataRecord( createFileQueryObj, @@ -235,7 +236,7 @@ describe('Test createNewDataRecord()', async function () { }) }) - it('Concurrent createNewDataRecord - fails to make concurrent calls in a single transaction due to ClockRecords_pkey', async () => { + it('Concurrent createNewDataRecord - fails to make concurrent calls in a single transaction due to ClockRecords_pkey', async function () { const sequelizeTableInstance = models.File const numEntries = 5 @@ -258,7 +259,7 @@ describe('Test createNewDataRecord()', async function () { try { const arr = _.range(1, numEntries + 1) // [1, 2, ..., numEntries] await Promise.all( - arr.map(async () => { + arr.map(async function () { const createdFile = await DBManager.createNewDataRecord( createFileQueryObj, cnodeUserUUID, @@ -304,7 +305,7 @@ describe('Test createNewDataRecord()', async function () { /** * Simulates /image_upload and /track_content routes, which write multiple files sequentially in atomic tx */ - 
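The dbManager tests below replace the inline md5 hashing with a shared computeFilesHash helper imported from test/lib/helpers. The helper itself is not shown in this hunk; based on the inline logic it replaces, a minimal sketch would look like the following (the actual helper may differ):

const crypto = require('crypto')

// Mirrors the removed inline logic: md5 over the multihashes joined into a Postgres-style array literal
function computeFilesHash (multihashes) {
  const multihashString = `{${multihashes.join(',')}}`
  return crypto.createHash('md5').update(multihashString).digest('hex')
}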
it('Sequential createNewDataRecord - successfully makes multiple sequential calls in single transaction', async () => { + it('Sequential createNewDataRecord - successfully makes multiple sequential calls in single transaction', async function () { const sequelizeTableInstance = models.File const numEntries = 5 @@ -359,7 +360,7 @@ describe('Test createNewDataRecord()', async function () { }) }) - it('Confirm file.pkey will block duplicate clock vals from being written', async () => { + it('Confirm file.pkey will block duplicate clock vals from being written', async function () { const transaction = await models.sequelize.transaction() try { createFileQueryObj = { @@ -632,7 +633,7 @@ describe('Test deleteSessionTokensFromDB() when provided an Array of SessionToke }) }) -describe('Test deleteAllCNodeUserDataFromDB()', async () => { +describe('Test deleteAllCNodeUserDataFromDB()', async function () { const initialClockVal = 0 const userId = 1 @@ -649,7 +650,7 @@ describe('Test deleteAllCNodeUserDataFromDB()', async () => { let session, app, cnodeUser, cnodeUserUUID, server, libsMock /** Init server to run DB migrations */ - before(async () => { + before(async function () { const spId = 1 libsMock = getLibsMock() const appInfo = await getApp(libsMock, BlacklistManager, null, spId) @@ -658,7 +659,7 @@ describe('Test deleteAllCNodeUserDataFromDB()', async () => { }) /** Reset DB state + Create cnodeUser + confirm initial clock state + define global vars */ - beforeEach(async () => { + beforeEach(async function () { // Wipe all CNodeUsers + dependent data await destroyUsers() session = await createStarterCNodeUser(userId) @@ -670,14 +671,14 @@ describe('Test deleteAllCNodeUserDataFromDB()', async () => { }) /** Wipe all CNodeUsers + dependent data */ - after(async () => { + after(async function () { sinon.restore() await destroyUsers() await server.close() }) - it('Successfully deletes all state for CNodeUser with data in all tables', async () => { - const uploadAudiusUserState = async () => { + it('Successfully deletes all state for CNodeUser with data in all tables', async function () { + const uploadAudiusUserState = async function () { const audiusUserMetadata = { test: 'field1' } const audiusUserMetadataResp = await request(app) .post('/audius_users/metadata') @@ -821,7 +822,7 @@ describe('Test deleteAllCNodeUserDataFromDB()', async () => { // delete all DB records await DBManager.deleteAllCNodeUserDataFromDB({ - lookupCnodeUserUUID: cnodeUserUUID + lookupCNodeUserUUID: cnodeUserUUID }) /** assert all tables empty */ @@ -839,23 +840,24 @@ describe('Test deleteAllCNodeUserDataFromDB()', async () => { assert.strictEqual(clockRecordEntries.length, 0) }) - it.skip('external & internal transaction', async () => {}) + it.skip('external & internal transaction', async function () {}) }) -describe('Test fetchFilesHashFromDB()', async () => { +describe('Test fetchFilesHashFromDB()', async function () { const initialClockVal = 0 + const ClockZero = 0 const filesTableInst = models.File let cnodeUser, cnodeUserUUID, server /** Init server to run DB migrations */ - before(async () => { + before(async function() { const appInfo = await getApp(getLibsMock(), BlacklistManager) server = appInfo.server }) /** Reset DB state + Create cnodeUser + confirm initial clock state + define global vars */ - beforeEach(async () => { + beforeEach(async function () { // Wipe all CNodeUsers + dependent data await destroyUsers() const resp = await createStarterCNodeUser() @@ -867,7 +869,7 @@ describe('Test 
fetchFilesHashFromDB()', async () => { }) /** Wipe all CNodeUsers + dependent data */ - after(async () => { + after(async function () { await destroyUsers() await server.close() @@ -904,7 +906,7 @@ describe('Test fetchFilesHashFromDB()', async () => { return multihashes } - it('fetchFilesHashFromDB successfully returns hash', async () => { + it('fetchFilesHashFromDB successfully returns hash', async function () { const numFiles = 10 const randomFileQueryObjects = generateRandomFileQueryObjects(numFiles) const multihashes = await createFilesForUser( @@ -913,11 +915,7 @@ describe('Test fetchFilesHashFromDB()', async () => { ) // compute expectedFilesHash - const multihashString = `{${multihashes.join(',')}}` - const expectedFilesHash = crypto - .createHash('md5') - .update(multihashString) - .digest('hex') + let expectedFilesHash = computeFilesHash(multihashes) // fetch filesHash by cnodeUserUUID & assert equal let actualFilesHash = await DBManager.fetchFilesHashFromDB({ @@ -930,9 +928,31 @@ describe('Test fetchFilesHashFromDB()', async () => { lookupKey: { lookupWallet: cnodeUser.walletPublicKey } }) assert.strictEqual(actualFilesHash, expectedFilesHash) + + // Create CNU2 + const walletCNU2 = getUuid() + const createCNU2Resp = await createStarterCNodeUserWithKey(walletCNU2) + const cnodeUserUUID2 = createCNU2Resp.cnodeUserUUID + const cnodeUser2 = await getCNodeUser(cnodeUserUUID2) + assert.strictEqual(cnodeUser2.clock, initialClockVal) + + // Confirm handles user with no data + actualFilesHash = await DBManager.fetchFilesHashFromDB({ + lookupKey: { lookupCNodeUserUUID: cnodeUserUUID2 } + }) + expectedFilesHash = null + assert.strictEqual(actualFilesHash, expectedFilesHash) + + // Confirm handles non-existent user + const cnodeUserUUID3 = getUuid() + actualFilesHash = await DBManager.fetchFilesHashFromDB({ + lookupKey: { lookupCNodeUserUUID: cnodeUserUUID3 } + }) + expectedFilesHash = null + assert.strictEqual(actualFilesHash, expectedFilesHash) }) - it('fetchFilesHashFromDB successully returns hash by clock range when supplied', async () => { + it('fetchFilesHashFromDB successully returns hash by clock range when supplied', async function () { const numFiles = 10 const randomFileQueryObjects = generateRandomFileQueryObjects(numFiles) const multihashes = await createFilesForUser( @@ -944,11 +964,7 @@ describe('Test fetchFilesHashFromDB()', async () => { const clockMax = 8 // exclusive /** clockMin */ - let multihashString = `{${multihashes.slice(clockMin - 1).join(',')}}` - let expectedFilesHash = crypto - .createHash('md5') - .update(multihashString) - .digest('hex') + let expectedFilesHash = computeFilesHash(multihashes.slice(clockMin - 1)) let actualFilesHash = await DBManager.fetchFilesHashFromDB({ lookupKey: { lookupCNodeUserUUID: cnodeUserUUID }, clockMin @@ -956,11 +972,7 @@ describe('Test fetchFilesHashFromDB()', async () => { assert.strictEqual(actualFilesHash, expectedFilesHash) /** clockMax */ - multihashString = `{${multihashes.slice(0, clockMax - 1).join(',')}}` - expectedFilesHash = crypto - .createHash('md5') - .update(multihashString) - .digest('hex') + expectedFilesHash = computeFilesHash(multihashes.slice(0, clockMax - 1)) actualFilesHash = await DBManager.fetchFilesHashFromDB({ lookupKey: { lookupCNodeUserUUID: cnodeUserUUID }, clockMax @@ -968,13 +980,8 @@ describe('Test fetchFilesHashFromDB()', async () => { assert.strictEqual(actualFilesHash, expectedFilesHash) /** clockMin and clockMax */ - multihashString = `{${multihashes - .slice(clockMin - 1, clockMax - 1) - 
.join(',')}}` - expectedFilesHash = crypto - .createHash('md5') - .update(multihashString) - .digest('hex') + expectedFilesHash = computeFilesHash(multihashes + .slice(clockMin - 1, clockMax - 1)) actualFilesHash = await DBManager.fetchFilesHashFromDB({ lookupKey: { lookupCNodeUserUUID: cnodeUserUUID }, clockMin, @@ -988,4 +995,74 @@ describe('Test fetchFilesHashFromDB()', async () => { }) assert.strictEqual(actualFilesHash, expectedFilesHash) }) + + it('fetchFilesHashesFromDB', async function () { + const numFiles = 10 + + // Upload files for CNU1 + const randomFileQueryObjectsCNU1 = generateRandomFileQueryObjects(numFiles) + const multihashesCNU1 = await createFilesForUser( + cnodeUserUUID, + randomFileQueryObjectsCNU1 + ) + + // compute expectedFilesHashCNU1 + const expectedFilesHashCNU1 = computeFilesHash(multihashesCNU1) + + // Create CNU2 + const walletCNU2 = getUuid() + const createCNU2Resp = await createStarterCNodeUserWithKey(walletCNU2) + const cnodeUserUUID2 = createCNU2Resp.cnodeUserUUID + const cnodeUser2 = await getCNodeUser(cnodeUserUUID2) + assert.strictEqual(cnodeUser2.clock, initialClockVal) + + // Upload files for cnodeUser2 + const randomFileQueryObjectsCNU2 = generateRandomFileQueryObjects(numFiles) + const multihashesCNU2 = await createFilesForUser( + cnodeUserUUID2, + randomFileQueryObjectsCNU2 + ) + + // compute expectedFilesHashCNU2 + const expectedFilesHashCNU2 = computeFilesHash(multihashesCNU2) + + // fetch filesHashes & assert equal + let cnodeUserUUIDs = [cnodeUserUUID, cnodeUserUUID2] + let actualResp = await DBManager.fetchFilesHashesFromDB({ cnodeUserUUIDs }) + let expectedResp = { + [cnodeUserUUID]: expectedFilesHashCNU1, + [cnodeUserUUID2]: expectedFilesHashCNU2 + } + assert.deepEqual(actualResp, expectedResp) + + // Create CNU3 with no files + const walletCNU3 = getUuid() + const createCNU3Resp = await createStarterCNodeUserWithKey(walletCNU3) + const cnodeUserUUID3 = createCNU3Resp.cnodeUserUUID + const cnodeUser3 = await getCNodeUser(cnodeUserUUID3) + assert.strictEqual(cnodeUser3.clock, ClockZero) + + // Correctly handles user with no files + actualResp = await DBManager.fetchFilesHashesFromDB({ cnodeUserUUIDs: [cnodeUserUUID3] }) + expectedResp = { [cnodeUserUUID3]: null } + assert.deepEqual(actualResp, expectedResp) + + // Correctly handles non-existent user + const cnodeUserUUID4 = getUuid() + actualResp = await DBManager.fetchFilesHashesFromDB({ cnodeUserUUIDs: [cnodeUserUUID4] }) + expectedResp = { [cnodeUserUUID4]: null } + assert.deepEqual(actualResp, expectedResp) + + // Correctly handles request with valid user, invalid user, and user with no files + actualResp = await DBManager.fetchFilesHashesFromDB({ + cnodeUserUUIDs: [cnodeUserUUID, cnodeUserUUID2, cnodeUserUUID3, cnodeUserUUID4] + }) + expectedResp = { + [cnodeUserUUID]: expectedFilesHashCNU1, + [cnodeUserUUID2]: expectedFilesHashCNU2, + [cnodeUserUUID3]: null, + [cnodeUserUUID4]: null + } + assert.deepEqual(actualResp, expectedResp) + }) }) diff --git a/creator-node/test/findReplicaSetUpdates.jobProcessor.test.js b/creator-node/test/findReplicaSetUpdates.jobProcessor.test.js index a441876852d..8de193923cb 100644 --- a/creator-node/test/findReplicaSetUpdates.jobProcessor.test.js +++ b/creator-node/test/findReplicaSetUpdates.jobProcessor.test.js @@ -18,6 +18,7 @@ chai.use(require('chai-as-promised')) describe('test findReplicaSetUpdates job processor', function () { let server, sandbox, originalContentNodeEndpoint, logger + beforeEach(async function () { const appInfo = await 
getApp(getLibsMock()) await appInfo.app.get('redisClient').flushdb() @@ -72,25 +73,25 @@ describe('test findReplicaSetUpdates job processor', function () { [secondary2]: secondary2SpID } - const DEFAULT_CLOCK_STATUSES_MAP = { + const DEFAULT_REPLICA_TO_USER_INFO_MAP = { [primary]: { - [wallet]: 10, - randomWallet: 10 + [wallet]: { clock: 10, filesHash: '0xasdf' }, + randomWallet: { clock: 10, filesHash: '0xasdf' } }, [secondary1]: { - [wallet]: 10, - anotherWallet: 100 + [wallet]: { clock: 10, filesHash: '0xasdf' }, + anotherWallet: { clock: 100, filesHash: '0xnotasdf' } }, [secondary2]: { - [wallet]: 10 + [wallet]: { clock: 10, filesHash: '0xasdf' } }, unusedNode: {} } - const CLOCK_STATUSES_MAP_FILTERED_TO_WALLET = { - [primary]: 10, - [secondary1]: 10, - [secondary2]: 10 + const REPLICA_TO_USER_INFO_MAP_FILTERED_TO_WALLET = { + [primary]: { clock: 10, filesHash: '0xasdf' }, + [secondary1]: { clock: 10, filesHash: '0xasdf' }, + [secondary2]: { clock: 10, filesHash: '0xasdf' } } function getJobProcessorStub( @@ -134,7 +135,7 @@ describe('test findReplicaSetUpdates job processor', function () { logger, users, unhealthyPeers, - replicaSetNodesToUserClockStatusesMap: DEFAULT_CLOCK_STATUSES_MAP, + replicaToUserInfoMap: DEFAULT_REPLICA_TO_USER_INFO_MAP, userSecondarySyncMetricsMap }) } @@ -161,8 +162,8 @@ describe('test findReplicaSetUpdates job processor', function () { secondary1, secondary2, unhealthyReplicas: expectedUnhealthyReplicas, - replicaSetNodesToUserClockStatusesMap: - CLOCK_STATUSES_MAP_FILTERED_TO_WALLET + replicaToUserInfoMap: + REPLICA_TO_USER_INFO_MAP_FILTERED_TO_WALLET } } ] diff --git a/creator-node/test/findSyncRequests.jobProcessor.test.js b/creator-node/test/findSyncRequests.jobProcessor.test.js index 2699708f576..a5f38c33748 100644 --- a/creator-node/test/findSyncRequests.jobProcessor.test.js +++ b/creator-node/test/findSyncRequests.jobProcessor.test.js @@ -10,20 +10,29 @@ const { getLibsMock } = require('./lib/libsMock') const config = require('../src/config') const { SyncType, - QUEUE_NAMES + QUEUE_NAMES, + SYNC_MODES } = require('../src/services/stateMachineManager/stateMachineConstants') chai.use(require('sinon-chai')) describe('test findSyncRequests job processor', function () { - let server, sandbox, originalContentNodeEndpoint + let server, sandbox, originalContentNodeEndpoint, logger + beforeEach(async function () { const appInfo = await getApp(getLibsMock()) await appInfo.app.get('redisClient').flushdb() server = appInfo.server sandbox = sinon.createSandbox() + config.set('spID', 1) originalContentNodeEndpoint = config.get('creatorNodeEndpoint') + + logger = { + info: sandbox.stub(), + warn: sandbox.stub(), + error: sandbox.stub() + } }) afterEach(async function () { @@ -42,20 +51,21 @@ describe('test findSyncRequests job processor', function () { const wallet = '0x123456789' const users = [ { + user_id, + wallet, primary, secondary1, secondary2, primarySpID, secondary1SpID, - secondary2SpID, - user_id, - wallet + secondary2SpID } ] function getJobProcessorStub( getNewOrExistingSyncReqStub, - getCNodeEndpointToSpIdMapStub + getCNodeEndpointToSpIdMapStub, + computeSyncModeForUserAndReplicaStub ) { return proxyquire( '../src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.js', @@ -66,433 +76,637 @@ describe('test findSyncRequests job processor', function () { }, '../CNodeToSpIdMapManager': { getCNodeEndpointToSpIdMap: getCNodeEndpointToSpIdMapStub + }, + './stateMonitoringUtils': { + computeSyncModeForUserAndReplica: 
computeSyncModeForUserAndReplicaStub } } ) } - it('returns correct syncs (happy path)', function () { - // Set variables that satisfy conditions for user1 to be synced from primary1 to secondary1 + /** + * Creates and returns a stub that fakes a function call given expected input and output conditions + * The stub throws an error if called with unexpected inputs + * @param {String} functionName + * @param {Object[]} expectedConditionsArr - array containing all possible expected conditions for stub + * @param {Object[]} expectedConditionsArr[].input - expected stubbed function input params + * @param {Object[]} expectedConditionsArr[].output - expected stubbed function output for above input params + * @returns stub + */ + function getConditionalStub(functionName, expectedConditionsArr) { + const expectedConditions = {} + expectedConditionsArr.map( + ({ input, output }) => { + expectedConditions[JSON.stringify(input)] = output + } + ) + + const stub = sandbox.stub().callsFake((args) => { + if (!(JSON.stringify(args) in expectedConditions)) { + throw new Error(`${functionName} was not expected to be called with the given args: ${JSON.stringify(args)}`) + } + + return expectedConditions[JSON.stringify(args)] + }) + + return stub + } + + function getGetCNodeEndpointToSpIdMapStub (cNodeEndpointToSpIdMap) { + const stub = sandbox + .stub() + .returns(cNodeEndpointToSpIdMap) + return stub + } + + it('Correctly returns sync from primary to secondary1 when secondary1 clock < primary clock', async function () { + /** + * Define all input variables + */ + // spIds in mapping must match those in the `users` variable const cNodeEndpointToSpIdMap = { [primary]: primarySpID, [secondary1]: secondary1SpID, [secondary2]: secondary2SpID } + const unhealthyPeers = [] - // Clock value of secondary1 being less than primary means we'll sync from primary to secondary1 - const replicaSetNodesToUserClockStatusesMap = { + + // Since secondary1.wallet.clock < primary.wallet.clock, we will sync from primary to secondary1 + const replicaToUserInfoMap = { [primary]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} }, [secondary1]: { - [wallet]: 9 + [wallet]: {clock: 9, filesHash: '0xnotabc'} }, [secondary2]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} } } + const userSecondarySyncMetricsMap = {} + // This node must be the primary in order to sync config.set('creatorNodeEndpoint', primary) - // Stub successfully finding a new sync to enqueue from primary1 to secondary1 + + /** + * Create all stubs for jobProcessor + */ + const expectedSyncReqToEnqueue = 'expectedSyncReqToEnqueue' - const getNewOrExistingSyncReqStub = sandbox.stub().callsFake((args) => { - const { userWallet, secondaryEndpoint, primaryEndpoint, syncType } = args - if ( - userWallet === wallet && - secondaryEndpoint === secondary1 && - primaryEndpoint === primary && - syncType === SyncType.Recurring - ) { - return { syncReqToEnqueue: expectedSyncReqToEnqueue } + const getNewOrExistingSyncReqExpectedConditionsArr = [{ + input: { + userWallet: wallet, + primaryEndpoint: primary, + secondaryEndpoint: secondary1, + syncType: SyncType.Recurring + }, + /** + * note - this value can be anything as it's outside scope of this integration test suite + * TODO - should prob change this to reflect real object + */ + output: { syncReqToEnqueue: expectedSyncReqToEnqueue } + }] + const getNewOrExistingSyncReqStub = getConditionalStub( + 'getNewOrExistingSyncReq', + getNewOrExistingSyncReqExpectedConditionsArr + ) + + const 
getCNodeEndpointToSpIdMapStub = getGetCNodeEndpointToSpIdMapStub(cNodeEndpointToSpIdMap) + + const computeSyncModeForUserAndReplicaExpectedConditionsArr = [ + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary1][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary1][wallet].filesHash + }, + output: SYNC_MODES.SyncSecondaryFromPrimary + }, + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary2][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary2][wallet].filesHash + }, + output: SYNC_MODES.None } - throw new Error( - 'getNewOrExistingSyncReq was not expected to be called with the given args' - ) - }) - const getCNodeEndpointToSpIdMapStub = sandbox - .stub() - .returns(cNodeEndpointToSpIdMap) - const logger = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } + ] + const computeSyncModeForUserAndReplicaStub = getConditionalStub('computeSyncModeForUserAndReplica', computeSyncModeForUserAndReplicaExpectedConditionsArr) const findSyncRequestsJobProcessor = getJobProcessorStub( getNewOrExistingSyncReqStub, - getCNodeEndpointToSpIdMapStub + getCNodeEndpointToSpIdMapStub, + computeSyncModeForUserAndReplicaStub ) - // Verify job outputs the correct results: sync to user1 to secondary1 because its clock value is behind - expect( - findSyncRequestsJobProcessor({ - logger, - users, - unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap - }) - ).to.deep.equal({ + /** + * Verify job outputs the correct results: sync to user1 to secondary1 because its clock value is behind + */ + + const expectedOutput = { duplicateSyncReqs: [], errors: [], jobsToEnqueue: { [QUEUE_NAMES.STATE_RECONCILIATION]: [expectedSyncReqToEnqueue] } + } + const actualOutput = await findSyncRequestsJobProcessor({ + users, + unhealthyPeers, + replicaToUserInfoMap, + userSecondarySyncMetricsMap, + logger }) - expect(getNewOrExistingSyncReqStub).to.have.been.calledOnceWithExactly({ - userWallet: wallet, - secondaryEndpoint: secondary1, - primaryEndpoint: primary, - syncType: SyncType.Recurring - }) + expect(actualOutput).to.deep.equal(expectedOutput) + expect(getNewOrExistingSyncReqStub).to.have.been.calledOnceWithExactly(getNewOrExistingSyncReqExpectedConditionsArr[0].input) + expect(computeSyncModeForUserAndReplicaStub).to.have.been.calledTwice + .and.to.have.been.calledWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[0].input) + .and.to.have.been.calledWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[1].input) }) - it("doesn't enqueue duplicate syncs", function () { - // Set variables that satisfy conditions for user1 to be synced from primary1 to secondary1 - const primary = 'http://primary_cn.co' - const secondary1 = 'http://secondary_to_sync_to.co' - const secondary2 = 'http://secondary_already_synced.co' - const primarySpID = 1 - const secondary1SpID = 2 - const secondary2SpID = 3 - const user_id = 1 - const wallet = '0x123456789' + it("doesn't enqueue duplicate syncs", async function () { + /** + * Define all input variables + */ + // spIds in mapping must match those in the `users` variable const cNodeEndpointToSpIdMap = { [primary]: primarySpID, [secondary1]: secondary1SpID, [secondary2]: secondary2SpID } - 
const users = [ - { - primary, - secondary1, - secondary2, - primarySpID, - secondary1SpID, - secondary2SpID, - user_id, - wallet - } - ] + const unhealthyPeers = [] + // Clock value of secondary1 being less than primary means we'll sync from primary to secondary1 - const replicaSetNodesToUserClockStatusesMap = { + const replicaToUserInfoMap = { [primary]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} }, [secondary1]: { - [wallet]: 9 + [wallet]: {clock: 9, filesHash: '0xnotabc'} }, [secondary2]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} } } + const userSecondarySyncMetricsMap = {} + // This node must be the primary in order to sync config.set('creatorNodeEndpoint', primary) + + /** + * Create all stubs for jobProcessor + */ + // Stub having a duplicate sync so that no new sync will be enqueued const expectedDuplicateSyncReq = 'expectedDuplicateSyncReq' - const getNewOrExistingSyncReqStub = sandbox.stub().callsFake((args) => { - const { userWallet, secondaryEndpoint, primaryEndpoint, syncType } = args - if ( - userWallet === wallet && - secondaryEndpoint === secondary1 && - primaryEndpoint === primary && - syncType === SyncType.Recurring - ) { - return { duplicateSyncReq: expectedDuplicateSyncReq } - } - throw new Error( - 'getNewOrExistingSyncReq was not expected to be called with the given args' - ) - }) - const getCNodeEndpointToSpIdMapStub = sandbox - .stub() - .returns(cNodeEndpointToSpIdMap) - const logger = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } + const getNewOrExistingSyncReqExpectedConditionsArr = [{ + input: { + userWallet: wallet, + primaryEndpoint: primary, + secondaryEndpoint: secondary1, + syncType: SyncType.Recurring + }, + output: { duplicateSyncReq: expectedDuplicateSyncReq } + }] + const getNewOrExistingSyncReqStub = getConditionalStub( + 'getNewOrExistingSyncReq', + getNewOrExistingSyncReqExpectedConditionsArr + ) + + const getCNodeEndpointToSpIdMapStub = getGetCNodeEndpointToSpIdMapStub(cNodeEndpointToSpIdMap) + const computeSyncModeForUserAndReplicaExpectedConditionsArr = [ + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary1][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary1][wallet].filesHash + }, + output: SYNC_MODES.SyncSecondaryFromPrimary + }, + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary2][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary2][wallet].filesHash + }, + output: SYNC_MODES.None + } + ] + const computeSyncModeForUserAndReplicaStub = getConditionalStub('computeSyncModeForUserAndReplica', computeSyncModeForUserAndReplicaExpectedConditionsArr) + const findSyncRequestsJobProcessor = getJobProcessorStub( getNewOrExistingSyncReqStub, - getCNodeEndpointToSpIdMapStub + getCNodeEndpointToSpIdMapStub, + computeSyncModeForUserAndReplicaStub ) - // Verify job outputs the correct results: sync to user1 to secondary1 doesn't happen because a duplicate is already in the queue - expect( - findSyncRequestsJobProcessor({ - logger, - users, - unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap - }) - ).to.deep.equal({ + /** + * Verify job outputs the correct results: sync to user1 to secondary1 doesn't happen because a 
duplicate is already in the queue + */ + + const expectedOutput = { duplicateSyncReqs: [expectedDuplicateSyncReq], errors: [], jobsToEnqueue: {} + } + const actualOutput = await findSyncRequestsJobProcessor({ + users, + unhealthyPeers, + replicaToUserInfoMap, + userSecondarySyncMetricsMap, + logger }) - expect(getNewOrExistingSyncReqStub).to.have.been.calledOnceWithExactly({ - userWallet: wallet, - secondaryEndpoint: secondary1, - primaryEndpoint: primary, - syncType: SyncType.Recurring - }) + expect(actualOutput).to.deep.equal(expectedOutput) + expect(getNewOrExistingSyncReqStub).to.have.been.calledOnceWithExactly(getNewOrExistingSyncReqExpectedConditionsArr[0].input) + expect(computeSyncModeForUserAndReplicaStub).to.have.been.calledTwice + .and.to.have.been.calledWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[0].input) + .and.to.have.been.calledWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[1].input) }) - it("doesn't sync to unhealthy secondaries", function () { - // Set variables that satisfy conditions for user1 to be synced from primary1 to secondary1 (except being healthy) + it("doesn't sync to unhealthy secondaries", async function () { + /** + * Define all input variables + */ + // spIds in mapping must match those in the `users` variable const cNodeEndpointToSpIdMap = { [primary]: primarySpID, [secondary1]: secondary1SpID, [secondary2]: secondary2SpID } + // Mark secondary1 as healthy so it won't sync to it const unhealthyPeers = [secondary1] - // Clock value of secondary1 being less than primary means we would sync from primary to secondary1 if secondary1 is healthy - const replicaSetNodesToUserClockStatusesMap = { + + // Since secondary1.wallet.clock < primary.wallet.clock, we would sync from primary to secondary1 if it were healthy + const replicaToUserInfoMap = { [primary]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} }, [secondary1]: { - [wallet]: 9 + [wallet]: {clock: 9, filesHash: '0xnotabc'} }, [secondary2]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} } } + const userSecondarySyncMetricsMap = {} + // This node must be the primary in order to sync config.set('creatorNodeEndpoint', primary) - // Stub finding syncs never being called because it should short-circuit first when seeing secondary1 is unhealthy - const getNewOrExistingSyncReqStub = sandbox.stub().callsFake((args) => { - throw new Error('getNewOrExistingSyncReq was not expected to be called') - }) - const getCNodeEndpointToSpIdMapStub = sandbox - .stub() - .returns(cNodeEndpointToSpIdMap) - const logger = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } + + /** + * Create all stubs for jobProcessor + */ + + // Stub getNewOrExistingSyncReq() to never be called since it should short-circuit first when seeing secondary1 is unhealthy + const getNewOrExistingSyncReqExpectedConditionsArr = [] + const getNewOrExistingSyncReqStub = getConditionalStub( + 'getNewOrExistingSyncReq', + getNewOrExistingSyncReqExpectedConditionsArr + ) + + const getCNodeEndpointToSpIdMapStub = getGetCNodeEndpointToSpIdMapStub(cNodeEndpointToSpIdMap) + + const computeSyncModeForUserAndReplicaExpectedConditionsArr = [ + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary2][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary2][wallet].filesHash + }, + output: SYNC_MODES.None + } + ] + 
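A note on the stub helper these tests rely on: because getConditionalStub keys its expected conditions by JSON.stringify(input), a stubbed call only matches when the code under test passes an object with the same properties in the same order; any other argument shape throws. A minimal illustration, assuming the helper defined above:

const stub = getConditionalStub('example', [
  { input: { a: 1, b: 2 }, output: 'ok' }
])
stub({ a: 1, b: 2 }) // returns 'ok'
stub({ b: 2, a: 1 }) // throws: different key order -> different JSON.stringify key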
const computeSyncModeForUserAndReplicaStub = getConditionalStub('computeSyncModeForUserAndReplica', computeSyncModeForUserAndReplicaExpectedConditionsArr) const findSyncRequestsJobProcessor = getJobProcessorStub( getNewOrExistingSyncReqStub, - getCNodeEndpointToSpIdMapStub + getCNodeEndpointToSpIdMapStub, + computeSyncModeForUserAndReplicaStub ) - // Verify job outputs the correct results: no syncs because secondary1 would normally sync but it's unhealthy - expect( - findSyncRequestsJobProcessor({ - logger, - users, - unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap - }) - ).to.deep.equal({ + /** + * Verify job outputs the correct results: no syncs because secondary1 would normally sync but it's unhealthy + */ + + const expectedOutput = { duplicateSyncReqs: [], errors: [], jobsToEnqueue: {} + } + const actualOutput = await findSyncRequestsJobProcessor({ + users, + unhealthyPeers, + replicaToUserInfoMap, + userSecondarySyncMetricsMap, + logger }) + expect(actualOutput).to.deep.equal(expectedOutput) expect(getNewOrExistingSyncReqStub).to.not.have.been.called + expect(computeSyncModeForUserAndReplicaStub).to.have.been.calledOnceWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[0].input) }) - it("doesn't sync if spId is mismatched in cNodeEndpointToSpId mapping", function () { - // Set variables that satisfy conditions for user1 to be synced from primary1 to secondary1 (except spId matching) + it("doesn't sync if spId is mismatched in cNodeEndpointToSpId mapping", async function () { + /** + * Define input variables that satisfy conditions for user1 to be synced from primary1 to secondary1 (except spId matching) + */ + // Make secondary1's spId in mapping NOT match the spId in the `users` variable const cNodeEndpointToSpIdMap = { [primary]: primarySpID, [secondary1]: secondary1SpID + 100, [secondary2]: secondary2SpID } + const unhealthyPeers = [] - // Clock value of secondary1 being less than primary means we would sync from primary to secondary1 if spId matched - const replicaSetNodesToUserClockStatusesMap = { + + // Since secondary1.wallet.clock < primary.wallet.clock, we would sync from primary to secondary1 if spID matched + const replicaToUserInfoMap = { [primary]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} }, [secondary1]: { - [wallet]: 9 + [wallet]: {clock: 9, filesHash: '0xnotabc'} }, [secondary2]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} } } + const userSecondarySyncMetricsMap = {} + // This node must be the primary in order to sync config.set('creatorNodeEndpoint', primary) + + /** + * Create all stubs for jobProcessor + */ + // Stub finding syncs never being called because it should short-circuit first when seeing secondary1 spId mismatches - const getNewOrExistingSyncReqStub = sandbox.stub().callsFake((args) => { - throw new Error('getNewOrExistingSyncReq was not expected to be called') - }) - const getCNodeEndpointToSpIdMapStub = sandbox - .stub() - .returns(cNodeEndpointToSpIdMap) - const logger = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } + const getNewOrExistingSyncReqExpectedConditionsArr = [] + const getNewOrExistingSyncReqStub = getConditionalStub( + 'getNewOrExistingSyncReq', + getNewOrExistingSyncReqExpectedConditionsArr + ) + + const getCNodeEndpointToSpIdMapStub = getGetCNodeEndpointToSpIdMapStub(cNodeEndpointToSpIdMap) + + // stub computeSyncModeForUserAndReplica() only being called with primary-secondary2 pair, since primary-secondary1 
processing would short circuit before this call + const computeSyncModeForUserAndReplicaExpectedConditionsArr = [ + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary2][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary2][wallet].filesHash + }, + output: SYNC_MODES.None + } + ] + const computeSyncModeForUserAndReplicaStub = getConditionalStub('computeSyncModeForUserAndReplica', computeSyncModeForUserAndReplicaExpectedConditionsArr) const findSyncRequestsJobProcessor = getJobProcessorStub( getNewOrExistingSyncReqStub, - getCNodeEndpointToSpIdMapStub + getCNodeEndpointToSpIdMapStub, + computeSyncModeForUserAndReplicaStub ) - // Verify job outputs the correct results: no syncs because secondary1 would normally sync but its spId mismatches - expect( - findSyncRequestsJobProcessor({ - logger, - users, - unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap - }) - ).to.deep.equal({ + /** + * Verify job outputs the correct results: no syncs because secondary1 would normally sync but its spId mismatches, and secondary2 is already synced + */ + + const expectedOutput = { duplicateSyncReqs: [], errors: [], jobsToEnqueue: {} + } + const actualOutput = await findSyncRequestsJobProcessor({ + users, + unhealthyPeers, + replicaToUserInfoMap, + userSecondarySyncMetricsMap, + logger }) + expect(actualOutput).to.deep.equal(expectedOutput) expect(getNewOrExistingSyncReqStub).to.not.have.been.called + expect(computeSyncModeForUserAndReplicaStub).to.have.been.calledOnceWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[0].input) }) - it("doesn't sync if success rate is too low", function () { - // Set variables that satisfy conditions for user1 to be synced from primary1 to secondary1 (except success rate) + it("doesn't sync if success rate is too low", async function () { + /** + * Define input variables that satisfy conditions for user1 to be synced from primary1 to secondary1 (except success rate) + */ + // spIds in mapping must match those in the `users` variable const cNodeEndpointToSpIdMap = { [primary]: primarySpID, [secondary1]: secondary1SpID, [secondary2]: secondary2SpID } + const unhealthyPeers = [] - // Clock value of secondary1 being less than primary means we would sync from primary to secondary1 if success rate were higher - const replicaSetNodesToUserClockStatusesMap = { + + // Since secondary1.wallet.clock < primary.wallet.clock, we would sync from primary to secondary1 if sync success rate were higher + const replicaToUserInfoMap = { [primary]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} }, [secondary1]: { - [wallet]: 9 + [wallet]: {clock: 9, filesHash: '0xnotabc'} }, [secondary2]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} } } - // Make success rate lower than threshold + + // Make sync success rate lower than threshold for secondary1 const userSecondarySyncMetricsMap = { [wallet]: { [secondary1]: { successRate: 0, failureCount: 100 }, [secondary2]: { successRate: 1, failureCount: 0 } } } + // This node must be the primary in order to sync config.set('creatorNodeEndpoint', primary) - // Stub finding syncs never being called because it should short-circuit first when seeing low secondary1 success rate - const getNewOrExistingSyncReqStub = sandbox.stub().callsFake((args) => { - throw new Error('getNewOrExistingSyncReq was not expected to be 
called') - }) - const getCNodeEndpointToSpIdMapStub = sandbox - .stub() - .returns(cNodeEndpointToSpIdMap) - const logger = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } + + /** + * Create all stubs for jobProcessor + */ + + // Stub getNewOrExistingSyncReq() never being called because it should short-circuit first when seeing low secondary1 success rate + const getNewOrExistingSyncReqExpectedConditionsArr = [] + const getNewOrExistingSyncReqStub = getConditionalStub( + 'getNewOrExistingSyncReq', + getNewOrExistingSyncReqExpectedConditionsArr + ) + + const getCNodeEndpointToSpIdMapStub = getGetCNodeEndpointToSpIdMapStub(cNodeEndpointToSpIdMap) + + // stub computeSyncModeForUserAndReplica() only being called with primary-secondary2 pair, since primary-secondary1 processing would short circuit before this call + const computeSyncModeForUserAndReplicaExpectedConditionsArr = [ + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary2][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary2][wallet].filesHash + }, + output: SYNC_MODES.None + } + ] + const computeSyncModeForUserAndReplicaStub = getConditionalStub('computeSyncModeForUserAndReplica', computeSyncModeForUserAndReplicaExpectedConditionsArr) const findSyncRequestsJobProcessor = getJobProcessorStub( getNewOrExistingSyncReqStub, - getCNodeEndpointToSpIdMapStub + getCNodeEndpointToSpIdMapStub, + computeSyncModeForUserAndReplicaStub ) - // Verify job outputs the correct results: no syncs because secondary1 would normally sync but its success rate is low - expect( - findSyncRequestsJobProcessor({ - logger, - users, - unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap - }) - ).to.deep.equal({ + /** + * Verify job outputs the correct results: no syncs because secondary1 would normally sync but its success rate is low + */ + const expectedOutput = { duplicateSyncReqs: [], errors: [], jobsToEnqueue: {} + } + const actualOutput = await findSyncRequestsJobProcessor({ + users, + unhealthyPeers, + replicaToUserInfoMap, + userSecondarySyncMetricsMap, + logger }) + expect(actualOutput).to.deep.equal(expectedOutput) expect(getNewOrExistingSyncReqStub).to.not.have.been.called + expect(computeSyncModeForUserAndReplicaStub).to.have.been.calledOnceWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[0].input) }) - it('catches errors from finding syncs', function () { - // Set variables that satisfy conditions for user1 to be synced from primary1 to secondary1 + it('catches errors from finding syncs', async function () { + /** + * Define input variables that satisfy conditions for user1 to be synced from primary1 to secondary1 + */ + // spIds in mapping must match those in the `users` variable const cNodeEndpointToSpIdMap = { [primary]: primarySpID, [secondary1]: secondary1SpID, [secondary2]: secondary2SpID } + const unhealthyPeers = [] - // Clock value of secondary1 being less than primary means we'll sync from primary to secondary1 - const replicaSetNodesToUserClockStatusesMap = { + + // Since secondary1.wallet.clock < primary.wallet.clock, we will sync from primary to secondary1 + const replicaToUserInfoMap = { [primary]: { - [wallet]: 10 + [wallet]: {clock: 10, filesHash: '0xabc'} }, [secondary1]: { - [wallet]: 9 + [wallet]: {clock: 9, filesHash: '0xnotabc'} }, [secondary2]: { - [wallet]: 10 + [wallet]: {clock: 10, 
filesHash: '0xabc'} } } + const userSecondarySyncMetricsMap = {} + // This node must be the primary in order to sync config.set('creatorNodeEndpoint', primary) + + /** + * Create all stubs for jobProcessor + */ + // Stub error when trying to find a new sync to enqueue from primary1 to secondary1 const expectedErrorMsg = 'expectedErrorMsg' + const getNewOrExistingSyncReqExpectedInput = { + userWallet: wallet, + primaryEndpoint: primary, + secondaryEndpoint: secondary1, + syncType: SyncType.Recurring + } const getNewOrExistingSyncReqStub = sandbox.stub().callsFake((args) => { throw new Error(expectedErrorMsg) }) - const getCNodeEndpointToSpIdMapStub = sandbox - .stub() - .returns(cNodeEndpointToSpIdMap) - const logger = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } + + const getCNodeEndpointToSpIdMapStub = getGetCNodeEndpointToSpIdMapStub(cNodeEndpointToSpIdMap) + + // stub computeSyncModeForUserAndReplica() being called with both primary-secondary1 and primary-secondary2 pairs + const computeSyncModeForUserAndReplicaExpectedConditionsArr = [ + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary1][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary1][wallet].filesHash + }, + output: SYNC_MODES.SyncSecondaryFromPrimary + }, + { + input: { + wallet, + primaryClock: replicaToUserInfoMap[primary][wallet].clock, + secondaryClock: replicaToUserInfoMap[secondary2][wallet].clock, + primaryFilesHash: replicaToUserInfoMap[primary][wallet].filesHash, + secondaryFilesHash: replicaToUserInfoMap[secondary2][wallet].filesHash + }, + output: SYNC_MODES.None + } + ] + const computeSyncModeForUserAndReplicaStub = getConditionalStub('computeSyncModeForUserAndReplica', computeSyncModeForUserAndReplicaExpectedConditionsArr) const findSyncRequestsJobProcessor = getJobProcessorStub( getNewOrExistingSyncReqStub, - getCNodeEndpointToSpIdMapStub + getCNodeEndpointToSpIdMapStub, + computeSyncModeForUserAndReplicaStub ) - // Verify job outputs the correct results: sync to user1 to secondary1 because its clock value is behind - expect( - findSyncRequestsJobProcessor({ - logger, - users, - unhealthyPeers, - replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap - }) - ).to.deep.equal({ + /** + * Verify job outputs the correct results: an error syncing from primary to secondary1 + */ + + const expectedOutput = { duplicateSyncReqs: [], errors: [ `Error getting new or existing sync request for user ${wallet} and secondary ${secondary1} - ${expectedErrorMsg}` ], jobsToEnqueue: {} + } + const actualOutput = await findSyncRequestsJobProcessor({ + users, + unhealthyPeers, + replicaToUserInfoMap, + userSecondarySyncMetricsMap, + logger }) - expect(getNewOrExistingSyncReqStub).to.have.been.calledOnceWithExactly({ - userWallet: wallet, - secondaryEndpoint: secondary1, - primaryEndpoint: primary, - syncType: SyncType.Recurring - }) + expect(actualOutput).to.deep.equal(expectedOutput) + expect(getNewOrExistingSyncReqStub).to.have.been.calledOnceWithExactly(getNewOrExistingSyncReqExpectedInput) + expect(computeSyncModeForUserAndReplicaStub).to.have.been.calledTwice + .and.to.have.been.calledWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[0].input) + .and.to.have.been.calledWithExactly(computeSyncModeForUserAndReplicaExpectedConditionsArr[1].input) }) }) diff --git a/creator-node/test/lib/helpers.js 
b/creator-node/test/lib/helpers.js index 41a3a0ef2c9..d9ca052e5bc 100644 --- a/creator-node/test/lib/helpers.js +++ b/creator-node/test/lib/helpers.js @@ -1,6 +1,7 @@ const fs = require('fs') const uuid = require('uuid/v4') const path = require('path') +const crypto = require('crypto') const DiskManager = require('../../src/diskManager') @@ -44,7 +45,17 @@ const saveFileToStorage = (filePath) => { return { fileUUID: fileName, fileDir } } +const computeFilesHash = function (multihashes) { + const multihashString = `${multihashes.join(',')}` + const filesHash = crypto + .createHash('md5') + .update(multihashString) + .digest('hex') + return filesHash +} + module.exports = { uploadTrack, - saveFileToStorage + saveFileToStorage, + computeFilesHash } diff --git a/creator-node/test/monitorState.jobProcessor.test.js b/creator-node/test/monitorState.jobProcessor.test.js index bfa9f3ffe95..bc6c34c6915 100644 --- a/creator-node/test/monitorState.jobProcessor.test.js +++ b/creator-node/test/monitorState.jobProcessor.test.js @@ -24,9 +24,10 @@ describe('test monitorState job processor', function () { getNodeUsersStub, getUnhealthyPeersStub, buildReplicaSetNodesToUserWalletsMapStub, - retrieveClockStatusesForUsersAcrossReplicaSetStub, + retrieveUserInfoFromReplicaSetStub, computeUserSecondarySyncSuccessRatesMapStub, getCNodeEndpointToSpIdMapStub + beforeEach(async function () { const appInfo = await getApp(getLibsMock()) await appInfo.app.get('redisClient').flushdb() @@ -44,7 +45,7 @@ describe('test monitorState job processor', function () { getNodeUsersStub = null getUnhealthyPeersStub = null buildReplicaSetNodesToUserWalletsMapStub = null - retrieveClockStatusesForUsersAcrossReplicaSetStub = null + retrieveUserInfoFromReplicaSetStub = null computeUserSecondarySyncSuccessRatesMapStub = null getCNodeEndpointToSpIdMapStub = null }) @@ -59,13 +60,13 @@ describe('test monitorState job processor', function () { const REPLICA_SET_NODES_TO_USER_WALLETS_MAP = { 'http://healthCn1.co': ['wallet1'] } - const REPLICAS_TO_USER_CLOCK_STATUS_MAP = { - 'http://healthCn1.co': { wallet1: 1 } + const REPLICA_TO_USER_INFO_MAP = { + 'http://healthCn1.co': { wallet1: { clock: 1, filesHash: '0x1' } } } - const RETRIEVE_CLOCK_STATUS_EXTRA_UNHEALTHY_PEERS = new Set() - const RETRIEVE_CLOCK_STATUSES_FOR_USERS_ACROSS_REPLICA_SET_RESP = { - replicasToUserClockStatusMap: REPLICAS_TO_USER_CLOCK_STATUS_MAP, - unhealthyPeers: RETRIEVE_CLOCK_STATUS_EXTRA_UNHEALTHY_PEERS + const RETRIEVE_USER_INFO_EXTRA_UNHEALTHY_PEERS = new Set() + const RETRIEVE_USER_INFO_FROM_REPLICA_SET_RESP = { + replicaToUserInfoMap: REPLICA_TO_USER_INFO_MAP, + unhealthyPeers: RETRIEVE_USER_INFO_EXTRA_UNHEALTHY_PEERS } const USER_SECONDARY_SYNC_SUCCESS_RATES_MAP = { dummyMap: 'dummyMap' } const CNODE_ENDPOINT_TO_SP_ID_MAP = { dummyCNodeMap: 'dummyCNodeMap' } @@ -76,7 +77,7 @@ describe('test monitorState job processor', function () { users = USERS, unhealthyPeers = UNHEALTHY_PEERS, replicaSetNodesToUserWalletsMap = REPLICA_SET_NODES_TO_USER_WALLETS_MAP, - retrieveClockStatusesForUsersAcrossReplicaSetResp = RETRIEVE_CLOCK_STATUSES_FOR_USERS_ACROSS_REPLICA_SET_RESP, + retrieveUserInfoFromReplicaSetResp = RETRIEVE_USER_INFO_FROM_REPLICA_SET_RESP, userSecondarySyncSuccessRatesMap = USER_SECONDARY_SYNC_SUCCESS_RATES_MAP, cNodeEndpointToSpIdMap = CNODE_ENDPOINT_TO_SP_ID_MAP }) { @@ -92,10 +93,10 @@ describe('test monitorState job processor', function () { .stub() .returns(replicaSetNodesToUserWalletsMap) } - if (!retrieveClockStatusesForUsersAcrossReplicaSetStub) { - 
retrieveClockStatusesForUsersAcrossReplicaSetStub = sandbox + if (!retrieveUserInfoFromReplicaSetStub) { + retrieveUserInfoFromReplicaSetStub = sandbox .stub() - .resolves(retrieveClockStatusesForUsersAcrossReplicaSetResp) + .resolves(retrieveUserInfoFromReplicaSetResp) } if (!computeUserSecondarySyncSuccessRatesMapStub) { computeUserSecondarySyncSuccessRatesMapStub = sandbox @@ -127,8 +128,8 @@ describe('test monitorState job processor', function () { getCNodeEndpointToSpIdMap: getCNodeEndpointToSpIdMapStub }, '../stateMachineUtils': { - retrieveClockStatusesForUsersAcrossReplicaSet: - retrieveClockStatusesForUsersAcrossReplicaSetStub + retrieveUserInfoFromReplicaSet: + retrieveUserInfoFromReplicaSetStub } } ) @@ -159,7 +160,7 @@ describe('test monitorState job processor', function () { lastProcessedUserId, users = USERS, unhealthyPeers = UNHEALTHY_PEERS, - replicaSetNodesToUserClockStatusesMap = REPLICAS_TO_USER_CLOCK_STATUS_MAP, + replicaToUserInfoMap = REPLICA_TO_USER_INFO_MAP, userSecondarySyncMetricsMap = USER_SECONDARY_SYNC_SUCCESS_RATES_MAP }) { const monitorJobs = jobResult.jobsToEnqueue[QUEUE_NAMES.STATE_MONITORING] @@ -181,7 +182,7 @@ describe('test monitorState job processor', function () { jobData: { users, unhealthyPeers: Array.from(unhealthyPeers), - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap } }) @@ -191,7 +192,7 @@ describe('test monitorState job processor', function () { jobData: { users, unhealthyPeers: Array.from(unhealthyPeers), - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, userSecondarySyncMetricsMap } }) @@ -267,7 +268,7 @@ describe('test monitorState job processor', function () { buildReplicaSetNodesToUserWalletsMapStub ).to.have.been.calledOnceWithExactly(USERS) expect( - retrieveClockStatusesForUsersAcrossReplicaSetStub + retrieveUserInfoFromReplicaSetStub ).to.have.been.calledOnceWithExactly(REPLICA_SET_NODES_TO_USER_WALLETS_MAP) expect( computeUserSecondarySyncSuccessRatesMapStub @@ -297,7 +298,7 @@ describe('test monitorState job processor', function () { buildReplicaSetNodesToUserWalletsMapStub ).to.have.been.calledOnceWithExactly(USERS) expect( - retrieveClockStatusesForUsersAcrossReplicaSetStub + retrieveUserInfoFromReplicaSetStub ).to.have.been.calledOnceWithExactly(REPLICA_SET_NODES_TO_USER_WALLETS_MAP) expect( computeUserSecondarySyncSuccessRatesMapStub @@ -309,14 +310,14 @@ describe('test monitorState job processor', function () { }) }) - it('should return without throwing when retrieveClockStatusesForUsersAcrossReplicaSet throws an error', async function () { - // Run processStateMonitoringJob with each step succeeding except retrieveClockStatusesForUsersAcrossReplicaSetStub - retrieveClockStatusesForUsersAcrossReplicaSetStub = sandbox + it('should return without throwing when retrieveUserInfoFromReplicaSet throws an error', async function () { + // Run processStateMonitoringJob with each step succeeding except retrieveUserInfoFromReplicaSetStub + retrieveUserInfoFromReplicaSetStub = sandbox .stub() .rejects('test unexpected error') const jobResult = await processStateMonitoringJob({}) - // Verify that retrieveClockStatusesForUsersAcrossReplicaSetStub fails and other steps succeed + // Verify that retrieveUserInfoFromReplicaSetStub fails and other steps succeed expect(getNodeUsersStub).to.have.been.calledOnceWithExactly( DISCOVERY_NODE_ENDPOINT, CONTENT_NODE_ENDPOINT, @@ -328,13 +329,13 @@ describe('test monitorState job processor', function () { buildReplicaSetNodesToUserWalletsMapStub 
).to.have.been.calledOnceWithExactly(USERS) expect( - retrieveClockStatusesForUsersAcrossReplicaSetStub + retrieveUserInfoFromReplicaSetStub ).to.have.been.calledOnceWithExactly(REPLICA_SET_NODES_TO_USER_WALLETS_MAP) expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called verifyJobResult({ jobResult, lastProcessedUserId: USER_ID, - replicaSetNodesToUserClockStatusesMap: {}, + replicaToUserInfoMap: {}, userSecondarySyncMetricsMap: {} }) }) @@ -357,13 +358,13 @@ describe('test monitorState job processor', function () { expect( buildReplicaSetNodesToUserWalletsMapStub ).to.have.been.calledOnceWithExactly(USERS) - expect(retrieveClockStatusesForUsersAcrossReplicaSetStub).to.not.have.been + expect(retrieveUserInfoFromReplicaSetStub).to.not.have.been .called expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called verifyJobResult({ jobResult, lastProcessedUserId: USER_ID, - replicaSetNodesToUserClockStatusesMap: {}, + replicaToUserInfoMap: {}, userSecondarySyncMetricsMap: {} }) }) @@ -382,14 +383,14 @@ describe('test monitorState job processor', function () { ) expect(getUnhealthyPeersStub).to.have.been.calledOnceWithExactly(USERS) expect(buildReplicaSetNodesToUserWalletsMapStub).to.not.have.been.called - expect(retrieveClockStatusesForUsersAcrossReplicaSetStub).to.not.have.been + expect(retrieveUserInfoFromReplicaSetStub).to.not.have.been .called expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called verifyJobResult({ jobResult, lastProcessedUserId: USER_ID, unhealthyPeers: new Set(), - replicaSetNodesToUserClockStatusesMap: {}, + replicaToUserInfoMap: {}, userSecondarySyncMetricsMap: {} }) }) @@ -408,7 +409,7 @@ describe('test monitorState job processor', function () { ) expect(getUnhealthyPeersStub).to.not.have.been.called expect(buildReplicaSetNodesToUserWalletsMapStub).to.not.have.been.called - expect(retrieveClockStatusesForUsersAcrossReplicaSetStub).to.not.have.been + expect(retrieveUserInfoFromReplicaSetStub).to.not.have.been .called expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called verifyJobResult({ @@ -416,7 +417,7 @@ describe('test monitorState job processor', function () { lastProcessedUserId: LAST_PROCESSED_USER_ID, users: [{ user_id: LAST_PROCESSED_USER_ID }], unhealthyPeers: new Set(), - replicaSetNodesToUserClockStatusesMap: {}, + replicaToUserInfoMap: {}, userSecondarySyncMetricsMap: {} }) }) diff --git a/creator-node/test/pollingTracks.test.js b/creator-node/test/pollingTracks.test.js index 9f83d800dd9..a2cfe4bc730 100644 --- a/creator-node/test/pollingTracks.test.js +++ b/creator-node/test/pollingTracks.test.js @@ -6,7 +6,6 @@ const sinon = require('sinon') const uuid = require('uuid/v4') const proxyquire = require('proxyquire') const _ = require('lodash') -const crypto = require('crypto') const config = require('../src/config') const defaultConfig = require('../default-config.json') @@ -26,7 +25,7 @@ const { } = require('./lib/dataSeeds') const { getLibsMock } = require('./lib/libsMock') const { sortKeys } = require('../src/apiSigning') -const { saveFileToStorage } = require('./lib/helpers') +const { saveFileToStorage, computeFilesHash } = require('./lib/helpers') const testAudioFilePath = path.resolve(__dirname, 'testTrack.mp3') const testAudioFileWrongFormatPath = path.resolve( @@ -70,8 +69,11 @@ function _getTestSegmentFilePathAtIndex(index) { return path.join(__dirname, 'test-segments', `segment${suffix}.ts`) } -describe('test Polling Tracks with mocks', function () { - let app, server, 
libsMock, handleTrackContentRoute +describe('test Polling Tracks with mocked IPFS', function () { + let app, + server, + libsMock, + handleTrackContentRoute let session, userId, userWallet const spId = 1 @@ -285,11 +287,7 @@ describe('test Polling Tracks with mocks', function () { const multihashesSorted = filesSorted.map((file) => file.multihash) // Confirm /users/clock_status returns expected info with `returnFilesHash` flag - const multihashStringFull = `{${multihashesSorted.join(',')}}` - const expectedFilesHashFull = crypto - .createHash('md5') - .update(multihashStringFull) - .digest('hex') + const expectedFilesHashFull = computeFilesHash(multihashesSorted) resp = await request(app) .get(`/users/clock_status/${wallet}?returnFilesHash=true`) .expect(200) @@ -304,13 +302,8 @@ describe('test Polling Tracks with mocks', function () { const clockMax = 8 /** clockMin */ - const multihashStringClockMin = `{${multihashesSorted - .slice(clockMin - 1) - .join(',')}}` - const expectedFilesHashClockMin = crypto - .createHash('md5') - .update(multihashStringClockMin) - .digest('hex') + const expectedFilesHashClockMin = computeFilesHash(multihashesSorted + .slice(clockMin - 1)) resp = await request(app) .get( `/users/clock_status/${wallet}?returnFilesHash=true&filesHashClockRangeMin=${clockMin}` @@ -324,13 +317,8 @@ describe('test Polling Tracks with mocks', function () { }) /** clockMax */ - const multihashStringClockMax = `{${multihashesSorted - .slice(0, clockMax - 1) - .join(',')}}` - const expectedFilesHashClockMax = crypto - .createHash('md5') - .update(multihashStringClockMax) - .digest('hex') + const expectedFilesHashClockMax = computeFilesHash(multihashesSorted + .slice(0, clockMax - 1)) resp = await request(app) .get( `/users/clock_status/${wallet}?returnFilesHash=true&filesHashClockRangeMax=${clockMax}` @@ -344,13 +332,8 @@ describe('test Polling Tracks with mocks', function () { }) /** clockMin and clockMax */ - let multihashStringClockRange = `{${multihashesSorted - .slice(clockMin - 1, clockMax - 1) - .join(',')}}` - let expectedFilesHashClockRange = crypto - .createHash('md5') - .update(multihashStringClockRange) - .digest('hex') + let expectedFilesHashClockRange = computeFilesHash(multihashesSorted + .slice(clockMin - 1, clockMax - 1)) resp = await request(app) .get( `/users/clock_status/${wallet}?returnFilesHash=true&filesHashClockRangeMin=${clockMin}&filesHashClockRangeMax=${clockMax}` @@ -389,13 +372,9 @@ describe('test Polling Tracks with mocks', function () { /** partially overlapping clockrange */ const clockMaxTooHigh = numExpectedFilesForUser + 5 - multihashStringClockRange = `{${multihashesSorted - .slice(clockMin - 1, clockMaxTooHigh - 1) - .join(',')}}` - expectedFilesHashClockRange = crypto - .createHash('md5') - .update(multihashStringClockRange) - .digest('hex') + expectedFilesHashClockRange = computeFilesHash( + multihashesSorted.slice(clockMin - 1, clockMaxTooHigh - 1) + ) resp = await request(app) .get( `/users/clock_status/${wallet}?returnFilesHash=true&filesHashClockRangeMin=${clockMin}&filesHashClockRangeMax=${clockMaxTooHigh}` diff --git a/creator-node/test/snapbackSM.test.js b/creator-node/test/snapbackSM.test.js index b31c52324cf..32248bafa8e 100644 --- a/creator-node/test/snapbackSM.test.js +++ b/creator-node/test/snapbackSM.test.js @@ -1,3 +1,5 @@ +/** Integration tests for SnapbackSM module */ + const nock = require('nock') const assert = require('assert') const chai = require('chai') diff --git a/creator-node/test/stateMachineUtils.test.js 
b/creator-node/test/stateMachineUtils.test.js index 90b449a253e..cb9fdf25ad1 100644 --- a/creator-node/test/stateMachineUtils.test.js +++ b/creator-node/test/stateMachineUtils.test.js @@ -9,7 +9,7 @@ const proxyquire = require('proxyquire') const config = require('../src/config') -describe('test retrieveClockStatusesForUsersAcrossReplicaSet()', function () { +describe('test retrieveUserInfoFromReplicaSet()', function () { beforeEach(function () { nock.disableNetConnect() }) @@ -19,26 +19,26 @@ describe('test retrieveClockStatusesForUsersAcrossReplicaSet()', function () { nock.enableNetConnect() }) - it('returns expected clock values and updates unhealthyPeers', async function () { + it('returns expected user info and updates unhealthyPeers', async function () { const healthyCn1 = 'http://healthyCn1.co' const healthyCn2 = 'http://healthyCn2.co' const unhealthyCn = 'http://unhealthyCn.co' - const replicasToWalletsMap = { + const replicaToWalletMap = { [healthyCn1]: ['wallet1', 'wallet2', 'wallet3', 'wallet4', 'wallet5'], [healthyCn2]: ['wallet1', 'wallet2'], [unhealthyCn]: ['wallet1', 'wallet2'] } - const expectedReplicaToClockValueMap = { + const expectedReplicaToUserInfoMap = { [healthyCn1]: { - wallet1: 1, - wallet2: 2, - wallet3: 3, - wallet4: 4, - wallet5: 5 + wallet1: { clock: 1, filesHash: '0x1'}, + wallet2: { clock: 2, filesHash: '0x2'}, + wallet3: { clock: 3, filesHash: '0x3'}, + wallet4: { clock: 4, filesHash: '0x4'}, + wallet5: { clock: 5, filesHash: '0x5'} }, [healthyCn2]: { - wallet1: 10, - wallet2: 20 + wallet1: { clock: 1, filesHash: '0x1'}, + wallet2: { clock: 2, filesHash: '0x2'} }, [unhealthyCn]: {} } @@ -46,27 +46,16 @@ describe('test retrieveClockStatusesForUsersAcrossReplicaSet()', function () { // Mock the axios requests for healthy Content Nodes to return clock values nock(healthyCn1) .post('/users/batch_clock_status') - .query(true) // Match any query because we don't care about signature, timestamp, and spID + .query(queryObj => 'returnFilesHash' in queryObj && queryObj.returnFilesHash === 'true') .times(3) // 3 times because there are 5 wallets and the batch size is 2 wallets per request .reply(200, function (uri, requestBody) { const { walletPublicKeys } = requestBody - console.log(`cn1 walletPublicKeys: ${walletPublicKeys}`) - console.log( - `returning ${JSON.stringify( - walletPublicKeys.map((wallet) => { - return { - wallet, - clock: expectedReplicaToClockValueMap[healthyCn1][wallet] - } - }) - )}` - ) return { data: { users: walletPublicKeys.map((wallet) => { return { walletPublicKey: wallet, - clock: expectedReplicaToClockValueMap[healthyCn1][wallet] + ...expectedReplicaToUserInfoMap[healthyCn1][wallet] } }) } @@ -74,7 +63,7 @@ describe('test retrieveClockStatusesForUsersAcrossReplicaSet()', function () { }) nock(healthyCn2) .post('/users/batch_clock_status') - .query(true) // Match any query because we don't care about signature, timestamp, and spID + .query(queryObj => 'returnFilesHash' in queryObj && queryObj.returnFilesHash === 'true') .reply(200, function (uri, requestBody) { const { walletPublicKeys } = requestBody return { @@ -82,7 +71,7 @@ describe('test retrieveClockStatusesForUsersAcrossReplicaSet()', function () { users: walletPublicKeys.map((wallet) => { return { walletPublicKey: wallet, - clock: expectedReplicaToClockValueMap[healthyCn2][wallet] + ...expectedReplicaToUserInfoMap[healthyCn2][wallet] } }) } @@ -92,13 +81,13 @@ describe('test retrieveClockStatusesForUsersAcrossReplicaSet()', function () { // Mock the axios request to the unhealthy 
Content Node to return an error nock(unhealthyCn) .post('/users/batch_clock_status') - .query(true) // Match any because we don't care about signature, timestamp, and spID + .query(queryObj => 'returnFilesHash' in queryObj && queryObj.returnFilesHash === 'true') .times(2) // It retries the failure once .reply(500) - // Mock retrieveClockStatusesForUsersAcrossReplicaSet to have our desired config and constants + // Mock retrieveUserInfoFromReplicaSet to have our desired config and constants config.set('maxBatchClockStatusBatchSize', 2) - const { retrieveClockStatusesForUsersAcrossReplicaSet } = proxyquire( + const { retrieveUserInfoFromReplicaSet } = proxyquire( '../src/services/stateMachineManager/stateMachineUtils.js', { '../../config': config, @@ -109,16 +98,16 @@ describe('test retrieveClockStatusesForUsersAcrossReplicaSet()', function () { } ) - const { replicasToUserClockStatusMap, unhealthyPeers } = - await retrieveClockStatusesForUsersAcrossReplicaSet(replicasToWalletsMap) + const { replicaToUserInfoMap, unhealthyPeers } = + await retrieveUserInfoFromReplicaSet(replicaToWalletMap) // Verify that all mocked endpoints were been hit the expected number of times expect(nock.isDone()).to.be.true // Verify that each wallet had the expected clock value and the unhealthy node was marked as unhealthy - expect(Object.keys(replicasToUserClockStatusMap)).to.have.lengthOf(3) - expect(replicasToUserClockStatusMap).to.deep.equal( - expectedReplicaToClockValueMap + expect(Object.keys(replicaToUserInfoMap)).to.have.lengthOf(3) + expect(replicaToUserInfoMap).to.deep.equal( + expectedReplicaToUserInfoMap ) expect(unhealthyPeers).to.have.property('size', 1) expect(unhealthyPeers).to.include('http://unhealthyCn.co') diff --git a/creator-node/test/stateMonitoringUtils.test.js b/creator-node/test/stateMonitoringUtils.test.js index e38c74a19a3..f488291657e 100644 --- a/creator-node/test/stateMonitoringUtils.test.js +++ b/creator-node/test/stateMonitoringUtils.test.js @@ -8,7 +8,9 @@ chai.use(require('chai-as-promised')) const proxyquire = require('proxyquire') const _ = require('lodash') const { CancelToken } = require('axios').default +const assert = require('assert') +const DBManager = require('../src/dbManager') const config = require('../src/config') const { getApp } = require('./lib/app') const { getLibsMock } = require('./lib/libsMock') @@ -16,10 +18,12 @@ const Utils = require('../src/utils') const { getLatestUserIdFromDiscovery, buildReplicaSetNodesToUserWalletsMap, - computeUserSecondarySyncSuccessRatesMap + computeUserSecondarySyncSuccessRatesMap, + computeSyncModeForUserAndReplica } = require('../src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils') const { - SyncType + SyncType, + SYNC_MODES } = require('../src/services/stateMachineManager/stateMachineConstants') const SecondarySyncHealthTracker = require('../src/snapbackSM/secondarySyncHealthTracker') @@ -439,3 +443,631 @@ describe('test computeUserSecondarySyncSuccessRatesMap()', function () { ) }) }) + +describe('test aggregateReconfigAndPotentialSyncOps()', function () { + let server + + beforeEach(async function () { + const appInfo = await getApp(getLibsMock()) + server = appInfo.server + const app = appInfo.app + await app.get('redisClient').flushdb() + config.set('spID', 1) + }) + + afterEach(async function () { + await server.close() + }) + + function getAggregateReconfigAndPotentialSyncOps(config) { + const { aggregateReconfigAndPotentialSyncOps } = proxyquire( + 
'../src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils.js', + { + '../../../config': config + } + ) + return aggregateReconfigAndPotentialSyncOps + } + + it('if the self node is the secondary and a primary spId is different from what is on chain, issue reconfig', async function () { + // Mock that one of the nodes got reregistered from spId 3 to spId 4 + const endpointToSPIdMap = { + 'http://cnOriginallySpId3ReregisteredAsSpId4.co': 4, + 'http://cnWithSpId2.co': 2, + 'http://cnWithSpId3.co': 3 + } + const thisContentNodeEndpoint = 'http://cnWithSpId2.co' + + const nodeUsers = [ + { + user_id: 1, + wallet: '0x00fc5bff87afb1f15a02e82c3f671cf5c9ad9e6d', + primary: 'http://cnOriginallySpId3ReregisteredAsSpId4.co', + secondary1: 'http://cnWithSpId2.co', + secondary2: 'http://cnWithSpId3.co', + primarySpID: 1, + secondary1SpID: 2, + secondary2SpID: 3 + } + ] + const unhealthyPeers = new Set() + const userSecondarySyncMetricsMap = { + [nodeUsers[0].wallet]: { + 'http://cnWithSpId2.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + }, + 'http://cnWithSpId3.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + } + } + } + const aggregateReconfigAndPotentialSyncOps = + getAggregateReconfigAndPotentialSyncOps(config) + const { requiredUpdateReplicaSetOps, potentialSyncRequests } = + await aggregateReconfigAndPotentialSyncOps( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint + ) + + // Make sure that the CN with the different spId gets put into `requiredUpdateReplicaSetOps` + expect(requiredUpdateReplicaSetOps) + .to.have.nested.property('[0]') + .that.has.property('unhealthyReplicas') + .that.has.keys(['http://cnOriginallySpId3ReregisteredAsSpId4.co']) + expect(potentialSyncRequests).to.have.lengthOf(0) + }) + + it('if the self node is the primary and a secondary spId is different from what is on chain, issue reconfig', async function () { + // Mock that one of the nodes got reregistered from spId 3 to spId 4 + const endpointToSPIdMap = { + 'http://some_healthy_primary.co': 1, + 'http://cnWithSpId2.co': 2, + 'http://cnOriginallySpId3ReregisteredAsSpId4.co': 4 + } + + const thisContentNodeEndpoint = 'http://some_healthy_primary.co' + const nodeUsers = [ + { + user_id: 1, + wallet: '0x00fc5bff87afb1f15a02e82c3f671cf5c9ad9e6d', + primary: 'http://some_healthy_primary.co', + secondary1: 'http://cnWithSpId2.co', + secondary2: 'http://cnOriginallySpId3ReregisteredAsSpId4.co', + primarySpID: 1, + secondary1SpID: 2, + secondary2SpID: 3 + } + ] + const unhealthyPeers = new Set() + const userSecondarySyncMetricsMap = { + [nodeUsers[0].wallet]: { + 'http://cnWithSpId2.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + }, + 'http://cnOriginallySpId3ReregisteredAsSpId4.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + } + } + } + const aggregateReconfigAndPotentialSyncOps = + getAggregateReconfigAndPotentialSyncOps(config) + const { requiredUpdateReplicaSetOps, potentialSyncRequests } = + await aggregateReconfigAndPotentialSyncOps( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint + ) + + // Make sure that the CN with the different spId gets put into `requiredUpdateReplicaSetOps` + expect(requiredUpdateReplicaSetOps) + .to.have.nested.property('[0]') + .that.has.property('unhealthyReplicas') + .that.has.keys(['http://cnOriginallySpId3ReregisteredAsSpId4.co']) + expect(potentialSyncRequests) + .to.have.nested.property('[0]') + 
.that.has.property('endpoint') + .that.equals('http://cnWithSpId2.co') + }) + + it('if the self node (primary) is the same as the SP with a different spId, do not issue reconfig', async function () { + // Mock that one of the nodes got reregistered from spId 3 to spId 4 + const endpointToSPIdMap = { + 'http://some_healthy_primary.co': 4, + 'http://cnWithSpId2.co': 2, + 'http://cnWithSpId3.co': 3 + } + + const thisContentNodeEndpoint = 'http://some_healthy_primary.co' + const nodeUsers = [ + { + user_id: 1, + wallet: '0x00fc5bff87afb1f15a02e82c3f671cf5c9ad9e6d', + primary: 'http://some_healthy_primary.co', + secondary1: 'http://cnWithSpId2.co', + secondary2: 'http://cnWithSpId3.co', + primarySpID: 1, + secondary1SpID: 2, + secondary2SpID: 3 + } + ] + const unhealthyPeers = new Set() + const userSecondarySyncMetricsMap = { + [nodeUsers[0].wallet]: { + 'http://cnWithSpId2.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + }, + 'http://cnWithSpId3.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + } + } + } + const aggregateReconfigAndPotentialSyncOps = + getAggregateReconfigAndPotentialSyncOps(config) + const { requiredUpdateReplicaSetOps, potentialSyncRequests } = + await aggregateReconfigAndPotentialSyncOps( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint + ) + + // Make sure that the CN with the different spId gets put into `requiredUpdateReplicaSetOps` + expect(requiredUpdateReplicaSetOps).to.have.lengthOf(0) + expect(potentialSyncRequests).to.have.lengthOf(2) + expect(potentialSyncRequests) + .to.have.nested.property('[0]') + .that.has.property('endpoint') + .that.equals('http://cnWithSpId2.co') + expect(potentialSyncRequests) + .to.have.nested.property('[1]') + .that.has.property('endpoint') + .that.equals('http://cnWithSpId3.co') + }) + + it('if the self node (secondary) is the same as the SP with a different spId, do not issue reconfig', async function () { + // Mock that one of the nodes got reregistered from spId 3 to spId 4 + const endpointToSPIdMap = { + 'http://some_healthy_primary.co': 1, + 'http://cnWithSpId2.co': 2, + 'http://cnOriginallySpId3ReregisteredAsSpId4.co': 4 + } + + const thisContentNodeEndpoint = + 'http://cnOriginallySpId3ReregisteredAsSpId4.co' + const nodeUsers = [ + { + user_id: 1, + wallet: '0x00fc5bff87afb1f15a02e82c3f671cf5c9ad9e6d', + primary: 'http://some_healthy_primary.co', + secondary1: 'http://cnWithSpId2.co', + secondary2: 'http://cnOriginallySpId3ReregisteredAsSpId4.co', + primarySpID: 1, + secondary1SpID: 2, + secondary2SpID: 3 + } + ] + const unhealthyPeers = new Set() + const userSecondarySyncMetricsMap = { + [nodeUsers[0].wallet]: { + 'http://cnWithSpId2.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + }, + 'http://cnOriginallySpId3ReregisteredAsSpId4.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + } + } + } + const aggregateReconfigAndPotentialSyncOps = + getAggregateReconfigAndPotentialSyncOps(config) + const { requiredUpdateReplicaSetOps, potentialSyncRequests } = + await aggregateReconfigAndPotentialSyncOps( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint + ) + + expect(requiredUpdateReplicaSetOps).to.have.lengthOf(0) + expect(potentialSyncRequests).to.have.lengthOf(0) + }) + + it('if any replica set node is not in the map, issue reconfig', async function () { + // Mock the deregistered node to not have any spId + const endpointToSPIdMap = { + 
'http://some_healthy_primary.co': 1, + 'http://cnWithSpId2.co': 2 + } + + const thisContentNodeEndpoint = 'http://some_healthy_primary.co' + const nodeUsers = [ + { + user_id: 1, + wallet: '0x00fc5bff87afb1f15a02e82c3f671cf5c9ad9e6d', + primary: 'http://some_healthy_primary.co', + secondary1: 'http://cnWithSpId2.co', + secondary2: 'http://deregisteredCN.co', + primarySpID: 1, + secondary1SpID: 2, + secondary2SpID: 3 + } + ] + const unhealthyPeers = new Set() + const userSecondarySyncMetricsMap = { + [nodeUsers[0].wallet]: { + 'http://cnWithSpId2.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + }, + 'http://deregisteredCN.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + } + } + } + const aggregateReconfigAndPotentialSyncOps = + getAggregateReconfigAndPotentialSyncOps(config) + const { requiredUpdateReplicaSetOps, potentialSyncRequests } = + await aggregateReconfigAndPotentialSyncOps( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint + ) + + expect(requiredUpdateReplicaSetOps) + .to.have.nested.property('[0]') + .that.has.property('unhealthyReplicas') + .that.has.keys(['http://deregisteredCN.co']) + expect(potentialSyncRequests) + .to.have.nested.property('[0]') + .that.has.property('endpoint') + .that.equals('http://cnWithSpId2.co') + }) + + it('if the self node (primary) and 1 secondary are healthy but not the other secondary, issue reconfig for the unhealthy secondary', async function () { + const endpointToSPIdMap = { + 'http://some_healthy_primary.co': 1, + 'http://cnWithSpId2.co': 2, + 'http://unhealthyCnWithSpId3.co': 3 + } + + const thisContentNodeEndpoint = 'http://some_healthy_primary.co' + const nodeUsers = [ + { + user_id: 1, + wallet: '0x00fc5bff87afb1f15a02e82c3f671cf5c9ad9e6d', + primary: 'http://some_healthy_primary.co', + secondary1: 'http://cnWithSpId2.co', + secondary2: 'http://unhealthyCnWithSpId3.co', + primarySpID: 1, + secondary1SpID: 2, + secondary2SpID: 3 + } + ] + const unhealthyPeers = new Set(['http://unhealthyCnWithSpId3.co']) + const userSecondarySyncMetricsMap = { + [nodeUsers[0].wallet]: { + 'http://cnWithSpId2.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + }, + 'http://unhealthyCnWithSpId3.co': { + successRate: 1, + successCount: 0, + failureCount: 0 + } + } + } + const aggregateReconfigAndPotentialSyncOps = + getAggregateReconfigAndPotentialSyncOps(config) + const { requiredUpdateReplicaSetOps, potentialSyncRequests } = + await aggregateReconfigAndPotentialSyncOps( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint + ) + + // Make sure that the unhealthy secondary put into `requiredUpdateReplicaSetOps` + expect(requiredUpdateReplicaSetOps).to.have.lengthOf(1) + expect(requiredUpdateReplicaSetOps) + .to.have.nested.property('[0]') + .that.has.property('unhealthyReplicas') + .that.has.property('size', 1) + expect(requiredUpdateReplicaSetOps) + .to.have.nested.property('[0]') + .that.has.property('unhealthyReplicas') + .that.has.keys(['http://unhealthyCnWithSpId3.co']) + expect(potentialSyncRequests).to.have.lengthOf(1) + expect(potentialSyncRequests) + .to.have.nested.property('[0]') + .that.has.property('endpoint') + .that.equals('http://cnWithSpId2.co') + }) + + it('if the self node (primary) and and secondaries are healthy but sync success rate is low, issue reconfig', async function () { + config.set('minimumFailedSyncRequestsBeforeReconfig', 5) + 
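+      // NOTE: the reconfig decision for low sync success is presumably gated on both
+      // thresholds set by the two config values in this test: at least
+      // `minimumFailedSyncRequestsBeforeReconfig` (5) failed syncs AND a success rate
+      // below `minimumSecondaryUserSyncSuccessPercent` (25%). A minimal sketch of the
+      // assumed check, not the actual implementation:
+      //   const isLowSyncSuccess = (metrics) =>
+      //     metrics.failureCount >= config.get('minimumFailedSyncRequestsBeforeReconfig') &&
+      //     metrics.successRate * 100 < config.get('minimumSecondaryUserSyncSuccessPercent')
+      // With the metrics defined below, cnWithSpId3 (successRate 0.1, failureCount 9)
+      // breaches both conditions while cnWithSpId2 does not.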
config.set('minimumSecondaryUserSyncSuccessPercent', 25) + const endpointToSPIdMap = { + 'http://some_healthy_primary.co': 1, + 'http://cnWithSpId2.co': 2, + 'http://cnWithSpId3.co': 3 + } + + const thisContentNodeEndpoint = 'http://some_healthy_primary.co' + + const nodeUsers = [ + { + user_id: 1, + wallet: '0x00fc5bff87afb1f15a02e82c3f671cf5c9ad9e6d', + primary: 'http://some_healthy_primary.co', + secondary1: 'http://cnWithSpId2.co', + secondary2: 'http://cnWithSpId3.co', + primarySpID: 1, + secondary1SpID: 2, + secondary2SpID: 3 + } + ] + + const unhealthyPeers = new Set() + const userSecondarySyncMetricsMap = { + [nodeUsers[0].wallet]: { + 'http://cnWithSpId2.co': { + successRate: 1, + successCount: 1, + failureCount: 0 + }, + 'http://cnWithSpId3.co': { + successRate: 0.1, + successCount: 1, + failureCount: 9 + } + } + } + const aggregateReconfigAndPotentialSyncOps = + getAggregateReconfigAndPotentialSyncOps(config) + const { requiredUpdateReplicaSetOps, potentialSyncRequests } = + await aggregateReconfigAndPotentialSyncOps( + nodeUsers, + unhealthyPeers, + userSecondarySyncMetricsMap, + endpointToSPIdMap, + thisContentNodeEndpoint + ) + + // Make sure that the CN with low sync success put into `requiredUpdateReplicaSetOps` + expect(requiredUpdateReplicaSetOps).to.have.lengthOf(1) + expect(requiredUpdateReplicaSetOps) + .to.have.nested.property('[0]') + .that.has.property('unhealthyReplicas') + .that.has.property('size', 1) + expect(requiredUpdateReplicaSetOps) + .to.have.nested.property('[0]') + .that.has.property('unhealthyReplicas') + .that.has.keys(['http://cnWithSpId3.co']) + expect(potentialSyncRequests).to.have.lengthOf(1) + expect(potentialSyncRequests) + .to.have.nested.property('[0]') + .that.has.property('endpoint') + .that.equals('http://cnWithSpId2.co') + }) +}) + +describe('Test computeSyncModeForUserAndReplica()', function () { + let primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash, + primaryFilesHashMock + + // Can be anything for test purposes + const wallet = 'wallet' + + it('Throws if missing or invalid params', async function () { + primaryClock = 10 + secondaryClock = 10 + primaryFilesHash = undefined + secondaryFilesHash = undefined + + try { + await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + } catch (e) { + assert.strictEqual( + e.message, + '[computeSyncModeForUserAndReplica()] Error: Missing or invalid params' + ) + } + }) + + it('Returns SYNC_MODES.None if clocks and filesHashes equal', async function () { + primaryClock = 10 + secondaryClock = primaryClock + primaryFilesHash = '0x123' + secondaryFilesHash = primaryFilesHash + + const syncMode = await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + + assert.strictEqual(syncMode, SYNC_MODES.None) + }) + + it('Returns SYNC_MODES.MergePrimaryAndSecondary if clocks equal and filesHashes unequal', async function () { + primaryClock = 10 + secondaryClock = primaryClock + primaryFilesHash = '0x123' + secondaryFilesHash = '0x456' + + const syncMode = await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + + assert.strictEqual(syncMode, SYNC_MODES.MergePrimaryAndSecondary) + }) + + it('Returns SYNC_MODES.MergePrimaryAndSecondary if primaryClock < secondaryClock', async function () { + primaryClock = 5 + secondaryClock = 10 + primaryFilesHash = '0x123' + 
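+      // NOTE: with the secondary clock (10) ahead of the primary clock (5), the primary has
+      // presumably lost or rolled back state, so the mode asserted below is expected to be
+      // MergePrimaryAndSecondary rather than a one-way SyncSecondaryFromPrimary, regardless
+      // of the (intentionally unequal) filesHashes.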
secondaryFilesHash = '0x456' + + const syncMode = await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + + assert.strictEqual(syncMode, SYNC_MODES.MergePrimaryAndSecondary) + }) + + it('Returns SYNC_MODES.SyncSecondaryFromPrimary if primaryClock > secondaryClock & secondaryFilesHash === null', async function () { + primaryClock = 10 + secondaryClock = 5 + primaryFilesHash = '0x123' + secondaryFilesHash = null + + const syncMode = await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + + assert.strictEqual(syncMode, SYNC_MODES.SyncSecondaryFromPrimary) + }) + + describe('primaryClock > secondaryClock', function () { + it('Returns SYNC_MODES.SyncSecondaryFromPrimary if primaryFilesHashForRange = secondaryFilesHash', async function () { + primaryClock = 10 + secondaryClock = 5 + primaryFilesHash = '0x123' + secondaryFilesHash = '0x456' + + // Mock DBManager.fetchFilesHashFromDB() to return `secondaryFilesHash` for clock range + const DBManagerMock = DBManager + DBManagerMock.fetchFilesHashFromDB = async () => { + return secondaryFilesHash + } + proxyquire('../src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils', { + '../../../dbManager': DBManagerMock + }) + + const syncMode = await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + + assert.strictEqual(syncMode, SYNC_MODES.SyncSecondaryFromPrimary) + }) + + it('Returns SYNC_MODES.MergePrimaryAndSecondary if primaryFilesHashForRange != secondaryFilesHash', async function () { + primaryClock = 10 + secondaryClock = 5 + primaryFilesHash = '0x123' + secondaryFilesHash = '0x456' + primaryFilesHashMock = '0x789' + + // Mock DBManager.fetchFilesHashFromDB() to return different filesHash for clock range + const DBManagerMock = DBManager + DBManagerMock.fetchFilesHashFromDB = async () => { + return primaryFilesHashMock + } + proxyquire('../src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils', { + '../../../dbManager': DBManagerMock + }) + + const syncMode = await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + + assert.strictEqual(syncMode, SYNC_MODES.MergePrimaryAndSecondary) + }) + + it("Throws error primaryFilesHashForRange can't be retrieved", async function () { + // Increase mocha test timeout from default 2s to accommodate `async-retry` runtime + this.timeout(30000) // 30s + + primaryClock = 10 + secondaryClock = 5 + primaryFilesHash = '0x123' + secondaryFilesHash = '0x456' + + // Mock DBManager.fetchFilesHashFromDB() to throw error + const errorMsg = 'Mock - Failed to fetch filesHash' + const DBManagerMock = require('../src/dbManager') + DBManagerMock.fetchFilesHashFromDB = async () => { + throw new Error(errorMsg) + } + proxyquire('../src/services/stateMachineManager/stateMonitoring/stateMonitoringUtils', { + '../../../dbManager': DBManagerMock + }) + + try { + await computeSyncModeForUserAndReplica({ + wallet, + primaryClock, + secondaryClock, + primaryFilesHash, + secondaryFilesHash + }) + } catch (e) { + assert.strictEqual( + e.message, + `[computeSyncModeForUserAndReplica()] [DBManager.fetchFilesHashFromDB()] Error - ${errorMsg}` + ) + } + }) + }) +}) diff --git a/creator-node/test/updateReplicaSet.jobProcessor.test.js b/creator-node/test/updateReplicaSet.jobProcessor.test.js index 
4f2cafb8c12..b5c07415b8a 100644 --- a/creator-node/test/updateReplicaSet.jobProcessor.test.js +++ b/creator-node/test/updateReplicaSet.jobProcessor.test.js @@ -20,6 +20,7 @@ const { expect } = chai describe('test updateReplicaSet job processor', function () { let server, sandbox, originalContentNodeEndpoint, logger + beforeEach(async function () { const appInfo = await getApp(getLibsMock()) await appInfo.app.get('redisClient').flushdb() @@ -130,11 +131,11 @@ describe('test updateReplicaSet job processor', function () { // Mark secondary1 as unhealthy and fourthHealthyNode as not having any user state const retrieveClockValueForUserFromReplicaStub = sandbox.stub().resolves(-1) const unhealthyReplicas = [secondary1] - const replicaSetNodesToUserClockStatusesMap = { - [primary]: 1, - [secondary1]: 1, - [secondary2]: 1, - [fourthHealthyNode]: -1 + const replicaToUserInfoMap = { + [primary]: { clock: 1 }, + [secondary1]: { clock: 1 }, + [secondary2]: { clock: 1 }, + [fourthHealthyNode]: { clock: -1 } } const updateReplicaSetJobProcessor = getJobProcessorStub({ @@ -153,7 +154,7 @@ describe('test updateReplicaSet job processor', function () { secondary1, secondary2, unhealthyReplicas, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes: [RECONFIG_MODES.ONE_SECONDARY.key] }) ).to.eventually.be.fulfilled.and.deep.equal({ @@ -202,11 +203,11 @@ describe('test updateReplicaSet job processor', function () { // Mark secondary1 as unhealthy and fourthHealthyNode as not having any user state const retrieveClockValueForUserFromReplicaStub = sandbox.stub().resolves(-1) const unhealthyReplicas = [secondary1] - const replicaSetNodesToUserClockStatusesMap = { - [primary]: 1, - [secondary1]: 1, - [secondary2]: 1, - [fourthHealthyNode]: -1 + const replicaToUserInfoMap = { + [primary]: { clock: 1 }, + [secondary1]: { clock: 1 }, + [secondary2]: { clock: 1 }, + [fourthHealthyNode]: { clock: -1 } } const updateReplicaSetJobProcessor = getJobProcessorStub({ @@ -225,7 +226,7 @@ describe('test updateReplicaSet job processor', function () { secondary1, secondary2, unhealthyReplicas, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes: [RECONFIG_MODES.RECONFIG_DISABLED.key] // Disable reconfigs }) ).to.eventually.be.fulfilled.and.deep.equal({ @@ -259,11 +260,11 @@ describe('test updateReplicaSet job processor', function () { // Mark all nodes in the replica set as unhealthy and fourthHealthyNode as not having any user state const retrieveClockValueForUserFromReplicaStub = sandbox.stub().resolves(-1) const unhealthyReplicas = [primary, secondary1, secondary2] - const replicaSetNodesToUserClockStatusesMap = { - [primary]: 1, - [secondary1]: 1, - [secondary2]: 1, - [fourthHealthyNode]: -1 + const replicaToUserInfoMap = { + [primary]: { clock: 1 }, + [secondary1]: { clock: 1 }, + [secondary2]: { clock: 1 }, + [fourthHealthyNode]: { clock: -1 } } const updateReplicaSetJobProcessor = getJobProcessorStub({ @@ -282,7 +283,7 @@ describe('test updateReplicaSet job processor', function () { secondary1, secondary2, unhealthyReplicas, - replicaSetNodesToUserClockStatusesMap, + replicaToUserInfoMap, enabledReconfigModes: [RECONFIG_MODES.ENTIRE_REPLICA_SET.key] }) ).to.eventually.be.fulfilled.and.deep.equal({ From 490eab3cf5a733b8d92a679dce6d153a21aa127c Mon Sep 17 00:00:00 2001 From: Johannes Naylor Date: Tue, 5 Jul 2022 13:34:28 -0400 Subject: [PATCH 04/12] Remove old run data from network monitoring DB --- .../src/db/migrations/create_tables.ts | 18 +++++++------ 
.../network-monitoring/src/discovery/index.ts | 4 +++ .../src/discovery/queries.ts | 25 +++++++++++++++++++ 3 files changed, 40 insertions(+), 7 deletions(-) diff --git a/discovery-provider/plugins/network-monitoring/src/db/migrations/create_tables.ts b/discovery-provider/plugins/network-monitoring/src/db/migrations/create_tables.ts index 65834144bf0..870867b0a7f 100644 --- a/discovery-provider/plugins/network-monitoring/src/db/migrations/create_tables.ts +++ b/discovery-provider/plugins/network-monitoring/src/db/migrations/create_tables.ts @@ -28,7 +28,8 @@ const migration: RunnableMigration = { run_id INT, CONSTRAINT fk_run_id FOREIGN KEY (run_id) - REFERENCES network_monitoring_index_blocks(run_id), + REFERENCES network_monitoring_index_blocks(run_id) + ON DELETE CASCADE, PRIMARY KEY (run_id, spID) ); @@ -49,7 +50,8 @@ const migration: RunnableMigration = { secondary2SpID INT, CONSTRAINT fk_run_id FOREIGN KEY (run_id) - REFERENCES network_monitoring_index_blocks(run_id), + REFERENCES network_monitoring_index_blocks(run_id) + ON DELETE CASCADE, CONSTRAINT fk_primarySpID FOREIGN KEY (run_id, primarySpID) REFERENCES network_monitoring_content_nodes(run_id, spID), @@ -74,6 +76,7 @@ const migration: RunnableMigration = { CONSTRAINT fk_run_id FOREIGN KEY (run_id) REFERENCES network_monitoring_index_blocks(run_id) + ON DELETE CASCADE ); `) @@ -86,7 +89,8 @@ const migration: RunnableMigration = { content_node_spID INT, CONSTRAINT fk_run_id FOREIGN KEY (run_id) - REFERENCES network_monitoring_index_blocks(run_id), + REFERENCES network_monitoring_index_blocks(run_id) + ON DELETE CASCADE, CONSTRAINT fk_content_node_spID FOREIGN KEY (run_id, content_node_spID) REFERENCES network_monitoring_content_nodes(run_id, spID) @@ -95,11 +99,11 @@ const migration: RunnableMigration = { }, down: async (params: MigrationParams) => { await params.context.sequelize.query(` - DROP TABLE network_monitoring_index_blocks; - DROP TABLE network_monitoring_content_nodes; - DROP TABLE network_monitoring_users; - DROP TABLE network_monitoring_cids_from_discovery; DROP TABLE network_monitoring_cids_from_content; + DROP TABLE network_monitoring_cids_from_discovery; + DROP TABLE network_monitoring_users; + DROP TABLE network_monitoring_content_nodes; + DROP TABLE network_monitoring_index_blocks; `) } } diff --git a/discovery-provider/plugins/network-monitoring/src/discovery/index.ts b/discovery-provider/plugins/network-monitoring/src/discovery/index.ts index c7ffcbc1f56..ef45e3c60da 100644 --- a/discovery-provider/plugins/network-monitoring/src/discovery/index.ts +++ b/discovery-provider/plugins/network-monitoring/src/discovery/index.ts @@ -2,6 +2,7 @@ import { indexingDiscoveryDurationGauge } from "../prometheus" import { createNewRun, + deleteOldRunData, importCids, importContentNodes, importUsers @@ -17,6 +18,9 @@ export const indexDiscovery = async (): Promise => { // Create new run in table `network_monitoring_index_blocks` const run_id = await createNewRun() + // Delete old runs + await deleteOldRunData(run_id) + // Pull Content Nodes list into table `network_monitoring_content_nodes` await importContentNodes(run_id) diff --git a/discovery-provider/plugins/network-monitoring/src/discovery/queries.ts b/discovery-provider/plugins/network-monitoring/src/discovery/queries.ts index 37b83bd64b1..a04f45b513f 100644 --- a/discovery-provider/plugins/network-monitoring/src/discovery/queries.ts +++ b/discovery-provider/plugins/network-monitoring/src/discovery/queries.ts @@ -49,6 +49,31 @@ export const createNewRun = async (): 
Promise => { return run_id } +// Delete old runs so the postgres DB doesn't hog disk space +export const deleteOldRunData = async (run_id: number): Promise => { + console.log(`[${run_id}] deleting old run data`) + + // Number of runs to keep in the DB + const latestRunsToKeep = 3 + const toDelete = run_id - latestRunsToKeep + + if (toDelete <= 0) { + console.log("\t-> nothing to delete") + return + } + + // Delete old runs + console.log("\t-> network_monitoring_index_blocks + cascading") + await sequelizeConn.query(` + DELETE FROM network_monitoring_index_blocks + WHERE run_id < :toDelete; + `, { + type: QueryTypes.DELETE, + replacements: { toDelete } + }) + +} + export const importContentNodes = async (run_id: number) => { console.log(`[${run_id}] importing content nodes`) From 27dff6a24df9452f5c3e1e85beef9d355620262b Mon Sep 17 00:00:00 2001 From: Dheeraj Manjunath Date: Tue, 5 Jul 2022 14:01:26 -0400 Subject: [PATCH 05/12] Add support for content nodes as prometheus targets (#3380) * Add support for content nodes in prometheus scraping * Fix generateJobYaml --- monitoring/prometheus/generateProm.js | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/monitoring/prometheus/generateProm.js b/monitoring/prometheus/generateProm.js index 8584f91e305..2779778482e 100644 --- a/monitoring/prometheus/generateProm.js +++ b/monitoring/prometheus/generateProm.js @@ -11,7 +11,7 @@ const readFromFileAndWriteToStream = (stream, filename) => { stream.write("\n") } -const generateJobYaml = (url, env, scheme = 'https', component = 'discovery-provider') => { +const generateJobYaml = ({ url, env, scheme = 'https', component = 'discovery-provider' }) => { url = url.replace("https://", "").replace("http://", "") sanitizedUrl = url.split(".").join("-") @@ -52,7 +52,9 @@ const generateEnv = async (stream, env) => { preferHigherPatchForSecondaries: true }) await audiusLibs.init() - const serviceProviders = await audiusLibs.ethContracts.ServiceProviderFactoryClient.getServiceProviderList('discovery-node'); + const discoveryNodes = await audiusLibs.ethContracts.ServiceProviderFactoryClient.getServiceProviderList('discovery-node') + const contentNodes = await audiusLibs.ethContracts.ServiceProviderFactoryClient.getServiceProviderList('content-node') + const serviceProviders = [...discoveryNodes, ...contentNodes] // copy from environment-specific stubs readFromFileAndWriteToStream(stream, `${env}.yml`) @@ -65,7 +67,8 @@ const generateEnv = async (stream, env) => { for (const sp of serviceProviders) { const spEndpoint = sp.endpoint; - const yamlString = generateJobYaml(spEndpoint, env) + const serviceType = sp.type + const yamlString = generateJobYaml({ url: spEndpoint, env, component: serviceType }) stream.write(yamlString); stream.write("\n") } From 590bbd6ca263b85b8e4bc540202fd1adf85b7a41 Mon Sep 17 00:00:00 2001 From: "vicky :)" <60366641+vicky-g@users.noreply.github.com> Date: Tue, 5 Jul 2022 17:04:02 -0400 Subject: [PATCH 06/12] Add success metrics to prometheus for track availability worker (#3382) --- .../src/tasks/update_track_is_available.py | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/discovery-provider/src/tasks/update_track_is_available.py b/discovery-provider/src/tasks/update_track_is_available.py index 8c2ac17f043..17762acaeca 100644 --- a/discovery-provider/src/tasks/update_track_is_available.py +++ b/discovery-provider/src/tasks/update_track_is_available.py @@ -229,16 +229,15 @@ def update_track_is_available(self) -> None: have_lock = 
update_lock.acquire(blocking=False) if have_lock: - metric = None + metric = PrometheusMetric( + "update_track_is_available_duration_seconds", + "Runtimes for src.task.update_track_is_available:celery.task()", + ("task_name", "success"), + ) try: - metric = PrometheusMetric( - "update_track_is_available_duration_seconds", - "Runtimes for src.task.update_track_is_available:celery.task()", - ("task_name",), - ) - # TODO: we can deprecate this manual redis timestamp tracker once we confirm - # that prometheus works in tracking duration + # that prometheus works in tracking duration. Needs to be removed from + # the health check too redis.set( UPDATE_TRACK_IS_AVAILABLE_START_REDIS_KEY, datetime.now(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S.%f %Z"), @@ -248,15 +247,19 @@ def update_track_is_available(self) -> None: fetch_unavailable_track_ids_in_network(session, redis) update_tracks_is_available_status(db, redis) + + metric.save_time( + {"task_name": "update_track_is_available", "success": "true"} + ) except Exception as e: + metric.save_time( + {"task_name": "update_track_is_available", "success": "false"} + ) logger.error( "update_track_is_available.py | Fatal error in main loop", exc_info=True ) raise e finally: - if metric is not None: - metric.save_time({"task_name": "update_track_is_available"}) - # TODO: see comment above about deprecation redis.set( UPDATE_TRACK_IS_AVAILABLE_FINISH_REDIS_KEY, From 2be7f08859999eb8098f2cd86ba3cf94461dc496 Mon Sep 17 00:00:00 2001 From: Raymond Jacobson Date: Tue, 5 Jul 2022 15:30:54 -0700 Subject: [PATCH 07/12] [AUD-1964] Add read replica to celery and trending cache job (#3383) * [AUD-1964] Add read replica to celery and trending cache job * Fix lint --- discovery-provider/src/app.py | 8 ++++++++ discovery-provider/src/database_task.py | 6 ++++++ discovery-provider/src/tasks/cache_trending_playlists.py | 2 +- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/discovery-provider/src/app.py b/discovery-provider/src/app.py index 7c360db9566..b34e84667fe 100644 --- a/discovery-provider/src/app.py +++ b/discovery-provider/src/app.py @@ -352,12 +352,15 @@ def delete_last_scanned_eth_block_redis(redis_inst): def configure_celery(celery, test_config=None): database_url = shared_config["db"]["url"] + database_url_read_replica = shared_config["db"]["url_read_replica"] redis_url = shared_config["redis"]["url"] if test_config is not None: if "db" in test_config: if "url" in test_config["db"]: database_url = test_config["db"]["url"] + if "url_read_replica" in test_config["db"]: + database_url_read_replica = test_config["db"]["url_read_replica"] ipld_interval = int(shared_config["discprov"]["blacklist_block_indexing_interval"]) # default is 5 seconds @@ -523,6 +526,10 @@ def configure_celery(celery, test_config=None): db = SessionManager( database_url, ast.literal_eval(shared_config["db"]["engine_args_literal"]) ) + db_read_replica = SessionManager( + database_url_read_replica, + ast.literal_eval(shared_config["db"]["engine_args_literal"]), + ) logger.info("Database instance initialized!") # Initialize Redis connection @@ -582,6 +589,7 @@ def __init__(self, *args, **kwargs): DatabaseTask.__init__( self, db=db, + db_read_replica=db_read_replica, web3=web3, abi_values=abi_values, eth_abi_values=eth_abi_values, diff --git a/discovery-provider/src/database_task.py b/discovery-provider/src/database_task.py index eb79b140962..dd0c2bc68c4 100644 --- a/discovery-provider/src/database_task.py +++ b/discovery-provider/src/database_task.py @@ -8,6 +8,7 @@ class 
DatabaseTask(Task): def __init__( self, db=None, + db_read_replica=None, web3=None, abi_values=None, eth_abi_values=None, @@ -20,6 +21,7 @@ def __init__( anchor_program_indexer=None, ): self._db = db + self._db_read_replica = db_read_replica self._web3_provider = web3 self._abi_values = abi_values self._eth_abi_values = eth_abi_values @@ -47,6 +49,10 @@ def web3(self): def db(self) -> SessionManager: return self._db + @property + def db_read_replica(self) -> SessionManager: + return self._db_read_replica + @property def shared_config(self): return self._shared_config diff --git a/discovery-provider/src/tasks/cache_trending_playlists.py b/discovery-provider/src/tasks/cache_trending_playlists.py index c8fbc1a76ff..73edc23b6b2 100644 --- a/discovery-provider/src/tasks/cache_trending_playlists.py +++ b/discovery-provider/src/tasks/cache_trending_playlists.py @@ -30,7 +30,7 @@ def cache_trending(db, redis, strategy): def cache_trending_playlists(self): """Caches trending playlists for time period""" - db = cache_trending_playlists.db + db = cache_trending_playlists.db_read_replica redis = cache_trending_playlists.redis have_lock = False From cdf6abd57042725f28df87b634adbdf4ca988e75 Mon Sep 17 00:00:00 2001 From: Johannes Naylor Date: Tue, 5 Jul 2022 19:59:58 -0400 Subject: [PATCH 08/12] Add Ghost Users Metric --- .../network-monitoring/src/content/index.ts | 41 ++++++++++++------- .../network-monitoring/src/content/queries.ts | 18 ++++---- .../network-monitoring/src/metrics/index.ts | 7 +++- .../network-monitoring/src/metrics/queries.ts | 18 ++++++++ .../network-monitoring/src/prometheus.ts | 8 +++- 5 files changed, 69 insertions(+), 23 deletions(-) diff --git a/discovery-provider/plugins/network-monitoring/src/content/index.ts b/discovery-provider/plugins/network-monitoring/src/content/index.ts index 065fd3c750c..efa5597f369 100644 --- a/discovery-provider/plugins/network-monitoring/src/content/index.ts +++ b/discovery-provider/plugins/network-monitoring/src/content/index.ts @@ -14,7 +14,6 @@ import { saveSecondary2UserResults, } from "./queries" import { - // asyncSleep, getEnv, generateSPSignatureParams, makeRequest, @@ -93,7 +92,7 @@ const checkUsers = async (run_id: number, spid: number, endpoint: string) => { const { deregisteredCN, signatureSpID, signatureSPDelegatePrivateKey } = getEnv() - const [ primaryCount, secondary1Count, secondary2Count ] = await getUserCounts(run_id, spid) + const [primaryCount, secondary1Count, secondary2Count] = await getUserCounts(run_id, spid) let missedUsers = 0 @@ -128,17 +127,18 @@ const checkUsers = async (run_id: number, spid: number, endpoint: string) => { ) console.log(`[getBatch:${offset}:${batchSize}:${count}]`) - if (walletBatch.length === 0) { return } + if (walletBatch.length === 0) { continue } // Fetch the clock values for all the users in the batch from // the content nodes in their replica set - const results = await getUserClockValues( + const { canceledUsers, results } = await getUserClockValues( endpoint, walletBatch, deregisteredCN, signatureSpID, signatureSPDelegatePrivateKey, ) + missedUsers += canceledUsers console.log(`[getUserClockValues ${run_id}:${spid}:${offset}] `) @@ -158,6 +158,7 @@ const checkUsers = async (run_id: number, spid: number, endpoint: string) => { } } catch (e) { console.log(`[checkUsers:${spid}] error - ${(e as Error).message}`) + missedUsers += batchSize } } }) @@ -300,7 +301,13 @@ const getUserClockValues = async ( deregisteredCN: string[], signatureSpID: number | undefined, signatureSPDelegatePrivateKey: string 
| undefined, -): Promise<{ walletPublicKey: string, clock: number }[]> => { +): Promise<{ + canceledUsers: number, + results: { + walletPublicKey: string, + clock: number + }[], +}> => { try { const axiosReqObj = { @@ -326,25 +333,31 @@ const getUserClockValues = async ( if (batchClockStatusResp.canceled) { console.log(`[getUsersClockValues canceled] - ${endpoint}`) // Return map of wallets to -1 clock (default value) - return walletPublicKeys.map(walletPublicKey => ({ - walletPublicKey, - clock: -1 - })) + return { + canceledUsers: walletPublicKeys.length, + results: walletPublicKeys.map(walletPublicKey => ({ + walletPublicKey, + clock: -1 + })) + } } const batchClockStatus = batchClockStatusResp.response!.data.data.users const batchClockStatusAttemptCount = batchClockStatusResp.attemptCount console.log(`[getUserClockValues Complete] ${endpoint} - reqAttemptCount ${batchClockStatusAttemptCount}`) - return batchClockStatus + return { canceledUsers: 0, results: batchClockStatus } } catch (e) { console.log(`[getUserClockValues Error] - ${endpoint} - ${(e as Error).message}`) // Return map of wallets to -1 clock (default value) - return walletPublicKeys.map(walletPublicKey => ({ - walletPublicKey, - clock: -1 - })) + return { + canceledUsers: walletPublicKeys.length, + results: walletPublicKeys.map(walletPublicKey => ({ + walletPublicKey, + clock: -1 + })), + } } } \ No newline at end of file diff --git a/discovery-provider/plugins/network-monitoring/src/content/queries.ts b/discovery-provider/plugins/network-monitoring/src/content/queries.ts index f0e8086e31b..64c6c8d4013 100644 --- a/discovery-provider/plugins/network-monitoring/src/content/queries.ts +++ b/discovery-provider/plugins/network-monitoring/src/content/queries.ts @@ -287,13 +287,17 @@ export const getUserCounts = async (run_id: number, spid: number): Promise<[numb }) const userCounts = (userCountsResp as { - spid: number, - primary_count: number, - secondary1_count: number, - secondary2_count: number, - }[])[0] || { primary_count: 0, secondary1_count: 0, secondary2_count: 0 } - - return [userCounts.primary_count, userCounts.secondary1_count, userCounts.secondary2_count] + spid: string, + primary_count: string, + secondary1_count: string, + secondary2_count: string, + }[])[0] || { primary_count: '0', secondary1_count: '0', secondary2_count: '0' } + + return [ + parseInt(userCounts.primary_count), + parseInt(userCounts.secondary1_count), + parseInt(userCounts.secondary2_count), + ] } // Fetch a batch of users with a specific content node as their primary from the table `network_monitoring_users` diff --git a/discovery-provider/plugins/network-monitoring/src/metrics/index.ts b/discovery-provider/plugins/network-monitoring/src/metrics/index.ts index d6392521cd4..e492462640d 100644 --- a/discovery-provider/plugins/network-monitoring/src/metrics/index.ts +++ b/discovery-provider/plugins/network-monitoring/src/metrics/index.ts @@ -3,6 +3,7 @@ import { fullySyncedUsersCountGauge, gateway, generatingMetricsDurationGauge, + nullPrimaryUsersCountGauge, partiallySyncedUsersCountGauge, primaryUserCountGauge, unsyncedUsersCountGauge, @@ -12,7 +13,8 @@ import { getAllUserCount, getFullySyncedUsersCount, getPartiallySyncedUsersCount, - getUnsyncedUsersCount + getUnsyncedUsersCount, + getUsersWithNullPrimaryClock } from "./queries" export const generateMetrics = async (run_id: number) => { @@ -51,6 +53,8 @@ export const generateMetrics = async (run_id: number) => { const unsyncedUsersCount = await getUnsyncedUsersCount(run_id) + const 
usersWithNullPrimaryClock = await getUsersWithNullPrimaryClock(run_id) + allUserCount.forEach(({ endpoint, count }) => { allUserCountGauge.set({ endpoint, run_id }, count) }) @@ -61,6 +65,7 @@ export const generateMetrics = async (run_id: number) => { fullySyncedUsersCountGauge.set({ run_id }, fullySyncedUsersCount) partiallySyncedUsersCountGauge.set({ run_id }, partiallySyncedUserCount) unsyncedUsersCountGauge.set({ run_id }, unsyncedUsersCount) + nullPrimaryUsersCountGauge.set({ run_id }, usersWithNullPrimaryClock) // Record duration for generating metrics and export to prometheus endTimer({ run_id: run_id }) diff --git a/discovery-provider/plugins/network-monitoring/src/metrics/queries.ts b/discovery-provider/plugins/network-monitoring/src/metrics/queries.ts index af32594ef02..b9771a19126 100644 --- a/discovery-provider/plugins/network-monitoring/src/metrics/queries.ts +++ b/discovery-provider/plugins/network-monitoring/src/metrics/queries.ts @@ -206,3 +206,21 @@ export const getUnsyncedUsersCount = async (run_id: number): Promise => return usersCount } +// The number of users whose primary content node clock value is null +export const getUsersWithNullPrimaryClock = async (run_id: number): Promise => { + const usersResp: unknown[] = await sequelizeConn.query(` + SELECT COUNT(*) as user_count + FROM network_monitoring_users + WHERE + run_id = :run_id + AND + primary_clock_value IS NULL; + `, { + type: QueryTypes.SELECT, + replacements: { run_id }, + }) + + const usersCount = parseInt(((usersResp as { user_count: string }[])[0] || { user_count: '0' }).user_count) + + return usersCount +} \ No newline at end of file diff --git a/discovery-provider/plugins/network-monitoring/src/prometheus.ts b/discovery-provider/plugins/network-monitoring/src/prometheus.ts index 5f4c6609888..421361d9c24 100644 --- a/discovery-provider/plugins/network-monitoring/src/prometheus.ts +++ b/discovery-provider/plugins/network-monitoring/src/prometheus.ts @@ -36,6 +36,12 @@ export const unsyncedUsersCountGauge = new client.Gauge({ labelNames: ['run_id'] }) +export const nullPrimaryUsersCountGauge = new client.Gauge({ + name: 'audius_nm_no_primary_user_count', + help: 'the number of users whose primary is null', + labelNames: ['run_id'] +}) + export const missedUsersCountGauge = new client.Gauge({ name: 'audius_nm_missed_users_count', help: 'the number of users that got skipped while indexing content nodes', @@ -70,4 +76,4 @@ export const userBatchDurationGauge = new client.Gauge({ name: 'audius_nm_user_batch_duration', help: 'the amount of time it takes to fetch and save a user batch', labelNames: ['run_id', 'endpoint'], -}) \ No newline at end of file +}) From 7725a9602c8870ae38be6abcd7cec389364eb457 Mon Sep 17 00:00:00 2001 From: Johannes Naylor Date: Tue, 5 Jul 2022 21:09:42 -0400 Subject: [PATCH 09/12] Add Slack Alerts from Network Monitoring --- .../network-monitoring/src/metrics/index.ts | 31 +++++++++++++++++++ .../plugins/network-monitoring/src/utils.ts | 12 ++++++- 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/discovery-provider/plugins/network-monitoring/src/metrics/index.ts b/discovery-provider/plugins/network-monitoring/src/metrics/index.ts index e492462640d..7c1a49ccb88 100644 --- a/discovery-provider/plugins/network-monitoring/src/metrics/index.ts +++ b/discovery-provider/plugins/network-monitoring/src/metrics/index.ts @@ -1,3 +1,4 @@ +import axios from "axios" import { allUserCountGauge, fullySyncedUsersCountGauge, @@ -8,6 +9,7 @@ import { primaryUserCountGauge, 
unsyncedUsersCountGauge, } from "../prometheus" +import { getEnv } from "../utils" import { getPrimaryUserCount, getAllUserCount, @@ -70,6 +72,13 @@ export const generateMetrics = async (run_id: number) => { // Record duration for generating metrics and export to prometheus endTimer({ run_id: run_id }) + await publishSlackReport({ + fullySyncedUsersCount: fullySyncedUsersCount, + partiallySyncedUsersCount: partiallySyncedUserCount, + unsyncedUsersCount: unsyncedUsersCount, + usersWithNullPrimaryClock: usersWithNullPrimaryClock + }) + try { // Finish by publishing metrics to prometheus push gateway console.log(`[${run_id}] pushing metrics to gateway`); @@ -82,3 +91,25 @@ export const generateMetrics = async (run_id: number) => { console.log(`[${run_id}] finish generating metrics`); } +const publishSlackReport = async (metrics: Object) => { + + const { slackUrl } = getEnv() + + if (slackUrl === '') { + return + } + + let message = `\`\`\`${JSON.stringify(metrics, null, 2)}\`\`\`` + console.log(message) + + try { + await axios.post( + slackUrl, + { + text: message, + }, + ) + } catch (e) { + console.log(`Error posting to slack in slack reporter ${(e as Error).toString()}`) + } +} \ No newline at end of file diff --git a/discovery-provider/plugins/network-monitoring/src/utils.ts b/discovery-provider/plugins/network-monitoring/src/utils.ts index 6895e0390c8..52dbd06e6ae 100644 --- a/discovery-provider/plugins/network-monitoring/src/utils.ts +++ b/discovery-provider/plugins/network-monitoring/src/utils.ts @@ -196,6 +196,16 @@ export const getEnv = () => { const pushGatewayUrl = process.env['PUSH_GATEWAY_URL'] || 'http://localhost:9091' - return { db, fdb, deregisteredCN, signatureSpID, signatureSPDelegatePrivateKey, pushGatewayUrl } + const slackUrl = process.env['SLACK_URL'] || '' + + return { + db, + fdb, + deregisteredCN, + signatureSpID, + signatureSPDelegatePrivateKey, + pushGatewayUrl, + slackUrl, + } } From ec0ede8de0ff7a8b3cd64d86dbc5906042999421 Mon Sep 17 00:00:00 2001 From: dharit-tan Date: Sat, 2 Jul 2022 01:36:54 +0000 Subject: [PATCH 10/12] [PAY-385] Added First Playlist Challenge Adds challenge for creating your first playlist and adding a track to it. 
--- .../test_first_playlist_challenge.py | 72 +++++++++++++++++++ .../src/challenges/challenge_event.py | 1 + .../src/challenges/challenge_event_bus.py | 4 ++ .../src/challenges/challenges.dev.json | 7 ++ .../src/challenges/challenges.json | 7 ++ .../src/challenges/challenges.stage.json | 7 ++ .../challenges/first_playlist_challenge.py | 28 ++++++++ discovery-provider/src/tasks/playlists.py | 12 ++++ 8 files changed, 138 insertions(+) create mode 100644 discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py create mode 100644 discovery-provider/src/challenges/first_playlist_challenge.py diff --git a/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py new file mode 100644 index 00000000000..13abc8deee2 --- /dev/null +++ b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py @@ -0,0 +1,72 @@ +import logging +from datetime import datetime + +import redis +from src.challenges.challenge_event_bus import ChallengeEvent, ChallengeEventBus +from src.challenges.first_playlist_challenge import first_playlist_challenge_manager +from src.models.indexing.block import Block +from src.models.rewards.challenge import Challenge +from src.models.users.user import User +from src.utils.config import shared_config +from src.utils.db_session import get_db + +REDIS_URL = shared_config["redis"]["url"] +BLOCK_NUMBER = 10 +logger = logging.getLogger(__name__) + + +def test_first_playlist_challenge(app): + redis_conn = redis.Redis.from_url(url=REDIS_URL) + + with app.app_context(): + db = get_db() + + block = Block(blockhash="0x1", number=BLOCK_NUMBER) + user = User( + blockhash="0x1", + blocknumber=BLOCK_NUMBER, + txhash="xyz", + user_id=1, + is_current=True, + handle="TestHandle", + handle_lc="testhandle", + wallet="0x1", + is_creator=False, + is_verified=False, + name="test_name", + created_at=datetime.now(), + updated_at=datetime.now(), + ) + + with db.scoped_session() as session: + bus = ChallengeEventBus(redis_conn) + session.query(Challenge).filter(Challenge.id == "first-playlist").update( + {"active": True, "starting_block": BLOCK_NUMBER} + ) + + # Register events with the bus + bus.register_listener( + ChallengeEvent.first_playlist, first_playlist_challenge_manager + ) + + session.add(block) + session.flush() + session.add(user) + session.flush() + + bus.dispatch( + ChallengeEvent.first_playlist, + BLOCK_NUMBER, + 1, # user_id + {}, + ) + + bus.flush() + bus.process_events(session) + session.flush() + + state = first_playlist_challenge_manager.get_user_challenge_state( + session, ["1"] + )[0] + + assert state.is_complete diff --git a/discovery-provider/src/challenges/challenge_event.py b/discovery-provider/src/challenges/challenge_event.py index 8caaac463f4..5e7f1d94f2d 100644 --- a/discovery-provider/src/challenges/challenge_event.py +++ b/discovery-provider/src/challenges/challenge_event.py @@ -19,3 +19,4 @@ class ChallengeEvent(str, enum.Enum): trending_underground = "trending_underground" trending_playlist = "trending_playlist" send_tip = "send_tip" # Fired for sender + first_playlist = "first_playlist" diff --git a/discovery-provider/src/challenges/challenge_event_bus.py b/discovery-provider/src/challenges/challenge_event_bus.py index 876ca8d2978..60a2d866d61 100644 --- a/discovery-provider/src/challenges/challenge_event_bus.py +++ b/discovery-provider/src/challenges/challenge_event_bus.py @@ -8,6 +8,7 @@ from src.challenges.challenge 
import ChallengeManager, EventMetadata from src.challenges.challenge_event import ChallengeEvent from src.challenges.connect_verified_challenge import connect_verified_challenge_manager +from src.challenges.first_playlist_challenge import first_playlist_challenge_manager from src.challenges.listen_streak_challenge import listen_streak_challenge_manager from src.challenges.mobile_install_challenge import mobile_install_challenge_manager from src.challenges.profile_challenge import profile_challenge_manager @@ -235,5 +236,8 @@ def setup_challenge_bus(): ChallengeEvent.trending_playlist, trending_playlist_challenge_manager ) bus.register_listener(ChallengeEvent.send_tip, send_first_tip_challenge_manager) + bus.register_listener( + ChallengeEvent.first_playlist, first_playlist_challenge_manager + ) return bus diff --git a/discovery-provider/src/challenges/challenges.dev.json b/discovery-provider/src/challenges/challenges.dev.json index 8d6952d2cb0..201f7b0bb9e 100644 --- a/discovery-provider/src/challenges/challenges.dev.json +++ b/discovery-provider/src/challenges/challenges.dev.json @@ -55,5 +55,12 @@ "amount": 2, "active": true, "starting_block": 0 + }, + { + "id": "first-playlist", + "type": "boolean", + "amount": 2, + "active": true, + "starting_block": 0 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/challenges.json b/discovery-provider/src/challenges/challenges.json index 3d00b7e82fb..bb57bccee35 100644 --- a/discovery-provider/src/challenges/challenges.json +++ b/discovery-provider/src/challenges/challenges.json @@ -87,5 +87,12 @@ "amount": 2, "active": true, "starting_block": 0 + }, + { + "id": "first-playlist", + "type": "boolean", + "amount": 2, + "active": true, + "starting_block": 0 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/challenges.stage.json b/discovery-provider/src/challenges/challenges.stage.json index 8d6952d2cb0..201f7b0bb9e 100644 --- a/discovery-provider/src/challenges/challenges.stage.json +++ b/discovery-provider/src/challenges/challenges.stage.json @@ -55,5 +55,12 @@ "amount": 2, "active": true, "starting_block": 0 + }, + { + "id": "first-playlist", + "type": "boolean", + "amount": 2, + "active": true, + "starting_block": 0 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/first_playlist_challenge.py b/discovery-provider/src/challenges/first_playlist_challenge.py new file mode 100644 index 00000000000..a06cf943a26 --- /dev/null +++ b/discovery-provider/src/challenges/first_playlist_challenge.py @@ -0,0 +1,28 @@ +from typing import List, Optional + +from sqlalchemy.orm.session import Session +from src.challenges.challenge import ( + ChallengeManager, + ChallengeUpdater, + FullEventMetadata, +) +from src.models.rewards.user_challenge import UserChallenge + + +class FirstPlaylistChallengeUpdater(ChallengeUpdater): + def update_user_challenges( + self, + session: Session, + event: str, + user_challenges: List[UserChallenge], + step_cout: Optional[int], + event_metadatas: List[FullEventMetadata], + starting_block: Optional[int], + ): + for user_challenge in user_challenges: + user_challenge.is_complete = True + + +first_playlist_challenge_manager = ChallengeManager( + "first-playlist", FirstPlaylistChallengeUpdater() +) diff --git a/discovery-provider/src/tasks/playlists.py b/discovery-provider/src/tasks/playlists.py index babb899bfea..ea99ac6155e 100644 --- a/discovery-provider/src/tasks/playlists.py +++ b/discovery-provider/src/tasks/playlists.py @@ -3,6 +3,7 @@ from typing 
import Any, Dict, Set, Tuple from sqlalchemy.orm.session import Session, make_transient +from src.challenges.challenge_event import ChallengeEvent from src.database_task import DatabaseTask from src.models.playlists.playlist import Playlist from src.queries.skipped_transactions import add_node_level_skipped_transaction @@ -36,6 +37,8 @@ def playlist_state_update( # This stores the playlist_ids created or updated in the set of transactions playlist_ids: Set[int] = set() + challenge_bus = update_task.challenge_event_bus + if not playlist_factory_txs: return num_total_changes, playlist_ids @@ -109,6 +112,15 @@ def playlist_state_update( if value_obj["events"]: invalidate_old_playlist(session, playlist_id) session.add(value_obj["playlist"]) + if ( + playlist_event_types_lookup["playlist_track_added"] + in value_obj["events"] + ): + challenge_bus.dispatch( + ChallengeEvent.first_playlist, + value_obj["playlist"].blocknumber, + value_obj["playlist"].playlist_owner_id, + ) return num_total_changes, playlist_ids From 115bd56f0e52bb8057848eb67375feaed16b657b Mon Sep 17 00:00:00 2001 From: dharit-tan Date: Tue, 5 Jul 2022 14:01:24 +0000 Subject: [PATCH 11/12] [PAY-385] CR Comments --- .../challenges/test_first_playlist_challenge.py | 2 +- discovery-provider/src/challenges/challenges.json | 2 +- discovery-provider/src/challenges/first_playlist_challenge.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py index 13abc8deee2..d9672273b40 100644 --- a/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py +++ b/discovery-provider/integration_tests/challenges/test_first_playlist_challenge.py @@ -57,7 +57,7 @@ def test_first_playlist_challenge(app): bus.dispatch( ChallengeEvent.first_playlist, BLOCK_NUMBER, - 1, # user_id + user.user_id, {}, ) diff --git a/discovery-provider/src/challenges/challenges.json b/discovery-provider/src/challenges/challenges.json index bb57bccee35..d65733f6d40 100644 --- a/discovery-provider/src/challenges/challenges.json +++ b/discovery-provider/src/challenges/challenges.json @@ -93,6 +93,6 @@ "type": "boolean", "amount": 2, "active": true, - "starting_block": 0 + "starting_block": 25346436 } ] \ No newline at end of file diff --git a/discovery-provider/src/challenges/first_playlist_challenge.py b/discovery-provider/src/challenges/first_playlist_challenge.py index a06cf943a26..3b5e6b4e3ed 100644 --- a/discovery-provider/src/challenges/first_playlist_challenge.py +++ b/discovery-provider/src/challenges/first_playlist_challenge.py @@ -15,7 +15,7 @@ def update_user_challenges( session: Session, event: str, user_challenges: List[UserChallenge], - step_cout: Optional[int], + step_count: Optional[int], event_metadatas: List[FullEventMetadata], starting_block: Optional[int], ): From 431502d9c4584952b11cc7af20f69c713f8ac0e4 Mon Sep 17 00:00:00 2001 From: dharit-tan Date: Wed, 6 Jul 2022 02:57:56 +0000 Subject: [PATCH 12/12] starting block 1 week in future --- discovery-provider/src/challenges/challenges.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/discovery-provider/src/challenges/challenges.json b/discovery-provider/src/challenges/challenges.json index d65733f6d40..1ebb1a29a63 100644 --- a/discovery-provider/src/challenges/challenges.json +++ b/discovery-provider/src/challenges/challenges.json @@ -93,6 +93,6 @@ "type": "boolean", "amount": 2, "active": 
true, - "starting_block": 25346436 + "starting_block": 28350000 } ] \ No newline at end of file
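

Editor's note (not part of the patch series): the final patch bumps "starting_block" for the first-playlist challenge to a value the commit message describes as roughly one week in the future. As a rough illustration of that arithmetic, a minimal Python sketch follows; the 5-second average block time and the current block height used below are assumptions for illustration only, not values taken from these patches.

    # Hypothetical sketch: deriving a "starting_block" about one week ahead.
    # The block time and current height are illustrative assumptions.
    SECONDS_PER_WEEK = 7 * 24 * 60 * 60  # 604800

    def starting_block_one_week_out(current_block: int, block_time_seconds: float) -> int:
        """Return a block number approximately one week after current_block."""
        blocks_per_week = int(SECONDS_PER_WEEK / block_time_seconds)
        return current_block + blocks_per_week

    # Example: with an assumed 5s average block time and an assumed current
    # height of 28,229,040, this yields 28,350,000 (the value set in the patch).
    # starting_block_one_week_out(28_229_040, 5.0) == 28_350_000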