diff --git a/creator-node/src/middlewares.js b/creator-node/src/middlewares.js index f00b921389b..bde168aff41 100644 --- a/creator-node/src/middlewares.js +++ b/creator-node/src/middlewares.js @@ -508,7 +508,7 @@ async function getOwnEndpoint({ libs }) { } /** - * Retrieves user replica set from discprov + * Retrieves user replica set endpoints from discprov * * Polls discprov conditionally as follows: * - If blockNumber provided, polls discprov until it has indexed that blocknumber (for up to 200 seconds) @@ -516,24 +516,26 @@ async function getOwnEndpoint({ libs }) { * - Errors if retrieved primary does not match myCnodeEndpoint * - If neither of above conditions are met, falls back to single discprov query without polling * + * @param {string} wallet - wallet used to query discprov for user data * @param {Object} serviceRegistry * @param {Object} logger - * @param {string} wallet - wallet used to query discprov for user data * @param {number} blockNumber - blocknumber of eth TX preceding CN call * @param {string} myCnodeEndpoint - endpoint of this CN * @param {boolean} ensurePrimary - determines if function should error if this CN is not primary * * @returns {Array} - array of strings of replica set */ -async function getCreatorNodeEndpoints({ +async function getUserReplicaSetEndpointsFromDiscovery({ + wallet, libs, logger, - wallet, blockNumber, ensurePrimary, myCnodeEndpoint }) { - logger.info(`Starting getCreatorNodeEndpoints for wallet ${wallet}`) + logger.info( + `Starting getUserReplicaSetEndpointsFromDiscovery for wallet ${wallet}` + ) const start = Date.now() let user = null @@ -551,7 +553,7 @@ async function getCreatorNodeEndpoints({ let discprovBlockNumber = -1 for (let retry = 1; retry <= MaxRetries; retry++) { logger.info( - `getCreatorNodeEndpoints retry #${retry}/${MaxRetries} || time from start: ${ + `getUserReplicaSetEndpointsFromDiscovery retry #${retry}/${MaxRetries} || time from start: ${ Date.now() - start2 } discprovBlockNumber 
${discprovBlockNumber} || blockNumber ${blockNumber}` ) @@ -584,7 +586,7 @@ async function getCreatorNodeEndpoints({ await utils.timeout(RetryTimeout) logger.info( - `getCreatorNodeEndpoints AFTER TIMEOUT retry #${retry}/${MaxRetries} || time from start: ${ + `getUserReplicaSetEndpointsFromDiscovery AFTER TIMEOUT retry #${retry}/${MaxRetries} || time from start: ${ Date.now() - start2 } discprovBlockNumber ${discprovBlockNumber} || blockNumber ${blockNumber}` ) @@ -609,7 +611,7 @@ async function getCreatorNodeEndpoints({ * Errors if retrieved primary does not match myCnodeEndpoint */ logger.info( - `getCreatorNodeEndpoints || no blockNumber passed, retrying until DN returns same endpoint` + `getUserReplicaSetEndpointsFromDiscovery || no blockNumber passed, retrying until DN returns same endpoint` ) const start2 = Date.now() @@ -621,7 +623,7 @@ async function getCreatorNodeEndpoints({ let returnedPrimaryEndpoint = null for (let retry = 1; retry <= MaxRetries; retry++) { logger.info( - `getCreatorNodeEndpoints retry #${retry}/${MaxRetries} || time from start: ${ + `getUserReplicaSetEndpointsFromDiscovery retry #${retry}/${MaxRetries} || time from start: ${ Date.now() - start2 } myCnodeEndpoint ${myCnodeEndpoint}` ) @@ -650,7 +652,7 @@ async function getCreatorNodeEndpoints({ await utils.timeout(RetryTimeout) logger.info( - `getCreatorNodeEndpoints AFTER TIMEOUT retry #${retry}/${MaxRetries} || time from start: ${ + `getUserReplicaSetEndpointsFromDiscovery AFTER TIMEOUT retry #${retry}/${MaxRetries} || time from start: ${ Date.now() - start2 } myCnodeEndpoint ${myCnodeEndpoint}` ) @@ -674,7 +676,7 @@ async function getCreatorNodeEndpoints({ * If neither of above conditions are met, falls back to single discprov query without polling */ logger.info( - `getCreatorNodeEndpoints || ensurePrimary === false, fetching user without retries` + `getUserReplicaSetEndpointsFromDiscovery || ensurePrimary === false, fetching user without retries` ) user = await 
libs.User.getUsers(1, 0, null, wallet) } @@ -692,7 +694,9 @@ async function getCreatorNodeEndpoints({ const endpoint = user[0].creator_node_endpoint const userReplicaSet = endpoint ? endpoint.split(',') : [] - logger.info(`getCreatorNodeEndpoints route time ${Date.now() - start}`) + logger.info( + `getUserReplicaSetEndpointsFromDiscovery route time ${Date.now() - start}` + ) return userReplicaSet } @@ -920,5 +924,5 @@ module.exports = { ensureValidSPMiddleware, issueAndWaitForSecondarySyncRequests, getOwnEndpoint, - getCreatorNodeEndpoints + getUserReplicaSetEndpointsFromDiscovery } diff --git a/creator-node/src/services/stateMachineManager/ContentNodeInfoManager.js b/creator-node/src/services/stateMachineManager/ContentNodeInfoManager.js index f06ec6c5b0f..5dbe55ae901 100644 --- a/creator-node/src/services/stateMachineManager/ContentNodeInfoManager.js +++ b/creator-node/src/services/stateMachineManager/ContentNodeInfoManager.js @@ -4,6 +4,8 @@ const _ = require('lodash') /** * Queries to periodically keep the mapping of (Content Node endpoint -> SP ID) * up to date. 
+ * + * @dev Since this class holds all state in memory, it is not concurrency-compatible */ class ContentNodeInfoManager { constructor() { diff --git a/creator-node/src/services/sync/primarySyncFromSecondary.js b/creator-node/src/services/sync/primarySyncFromSecondary.js index 5c253e1dff2..4376fe1728e 100644 --- a/creator-node/src/services/sync/primarySyncFromSecondary.js +++ b/creator-node/src/services/sync/primarySyncFromSecondary.js @@ -7,17 +7,17 @@ const { WalletWriteLock } = redis const models = require('../../models') const { logger: genericLogger } = require('../../logging') const DBManager = require('../../dbManager') -const { getCreatorNodeEndpoints } = require('../../middlewares') +const { getUserReplicaSetEndpointsFromDiscovery } = require('../../middlewares') const { saveFileForMultihashToFS } = require('../../fileManager') const SyncHistoryAggregator = require('../../snapbackSM/syncHistoryAggregator') const initAudiusLibs = require('../initAudiusLibs') const asyncRetry = require('../../utils/asyncRetry') +const DecisionTree = require('../../utils/decisionTree') const EXPORT_REQ_TIMEOUT_MS = 10000 // 10000ms = 10s const EXPORT_REQ_MAX_RETRIES = 3 const DEFAULT_LOG_CONTEXT = {} -const devMode = config.get('devMode') -const DB_QUERY_LIMIT = devMode ? 5 : 10000 +const DB_QUERY_LIMIT = config.get('devMode') ? 
5 : 10000 /** * Export data for user from secondary and save locally, until complete @@ -30,20 +30,31 @@ module.exports = async function primarySyncFromSecondary({ }) { const logPrefix = `[primarySyncFromSecondary][Wallet: ${wallet}][Secondary: ${secondary}]` const logger = genericLogger.child(logContext) - logger.info(`${logPrefix} Beginning...`) - const start = Date.now() - // This is used only for logging record endpoint of requesting node - const selfEndpoint = config.get('creatorNodeEndpoint') || null + const decisionTree = new DecisionTree({ name: logPrefix, logger }) + decisionTree.recordStage({ name: 'Begin' }) // object to track if the function errored, returned at the end of the function let error = null try { + const selfEndpoint = config.get('creatorNodeEndpoint') + + if (!selfEndpoint) { + decisionTree.recordStage({ name: 'selfEndpoint missing', log: false }) + throw new Error('selfEndpoint missing') + } + let libs try { libs = await initAudiusLibs({}) + decisionTree.recordStage({ name: 'initAudiusLibs() success' }) } catch (e) { + decisionTree.recordStage({ + name: 'initAudiusLibs() Error', + data: { errorMsg: e.message }, + log: false + }) throw new Error(`InitAudiusLibs Error - ${e.message}`) } @@ -59,10 +70,16 @@ module.exports = async function primarySyncFromSecondary({ logger, libs }) + decisionTree.recordStage({ name: 'getUserReplicaSet() success ' }) // Error if this node is not primary for user if (userReplicaSet[0] !== selfEndpoint) { - throw new Error(`Failure - this node is not primary for user`) + decisionTree.recordStage({ + name: 'Error - Node is not primary for user', + data: { userReplicaSet }, + log: false + }) + throw new Error(`Node is not primary for user`) } // filter out current node from user's replica set @@ -72,26 +89,68 @@ module.exports = async function primarySyncFromSecondary({ let completed = false let exportClockRangeMin = 0 while (!completed) { - const fetchedCNodeUser = await fetchExportFromSecondary({ - secondary, - 
wallet, - exportClockRangeMin, - selfEndpoint - }) + const decisionTreeData = { exportClockRangeMin } + + let fetchedCNodeUser + try { + fetchedCNodeUser = await fetchExportFromSecondary({ + secondary, + wallet, + exportClockRangeMin, + selfEndpoint + }) + decisionTree.recordStage({ + name: 'fetchExportFromSecondary() Success', + data: decisionTreeData + }) + } catch (e) { + decisionTree.recordStage({ + name: 'fetchExportFromSecondary() Error', + data: { ...decisionTreeData, errorMsg: e.message }, + log: false + }) + throw e + } // Save all files to disk separately from DB writes to minimize DB transaction duration - await saveFilesToDisk({ - files: fetchedCNodeUser.files, - userReplicaSet, - libs, - logger - }) + try { + await saveFilesToDisk({ + files: fetchedCNodeUser.files, + userReplicaSet, + libs, + logger + }) + decisionTree.recordStage({ + name: 'saveFilesToDisk() Success', + data: decisionTreeData + }) + } catch (e) { + decisionTree.recordStage({ + name: 'saveFilesToDisk() Error', + data: { ...decisionTreeData, errorMsg: e.message }, + log: false + }) + throw e + } - await saveEntriesToDB({ - fetchedCNodeUser, - logger, - logPrefix - }) + try { + await saveEntriesToDB({ + fetchedCNodeUser, + logger, + logPrefix + }) + decisionTree.recordStage({ + name: 'saveEntriesToDB() Success', + data: decisionTreeData + }) + } catch (e) { + decisionTree.recordStage({ + name: 'saveEntriesToDB() Error', + data: { ...decisionTreeData, errorMsg: e.message }, + log: false + }) + throw e + } const clockInfo = fetchedCNodeUser.clockInfo if (clockInfo.localClockMax <= clockInfo.requestedClockRangeMax) { @@ -100,6 +159,8 @@ module.exports = async function primarySyncFromSecondary({ exportClockRangeMin = clockInfo.requestedClockRangeMax + 1 } } + + decisionTree.recordStage({ name: 'Complete Success' }) } catch (e) { error = e @@ -107,15 +168,7 @@ module.exports = async function primarySyncFromSecondary({ } finally { await WalletWriteLock.release(wallet) - if (error) { - 
logger.error( - `${logPrefix} Error ${error.message} [Duration: ${ - Date.now() - start - }ms]` - ) - } else { - logger.info(`${logPrefix} Complete [Duration: ${Date.now() - start}ms]`) - } + decisionTree.printTree() } return error @@ -138,43 +191,43 @@ async function fetchExportFromSecondary({ force_export: true } - try { - const exportResp = await asyncRetry({ - // Throws on any non-200 response code - asyncFn: () => - axios({ - method: 'get', - baseURL: secondary, - url: '/export', - responseType: 'json', - params: exportQueryParams, - timeout: EXPORT_REQ_TIMEOUT_MS - }), - retries: EXPORT_REQ_MAX_RETRIES, - log: false - }) - - // Validate export response - if ( - !_.has(exportResp, 'data.data') || - !_.has(exportResp.data.data, 'cnodeUsers') || - Object.keys(exportResp.data.data.cnodeUsers).length !== 1 - ) { - throw new Error('Malformatted export response data') - } - - const { cnodeUsers } = exportResp.data.data + const exportResp = await asyncRetry({ + // Throws on any non-200 response code + asyncFn: () => + axios({ + method: 'get', + baseURL: secondary, + url: '/export', + responseType: 'json', + params: exportQueryParams, + timeout: EXPORT_REQ_TIMEOUT_MS + }), + retries: EXPORT_REQ_MAX_RETRIES, + log: false + }) + + // Validate export response + if ( + !_.has(exportResp, 'data.data') || + !_.has(exportResp.data.data, 'cnodeUsers') + ) { + throw new Error('Malformatted export response data') + } - const fetchedCNodeUser = cnodeUsers[Object.keys(cnodeUsers)[0]] + const { cnodeUsers } = exportResp.data.data - if (fetchedCNodeUser.walletPublicKey !== wallet) { - throw new Error('Wallet mismatch') - } + if (Object.keys(cnodeUsers).length === 0) { + throw new Error('No cnodeUser returned from export') + } else if (Object.keys(cnodeUsers).length > 1) { + throw new Error('Multiple cnodeUsers returned from export') + } - return fetchedCNodeUser - } catch (e) { - throw new Error(`[fetchExportFromSecondary] ERROR: ${e.message}`) + const fetchedCNodeUser = 
cnodeUsers[Object.keys(cnodeUsers)[0]] + if (fetchedCNodeUser.walletPublicKey !== wallet) { + throw new Error('Wallet mismatch') } + + return fetchedCNodeUser } /** @@ -186,104 +239,100 @@ async function fetchExportFromSecondary({ async function saveFilesToDisk({ files, userReplicaSet, libs, logger }) { const FileSaveMaxConcurrency = config.get('nodeSyncFileSaveMaxConcurrency') - try { - const trackFiles = files.filter((file) => - models.File.TrackTypes.includes(file.type) - ) - const nonTrackFiles = files.filter((file) => - models.File.NonTrackTypes.includes(file.type) - ) + const trackFiles = files.filter((file) => + models.File.TrackTypes.includes(file.type) + ) + const nonTrackFiles = files.filter((file) => + models.File.NonTrackTypes.includes(file.type) + ) + + /** + * Save all Track files to disk + */ + for (let i = 0; i < trackFiles.length; i += FileSaveMaxConcurrency) { + const trackFilesSlice = trackFiles.slice(i, i + FileSaveMaxConcurrency) /** - * Save all Track files to disk + * Fetch content for each CID + save to FS + * Record any CIDs that failed retrieval/saving for later use + * + * - `saveFileForMultihashToFS()` should never reject - it will return error indicator for post processing */ - for (let i = 0; i < trackFiles.length; i += FileSaveMaxConcurrency) { - const trackFilesSlice = trackFiles.slice(i, i + FileSaveMaxConcurrency) + await Promise.all( + trackFilesSlice.map(async (trackFile) => { + const succeeded = await saveFileForMultihashToFS( + libs, + logger, + trackFile.multihash, + trackFile.storagePath, + userReplicaSet, + null, // fileNameForImage + trackFile.trackBlockchainId + ) + if (!succeeded) { + throw new Error( + `[saveFileForMultihashToFS] Failed for multihash ${trackFile.multihash}` + ) + } + }) + ) + } - /** - * Fetch content for each CID + save to FS - * Record any CIDs that failed retrieval/saving for later use - * - * - `saveFileForMultihashToFS()` should never reject - it will return error indicator for post processing - */ 
- await Promise.all( - trackFilesSlice.map(async (trackFile) => { - const succeeded = await saveFileForMultihashToFS( + /** + * Save all non-Track files to disk + */ + for (let i = 0; i < nonTrackFiles.length; i += FileSaveMaxConcurrency) { + const nonTrackFilesSlice = nonTrackFiles.slice( + i, + i + FileSaveMaxConcurrency + ) + + await Promise.all( + nonTrackFilesSlice.map(async (nonTrackFile) => { + // Skip over directories since there's no actual content to sync + // The files inside the directory are synced separately + if (nonTrackFile.type === 'dir') { + return + } + + const multihash = nonTrackFile.multihash + + // if it's an image file, we need to pass in the actual filename because the gateway request is /ipfs/Qm123/ + // need to also check fileName is not null to make sure it's a dir-style image. non-dir images won't have a 'fileName' db column + let succeeded + if (nonTrackFile.type === 'image' && nonTrackFile.fileName !== null) { + succeeded = await saveFileForMultihashToFS( libs, logger, - trackFile.multihash, - trackFile.storagePath, + multihash, + nonTrackFile.storagePath, userReplicaSet, - null, // fileNameForImage - trackFile.trackBlockchainId + nonTrackFile.fileName ) - if (!succeeded) { - throw new Error( - `[saveFileForMultihashToFS] Failed for multihash ${trackFile.multihash}` - ) - } - }) - ) - } - - /** - * Save all non-Track files to disk - */ - for (let i = 0; i < nonTrackFiles.length; i += FileSaveMaxConcurrency) { - const nonTrackFilesSlice = nonTrackFiles.slice( - i, - i + FileSaveMaxConcurrency - ) + } else { + succeeded = await saveFileForMultihashToFS( + libs, + logger, + multihash, + nonTrackFile.storagePath, + userReplicaSet + ) + } - await Promise.all( - nonTrackFilesSlice.map(async (nonTrackFile) => { - // Skip over directories since there's no actual content to sync - // The files inside the directory are synced separately - if (nonTrackFile.type === 'dir') { - return - } - - const multihash = nonTrackFile.multihash - - // if 
it's an image file, we need to pass in the actual filename because the gateway request is /ipfs/Qm123/ - // need to also check fileName is not null to make sure it's a dir-style image. non-dir images won't have a 'fileName' db column - let succeeded - if (nonTrackFile.type === 'image' && nonTrackFile.fileName !== null) { - succeeded = await saveFileForMultihashToFS( - libs, - logger, - multihash, - nonTrackFile.storagePath, - userReplicaSet, - nonTrackFile.fileName - ) - } else { - succeeded = await saveFileForMultihashToFS( - libs, - logger, - multihash, - nonTrackFile.storagePath, - userReplicaSet - ) - } - - if (!succeeded) { - throw new Error( - `[saveFileForMultihashToFS] Failed for multihash ${multihash}` - ) - } - }) - ) - } - } catch (e) { - throw new Error(`[saveFilesToDisk] ERROR: ${e.message}`) + if (!succeeded) { + throw new Error( + `[saveFileForMultihashToFS] Failed for multihash ${multihash}` + ) + } + }) + ) } } /** * Saves all entries to DB that don't already exist in DB */ -async function saveEntriesToDB({ fetchedCNodeUser, logger, logPrefix }) { +async function saveEntriesToDB({ fetchedCNodeUser }) { const transaction = await models.sequelize.transaction() try { @@ -294,11 +343,6 @@ async function saveEntriesToDB({ fetchedCNodeUser, logger, logPrefix }) { files: fetchedFiles } = fetchedCNodeUser - logger.info( - logPrefix, - `beginning add ops for cnodeUser wallet ${walletPublicKey}` - ) - let localCNodeUser = await models.CNodeUser.findOne({ where: { walletPublicKey }, transaction @@ -392,7 +436,7 @@ async function saveEntriesToDB({ fetchedCNodeUser, logger, logPrefix }) { await transaction.commit() } catch (e) { await transaction.rollback() - throw new Error(`[saveEntriesToDB] ERROR: ${e.message}`) + throw e } } @@ -458,7 +502,7 @@ async function filterOutAlreadyPresentDBEntries({ async function getUserReplicaSet({ wallet, libs, logger }) { try { - let userReplicaSet = await getCreatorNodeEndpoints({ + let userReplicaSet = await 
getUserReplicaSetEndpointsFromDiscovery({ libs, logger, wallet, diff --git a/creator-node/src/services/sync/secondarySyncFromPrimary.js b/creator-node/src/services/sync/secondarySyncFromPrimary.js index 08fc924b745..8a28fcaefe6 100644 --- a/creator-node/src/services/sync/secondarySyncFromPrimary.js +++ b/creator-node/src/services/sync/secondarySyncFromPrimary.js @@ -4,7 +4,10 @@ const axios = require('axios') const { logger: genericLogger } = require('../../logging') const models = require('../../models') const { saveFileForMultihashToFS } = require('../../fileManager') -const { getOwnEndpoint, getCreatorNodeEndpoints } = require('../../middlewares') +const { + getOwnEndpoint, + getUserReplicaSetEndpointsFromDiscovery +} = require('../../middlewares') const SyncHistoryAggregator = require('../../snapbackSM/syncHistoryAggregator') const DBManager = require('../../dbManager') const UserSyncFailureCountManager = require('./UserSyncFailureCountManager') @@ -194,7 +197,7 @@ const handleSyncFromPrimary = async ({ let userReplicaSet = [] try { const myCnodeEndpoint = await getOwnEndpoint(serviceRegistry) - userReplicaSet = await getCreatorNodeEndpoints({ + userReplicaSet = await getUserReplicaSetEndpointsFromDiscovery({ libs, logger: genericLogger, wallet: fetchedWalletPublicKey, diff --git a/creator-node/src/utils/decisionTree.ts b/creator-node/src/utils/decisionTree.ts new file mode 100644 index 00000000000..630148fb320 --- /dev/null +++ b/creator-node/src/utils/decisionTree.ts @@ -0,0 +1,89 @@ +import type Logger from 'bunyan' + +type Stage = { + name: string + data: object | null + timestamp: number + duration: number + fullDuration: number +} + +type ConstructorParams = { + name: string + logger: Logger +} + +type RecordStageParams = { + name: string + data: object | null + log: boolean +} + +/** + * Class for recording and logging multi-stage processes + */ +module.exports = class DecisionTree { + name: string + logger: Logger + + tree: Stage[] + + public 
constructor({ name, logger }: ConstructorParams) { + this.logger = logger + this.tree = [] + this.name = name + } + + recordStage = ({ name, data = null, log = true }: RecordStageParams) => { + const timestamp = Date.now() + + let stage: Stage + if (this.tree.length > 0) { + const previousStage: Stage = this.tree[this.tree.length - 1] + const duration: number = timestamp - previousStage.timestamp + stage = { + name, + data, + timestamp, + duration, + fullDuration: previousStage.fullDuration + duration + } + } else { + stage = { + name, + data, + timestamp, + duration: 0, + fullDuration: 0 + } + } + + this.tree.push(stage) + + if (log) { + this.printLastStage() + } + } + + printTree() { + this._logInfo(`DecisionTree Full - ${JSON.stringify(this.tree, null, 2)}`) + } + + printLastStage() { + if (this.tree.length > 0) { + this._logInfo( + `DecisionTree Last Stage - ${JSON.stringify( + this.tree[this.tree.length - 1], + null, + 2 + )}` + ) + } else { + this._logInfo('DecisionTree Last Stage - empty') + } + } + + private _logInfo(msg: string) { + this.logger.info(`${this.name} - ${msg}`) + } +} diff --git a/creator-node/test/sync/assets/export.json b/creator-node/test/sync/assets/export.json index ffee8294d3d..ffd5301797d 100644 --- a/creator-node/test/sync/assets/export.json +++ b/creator-node/test/sync/assets/export.json @@ -963,17 +963,6 @@ "localClockMax": 37 } } - }, - "ipfsIDObj": { - "id": "Qmbg2iXM4SeMn9CiALonDt9ejFUosEF9rkT38CcCpAVqxs", - "publicKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCuwbUXm0VdPWKFcCftIZsTmoRSpgGhSL/+RtsAys7+SoUzx/1IOqTu0apq1eH9hpjm0s/m92MIiifSohk9vv5OIoxarlQjxAIumG9dxGntquv2X9p3INjTm5gD2Zafl6xnf36EUHi4lyuJtMSOCt2O1nVHOBoTZ5q2oZoQTsO8pI3qZvNZwsFSJzgH3GOBJlVDXTTgNmoMN1INKEWDmsQiTRXfVq+m4l1eOpxthkXg2tlkxxRm3kcOzHuQrGCln3AFnMgom2X1Xcm4eG0zWKLvlOgX88zHYuguf6YTTfqxNHSo33NtflZxZI+CQq0RxvkWKiJKqbpmlK+shJnv9Z3zAgMBAAE=", - "addresses": [ - "/ip4/127.0.0.1/tcp/4001/ipfs/Qmbg2iXM4SeMn9CiALonDt9ejFUosEF9rkT38CcCpAVqxs", - 
"/ip4/172.17.0.2/tcp/4001/ipfs/Qmbg2iXM4SeMn9CiALonDt9ejFUosEF9rkT38CcCpAVqxs", - "/ip4/35.238.168.173/tcp/4001/ipfs/Qmbg2iXM4SeMn9CiALonDt9ejFUosEF9rkT38CcCpAVqxs" - ], - "agentVersion": "go-ipfs/0.4.23/6ce9a35", - "protocolVersion": "ipfs/0.1.0" } }, "signer": "0x1eC723075E67a1a2B6969dC5CfF0C6793cb36D25", diff --git a/creator-node/test/sync/assets/realExport.json b/creator-node/test/sync/assets/realExport.json index 935d9e0eb42..77b68cb0c13 100644 --- a/creator-node/test/sync/assets/realExport.json +++ b/creator-node/test/sync/assets/realExport.json @@ -963,17 +963,6 @@ "localClockMax": 37 } } - }, - "ipfsIDObj": { - "id": "QmZvtTwkaantxw5raytS8JLkQTpufdgvfeGFuzEwdfEdUt", - "publicKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCr5DuScNPNNMnSLv8RRqb2vKZPrAVnRp8xeUzOxYfY+ytf86RO5lkBU6y2/u6oRidteb/HPV7gtkDwouBejXgg1zNq8Ya6bleE62iTqtAwB7+lrrPgMKtblljFt0fY6G7gea13KazLVbH+Y9hQlAQoAsZwFFdWbVLXLejq/BLZsHe/vx6A6uA7TSdOjY8sjWP3E3KWUNxOPVFwdk1dSlOnQDbHVJ3vSUmi5zpMYKYr0SXHa5OG20gq/qaP1hIRKl3IU0NoGweyLhgGxAtDSugfXAPyvj3BIwjRHc9x8B7XlmpQxfamLWz4u5SbiTB4c09S1Pmsn4JXKGeOoFK00tWNAgMBAAE=", - "addresses": [ - "/ip4/127.0.0.1/tcp/4001/ipfs/QmZvtTwkaantxw5raytS8JLkQTpufdgvfeGFuzEwdfEdUt", - "/ip4/172.17.0.2/tcp/4001/ipfs/QmZvtTwkaantxw5raytS8JLkQTpufdgvfeGFuzEwdfEdUt", - "/ip4/35.238.168.173/tcp/4001/ipfs/QmZvtTwkaantxw5raytS8JLkQTpufdgvfeGFuzEwdfEdUt" - ], - "agentVersion": "go-ipfs/0.4.23/6ce9a35", - "protocolVersion": "ipfs/0.1.0" } }, "signer": "0x1eC723075E67a1a2B6969dC5CfF0C6793cb36D25",