diff --git a/creator-node/nginx_conf/nginx.conf b/creator-node/nginx_conf/nginx.conf
index 92ed75859a2..af7898ca417 100644
--- a/creator-node/nginx_conf/nginx.conf
+++ b/creator-node/nginx_conf/nginx.conf
@@ -15,16 +15,19 @@ http {
     client_max_body_size 0;
     lua_package_path "/usr/local/openresty/conf/?.lua;;";
+    # Inactive = how long an item can remain in the cache without being accessed
+    # If inactive period passes, content WILL BE DELETED from the cache by the cache
+    # manager, regardless whether or not it has expired.
+    # https://www.nginx.com/blog/nginx-caching-guide#How-to-Set-Up-and-Configure-Basic-Caching
     proxy_cache_path /usr/local/openresty/cache levels=1:2 keys_zone=cidcache:1000m
-        max_size=10g inactive=30m use_temp_path=off;
+        max_size=10g inactive=12h use_temp_path=off;
 
     proxy_read_timeout 3600; # 1 hour in seconds
 
     server {
         listen 4000;
 
         # Match the paths /ipfs/ and /content/.
-        # If present in cache, serve.
-        # Else, hit upstream server + update cache + serve.
+        # If present in cache, serve. Else, hit upstream server + update cache + serve.
         # http://nginx.org/en/docs/http/ngx_http_core_module.html#location
         location ~ (/ipfs/|/content/) {
             proxy_cache cidcache;
@@ -38,8 +41,10 @@ http {
             proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
             proxy_cache_background_update on;
 
-            # Cache only 200 responses for 30m before considered stale
-            proxy_cache_valid 200 30m;
+            # Cache only responses with status code = 200 for some duration before considered stale.
+            # Stale implies content will be fetched from the upstream server. Stale content WILL NOT
+            # BE REMOVED from the cache.
+            proxy_cache_valid 200 12h;
 
             # When enabled, only one request at a time will be allowed to populate a new cache element
             # Other requests of the same cache element will either wait for a response to appear in the cache
diff --git a/creator-node/src/routes/files.js b/creator-node/src/routes/files.js
index 114bbfc30de..8eb3adc5048 100644
--- a/creator-node/src/routes/files.js
+++ b/creator-node/src/routes/files.js
@@ -111,6 +111,17 @@ const streamFromFileSystem = async (
       res.set('Content-Length', stat.size)
     }
 
+    // If client has provided filename, set filename in header to be auto-populated in download prompt.
+    if (req.query.filename) {
+      res.setHeader(
+        'Content-Disposition',
+        contentDisposition(req.query.filename)
+      )
+    }
+
+    // Set the CID cache-control so that client caches the response for 30 days
+    res.setHeader('cache-control', 'public, max-age=2592000, immutable')
+
     await new Promise((resolve, reject) => {
       fileStream
         .on('open', () => fileStream.pipe(res))
@@ -123,6 +134,9 @@ const streamFromFileSystem = async (
         })
     })
   } catch (e) {
+    // Unset the cache-control header so that a bad response is not cached
+    res.removeHeader('cache-control')
+
     // Unable to stream from file system. Throw a server error message
     throw e
   }
@@ -141,10 +155,10 @@ const logGetCIDDecisionTree = (decisionTree, req) => {
 /**
  * Given a CID, return the appropriate file
  * 1. Check if file exists at expected storage path (current and legacy)
- * 1. If found, stream from FS
- * 2. Else, check if CID exists in DB. If not, return 404 not found error
- * 3. If exists in DB, fetch file from CN network, save to FS, and stream from FS
- * 4. If not avail in CN network, respond with 400 server error
+ * 2. If found, stream from FS
+ * 3. Else, check if CID exists in DB. If not, return 404 not found error
+ * 4. If exists in DB, fetch file from CN network, save to FS, and stream from FS
+ * 5. If not avail in CN network, respond with 400 server error
  */
 const getCID = async (req, res) => {
   if (!(req.params && req.params.CID)) {
@@ -355,14 +369,6 @@ const getCID = async (req, res) => {
     }
   }
 
-  // If client has provided filename, set filename in header to be auto-populated in download prompt.
-  if (req.query.filename) {
-    res.setHeader('Content-Disposition', contentDisposition(req.query.filename))
-  }
-
-  // Set the CID cache-control so that client caches the response for 30 days
-  res.setHeader('cache-control', 'public, max-age=2592000, immutable')
-
   /**
    * If the file is found on file system, stream from file system
    */
diff --git a/creator-node/src/services/stateMachineManager/CNodeToSpIdMapManager.js b/creator-node/src/services/stateMachineManager/CNodeToSpIdMapManager.js
index 91a29862c92..cf983dcfda0 100644
--- a/creator-node/src/services/stateMachineManager/CNodeToSpIdMapManager.js
+++ b/creator-node/src/services/stateMachineManager/CNodeToSpIdMapManager.js
@@ -44,7 +44,7 @@ class CNodeToSpIdMapManager {
       throw new Error(errorMessage)
     }
 
-    logger.info(`updateEndpointToSpIdMap Success. Size: ${mapLength.length}`)
+    logger.info(`updateEndpointToSpIdMap Success. Size: ${mapLength}`)
   }
 }
diff --git a/creator-node/src/services/stateMachineManager/stateMachineUtils.js b/creator-node/src/services/stateMachineManager/stateMachineUtils.js
index 803779ab07d..136fcd67523 100644
--- a/creator-node/src/services/stateMachineManager/stateMachineUtils.js
+++ b/creator-node/src/services/stateMachineManager/stateMachineUtils.js
@@ -20,7 +20,6 @@ const {
 const MAX_BATCH_CLOCK_STATUS_BATCH_SIZE = config.get(
   'maxBatchClockStatusBatchSize'
 )
-const SP_ID = config.get('spID')
 const DELEGATE_PRIVATE_KEY = config.get('delegatePrivateKey')
 
 /**
@@ -36,6 +35,8 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async (
   const replicasToUserClockStatusMap = {}
   const unhealthyPeers = new Set()
 
+  const spID = config.get('spID')
+
   /** In parallel for every replica, fetch clock status for all users on that replica */
   const replicas = Object.keys(replicasToWalletsMap)
   await Promise.all(
@@ -65,10 +66,10 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async (
 
         // Sign request to other CN to bypass rate limiting
         const { timestamp, signature } = generateTimestampAndSignature(
-          { spID: SP_ID },
+          { spID: spID },
           DELEGATE_PRIVATE_KEY
         )
-        axiosReqParams.params = { spID: SP_ID, timestamp, signature }
+        axiosReqParams.params = { spID: spID, timestamp, signature }
 
         let batchClockStatusResp = []
         let errorMsg
@@ -110,7 +111,7 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async (
  * Signs request with spID to bypass rate limits
  */
 const retrieveClockValueForUserFromReplica = async (replica, wallet) => {
-  const spID = SP_ID
+  const spID = config.get('spID')
  const { timestamp, signature } = generateTimestampAndSignature(
    { spID },
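
A minimal verification sketch for the header changes above (not part of the patch). It assumes a creator node reachable at http://localhost:4000 (the port nginx listens on in this config) and a placeholder CID that is already stored on that node; the host, CID, and filename below are hypothetical values chosen for illustration.

// check_headers.js -- hypothetical helper, not part of the patch
const axios = require('axios')

async function checkCidResponseHeaders() {
  const cid = 'QmPlaceholderCid' // placeholder: any CID already stored on this node
  const res = await axios.get(`http://localhost:4000/ipfs/${cid}`, {
    params: { filename: 'track.mp3' }, // exercises the new Content-Disposition branch
    responseType: 'arraybuffer'
  })

  // Expected per the patch: 'public, max-age=2592000, immutable' (30 days)
  console.log('cache-control:', res.headers['cache-control'])
  // Filename from ?filename= should be reflected in the download prompt header
  console.log('content-disposition:', res.headers['content-disposition'])
}

checkCidResponseHeaders().catch((e) => console.error(e.message))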