Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Release v0.3.61 cn changes #3352

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 10 additions & 5 deletions creator-node/nginx_conf/nginx.conf
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,19 @@ http {
client_max_body_size 0;
lua_package_path "/usr/local/openresty/conf/?.lua;;";

# Inactive = how long an item can remain in the cache without being accessed
# If the inactive period passes, content WILL BE DELETED from the cache by the cache
# manager, regardless of whether or not it has expired.
# https://www.nginx.com/blog/nginx-caching-guide#How-to-Set-Up-and-Configure-Basic-Caching
proxy_cache_path /usr/local/openresty/cache levels=1:2 keys_zone=cidcache:1000m
max_size=10g inactive=30m use_temp_path=off;
max_size=10g inactive=12h use_temp_path=off;
proxy_read_timeout 3600; # 1 hour in seconds

server {
listen 4000;

# Match the paths /ipfs/<cid: string> and /content/<cid: string>.
# If present in cache, serve.
# Else, hit upstream server + update cache + serve.
# If present in cache, serve. Else, hit upstream server + update cache + serve.
# http://nginx.org/en/docs/http/ngx_http_core_module.html#location
location ~ (/ipfs/|/content/) {
proxy_cache cidcache;
Expand All @@ -38,8 +41,10 @@ http {
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
proxy_cache_background_update on;

# Cache only 200 responses for 30m before considered stale
proxy_cache_valid 200 30m;
# Cache only responses with status code = 200 for some duration before considered stale.
# Stale means the content will be re-fetched from the upstream server on the next
# request. Stale content WILL NOT BE REMOVED from the cache.
proxy_cache_valid 200 12h;

# When enabled, only one request at a time will be allowed to populate a new cache element
# Other requests of the same cache element will either wait for a response to appear in the cache
Expand Down
30 changes: 18 additions & 12 deletions creator-node/src/routes/files.js
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,17 @@ const streamFromFileSystem = async (
res.set('Content-Length', stat.size)
}

// If the client has provided a filename, set it in the Content-Disposition header so it is auto-populated in the download prompt.
if (req.query.filename) {
res.setHeader(
'Content-Disposition',
contentDisposition(req.query.filename)
)
}

// Set the CID cache-control so that client caches the response for 30 days
res.setHeader('cache-control', 'public, max-age=2592000, immutable')

await new Promise((resolve, reject) => {
fileStream
.on('open', () => fileStream.pipe(res))
Expand All @@ -123,6 +134,9 @@ const streamFromFileSystem = async (
})
})
} catch (e) {
// Unset the cache-control header so that a bad response is not cached
res.removeHeader('cache-control')

// Unable to stream from file system. Throw a server error message
throw e
}
Expand All @@ -141,10 +155,10 @@ const logGetCIDDecisionTree = (decisionTree, req) => {
/**
* Given a CID, return the appropriate file
* 1. Check if file exists at expected storage path (current and legacy)
* 1. If found, stream from FS
* 2. Else, check if CID exists in DB. If not, return 404 not found error
* 3. If exists in DB, fetch file from CN network, save to FS, and stream from FS
* 4. If not avail in CN network, respond with 400 server error
* 2. If found, stream from FS
* 3. Else, check if CID exists in DB. If not, return 404 not found error
* 4. If exists in DB, fetch file from CN network, save to FS, and stream from FS
* 5. If not avail in CN network, respond with 400 server error
*/
const getCID = async (req, res) => {
if (!(req.params && req.params.CID)) {
Expand Down Expand Up @@ -355,14 +369,6 @@ const getCID = async (req, res) => {
}
}

// If the client has provided a filename, set it in the Content-Disposition header so it is auto-populated in the download prompt.
if (req.query.filename) {
res.setHeader('Content-Disposition', contentDisposition(req.query.filename))
}

// Set the CID cache-control so that client caches the response for 30 days
res.setHeader('cache-control', 'public, max-age=2592000, immutable')

/**
* If the file is found on file system, stream from file system
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ class CNodeToSpIdMapManager {
throw new Error(errorMessage)
}

logger.info(`updateEndpointToSpIdMap Success. Size: ${mapLength.length}`)
logger.info(`updateEndpointToSpIdMap Success. Size: ${mapLength}`)
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ const {
const MAX_BATCH_CLOCK_STATUS_BATCH_SIZE = config.get(
'maxBatchClockStatusBatchSize'
)
const SP_ID = config.get('spID')
const DELEGATE_PRIVATE_KEY = config.get('delegatePrivateKey')

/**
Expand All @@ -36,6 +35,8 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async (
const replicasToUserClockStatusMap = {}
const unhealthyPeers = new Set()

const spID = config.get('spID')

/** In parallel for every replica, fetch clock status for all users on that replica */
const replicas = Object.keys(replicasToWalletsMap)
await Promise.all(
Expand Down Expand Up @@ -65,10 +66,10 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async (

// Sign request to other CN to bypass rate limiting
const { timestamp, signature } = generateTimestampAndSignature(
{ spID: SP_ID },
{ spID: spID },
DELEGATE_PRIVATE_KEY
)
axiosReqParams.params = { spID: SP_ID, timestamp, signature }
axiosReqParams.params = { spID: spID, timestamp, signature }

let batchClockStatusResp = []
let errorMsg
Expand Down Expand Up @@ -110,7 +111,7 @@ const retrieveClockStatusesForUsersAcrossReplicaSet = async (
* Signs request with spID to bypass rate limits
*/
const retrieveClockValueForUserFromReplica = async (replica, wallet) => {
const spID = SP_ID
const spID = config.get('spID')

const { timestamp, signature } = generateTimestampAndSignature(
{ spID },
Expand Down