diff --git a/packages/discovery-provider/nginx_conf/main.lua b/packages/discovery-provider/nginx_conf/main.lua
index e2663f8b8b5..4089d45fde6 100644
--- a/packages/discovery-provider/nginx_conf/main.lua
+++ b/packages/discovery-provider/nginx_conf/main.lua
@@ -266,12 +266,7 @@ function _M.validate_nethermind_rpc_request ()
     local data = ngx.req.get_body_data()
     if data then
         local body = cjson.decode(data)
-        local is_ok = utils.starts_with(body.method, "eth_") or utils.starts_with(body.method, "net_")
-
-        if body.method == "eth_sendRawTransaction" or body.method == "eth_sendTransaction" then
-            ngx.log(ngx.WARN, "WOULD_BLOCK: ", data)
-            -- is_ok = false
-        end
+        is_ok = utils.starts_with(body.method, "eth_") or utils.starts_with(body.method, "net_")
 
         if not is_ok then
             ngx.exit(405)
diff --git a/packages/discovery-provider/nginx_conf/nginx.conf b/packages/discovery-provider/nginx_conf/nginx.conf
new file mode 100644
index 00000000000..7c1df796a6b
--- /dev/null
+++ b/packages/discovery-provider/nginx_conf/nginx.conf
@@ -0,0 +1,370 @@
+# This config rate limits requests and redirects those that exceed the rate limit to other discovery providers.
+
+# Requests are rate limited by allowing only config.limit_to_rps requests per second to this discovery node; requests beyond that limit are redirected to other discovery nodes.
+
+# To avoid infinite redirection, we set redirect_nonce, redirect_sig, and redirect_from when redirecting; the discovery provider receiving the redirect verifies this signature. The nonce prevents an attacker from obtaining a valid redirect_sig, since that could be used to focus a DDoS attack on a single node.
+
+worker_processes 1;
+
+error_log logs/error.log notice;
+
+env audius_openresty_accept_redirect_from;
+env audius_discprov_url;
+env audius_openresty_rps;
+env audius_openresty_redirect_targets;
+env audius_openresty_rsa_private_key;
+env audius_openresty_rsa_public_key;
+env REGISTERED_PLUGINS;
+
+events {
+    worker_connections 4096;
+}
+
+http {
+    # set dns nginx should use for resolving external domains
+    resolver 1.1.1.1;
+
+    # set lua_socket_keepalive_timeout to 3 mins to fix problems when fetching redirect_weights
+    lua_socket_keepalive_timeout 180s;
+
+    proxy_cache_path /usr/local/openresty/cache levels=1:2 keys_zone=cache:10m max_size=1g inactive=1m use_temp_path=off;
+
+    proxy_read_timeout 600; # 10 mins in seconds
+
+    lua_package_path "/usr/local/openresty/conf/?.lua;;";
+
+    lua_shared_dict limit_count_store 100m;
+    lua_shared_dict locks 12k;
+    lua_shared_dict nonce_store 10m;
+    lua_shared_dict request_count 10m;
+    lua_shared_dict rsa_public_key_store 10m;
+
+    log_format custom_format '{"remote_addr":"$remote_addr",'
+        '"time_local":"$time_local",'
+        '"request":"$request",'
+        '"status":$status,'
+        '"body_bytes_sent":$body_bytes_sent,'
+        '"http_referer":"$http_referer",'
+        '"http_user_agent":"$http_user_agent",'
+        '"request_time":$request_time}';
+
+    init_worker_by_lua_block {
+        local main = require "main"
+        main.start_update_redirect_weights_timer()
+    }
+
+    server {
+        listen 5000;
+        gzip on;
+        gzip_types text/plain application/xml application/json;
+        access_log /usr/local/openresty/logs/access.log custom_format;
+
+        proxy_set_header Host $http_host;
+        proxy_set_header X-Forwarded-For $remote_addr;
+
+        location = /openresty_pubkey {
+            content_by_lua_block {
+                local config = require "config"
+                ngx.say(config.rsa_public_key)
+            }
+        }
+
+        location = /request_count {
+            content_by_lua_block {
+                local count = ngx.shared.request_count:get("request-count")
+                if count == nil then
+                    ngx.say(0)
+                else
+                    ngx.say(count)
+                end
+            }
+        }
+
+        location /v1/metrics {
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        # $upstream references the audius-docker-compose network'd containers
+        location /prometheus/postgres {
+            resolver 127.0.0.11 valid=30s;
+            set $upstream exporter_postgres:9187;
+            proxy_pass http://$upstream/metrics;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location /prometheus/postgres/read-replica {
+            resolver 127.0.0.11 valid=30s;
+            set $upstream exporter_postgres_read_replica:9187;
+            proxy_pass http://$upstream/metrics;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location /prometheus/redis {
+            resolver 127.0.0.11 valid=30s;
+            set $upstream exporter_redis:9121;
+            proxy_pass http://$upstream/metrics;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location /prometheus/docker {
+            resolver 127.0.0.11 valid=30s;
+            set $upstream exporter_docker:9104;
+            proxy_pass http://$upstream/metrics;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location /v1/tracks/unclaimed_id {
+            access_by_lua_block {
+                local main = require "main"
+                main.mark_request_processing()
+                return main.limit_to_rps()
+            }
+
+            log_by_lua_block {
+                local main = require "main"
+                main.mark_request_processed()
+            }
+
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            add_header Cache-Control no-cache;
+        }
+
+        location /v1/playlists/unclaimed_id {
+            access_by_lua_block {
+                local main = require "main"
+                main.mark_request_processing()
+                return main.limit_to_rps()
+            }
+
+            log_by_lua_block {
+                local main = require "main"
+                main.mark_request_processed()
+            }
+
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            add_header Cache-Control no-cache;
+        }
+
+        location /v1/users/unclaimed_id {
+            access_by_lua_block {
+                local main = require "main"
+                main.mark_request_processing()
+                return main.limit_to_rps()
+            }
+
+            log_by_lua_block {
+                local main = require "main"
+                main.mark_request_processed()
+            }
+
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            add_header Cache-Control no-cache;
+        }
+
+        # Do not redirect any /v1/challenges/... requests, which need to resolve
+        # to the node that the request was intended for. Selection of
+        # nodes to respond to challenge attestations is intentional.
+        location /v1/challenges {
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location ~* .*/trending/.* {
+            access_by_lua_block {
+                local main = require "main"
+                main.mark_request_processing()
+                return main.limit_to_rps()
+            }
+
+            log_by_lua_block {
+                local main = require "main"
+                main.mark_request_processed()
+            }
+
+            # Don't use cache if user_id or X-User-ID is set
+            proxy_cache_bypass $arg_user_id$http_x_user_id;
+            proxy_no_cache $arg_user_id$http_x_user_id;
+
+            proxy_cache_valid any 5m;
+            proxy_cache cache;
+            proxy_cache_revalidate on;
+            proxy_cache_min_uses 1;
+            proxy_cache_lock on;
+
+            proxy_pass http://127.0.0.1:3000;
+        }
+
+        location ~* .*/search/.* {
+            access_by_lua_block {
+                local main = require "main"
+                main.mark_request_processing()
+                return main.limit_to_rps()
+            }
+
+            log_by_lua_block {
+                local main = require "main"
+                main.mark_request_processed()
+            }
+
+            # Don't use cache if user_id or X-User-ID is set
+            proxy_cache_bypass $arg_user_id$http_x_user_id;
+            proxy_no_cache $arg_user_id$http_x_user_id;
+
+            proxy_cache_valid any 60s;
+            proxy_cache cache;
+            proxy_cache_revalidate on;
+            proxy_cache_min_uses 1;
+            proxy_cache_lock on;
+
+            proxy_pass http://127.0.0.1:3000;
+        }
+
+        location /v1 {
+            access_by_lua_block {
+                local main = require "main"
+                main.mark_request_processing()
+                return main.limit_to_rps()
+            }
+
+            log_by_lua_block {
+                local main = require "main"
+                main.mark_request_processed()
+            }
+
+            proxy_cache_valid any 1s;
+            proxy_cache cache;
+            proxy_cache_revalidate on;
+            proxy_cache_min_uses 1;
+            proxy_cache_lock on;
+
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location /sitemaps/ {
+            proxy_cache cache;
+            proxy_cache_use_stale updating;
+            proxy_cache_background_update on;
+
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location /comms {
+            client_max_body_size 500M;
+            resolver 127.0.0.11 valid=30s;
+            set $upstream comms:8925;
+            proxy_pass http://$upstream;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            # for websockets:
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "upgrade";
+        }
+
+        location /d {
+            resolver 127.0.0.11 valid=30s;
+            set $upstream uptime:1996;
+            proxy_pass http://$upstream;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location /trpc/ {
+            resolver 127.0.0.11 valid=30s;
+            proxy_pass http://trpc:2022/;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location ^~ /healthz {
+            resolver 127.0.0.11 valid=30s;
+            proxy_pass http://healthz;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location ~ ^/relay(/.*)?$ {
+            resolver 127.0.0.11 valid=30s;
+            proxy_pass http://relay:6001/relay$1;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location ~ ^/solana(/.*)?$ {
+            resolver 127.0.0.11 valid=30s;
+            proxy_pass http://solana-relay:6002/solana$1;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location ~* ^/(nethermind|chain)/peer {
+            add_header 'Access-Control-Allow-Origin' '*';
+            add_header 'Access-Control-Allow-Methods' '*';
+            resolver 127.0.0.11 valid=30s;
+            content_by_lua_block {
+                local port = require "port"
+                local can_get_ip, ip = port.get_public_ip()
+                local can_connect = port.get_is_port_exposed(ip, 30300)
+                if can_get_ip and can_connect then
+                    ngx.status = ngx.HTTP_OK
+                else
+                    ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR
+                end
+                ngx.say(ip .. "\n" .. tostring(can_connect))
+            }
+        }
+
+        location ~* ^/(nethermind|chain) {
+            access_by_lua_block {
+                local main = require "main"
+                return main.validate_nethermind_rpc_request()
+            }
+
+            resolver 127.0.0.11 valid=30s;
+            set $upstream chain:8545;
+            proxy_pass http://$upstream/;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location = /plugins {
+            add_header 'Access-Control-Allow-Origin' '*';
+            add_header 'Access-Control-Allow-Methods' '*';
+            content_by_lua_block {
+                local plugins = require "plugins"
+                local json = plugins.get_health_check()
+                ngx.say(json)
+            }
+        }
+
+        location ~ ^/plugins/(?<upstream>\w+)/(.*)$ {
+            resolver 127.0.0.11 valid=30s;
+            proxy_pass http://$upstream:6000/$2;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+
+        location / {
+            proxy_pass http://127.0.0.1:3000;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+    }
+}
diff --git a/packages/discovery-provider/nginx_conf/nginx_container.conf b/packages/discovery-provider/nginx_conf/nginx_container.conf
index 7a2e0c4b84e..ad08c55d1d0 100644
--- a/packages/discovery-provider/nginx_conf/nginx_container.conf
+++ b/packages/discovery-provider/nginx_conf/nginx_container.conf
@@ -338,14 +338,14 @@ http {
             resolver 127.0.0.11 valid=30s;
             content_by_lua_block {
                 local port = require "port"
-                local can_get_ip, ip = port.get_public_ip()
+                local ip = port.get_public_ip()
                 local can_connect = port.get_is_port_exposed(ip, 30300)
-                if can_get_ip and can_connect then
+                if can_connect then
                     ngx.status = ngx.HTTP_OK
                 else
                     ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR
                 end
-                ngx.say(ip .. "\n" .. tostring(can_connect))
+                ngx.say(can_connect)
             }
         }
diff --git a/packages/discovery-provider/scripts/prod-server.sh b/packages/discovery-provider/scripts/prod-server.sh
index 4543cf83182..68e5eb38a75 100755
--- a/packages/discovery-provider/scripts/prod-server.sh
+++ b/packages/discovery-provider/scripts/prod-server.sh
@@ -24,12 +24,24 @@ fi
 
 audius_discprov_loglevel=${audius_discprov_loglevel:-info}
 
-
-# If a worker class is specified, use that. Otherwise, use sync workers.
-if [[ -z "${audius_gunicorn_worker_class}" ]]; then
-    exec gunicorn -b :5000 --error-logfile - src.wsgi:app --log-level=$audius_discprov_loglevel --workers=$WORKERS --threads=$THREADS --timeout=600
+if [[ "$audius_openresty_enable" == true ]]; then
+    openresty -p /usr/local/openresty -c /usr/local/openresty/conf/nginx.conf
+    tail -f /usr/local/openresty/logs/error.log | python3 scripts/openresty_log_convertor.py ERROR &
+    tail -f /usr/local/openresty/logs/access.log &
+
+    # If a worker class is specified, use that. Otherwise, use sync workers.
+    if [[ -z "${audius_gunicorn_worker_class}" ]]; then
+        exec gunicorn -b :3000 --error-logfile - src.wsgi:app --log-level=$audius_discprov_loglevel --workers=$WORKERS --threads=$THREADS --timeout=600
+    else
+        WORKER_CLASS="${audius_gunicorn_worker_class}"
+        exec gunicorn -b :3000 --error-logfile - src.wsgi:app --log-level=$audius_discprov_loglevel --worker-class=$WORKER_CLASS --workers=$WORKERS --timeout=600
+    fi
 else
-    WORKER_CLASS="${audius_gunicorn_worker_class}"
-    exec gunicorn -b :5000 --error-logfile - src.wsgi:app --log-level=$audius_discprov_loglevel --worker-class=$WORKER_CLASS --workers=$WORKERS --timeout=600
+    # If a worker class is specified, use that. Otherwise, use sync workers.
+    if [[ -z "${audius_gunicorn_worker_class}" ]]; then
+        exec gunicorn -b :5000 --error-logfile - src.wsgi:app --log-level=$audius_discprov_loglevel --workers=$WORKERS --threads=$THREADS --timeout=600
+    else
+        WORKER_CLASS="${audius_gunicorn_worker_class}"
+        exec gunicorn -b :5000 --error-logfile - src.wsgi:app --log-level=$audius_discprov_loglevel --worker-class=$WORKER_CLASS --workers=$WORKERS --timeout=600
+    fi
 fi
-
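Note: with audius_openresty_enable=true, OpenResty fronts the node on port 5000 and proxies to gunicorn on port 3000 (previously gunicorn bound to 5000 directly). A minimal smoke test of the new front end against a running node could look like the following sketch; localhost and the default listen port 5000 are assumptions for illustration, not part of this change:

    # RSA public key (config.rsa_public_key) that peer nodes can fetch to verify redirect_sig on redirected requests
    curl -s http://localhost:5000/openresty_pubkey

    # value of the request-count key in the request_count shared dict; the endpoint prints 0 when the key is unset
    curl -s http://localhost:5000/request_count

    # chain peer check: returns 200 only when the public IP can be determined and port 30300 is reachable
    curl -s -o /dev/null -w '%{http_code}\n' http://localhost:5000/chain/peer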