# Nginx Configuration File
# proxy-vm/nginx/nginx.conf

# Size the worker pool to the available CPU cores.
worker_processes auto;

# Warnings and above go to the container's stderr stream.
error_log /dev/stderr warn;

# PID file under /tmp — presumably so an unprivileged container user can
# write it; confirm against the container image's user setup.
pid /tmp/nginx.pid;

events {
    worker_connections 1024;
}
http {
    # --- Structured JSON access log ---
    # escape=json makes nginx JSON-escape every interpolated value.
    # $upstream_addr / $upstream_response_time are quoted as strings because
    # they may be "-" or a comma-separated list when retries occur.
    log_format json_log escape=json
        '{'
        '"time":"$time_iso8601",'
        '"remote_addr":"$remote_addr",'
        '"method":"$request_method",'
        '"uri":"$request_uri",'
        '"status":$status,'
        '"bytes_sent":$bytes_sent,'
        '"request_time":$request_time,'
        '"upstream_addr":"$upstream_addr",'
        '"upstream_response_time":"$upstream_response_time"'
        '}';

    access_log /dev/stdout json_log;

    # --- Performance ---
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    client_max_body_size 50m;
    gzip off;   # compression disabled — presumably to keep proxied streams untouched; confirm

    # --- IP allowlist (generated at container start) ---
    # NOTE(review): expected to define $allowed_ip (used by the server block) — confirm.
    include /etc/nginx/conf.d/allowlist.conf;

    # --- Token auth ---
    # NOTE(review): expected to define $auth_ok (used by the server block) — confirm.
    include /etc/nginx/conf.d/auth.conf;

    # --- Upstreams with connection keepalive ---
    # NOTE(review): upstream hostnames are resolved once when the config is
    # loaded; if the provider rotates IPs, nginx must be reloaded to pick
    # up the change.
    upstream elevenlabs_backend {
        server api.elevenlabs.io:443;
        keepalive 32;
    }

    upstream openai_backend {
        server api.openai.com:443;
        keepalive 32;
    }
server {
    listen 8080;
    server_name _;

    # All locally generated responses (health, auth errors, catch-all 404)
    # carry JSON bodies, so default their Content-Type to application/json.
    # Without this, `return 403 '{...}'` was emitted as text/plain.
    # Proxied responses are unaffected: the upstream's own Content-Type
    # header passes through.
    default_type application/json;

    # --- Shared proxy behaviour (inherited by both proxy locations) ---
    proxy_http_version 1.1;        # required for upstream keepalive
    proxy_ssl_server_name on;      # send SNI to the TLS upstream
    proxy_buffering off;           # stream upstream responses as they arrive
    proxy_request_buffering off;   # stream request bodies to the upstream
    proxy_read_timeout 120s;
    proxy_send_timeout 120s;
    # NOTE(review): proxy_ssl_verify defaults to off, so upstream TLS
    # certificates are NOT validated here — consider `proxy_ssl_verify on;`
    # plus a `proxy_ssl_trusted_certificate` CA bundle.

    # --- Health check (no auth) ---
    location = /health {
        access_log off;
        return 200 '{"status":"ok","version":"1.0"}';
    }

    # --- ElevenLabs ---
    location /elevenlabs/ {
        # Auth gates. $allowed_ip / $auth_ok come from the included
        # allowlist.conf / auth.conf — confirm both always define them.
        if ($allowed_ip = 0) {
            return 403 '{"error":"ip_not_allowed"}';
        }
        if ($auth_ok = 0) {
            return 403 '{"error":"invalid_token"}';
        }

        # Strip the /elevenlabs/ prefix and proxy the remainder.
        rewrite ^/elevenlabs/(.*) /$1 break;

        proxy_pass https://elevenlabs_backend;
        proxy_ssl_name api.elevenlabs.io;    # SNI / cert name for the upstream

        # Defining any proxy_set_header in a location cancels inheritance of
        # the entire set, so every header is listed per location.
        proxy_set_header Host api.elevenlabs.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Connection "";        # keep upstream connections alive
        proxy_set_header X-Proxy-Token "";     # never forward the proxy token upstream
    }

    # --- OpenAI ---
    location /openai/ {
        if ($allowed_ip = 0) {
            return 403 '{"error":"ip_not_allowed"}';
        }
        if ($auth_ok = 0) {
            return 403 '{"error":"invalid_token"}';
        }

        # Strip the /openai/ prefix and proxy the remainder.
        rewrite ^/openai/(.*) /$1 break;

        proxy_pass https://openai_backend;
        proxy_ssl_name api.openai.com;

        proxy_set_header Host api.openai.com;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Connection "";
        proxy_set_header X-Proxy-Token "";
    }

    # --- Catch-all ---
    location / {
        return 404 '{"error":"unknown_upstream","hint":"use /elevenlabs/ or /openai/"}';
    }
}
}