How to make a preemptive cache with nginx?
Currently, the cache becomes stale and lots of images have to be reloaded at once.
In my http section I have
proxy_cache_path /var/cache/nginx levels=1:1 keys_zone=zone:10m;
In my server configuration I have something like
server {
listen 80 default deferred;
server_name myservername;
root /myapp/public;
client_max_body_size 2G;
proxy_cache_bypass $http_pragma;
proxy_cache_valid 200 301 302 304 1M;
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
proxy_cache zone;
gzip_static on;
try_files $uri @app;
location @app {
if ($request_uri ~* "\.(ico|css|js|gif|jpe?g|png)\?[0-9]+$") {
expires max;
break;
}
client_body_buffer_size 32k;
proxy_buffers 8 64k;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://myupstream;
}
}
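For reference, nginx can serve stale cache entries while it refreshes them in the background, which avoids the "everything expires at once" effect. A minimal sketch, assuming nginx 1.11.10 or later (for proxy_cache_background_update) and reusing the zone and @app names from the config above:
location @app {
    proxy_cache zone;
    proxy_cache_valid 200 301 302 304 1M;
    # Serve the stale copy while a single background request refreshes it,
    # instead of letting every client miss when an entry expires.
    proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
    proxy_cache_background_update on;
    # Collapse concurrent misses for the same key into one upstream request.
    proxy_cache_lock on;
    proxy_pass http://myupstream;
}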
We would like to launch a NextJS 10 app using NGINX so we use a configuration similar to:
location /_next/static/ {
alias /home/ec2-user/my-app/.next/static/;
expires 1y;
access_log on;
}
It works great: it caches our static assets for a year. But since we use Next.js images, I'm failing to add an Expires header to the on-the-fly resized images.
If I do:
location /_next/image/ {
alias /home/ec2-user/my-app/.next/image;
expires 1y;
access_log on;
}
It just returns a 404 on images.
Here is the server part of my NGINX config:
server {
listen 80;
server_name *.my-website.com;
# root /usr/share/nginx/html;
# root /home/ec2-user/my-app;
charset utf-8;
client_max_body_size 20M;
client_body_buffer_size 20M;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
underscores_in_headers on;
add_header X-Frame-Options SAMEORIGIN always;
add_header X-Content-Type-Options nosniff always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "same-origin" always;
location = /robots.txt {
proxy_pass https://api.my-website.com/robots.txt;
}
location /_next/static/ {
alias /home/ec2-user/my-app/.next/static/;
expires 1y;
access_log on;
}
location / {
# reverse proxy for merchant next server
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass_request_headers on;
proxy_cache_bypass $http_upgrade;
proxy_buffering off;
}
}
Here is an example of how you can rely on the upstream Content-Type header to set up the Expires and Cache-Control headers:
map $upstream_http_content_type $expire {
~^image/ 1y; # 'image/*' content type
default off;
}
server {
...
location / {
# reverse proxy for merchant next server
proxy_pass http://localhost:3000;
...
expires $expire;
}
}
In the same way, you can tune the cache control headers for any other content type of the proxied response. The $upstream_http_<name> nginx variable is described here.
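For instance, a sketch (the extra content types and lifetimes are purely illustrative) that also gives CSS and JavaScript responses a shorter lifetime:
map $upstream_http_content_type $expire {
    ~^image/                  1y;   # any image/* response
    ~^text/css                30d;  # illustrative value
    ~^application/javascript  30d;  # illustrative value
    default                   off;
}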
Update
To add cache control headers only for specific URIs, you can use two chained map blocks:
map $uri $expire_by_uri {
~^/_next/image/ 1y;
default off;
}
map $upstream_http_content_type $expire {
~^image/ $expire_by_uri;
default off;
}
And if you don't expect anything but images from the /_next/image/... URIs, you can just use a single map:
map $uri $expire {
~^/_next/image/ 1y;
default off;
}
I have a dockerized Spring Boot application and Keycloak for authorization, so I tried to use nginx as a reverse proxy (nginx is not dockerized). When I use nginx without SSL it works perfectly. But when I try to enable SSL with an HTTPS-to-HTTP redirect, Keycloak falls into a redirect loop after entering credentials.
My nginx config:
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user docker-user;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 5m;
ssl_prefer_server_ciphers on;
ssl_stapling on;
resolver 8.8.8.8;
include /etc/nginx/mime.types;
default_type application/octet-stream;
include /etc/nginx/conf.d/*.conf;
server {
server_name myhost.com;
listen 443 ssl;
ssl_certificate /etc/nginx/ssl/certs/bundle.crt;
ssl_certificate_key /etc/nginx/ssl/certs/cert.key;
ssl_dhparam /etc/nginx/ssl/certs/dhparam.pem;
ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_timeout 1440m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
client_max_body_size 500M;
proxy_read_timeout 3600;
location /auth {
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $server_name;
proxy_pass http://myhost.com:8080;
proxy_redirect off;
}
location / {
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $server_name;
proxy_pass http://myhost.com:9010;
proxy_redirect off;
}
error_page 404 /404.html;
location = /404.html {
root /usr/share/nginx/html;
}
}
server {
if ($host = myhost.com) {
return 301 https://$host$request_uri;
}
server_name myhost.com;
listen 80;
return 404;
}
}
Spring Boot log during the redirect loop:
2021-05-13 10:45:25.756 ERROR 1 --- [qtp276869158-20] o.k.adapters.OAuthRequestAuthenticator : failed to turn code into token
2021-05-13 10:45:25.757 ERROR 1 --- [qtp276869158-20] o.k.adapters.OAuthRequestAuthenticator : status from server: 301
2021-05-13 10:45:25.757 ERROR 1 --- [qtp276869158-20] o.k.adapters.OAuthRequestAuthenticator : <html>
<head><title>301 Moved Permanently</title></head>
<body bgcolor="white">
<center><h1>301 Moved Permanently</h1></center>
<hr><center>nginx/1.14.1</center>
</body>
</html>
I solved my problem.
In the Keycloak docker-compose file, you need to set KEYCLOAK_FRONTEND_URL to the reverse proxy location, e.g. KEYCLOAK_FRONTEND_URL: "https://myhost.com/auth".
I'm pretty stuck after a few days of trying to get this working, and I could use some help.
I have a vapor API that works fine. I created a route and can access it from http://localhost:8080/backend/returnA in a browser on the server. It returns some JSON.
Where I'm stuck is in trying to configure Nginx to serve as a proxy. Can anyone help me understand how the http://localhost:8080/backend/returnA URL translates into a working URL accessible from the LAN?
I'm pretty confused, as nginx.conf asks for a root path but I don't know what to put there. If I leave it blank it defaults to /usr/local/Cellar/nginx/1.15.6/html/backend/returnA/index.html, which obviously won't work. If I set it to the public folder in the Vapor app directory it also doesn't work. In both instances I get a "No such file or directory".
I've gone through countless Nginx conf settings found online, tried adding a proxy location, nothing works. Trying http://172.16.1.25/backend/returnA/ always returns a 404 from the Nginx server.
How do I point Nginx to my Vapor route when it's not serving a static file like index.html, but instead returning JSON?
Any help is much appreciated.
Here's the config, edited to include Thanh's code, old location commented out:
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
server_name 172.16.1.25;
listen 80 default_server;
root /Users/localadmin/Developer/server/MedicapAPI/Public/;
# location #proxy {
# proxy_pass http://127.0.0.1:8080;
# proxy_pass_header Server;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_pass_header Server;
# proxy_connect_timeout 3s;
# proxy_read_timeout 10s;
# }
location / {
proxy_ignore_client_abort on;
proxy_pass http://localhost:8080/;
proxy_redirect off;
}
}
include servers/*;
}
Using this:
listen 80 default_server;
server_name 172.16.1.25; #ip address of server
it will act as the catch-all (default) server block, and:
location / {
proxy_ignore_client_abort on;
proxy_pass http://localhost:8080/;
proxy_redirect off;
}
it will proxy_pass requests to the application running on port 8080.
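Put together, the minimal server block from this answer looks like this (directives exactly as above):
server {
    listen 80 default_server;
    server_name 172.16.1.25;  # ip address of the server
    location / {
        proxy_ignore_client_abort on;
        proxy_pass http://localhost:8080/;
        proxy_redirect off;
    }
}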
server {
#server_name mysite.com;
listen 80;
error_log /var/log/mysite.com_error.log warn;
access_log /var/log/mysite.com.ru_access.log;
large_client_header_buffers 8 32k;
client_max_body_size 10M;
location / {
# redirect all traffic to localhost:8080;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://127.0.0.1:8080/;
proxy_redirect off;
proxy_read_timeout 86400;
# enables WS support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# prevents 502 bad gateway error
proxy_buffers 8 32k;
proxy_buffer_size 64k;
reset_timedout_connection on;
tcp_nodelay on;
}
# Give direct access to Public files of your app instead of using FileMiddleware
location ~* ^.+\.(jpg|jpeg|gif|css|png|js|ico|xml|html|mp4|pdf)$ {
access_log off;
expires 30d;
root /path/to/your/app/Public;
}
}
This is my working example.
For production I suggest using an SSL certificate, e.g. from Let's Encrypt: change the listen directive to listen 443 ssl; (the standalone ssl on; directive is deprecated) and add the following configuration lines after it:
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_certificate /etc/letsencrypt/live/mysite.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/mysite.com/privkey.pem;
ssl_ciphers 'HIGH:!aNULL:!MD5:!kEDH';
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";
ssl_stapling on;
ssl_stapling_verify on;
That's it, now you're ready for production!
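Assembled, the top of the HTTPS server block would look roughly like this (certificate paths as in the snippet above; the plain-HTTP redirect server is a common companion and not part of the original answer):
server {
    listen 443 ssl;
    server_name mysite.com;
    ssl_certificate /etc/letsencrypt/live/mysite.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mysite.com/privkey.pem;
    # ... remaining ssl_* settings and the location blocks from the example above ...
}
# Optional: redirect plain HTTP to HTTPS.
server {
    listen 80;
    server_name mysite.com;
    return 301 https://$host$request_uri;
}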
I want to fetch a file from the cache conditionally, based on a custom header in the request.
If the X-Proxy header is present in the request, return the file only if it's present in the cache. Otherwise fetch it from the internet if necessary.
Here's my .conf file:
worker_processes 1;
events {
worker_connections 1024;
}
http {
proxy_cache_path /home/nginx/proxy levels=1:2 keys_zone=one:15m inactive=7d max_size=1000m;
proxy_temp_path /home/nginx/temp;
proxy_buffering on;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_set_header Connection "";
proxy_http_version 1.1;
server {
listen 8000;
location / {
root /home/nginx/preload;
try_files /$uri @local @remote;
}
location @local {
internal;
add_header X-Local true;
add_header X-Cache $upstream_cache_status;
proxy_pass http://$http_host$uri$is_args$args;
proxy_cache one;
proxy_cache_key backend$request_uri;
proxy_cache_valid 200 1h;
proxy_cache_use_stale error timeout invalid_header;
}
location @remote {
resolver 8.8.8.8;
add_header X-Remote true;
add_header X-Cache $upstream_cache_status;
if ($http_x_proxy) {
return 404;
}
proxy_pass http://$http_host$uri$is_args$args;
proxy_cache one;
proxy_cache_key backend$request_uri;
proxy_cache_valid 200 1h;
proxy_cache_use_stale error timeout invalid_header;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}
The problem is that the try_files directive always passes into my @remote location, even if the fetched file is cached. How do I tell it that the file was found when it returns from @local?
The try_files directive only accepts one named location, so apparently it goes for the last one. This blog post proposes a workaround that works in your case. In case you don't want to read the whole post, you can add the following lines at the end of the @local block:
proxy_intercept_errors on;
recursive_error_pages on;
error_page 404 = @remote;
and change your try_files to this:
try_files /$uri @local;
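For clarity, with those three lines added the @local block from the question ends up looking like this (other directives unchanged):
location @local {
    internal;
    add_header X-Local true;
    add_header X-Cache $upstream_cache_status;
    proxy_pass http://$http_host$uri$is_args$args;
    proxy_cache one;
    proxy_cache_key backend$request_uri;
    proxy_cache_valid 200 1h;
    proxy_cache_use_stale error timeout invalid_header;
    # Workaround: if this pass ends in a 404, retry the request via @remote.
    proxy_intercept_errors on;
    recursive_error_pages on;
    error_page 404 = @remote;
}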
I am using the following config file for nginx and it works fine with Chrome but not with Firefox. With Firefox, I get the following error:
"Firefox has detected that the server is redirecting the request for
this address in a way that will never complete."
Clearing the cookies and cache in Firefox does not help.
upstream dev_server {
server 127.0.0.1:8100 fail_timeout=0;
}
server {
listen 80;
server_name subdomain.pro.domain.com;
location /blog {
proxy_pass http://dev_server;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Accept-Encoding "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
add_header Front-End-Https on;
proxy_redirect off;
}
location / {
rewrite ^(.*)$ https://subdomain.pro.domain.com$1;
}
}
server {
listen 443;
ssl on;
server_name subdomain.pro.domain.com;
ssl_certificate /etc/nginx/star.pro.domain.com.crt;
ssl_certificate_key /etc/nginx/star.pro.domain.com.key;
### SSL settings here ###
ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers RC4:HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
keepalive_timeout 60;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
add_header Strict-Transport-Security max-age=500;
location /blog {
rewrite ^(.*)$ http://subdomain.pro.domain.com$1;
}
location / {
proxy_pass http://dev_server;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Accept-Encoding "";
proxy_set_header Host $http_host;
proxy_set_header X-M-Secure "true";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
add_header Front-End-Https on;
proxy_max_temp_file_size 0;
proxy_redirect off;
}
}
Found the issue.
Because /blog redirected to HTTP while all other paths redirected to HTTPS, the problem was with the following configuration line:
add_header Strict-Transport-Security max-age=500;
When I commented out that line, the issue went away. (Strict-Transport-Security tells the browser to always use HTTPS for the domain, so the HTTP redirect for /blog immediately bounced back to HTTPS, creating the loop.)