k8s ingress configmaps not updated inside the nginx-ingress pods - kubernetes

from nginx DaemonSet
args:
- -nginx-configmaps=$(POD_NAMESPACE)/nginx-config
- -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret
- -enable-tls-passthrough
from nginx configmap
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-config
namespace: nginx-ingress
data:
enable-underscores-in-headers: "true"
proxy_buffers: "4 256k"
proxy_buffer_size: "128k"
proxy_busy_buffers_size: "256k"
client-max-body-size: "0m"
error-log-level: "debug"
http2: "true"
http2_max_field_size: "64k"
http2_max_header_size: "64k"
large_client_header_buffers: "16 128k"
proxy-add-original-uri-header: "true"
server-names-hash-bucket-size: "2048"
server-names-hash-max-size: "1024"
use-forwarded-headers: "true"
use-proxy-protocol: "true"
log-format: '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for" "$resource_name" "$resource_type" "$resource_namespace" "$service"'
from the nginx-ingress pods of nginx.conf
daemon off;
error_log stderr debug;
pid /var/lib/nginx/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main
'$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for" "$resource_name" "$resource_type" "$resource_namespace" "$service"'
;
access_log /dev/stdout main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65s;
keepalive_requests 100;
#gzip on;
server_names_hash_max_size 1024;
server_names_hash_bucket_size 2048;
variables_hash_bucket_size 256;
variables_hash_max_size 1024;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
map $http_upgrade $vs_connection_header {
default upgrade;
'' $default_connection_header;
}
server {
# required to support the Websocket protocol in VirtualServer/VirtualServerRoutes
set $default_connection_header "";
set $resource_type "";
set $resource_name "";
set $resource_namespace "";
set $service "";
listen 80 default_server;
listen unix:/var/lib/nginx/passthrough-https.sock ssl default_server proxy_protocol;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
ssl_certificate /etc/nginx/secrets/default;
ssl_certificate_key /etc/nginx/secrets/default;
server_name _;
server_tokens "on";
location / {
return 404;
}
}
# stub_status
server {
listen 8080;
allow 127.0.0.1;
deny all;
location /stub_status {
stub_status;
}
}
include /etc/nginx/config-version.conf;
include /etc/nginx/conf.d/*.conf;
server {
listen unix:/var/lib/nginx/nginx-502-server.sock;
access_log off;
return 502;
}
server {
listen unix:/var/lib/nginx/nginx-418-server.sock;
access_log off;
return 418;
}
}
stream {
log_format stream-main '$remote_addr [$time_local] '
'$protocol $status $bytes_sent $bytes_received '
'$session_time "$ssl_preread_server_name"';
access_log /dev/stdout stream-main;
map $ssl_preread_server_name $dest_internal_passthrough {
default unix:/var/lib/nginx/passthrough-https.sock;
include /etc/nginx/tls-passthrough-hosts.conf;
}
server {
listen 443;
ssl_preread on;
proxy_protocol on;
proxy_pass $dest_internal_passthrough;
}
include /etc/nginx/stream-conf.d/*.conf;
}

Related

`nginx` reverse proxy issue in `docker`

Port 80 of nginx inside the container is mapped to host port 11111 via Docker.
nginx is configured as follows
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
location /goto/ {
proxy_pass https://www.kugou.com/;
}
}
}
For example host Ip: 114.188.16.20
Visit: 114.188.16.20:11111/goto
The expectation is to proxy to https://www.kugou.com/, but every visit to 114.188.16.20:11111/goto is always redirected to 114.188.16.20/goto, falling back to the default port 80.
how to deal with this
/goto/ does not match /goto.
Use location /goto .

Openshift route redirects to cluster console

I'm installing OKD v3.11 cluster with ansible.
Cluster configuration:
4 master-infra-etcd nodes
4 computing nodes
1 external load balancer
Load balancer configuration is described here:
https://github.com/redhat-cop/openshift-playbooks/blob/master/playbooks/installation/load_balancing.adoc#custom-certificate-ssl-termination-production
I'm using nginx as my external load balancer.
Inventory (truncated):
openshift_master_cluster_hostname: okd-internal.example.com
openshift_master_cluster_public_hostname: okd.example.com
openshift_master_default_subdomain: okd.example.com
Here is my NGINX configuration:
user nginx;
error_log /var/log/nginx/error.log warn;
load_module /usr/lib64/nginx/modules/ngx_stream_module.so;
pid /var/run/nginx.pid;
worker_processes "4";
events {
worker_connections 1024;
multi_accept off;
}
stream {
upstream okd_masters {
server okdmaster01.lab.example.com:8443;
server okdmaster02.lab.example.com:8443;
server okdmaster03.lab.example.com:8443;
server okdmaster04.lab.example.com:8443;
}
server {
listen 8443;
proxy_pass okd_masters;
}
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_names_hash_bucket_size 64;
client_max_body_size 20M;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 120;
keepalive_requests 10;
server_tokens on;
upstream okd_masters {
server okdmaster01.lab.example.com:8443;
server okdmaster02.lab.example.com:8443;
server okdmaster03.lab.example.com:8443;
server okdmaster04.lab.example.com:8443;
}
server {
listen 443 ssl;
server_name okd.example.com *.okd.example.com;
ssl_certificate /etc/nginx/ssl/all.okd.example.com.crt;
ssl_certificate_key /etc/nginx/ssl/all.okd.example.com.key;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 5m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
keepalive_timeout 600;
keepalive_requests 10;
proxy_set_header X-Forwarded-Host $host:$server_port;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
location / {
proxy_pass https://okd_masters;
}
}
}
The problem is when I'm trying to access route (app.okd.example.com) I'm getting redirected to okd.example.com instead

nginx on vagrant keeps on dropping port

so I have vagrant with port forwarding
here is vagrantfile
Vagrant.configure("2") do |config|
config.vm.provider "virtualbox" do |v|
v.memory = 2048
v.cpus = 4
end
config.berkshelf.enabled = true
config.berkshelf.berksfile_path = "./Berksfile"
config.omnibus.chef_version = :latest
config.vm.network :forwarded_port, guest: 80, host: 8080
config.vm.network :forwarded_port, guest: 22, host: 2020
end
Here is nginx.conf
user www-data;
worker_processes 4;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_requests 100;
keepalive_timeout 65;
gzip on;
gzip_http_version 1.0;
gzip_comp_level 2;
gzip_proxied any;
gzip_vary off;
gzip_types text/plain text/css application/x-javascript text/xml application/xml application/rss+xml application/atom+xml text/javascript application/javascript application/json text/mathml;
gzip_min_length 1000;
gzip_disable "MSIE [1-6]\.";
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_bucket_size 64;
types_hash_max_size 2048;
types_hash_bucket_size 64;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
here is sites_enabled/default
server {
listen 80;
server_name precise64;
access_log /var/log/nginx/localhost.access.log;
root /vagrant/site/www/public;
index index.php index.html index.htm;
location / {
try_files $uri $uri/ /index.php?$query_string;
}
location ~ \.php$ {
try_files $uri /index.php =404;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass unix:/run/php/php7.1-fpm.sock;
fastcgi_index index.php;
}
}
The issue is that every time I go to http://localhost:8080/somedirectory on my browser, nginx ends up redirecting (301) to http://localhost/somedirectory
If I access a specific file, it doesn't do the redirect as expected
why is that happening and how can I prevent nginx from dropping the port?
I don't see a redirect in your NGINX config so my guess is that it's coming from your PHP app.

nginx 301 redirect to incorrect vhost

I'm having a 301 redirect issue for multiple sites pointing to our primary site although the intended affected sites are in their own server blocks. If I disable the primary site, the others work as intended, so it seems something in the primary config is trumping the others. Any help would be appreciated.
/etc/nginx/nginx.conf:
user nginx;
worker_processes 8;
worker_rlimit_nofile 100000;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 4096;
multi_accept on;
use epoll;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
#access_log /var/log/nginx/access.log;
access_log off;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_min_length 1100;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
open_file_cache max=2000 inactive=20s;
open_file_cache_valid 60s;
open_file_cache_min_uses 5;
open_file_cache_errors off;
client_max_body_size 50M;
client_body_buffer_size 1m;
client_body_timeout 15;
client_header_timeout 15;
keepalive_timeout 2 2;
send_timeout 15;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
fastcgi_buffers 256 16k;
fastcgi_buffer_size 128k;
fastcgi_connect_timeout 3s;
fastcgi_send_timeout 120s;
fastcgi_read_timeout 120s;
fastcgi_busy_buffers_size 256k;
fastcgi_max_temp_file_size 0;
reset_timedout_connection on;
server_names_hash_bucket_size 100;
fastcgi_cache_path /var/cache/nginx levels=1:2 keys_zone=microcache:10m max_size=1000m inactive=60m;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
include /etc/nginx/conf.d/*.conf;
}
This is the vhost conf that appears to be trumping other vhosts. /etc/nginx/conf.d/site1.conf:
server {
listen 10.10.10.1:80;
listen 10.10.10.1:443 ssl;
server_name ^site1\.org$ ^www\.site1\.org$ ^old\.site1domain\.org$;
ssl_certificate ...;
ssl_certificate_key ...;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
keepalive_timeout 70;
root /var/www/vhosts/site1.org/httpdocs;
index index.php;
client_max_body_size 128M;
location ~ \.php$ {
try_files $uri =404;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_index index.php;
fastcgi_pass unix:/var/run/php-fpm/site1.sock;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
include /etc/nginx/fastcgi_params;
include /etc/nginx/fastcgi.conf;
open_file_cache max=4000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
}
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains";
if ($scheme != "https") {
rewrite ^ https://site1.org$uri permanent;
}
if ($host != "site1.org") {
rewrite ^ https://site1.org$uri permanent;
}
#wp-super-cache
....
location ~* .(ogg|ogv|svg|svgz|eot|otf|woff|mp4|ttf|css|js|jpg|jpeg|gif|png|ico|zip|tgz|gz|rar|bz2|doc|xls|exe|tar|mid|midi|wav|bmp)$ {
expires max;
}
}
Phew. OK, here is an example of a different vhost config, which does not seem to be answering requests (instead, i get a 301 to the vhost above, with or without the redirects commented out).
/etc/nginx/conf.d/site2.conf:
server {
listen 10.10.10.1:80;
server_name ^sub1\.site2\.org$;
allow all;
proxy_redirect / http://10.10.10.1:6969;
location / {
proxy_pass http://10.10.10.1:6969;
}
}
However, for some reason, this SSL proxy works as intended (on different IP):
/etc/nginx/conf.d/site3.conf:
server {
listen 10.10.10.2:443 ssl;
server_name ^sub3\.site1\.org$;
ssl_certificate ...;
ssl_certificate_key ...;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
allow all;
proxy_redirect / http://sub3.site1.org:80/;
location / {
proxy_pass http://sub3.site1.org:80/;
}
proxy_set_header Host $http_host;
}
Here is what I get when I attempt to connect to http://sub1.site2.org:
[c09 79] /etc/nginx/conf.d # wget {sub1.site2.url}
--2015-11-25 09:09:28-- {sub1.site2.url}
Resolving sub1.site2.org... 10.10.10.1
Connecting to sub1.site2.org|10.10.10.1|:80... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: {site1.url} [following]
and so on...
Thanks in advance.
Your server_name directives are all invalid so none of them match. So nginx uses the first server container as the default and processes all requests through that.
It then hits your rewrite ^ https://site1.org$uri permanent; conditional rewrite.
If you must use regex in your server_names (although it's less efficient unless you really need it), you must prefix the name with ~. Otherwise, just use the plain name.
server_name site1.org www.site1.org old.site1domain.org;
See this document for details.

Deploying rails with capistrano + Nginx + Passenger + MYSQL to VPS

This is my first time deploying a rails app and I'm finding the process very frustrating. For work related reasons we are using Rackspace cloud with Ubuntu 12.04 LTS (Precise Pangolin) and MYSQL instead of the Heroku route.
I've been trying to figure this out for 2+ days and I'm finally turning to the community for help. Currently I'm getting a "404 Not Found Error on my server"
I've followed Ryan's Screencasts on "Deploying to a VPS", "Capistrano Recipes", this tutorial, and others on google etc. and I'm still not quite there.
I managed to get the following installed:
Node.js
RVM
ruby 1.9.3p194 (2012-04-20 revision 35410) [x86_64-linux]
Rails 3.2.8
MYSQL
Passenger
Nginx
I'm pretty sure I'm missing something simple in my Capistrano Deployer here:
require "bundler/capistrano"
server "198.101.242.242", :web, :app, :db, primary: true
set :application, "myapp"
set :user, "deployer"
set :deploy_to, "/home/#{user}/#{application}"
set :deploy_via, :remote_cache
set :use_sudo, false
set :scm, "git"
set :repository, "git#github.com:xxxx/#{application}.git"
set :branch, "master"
default_run_options[:pty] = true
ssh_options[:forward_agent] = true
# if you want to clean up old releases on each deploy uncomment this:
after "deploy", "deploy:cleanup"
# if you're still using the script/reaper helper you will need
# these http://github.com/rails/irs_process_scripts
# If you are using Passenger mod_rails uncomment this:
namespace :deploy do
task :start do ; end
task :stop do ; end
task :restart, :roles => :app, :except => { :no_release => true } do
run "#{try_sudo} touch #{File.join(current_path,'tmp','restart.txt')}"
end
end
Is there something I should be doing before deploying?
Here are Nginx error logs:
2012/09/16 23:22:17 [error] 13939#0: *1 "/home/deployer/myapp/public/index.html" is not found (2: No such file or directory), client: ip, server: localhost, request: "GET / HTTP/1.1", host: "ip"
2012/09/16 23:22:17 [error] 13939#0: *1 open() "/home/deployer/myapp/public/favicon.ico" failed (2: No such file or directory), client: ip, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "65.61.189.109"
2012/09/16 23:22:19 [error] 13939#0: *1 "/home/deployer/myapp/public/index.html" is not found (2: No such file or directory), client: ip, server: localhost, request: "GET / HTTP/1.1", host: "ip"
2012/09/16 23:22:19 [error] 13939#0: *1 open() "/home/deployer/myapp/public/favicon.ico" failed (2: No such file or directory), client: ip, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "ip"
Nginx Server Config File:
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
passenger_root /home/deployer/.rvm/gems/ruby-1.9.3-p194/gems/passenger-3.0.17;
passenger_ruby /home/deployer/.rvm/wrappers/ruby-1.9.3-p194/ruby;
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
location / {
root /home/deployer/myapp/public; # <--- be sure to point to 'public'!
}
passenger_enabled on;
# listen 80;
# server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
# location / {
# root html;
# index index.html index.htm;
# }
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# root html;
# }
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# server {
# listen 80;
# server_name localhost;
# location / {
# root /home/deployer/myapp/public; # <--- be sure to point to 'public'!
# }
# passenger_enabled on;
# }
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443;
# server_name localhost;
# ssl on;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_timeout 5m;
# ssl_protocols SSLv2 SSLv3 TLSv1;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
Thanks.
Ok so I ended up figuring out the problem with trial and error and from Beef Jerky's comments.
The first issue was that Capistrano was unable to install some gems from my application that had dependencies. So I installed those on production — specifically the rvm-capistrano gem and rmagick. The rvm-capistrano gem was the main problem and the reason I was getting errors when I deployed. The rmagick issue came after fixing the RVM problem, which I resolved by installing its dependencies.
The second issue I was having was pointing to my app's folder myapp/public instead of the current release myapp/current/public in the Nginx server config file. So I kept the existing server block and placed my settings inside, and then it worked. Below are the settings for the Nginx server config file
server {
listen 80;
server_name localhost;
root /home/deployer/banking_analytics/current/public; # <--- be sure to point to 'public'!
passenger_enabled on;
# listen 80;
# server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
# location / {
# root html;
# index index.html index.htm;
# }
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# root html;
# }
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# server {
# listen 80;
# server_name localhost;
# location / {
# root /home/deployer/banking_analytics/public; # <--- be sure to point to 'public'!
# }
# passenger_enabled on;
# }
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443;
# server_name localhost;
# ssl on;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_timeout 5m;
# ssl_protocols SSLv2 SSLv3 TLSv1;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
I had a similar error some time ago in my first deploy.
In my case I had set a wrong path in the file mod-http-passenger.conf
My recommendation is to enable passenger_friendly_error_pages in the nginx settings for more error information. In some cases it seems to be better than the logs.
On server side
sudo vim /etc/nginx/sites-enabled/myApp
Add
passenger_friendly_error_pages on;