I have included the load module in my httpd.conf file as shown below.
LoadModule dispatcher_module libexec/apache2/dispatcher-apache2.4-4.2.0.so
Is this fine, or should it always be included as libexec/apache2/mod_dispatcher.so?
The docroot in the dispatcher.any file is configured as /docroot "/var/log/apache2/cache".
I am able to see the logs, but the cache directory is empty (it's not caching the content).
This is my dispatcher.any:
# Each farm configures a set of load balanced renders (i.e. remote servers)
/farms
{
# First farm entry
/website
{
# Request headers that should be forwarded to the remote server.
/clientheaders
{
# Forward all request headers that are end-to-end. If you want
# to forward a specific set of headers, you'll have to list
# them here.
"*"
}
# Hostname globbing for farm selection (virtual domain addressing)
/virtualhosts
{
# Entries will be compared against the "Host" request header
# and an optional request URL prefix.
#
# Examples:
#
# www.company.com
# intranet.*
# myhost:8888/mysite
"*"
}
# The load will be balanced among these render instances
/renders
{
/rend01
{
# Hostname or IP of the render
/hostname "127.0.0.1"
# Port of the render
/port "4503"
# Connect timeout in milliseconds, 0 to wait indefinitely
# /timeout "0"
}
}
# The filter section defines the requests that should be handled by the dispatcher.
#
# Entries can be either specified using globs, or elements of the request line:
#
# (1) globs will be compared against the entire request line, e.g.:
#
# /0001 { /type "deny" /glob "* /index.html *" }
#
# denies request "GET /index.html HTTP/1.1" but not "GET /index.html?a=b HTTP/1.1".
#
# (2) method/url/query/protocol/path/selectors/extension/suffix will be compared
# against the respective elements of the request line, e.g.:
#
# /0001 { /type "deny" /method "GET" /url "/index.html" }
#
# denies both "GET /index.html" and "GET /index.html?a=b HTTP/1.1".
#
# (3) all elements of the request line can also be specified as regular expressions,
# which are identified by using single quotes, e.g.
#
# /0001 { /type "allow" /method '(GET|HEAD)' }
#
# allows GET or HEAD requests, or:
#
# /0002 { /type "deny" /extension '()' }
#
# denies requests having no extension.
#
# Note: specifying elements of the request line is the preferred method.
#
/filter
{
# Deny everything first and then allow specific entries
/0001 { /type "allow" /glob "*" }
# Open consoles
# /0011 { /type "allow" /url "/admin/*" } # allow servlet engine admin
# /0012 { /type "allow" /url "/crx/*" } # allow content repository
# /0013 { /type "allow" /url "/system/*" } # allow OSGi console
# Allow non-public content directories
# /0021 { /type "allow" /url "/apps/*" } # allow apps access
# /0022 { /type "allow" /url "/bin/*" }
/0023 { /type "allow" /url "/content*" } # disable this rule to allow mapped content only
# /0024 { /type "allow" /url "/libs/*" }
# /0025 { /type "deny" /url "/libs/shindig/proxy*" } # if you enable /libs close access to proxy
# /0026 { /type "allow" /url "/home/*" }
# /0027 { /type "allow" /url "/tmp/*" }
# /0028 { /type "allow" /url "/var/*" }
# Enable extensions in non-public content directories, using a regular expression
/0041
{
/type "allow"
/extension '(css|gif|ico|js|png|swf|jpe?g)'
}
/0042
{
/type "allow"
/extension '(html)'
}
# Enable features
/0062 { /type "allow" /url "/libs/cq/personalization/*" } # enable personalization
# Deny content grabbing, on all accessible pages, using regular expressions
/0081
{
/type "deny"
/selectors '((sys|doc)view|query|[0-9-]+)'
/extension '(json|xml)'
}
# Deny content grabbing for /content
/0082
{
/type "deny"
/path "/content"
/selectors '(feed|rss|pages|languages|blueprint|infinity|tidy)'
/extension '(json|xml|html)'
}
# /0087 { /type "allow" /method "GET" /extension 'json' "*.1.json" } # allow one-level json requests
}
# The cache section regulates what responses will be cached and where.
/cache
{
# The docroot must be equal to the document root of the webserver. The
# dispatcher will store files relative to this directory and subsequent
# requests may be "declined" by the dispatcher, allowing the webserver
# to deliver them just like static files.
/docroot "/var/log/apache2/cache"
# Sets the level up to which files named ".stat" will be created in the
# document root of the webserver. When an activation request for some
# page is received, only files within the same subtree are affected
# by the invalidation.
#/statfileslevel "0"
# Flag indicating whether to cache responses to requests that contain
# authorization information.
#/allowAuthorized "1"
# Flag indicating whether the dispatcher should serve stale content if
# no remote server is available.
#/serveStaleOnError "0"
# The rules section defines what responses should be cached based on
# the requested URL. Please note that only the following requests can
# lead to cacheable responses:
#
# - HTTP method is GET
# - URL has an extension
# - Request has no query string
# - Request has no "Authorization" header (unless allowAuthorized is 1)
/rules
{
/0000
{
# the globbing pattern to be compared against the url
# example: * -> everything
# : /foo/bar.* -> only the /foo/bar documents
# : /foo/bar/* -> all pages below /foo/bar
# : /foo/bar[./]* -> all pages below and /foo/bar itself
# : *.html -> all .html files
/glob "*"
/type "allow"
}
}
# The invalidate section defines the pages that are "invalidated" after
# any activation. Please note that the activated page itself and all
# related documents are flushed on a modification. For example: if the
# page /foo/bar is activated, all /foo/bar.* files are removed from the
# cache.
/invalidate
{
/0000
{
/glob "*"
/type "deny"
}
/0001
{
# Consider all HTML files stale after an activation.
/glob "*.html"
/type "allow"
}
/0002
{
/glob "/etc/segmentation.segment.js"
/type "allow"
}
/0003
{
/glob "*/analytics.sitecatalyst.js"
/type "allow"
}
}
# The allowedClients section restricts the client IP addresses that are
# allowed to issue activation requests.
/allowedClients
{
# Uncomment the following to restrict activation requests to originate
# from "localhost" only.
#
#/0000
# {
# /glob "*"
# /type "deny"
# }
#/0001
# {
# /glob "127.0.0.1"
# /type "allow"
# }
}
# The ignoreUrlParams section contains query string parameter names that
# should be ignored when determining whether some request's output can be
# cached or delivered from cache.
#
# In this example configuration, the "q" parameter will be ignored.
#/ignoreUrlParams
# {
# /0001 { /glob "*" /type "deny" }
# /0002 { /glob "q" /type "allow" }
# }
# Cache response headers next to a cached file. On the first request to
# an uncached resource, all headers matching one of the values found here
# are stored in a separate file, next to the cache file. On subsequent
# requests to the cached resource, the stored headers are added to the
# response.
#
# Note, that file globbing characters are not allowed here.
#
#/headers
# {
# "Cache-Control"
# "Content-Disposition"
# "Content-Type"
# "Expires"
# "Last-Modified"
# "X-Content-Type-Options"
# }
# A grace period defines the number of seconds a stale, auto-invalidated
# resource may still be served from the cache after the last activation
# occurring. Auto-invalidated resources are invalidated by any activation,
# when their path matches the /invalidate section above. This setting
# can be used in a setup, where a batch of activations would otherwise
# repeatedly invalidate the entire cache.
#
#/gracePeriod "2"
# Enable TTL evaluates the response headers from the backend, and if they
# contain a Cache-Control max-age or Expires date, an auxiliary, empty file
# next to the cache file is created, with modification time equal to the
# expiry date. When the cache file is requested past the modification time
# it is automatically re-requested from the backend.
#
# /enableTTL "1"
}
# The statistics section dictates how the load should be balanced among the
# renders according to the media-type.
/statistics
{
/categories
{
/html
{
/glob "*.html"
}
/others
{
/glob "*"
}
}
}
# Authorization checker: before a page in the cache is delivered, a HEAD
# request is sent to the URL specified in /url with the query string
# '?uri='. If the response status is 200 (OK), the page is returned
# from the cache. Otherwise, the request is forwarded to the render and
# its response returned.
#
# Only pages matching the /filter section below are checked, all other pages
# get delivered unchecked.
#
# All header lines returned from the auth_checker's HEAD request that match
# the /headers section will be returned as well.
#
#/auth_checker
# {
# /url "/bin/permissioncheck.html"
# /filter
# {
# /0000
# {
# /glob "*"
# /type "deny"
# }
# /0001
# {
# /glob "*.html"
# /type "allow"
# }
# }
# /headers
# {
# /0000
# {
# /glob "*"
# /type "deny"
# }
# /0001
# {
# /glob "Set-Cookie:*"
# /type "allow"
# }
# }
# }
}
}
You need to configure the dispatcher module as well (see the httpd.conf sketch below).
To enable caching you have to specifically configure the /cache section; to be more precise, you need some "allow" rules in the /cache -> /rules section.
The easiest one:
/0001 { /type "allow" /glob "*" }
Read the manual: https://docs.adobe.com/docs/en/dispatcher/disp-config.html
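For the module side, here is a minimal sketch of the httpd.conf wiring; the paths are hypothetical examples, while the directive names are the ones documented for the AEM dispatcher module:
<IfModule disp_apache2.c>
    # Location of the dispatcher.any shown above (hypothetical path)
    DispatcherConfig    /private/etc/apache2/dispatcher.any
    # Dispatcher log file and verbosity (3 = debug while troubleshooting)
    DispatcherLog       /var/log/apache2/dispatcher.log
    DispatcherLogLevel  3
    DispatcherPassError 0
</IfModule>
# Hand requests for the document root over to the dispatcher handler
<Directory />
    SetHandler dispatcher-handler
    Options FollowSymLinks
    AllowOverride None
</Directory>
Also note that, per the comment in the /cache section, /docroot must be equal to Apache's DocumentRoot; otherwise the webserver cannot deliver the cached files as static files.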
I am running Kafka and InfluxDB on Docker.
I have created a digital twin on Ditto that correctly updates when I send a message with MQTT.
I want the data to be sent from Ditto to InfluxDB, but once I create the bucket in InfluxDB it shows no data whatsoever.
I have followed this guide: https://www.influxdata.com/blog/getting-started-apache-kafka-influxdb/
(I know it is for a Python program, but the steps should be the same; I just use the Telegraf Kafka consumer plugin instead of the one used in the guide.)
I have created the connection and the Telegraf configuration file, but nothing happens in InfluxDB.
Here is the telegraf.conf:
[[outputs.influxdb_v2]]
## The URLs of the InfluxDB cluster nodes.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["http://localhost:8086"]
## API token for authentication.
token = "$INFLUX_TOKEN"
## Organization is the name of the organization you wish to write to; must exist.
organization = "digital"
## Destination bucket to write into.
bucket = "arduino"
## The value of this tag will be used to determine the bucket. If this
## tag is not set the 'bucket' option is used as the default.
# bucket_tag = ""
## If true, the bucket tag will not be added to the metric.
# exclude_bucket_tag = false
## Timeout for HTTP messages.
# timeout = "5s"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Proxy override, if unset values the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## HTTP User-Agent
# user_agent = "telegraf"
## Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"
## Enable or disable uint support for writing uints influxdb 2.0.
# influx_uint_support = false
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
# Read metrics from Kafka topics
[[inputs.kafka_consumer]]
## Kafka brokers.
brokers = ["localhost:9092"]
## Topics to consume.
topics = ["arduino"]
## When set this tag will be added to all metrics with the topic as the value.
# topic_tag = ""
## Optional Client id
# client_id = "Telegraf"
## Set the minimal supported Kafka version. Setting this enables the use of new
## Kafka features and APIs. Must be 0.10.2.0 or greater.
## ex: version = "1.1.0"
# version = ""
## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## SASL authentication credentials. These settings should typically be used
## with TLS encryption enabled
# sasl_username = "kafka"
# sasl_password = "secret"
## Optional SASL:
## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
## (defaults to PLAIN)
# sasl_mechanism = ""
## used if sasl_mechanism is GSSAPI (experimental)
# sasl_gssapi_service_name = ""
# ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
# sasl_gssapi_auth_type = "KRB5_USER_AUTH"
# sasl_gssapi_kerberos_config_path = "/"
# sasl_gssapi_realm = "realm"
# sasl_gssapi_key_tab_path = ""
# sasl_gssapi_disable_pafxfast = false
## used if sasl_mechanism is OAUTHBEARER (experimental)
# sasl_access_token = ""
## SASL protocol version. When connecting to Azure EventHub set to 0.
# sasl_version = 1
# Disable Kafka metadata full fetch
# metadata_full = false
## Name of the consumer group.
# consumer_group = "telegraf_metrics_consumers"
## Compression codec represents the various compression codecs recognized by
## Kafka in messages.
## 0 : None
## 1 : Gzip
## 2 : Snappy
## 3 : LZ4
## 4 : ZSTD
# compression_codec = 0
## Initial offset position; one of "oldest" or "newest".
# offset = "oldest"
## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
# balance_strategy = "range"
## Maximum length of a message to consume, in bytes (default 0/unlimited);
## larger messages are dropped
max_message_len = 1000000
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Maximum amount of time the consumer should take to process messages. If
## the debug log prints messages from sarama about 'abandoning subscription
## to [topic] because consuming was taking too long', increase this value to
## longer than the time taken by the output plugin(s).
##
## Note that the effective timeout could be between 'max_processing_time' and
## '2 * max_processing_time'.
# max_processing_time = "100ms"
## The default number of message bytes to fetch from the broker in each
## request (default 1MB). This should be larger than the majority of
## your messages, or else the consumer will spend a lot of time
## negotiating sizes and not actually consuming. Similar to the JVM's
## `fetch.message.max.bytes`.
# consumer_fetch_default = "1MB"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
The Kafka connection as it appears in the Ditto explorer:
{
"id": "0ab4b527-617f-4f4f-8bac-4ffa4b5a8471",
"name": "Kafka 2.x",
"connectionType": "kafka",
"connectionStatus": "open",
"uri": "tcp://192.168.109.74:9092",
"sources": [
{
"addresses": [
"arduino"
],
"consumerCount": 1,
"qos": 1,
"authorizationContext": [
"nginx:ditto"
],
"enforcement": {
"input": "{{ header:device_id }}",
"filters": [
"{{ entity:id }}"
]
},
"acknowledgementRequests": {
"includes": []
},
"headerMapping": {},
"payloadMapping": [
"Ditto"
],
"replyTarget": {
"address": "theReplyTopic",
"headerMapping": {},
"expectedResponseTypes": [
"response",
"error",
"nack"
],
"enabled": true
}
}
],
"targets": [
{
"address": "topic/key",
"topics": [
"_/_/things/twin/events",
"_/_/things/live/messages"
],
"authorizationContext": [
"nginx:ditto"
],
"headerMapping": {}
}
],
"clientCount": 1,
"failoverEnabled": true,
"validateCertificates": true,
"processorPoolSize": 1,
"specificConfig": {
"saslMechanism": "plain",
"bootstrapServers": "localhost:9092"
},
"tags": []
}
The policy file for Ditto:
{
"policyId": "my.test:policy1",
"entries": {
"owner": {
"subjects": {
"nginx:ditto": {
"type": "nginx basic auth user"
}
},
"resources": {
"thing:/": {
"grant": ["READ","WRITE"],
"revoke": []
},
"policy:/": {
"grant": ["READ","WRITE"],
"revoke": []
},
"message:/": {
"grant": ["READ","WRITE"],
"revoke": []
}
}
},
"observer": {
"subjects": {
"ditto:observer": {
"type": "observer user"
}
},
"resources": {
"thing:/features": {
"grant": ["READ"],
"revoke": []
},
"policy:/": {
"grant": ["READ"],
"revoke": []
},
"message:/": {
"grant": ["READ"],
"revoke": []
}
}
}
}
}
When Telegraf reads data from Kafka, it needs to transform it into time-series metrics that InfluxDB can digest. You have correctly selected the JSON parser, but additional configuration may be required, or even the more powerful json_v2 parser, to set the tags and fields based on the JSON data.
My suggestion is to use the [[outputs.file]] output to see if anything is even getting through; probably nothing will show up. Then do the following (see the sketch after this list):
determine what your JSON looks like in Kafka;
decide what you want that JSON to look like as time-series data in InfluxDB;
use the json_v2 parser to set the appropriate tags and fields.
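A rough sketch of what that could look like in telegraf.conf; the GJSON paths below are hypothetical placeholders, so replace them with the structure of the Ditto messages you actually see on the topic:
[[outputs.file]]
  # Temporary debug output: prints every metric Telegraf would write, so you
  # can confirm whether anything is coming off the Kafka topic at all.
  files = ["stdout"]
  data_format = "influx"
[[inputs.kafka_consumer]]
  brokers = ["localhost:9092"]
  topics = ["arduino"]
  # Switch from the plain "json" parser to json_v2 with explicit mappings.
  data_format = "json_v2"
  [[inputs.kafka_consumer.json_v2]]
    measurement_name = "arduino"                  # measurement name in InfluxDB
    [[inputs.kafka_consumer.json_v2.tag]]
      path = "thingId"                            # hypothetical: tag by the Ditto thing ID
    [[inputs.kafka_consumer.json_v2.field]]
      path = "features.sensor.properties.value"   # hypothetical path to the numeric value
      type = "float"
Once metrics show up on stdout, point the mappings at the real payload fields and remove the file output.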
I'm working on automating a HashiCorp Vault process, and I need to repeatedly run the vault operator init command because of trial-and-error testing. I tried uninstalling Vault and installing it again, but that doesn't seem to remove the previously generated unseal keys and root token. How can I do this?
I read somewhere that I needed to delete my storage "file" path, which I already did, but it's not working (actually my /opt/vault/data/ directory is empty). Here is my vault.hcl file:
# Full configuration options can be found at https://www.vaultproject.io/docs/configuration
ui = true
#mlock = true
#disable_mlock = true
storage "file" {
path = "/opt/vault/data"
}
#storage "consul" {
# address = "127.0.0.1:8500"
# path = "vault"
#}
# HTTP listener
#listener "tcp" {
# address = "127.0.0.1:8200"
# tls_disable = 1
#}
# HTTPS listener
listener "tcp" {
address = "0.0.0.0:8200"
tls_cert_file = "/opt/vault/tls/tls.crt"
tls_key_file = "/opt/vault/tls/tls.key"
}
# Enterprise license_path
# This will be required for enterprise as of v1.8
#license_path = "/etc/vault.d/vault.hclic"
# Example AWS KMS auto unseal
#seal "awskms" {
# region = "us-east-1"
# kms_key_id = "REPLACE-ME"
#}
# Example HSM auto unseal
#seal "pkcs11" {
# lib = "/usr/vault/lib/libCryptoki2_64.so"
# slot = "0"
# pin = "AAAA-BBBB-CCCC-DDDD"
# key_label = "vault-hsm-key"
# hmac_key_label = "vault-hsm-hmac-key"
#}
Best practice for this type of setup is actually Terraform or Chef or any other stateful transformer. That way you can bring the environment to an ideal state (terraform apply) and tear it down just as easily (terraform destroy).
To re-init Vault, you can bring it down, delete the data folder ("/opt/vault/data" in your case), and bring up another instance.
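A minimal sketch, assuming Vault runs as a systemd service named vault (adjust the service name and path to your install):
# Stop Vault, wipe the file storage backend, and start a fresh instance.
sudo systemctl stop vault
sudo rm -rf /opt/vault/data/*       # removes all previously initialized state
rm -f ~/.vault-token                # cached root token from earlier logins, if present
sudo systemctl start vault
vault operator init                 # generates new unseal keys and a new root token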
Delete /opt/vault/data
Reboot your computer
(You may also need to delete the file located at ~/.vault-token.)
If you only want to do testing, why don't you run Vault in dev mode?
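In dev mode Vault keeps everything in memory, so each restart gives you a freshly initialized, already unsealed instance:
vault server -dev    # in-memory storage; prints a new unseal key and root token on startup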
We use Symfony with API Platform in Docker, and we have a problem with Varnish.
For local development, Varnish works with the default.vcl configuration file (https://github.com/api-platform/api-platform/blob/master/api/docker/varnish/conf/default.vcl). When deployed to the server, Varnish returns the error "Error (null) Backend fetch failed".
When Varnish is disabled and requests are redirected to nginx with PHP-FPM, API Platform works properly.
I increased the http_resp_hdr_len parameter to 131072 bytes (128K) and http_resp_size to 10485760 bytes (10 MB), but it doesn't help and the error remains.
Docker command to start Varnish:
CMD ["varnishd", "-F", "-f", "/etc/varnish/default.vcl", "-p", "http_resp_hdr_len=131072", "-p", "http_resp_size=10485760"]
The parameter .first_byte_timeout = 600s; was also added to default.vcl.
default.vcl
vcl 4.0;
import std;
backend default {
.host = "api";
.port = "80";
.first_byte_timeout = 600s;
# Health check
.probe = {
.url = "/";
.timeout = 5s;
.interval = 50s;
.window = 5;
.threshold = 3;
}
}
# Hosts allowed to send BAN requests
acl invalidators {
"localhost";
"php";
# local Kubernetes network
"10.0.0.0"/8;
"172.16.0.0"/12;
"192.168.0.0"/16;
}
sub vcl_recv {
if (req.restarts > 0) {
set req.hash_always_miss = true;
}
# Remove the "Forwarded" HTTP header if exists (security)
unset req.http.forwarded;
# To allow API Platform to ban by cache tags
if (req.method == "BAN") {
if (client.ip !~ invalidators) {
return (synth(405, "Not allowed"));
}
if (req.http.ApiPlatform-Ban-Regex) {
ban("obj.http.Cache-Tags ~ " + req.http.ApiPlatform-Ban-Regex);
return (synth(200, "Ban added"));
}
return (synth(400, "ApiPlatform-Ban-Regex HTTP header must be set."));
}
# For health checks
if (req.method == "GET" && req.url == "/healthz") {
return (synth(200, "OK"));
}
}
sub vcl_hit {
if (obj.ttl >= 0s) {
# A pure unadulterated hit, deliver it
return (deliver);
}
if (std.healthy(req.backend_hint)) {
# The backend is healthy
# Fetch the object from the backend
return (restart);
}
# No fresh object and the backend is not healthy
if (obj.ttl + obj.grace > 0s) {
# Deliver graced object
# Automatically triggers a background fetch
return (deliver);
}
# No valid object to deliver
# No healthy backend to handle request
# Return error
return (synth(503, "API is down"));
}
sub vcl_deliver {
# Don't send cache tags related headers to the client
unset resp.http.url;
# Comment the following line to send the "Cache-Tags" header to the client (e.g. to use CloudFlare cache tags)
unset resp.http.Cache-Tags;
}
sub vcl_backend_response {
# Ban lurker friendly header
set beresp.http.url = bereq.url;
# Add a grace in case the backend is down
set beresp.grace = 1h;
}
Please advise what might be the problem with Varnish and how to make it work correctly.
The problem was solved as follows: I took the parameters for running varnishd from thomasmoreaumaster (https://github.com/api-platform/api-platform/issues/1367).
CMD ["varnishd", "-F", "-f", "/etc/varnish/default.vcl", "-p", "http_resp_hdr_len=128k", "-p", "http_resp_size=128k", "-p", "http_req_hdr_len=64k", "-p", "workspace_backend=256k", "-p", "workspace_client=256k", "-p", "http_max_hdr=256"]
In the proxying nginx for API Platform, I also removed the directory binding with SSL and set up an nginx proxy with SSL enabled via a volume.
Now Varnish works well.
Thanks for your help and support.
Problem: We recently changed the IP address of the staging server. We are using Capistrano to deploy a Rails application. After changing the server IP address, running the command cap develop deploy (develop is the branch name) no longer works. Please find the config files below.
deploy.rb
# config valid for current version and patch releases of Capistrano
lock "~> 3.10.0"
set :application, "app_name"
set :repo_url, "git@bitbucket.org:repo.git"
set :branch, :develop
set :deploy_to, '/home/deploy/app_name'
set :pty, true
set :linked_files, %w{config/mongoid.yml config/application.yml}
set :linked_dirs, %w{ bin log tmp/pids tmp/cache tmp/sockets vendor/bundle public/system public/uploads}
set :keep_releases, 5
set :rvm_type, :user
set :rvm_ruby_version, 'ruby-2.3.1' # Edit this if you are using MRI Ruby
set :bundle_binstubs, nil
set :puma_rackup, -> { File.join(current_path, 'config.ru') }
set :puma_state, "#{shared_path}/tmp/pids/puma.state"
set :puma_pid, "#{shared_path}/tmp/pids/puma.pid"
set :puma_bind, "unix://#{shared_path}/tmp/sockets/puma.sock" #accept array for multi-bind
set :puma_conf, "#{shared_path}/puma.rb"
set :puma_access_log, "#{shared_path}/log/puma_error.log"
set :puma_error_log, "#{shared_path}/log/puma_access.log"
set :puma_role, :app
set :puma_env, fetch(:rack_env, fetch(:rails_env, 'production'))
set :puma_threads, [0, 8]
set :puma_workers, 0
set :puma_worker_timeout, nil
set :puma_init_active_record, false
set :puma_preload_app, false
# Default branch is :master
# ask :branch, `git rev-parse --abbrev-ref HEAD`.chomp
# Default deploy_to directory is /var/www/my_app_name
# set :deploy_to, "/var/www/my_app_name"
# Default value for :format is :airbrussh.
# set :format, :airbrussh
# You can configure the Airbrussh format using :format_options.
# These are the defaults.
# set :format_options, command_output: true, log_file: "log/capistrano.log", color: :auto, truncate: :auto
# Default value for :pty is false
# set :pty, true
# Default value for :linked_files is []
# append :linked_files, "config/database.yml", "config/secrets.yml"
# Default value for linked_dirs is []
# append :linked_dirs, "log", "tmp/pids", "tmp/cache", "tmp/sockets", "public/system"
# Default value for default_env is {}
# set :default_env, { path: "/opt/ruby/bin:$PATH" }
# Default value for local_user is ENV['USER']
# set :local_user, -> { `git config user.name`.chomp }
# Default value for keep_releases is 5
# set :keep_releases, 5
# Uncomment the following to require manually verifying the host key before first deploy.
# set :ssh_options, verify_host_key: :secure
namespace :deploy do
after :restart, :clear_cache do
on roles(:web), in: :groups, limit: 3, wait: 10 do
# Here we can do anything such as:
# within release_path do
# execute :rake, 'cache:clear'
# end
end
end
end
config/deploy/develop.rb
# server-based syntax
# ======================
# Defines a single server with a list of roles and multiple properties.
# You can define all roles on a single server, or split them:
# server "example.com", user: "deploy", roles: %w{app db web}, my_property: :my_value
# server "example.com", user: "deploy", roles: %w{app web}, other_property: :other_value
# server "db.example.com", user: "deploy", roles: %w{db}
server '<new_ip>', user: 'deploy', roles: %w{web app db}
# role-based syntax
# ==================
# Defines a role with one or multiple servers. The primary server in each
# group is considered to be the first unless any hosts have the primary
# property set. Specify the username and a domain or IP for the server.
# Don't use `:all`, it's a meta role.
# role :app, %w{deploy@example.com}, my_property: :my_value
# role :web, %w{user1@primary.com user2@additional.com}, other_property: :other_value
# role :db, %w{deploy@example.com}
# Configuration
# =============
# You can set any configuration variable like in config/deploy.rb
# These variables are then only loaded and set in this stage.
# For available Capistrano configuration variables see the documentation page.
# http://capistranorb.com/documentation/getting-started/configuration/
# Feel free to add new variables to customise your setup.
# Custom SSH Options
# ==================
# You may pass any option but keep in mind that net/ssh understands a
# limited set of options, consult the Net::SSH documentation.
# http://net-ssh.github.io/net-ssh/classes/Net/SSH.html#method-c-start
#
# Global options
# --------------
# set :ssh_options, {
# keys: %w(/home/rlisowski/.ssh/id_rsa),
# forward_agent: false,
# auth_methods: %w(password)
# }
#
# The server-based syntax can be used to override options:
# ------------------------------------
# server "example.com",
# user: "user_name",
# roles: %w{web app},
# ssh_options: {
# user: "user_name", # overrides user setting above
# keys: %w(/home/user_name/.ssh/id_rsa),
# forward_agent: false,
# auth_methods: %w(publickey password)
# # password: "please use keys"
# }
Not sure what we are missing; any help would be appreciated.
So, I just installed GitLab on my server. I'm running the bundled nginx on port 256, and I've set up HTTPS using Let's Encrypt. There's still a small problem: you can access it through a plain HTTP address, which throws an nginx error since my external address is https://example.com:256. So I set the redirect_http_to_https setting, and now all requests just time out... Any ideas?
My gitlab.rb config:
## Url on which GitLab will be reachable.
## For more details on configuring external_url see:
## https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/config$
external_url 'https://example.com:256'
#####################
# GitLab Web server #
#####################
## see: https://gitlab.com/gitlab-org/omnibus-gitlab/tree/master/doc/settings/nginx.md#using-a-non-bundled-web-server
## When bundled nginx is disabled we need to add the external webserver user to the GitLab webserver group.
# web_server['external_users'] = []
# web_server['username'] = 'gitlab-www'
# web_server['group'] = 'gitlab-www'
# web_server['uid'] = nil
# web_server['gid'] = nil
# web_server['shell'] = '/bin/false'
# web_server['home'] = '/var/opt/gitlab/nginx'
################
# GitLab Nginx #
################
## see: https://gitlab.com/gitlab-org/omnibus-gitlab/tree/master/doc/settings/nginx.md
nginx['enable'] = true
# nginx['client_max_body_size'] = '250m'
nginx['redirect_http_to_https'] = true
# nginx['redirect_http_to_https_port'] = 8080
# nginx['ssl_client_certificate'] = "/etc/gitlab/ssl/ca.crt" # Most root CA's are included by default
# nginx['ssl_verify_client'] = "off" # enable/disable 2-way SSL client authentication
# nginx['ssl_verify_depth'] = "1" # if ssl_verify_client on, verification depth in the client certificates chain
nginx['ssl_certificate'] = "/etc/letsencrypt/live/example.com-0001/fullchain.pem"
nginx['ssl_certificate_key'] = "/etc/letsencrypt/live/example.com-0001/privkey.pem"
# nginx['ssl_ciphers'] = "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256"
# nginx['ssl_prefer_server_ciphers'] = "on"
# nginx['ssl_protocols'] = "TLSv1 TLSv1.1 TLSv1.2" # recommended by https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html & https://cipherli.st/
# nginx['ssl_session_cache'] = "builtin:1000 shared:SSL:10m" # recommended in http://nginx.org/en/docs/http/ngx_http_ssl_module.html
# nginx['ssl_session_timeout'] = "5m" # default according to http://nginx.org/en/docs/http/ngx_http_ssl_module.html
# nginx['ssl_dhparam'] = nil # Path to dhparams.pem, eg. /etc/gitlab/ssl/dhparams.pem
# nginx['listen_addresses'] = ['*']
# nginx['listen_port'] = nil # override only if you use a reverse proxy: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/nginx.md#setting-the-nginx-listen-port
# nginx['listen_https'] = nil # override only if your reverse proxy internally communicates over HTTP: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/nginx.md#supporting-proxied-ssl
nginx['custom_gitlab_server_config'] = "location ^~ /.well-known { root /var/www/letsencrypt; }"
# nginx['custom_nginx_config'] = "include /etc/nginx/conf.d/example.conf;"
# nginx['proxy_read_timeout'] = 3600
# nginx['proxy_connect_timeout'] = 300
# nginx['proxy_set_headers'] = {
# "Host" => "$http_host",
# "X-Real-IP" => "$remote_addr",
# "X-Forwarded-For" => "$proxy_add_x_forwarded_for",
# "X-Forwarded-Proto" => "https",
# "X-Forwarded-Ssl" => "on"
# }
# nginx['proxy_cache_path'] = 'proxy_cache keys_zone=gitlab:10m max_size=1g levels=1:2'
# nginx['proxy_cache'] = 'gitlab'
# nginx['http2_enabled'] = true
# nginx['real_ip_trusted_addresses'] = []
# nginx['real_ip_header'] = nil
# nginx['real_ip_recursive'] = nil
Uncomment the following line:
# nginx['redirect_http_to_https_port'] = 8080
and change it to port 80, like the following:
nginx['redirect_http_to_https_port'] = 80
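Assuming the Omnibus package, the relevant gitlab.rb lines then end up looking roughly like this:
external_url 'https://example.com:256'
nginx['redirect_http_to_https'] = true
nginx['redirect_http_to_https_port'] = 80   # listen on plain port 80 and redirect to the HTTPS URL above
Then run sudo gitlab-ctl reconfigure to apply the change. Port 80 also has to be reachable (not blocked by a firewall or taken by another service) for the redirect to work.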