Problem: We recently changed the IP address of our staging server. We use Capistrano to deploy a Rails application. After changing the server's IP address, running cap develop deploy (develop is the stage/branch name) no longer works. Please find the config files below.
deploy.rb
# config valid for current version and patch releases of Capistrano
lock "~> 3.10.0"
set :application, "app_name"
set :repo_url, "git@bitbucket.org:repo.git"
set :branch, :develop
set :deploy_to, '/home/deploy/app_name'
set :pty, true
set :linked_files, %w{config/mongoid.yml config/application.yml}
set :linked_dirs, %w{ bin log tmp/pids tmp/cache tmp/sockets vendor/bundle public/system public/uploads}
set :keep_releases, 5
set :rvm_type, :user
set :rvm_ruby_version, 'ruby-2.3.1' # Edit this if you are using MRI Ruby
set :bundle_binstubs, nil
set :puma_rackup, -> { File.join(current_path, 'config.ru') }
set :puma_state, "#{shared_path}/tmp/pids/puma.state"
set :puma_pid, "#{shared_path}/tmp/pids/puma.pid"
set :puma_bind, "unix://#{shared_path}/tmp/sockets/puma.sock" #accept array for multi-bind
set :puma_conf, "#{shared_path}/puma.rb"
set :puma_access_log, "#{shared_path}/log/puma_access.log"
set :puma_error_log, "#{shared_path}/log/puma_error.log"
set :puma_role, :app
set :puma_env, fetch(:rack_env, fetch(:rails_env, 'production'))
set :puma_threads, [0, 8]
set :puma_workers, 0
set :puma_worker_timeout, nil
set :puma_init_active_record, false
set :puma_preload_app, false
# Default branch is :master
# ask :branch, `git rev-parse --abbrev-ref HEAD`.chomp
# Default deploy_to directory is /var/www/my_app_name
# set :deploy_to, "/var/www/my_app_name"
# Default value for :format is :airbrussh.
# set :format, :airbrussh
# You can configure the Airbrussh format using :format_options.
# These are the defaults.
# set :format_options, command_output: true, log_file: "log/capistrano.log", color: :auto, truncate: :auto
# Default value for :pty is false
# set :pty, true
# Default value for :linked_files is []
# append :linked_files, "config/database.yml", "config/secrets.yml"
# Default value for linked_dirs is []
# append :linked_dirs, "log", "tmp/pids", "tmp/cache", "tmp/sockets", "public/system"
# Default value for default_env is {}
# set :default_env, { path: "/opt/ruby/bin:$PATH" }
# Default value for local_user is ENV['USER']
# set :local_user, -> { `git config user.name`.chomp }
# Default value for keep_releases is 5
# set :keep_releases, 5
# Uncomment the following to require manually verifying the host key before first deploy.
# set :ssh_options, verify_host_key: :secure
namespace :deploy do
  after :restart, :clear_cache do
    on roles(:web), in: :groups, limit: 3, wait: 10 do
      # Here we can do anything such as:
      # within release_path do
      #   execute :rake, 'cache:clear'
      # end
    end
  end
end
config/deploy/develop.rb
# server-based syntax
# ======================
# Defines a single server with a list of roles and multiple properties.
# You can define all roles on a single server, or split them:
# server "example.com", user: "deploy", roles: %w{app db web}, my_property: :my_value
# server "example.com", user: "deploy", roles: %w{app web}, other_property: :other_value
# server "db.example.com", user: "deploy", roles: %w{db}
server '<new_ip>', user: 'deploy', roles: %w{web app db}
# role-based syntax
# ==================
# Defines a role with one or multiple servers. The primary server in each
# group is considered to be the first unless any hosts have the primary
# property set. Specify the username and a domain or IP for the server.
# Don't use `:all`, it's a meta role.
# role :app, %w{deploy@example.com}, my_property: :my_value
# role :web, %w{user1@primary.com user2@additional.com}, other_property: :other_value
# role :db, %w{deploy@example.com}
# Configuration
# =============
# You can set any configuration variable like in config/deploy.rb
# These variables are then only loaded and set in this stage.
# For available Capistrano configuration variables see the documentation page.
# http://capistranorb.com/documentation/getting-started/configuration/
# Feel free to add new variables to customise your setup.
# Custom SSH Options
# ==================
# You may pass any option but keep in mind that net/ssh understands a
# limited set of options, consult the Net::SSH documentation.
# http://net-ssh.github.io/net-ssh/classes/Net/SSH.html#method-c-start
#
# Global options
# --------------
# set :ssh_options, {
#   keys: %w(/home/rlisowski/.ssh/id_rsa),
#   forward_agent: false,
#   auth_methods: %w(password)
# }
#
# The server-based syntax can be used to override options:
# ------------------------------------
# server "example.com",
# user: "user_name",
# roles: %w{web app},
# ssh_options: {
# user: "user_name", # overrides user setting above
# keys: %w(/home/user_name/.ssh/id_rsa),
# forward_agent: false,
# auth_methods: %w(publickey password)
# # password: "please use keys"
# }
Not sure what we are missing; any help would be appreciated.
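One thing worth checking first (an assumption on my part, since the config itself looks stage-independent): whether the deploy user can still reach the new address over SSH at all, and whether a stale host key is being rejected. For example:

ssh-keygen -R <new_ip>          # drop any stale known_hosts entry for the new address
ssh deploy@<new_ip> 'echo ok'   # confirm non-interactive, key-based SSH works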
I'm trying to set up ElastAlert for OpenSearch 2.8. I wrote this config:
# This is the folder that contains the rule yaml files
# Any .yaml file will be loaded as a rule
rules_folder: /etc/elastalert/rules
# How often ElastAlert will query Elasticsearch
# The unit can be anything from weeks to seconds
run_every:
  minutes: 1
# ElastAlert will buffer results from the most recent
# period of time, in case some log sources are not in real time
buffer_time:
  minutes: 15
# The Elasticsearch hostname for metadata writeback
# Note that every rule can have its own Elasticsearch host
es_host: localhost
# The Elasticsearch port
es_port: 9200
# The AWS region to use. Set this when using AWS-managed elasticsearch
#aws_region: us-east-1
# The AWS profile to use. Use this if you are using an aws-cli profile.
# See http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
# for details
#profile: test
# Optional URL prefix for Elasticsearch
#es_url_prefix: elasticsearch
# Connect with TLS to Elasticsearch
use_ssl: True
# GET request with body is the default option for Elasticsearch.
# If it fails for some reason, you can pass 'GET', 'POST' or 'source'.
# See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport
# for details
# es_send_get_body_as: GET
# Option basic-auth username and password for Elasticsearch
es_username: admin
es_password: password
# Use SSL authentication with client certificates client_cert must be
# a pem file containing both cert and key for client
verify_certs: False
#ca_certs: /path/to/cacert.pem
#client_cert: /path/to/client_cert.pem
#client_key: /path/to/client_key.key
# The index on es_host which is used for metadata storage
# This can be an unmapped index, but it is recommended that you run
# elastalert-create-index to set a mapping
writeback_index: elastalert_status
writeback_alias: elastalert_alerts
# If an alert fails for some reason, ElastAlert will retry
# sending the alert until this time period has elapsed
alert_time_limit:
  days: 2
...and the rule file:
# Alert when the rate of events exceeds a threshold

# (Optional)
# Elasticsearch host
es_host: localhost

# (Optional)
# Elasticsearch port
es_port: 9200

# (Optional) Connect with SSL to Elasticsearch
use_ssl: True
ssl_show_warn: False
verify_certs: False

# (Optional) basic-auth username and password for Elasticsearch
# es_username: admin
# es_password: ytnhfvgkby

# (Required)
# Rule name, must be unique
name: Loopdetect

# (Required)
# Type of alert.
# the frequency rule type alerts when num_events events occur within timeframe time
type: any

# (Required)
# Index to search, wildcard supported
index: syslog-20221104

# (Required, frequency specific)
# Alert when this many documents matching the query occur within a timeframe
num_events: 1

# (Required, frequency specific)
# num_events must occur within this amount of time to trigger an alert
timeframe:
  hours: 24

# (Required)
# A list of Elasticsearch filters used to find events
# These filters are joined with AND and nested in a filtered query
# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
# filter:
# - term:
#     process.name: "JUSTME"
filter:
- query:
    query_string:
      query: "message: *loop*"

# (Required)
# The alert to use when a match is found
alert:
- "email"

# (required, email specific)
# a list of email addresses to send alerts to
email:
- "myemail"
But when I test this rule, I get an error:
elastalert-test-rule rules/loopdetect_alert.yaml
INFO:elastalert:Note: In debug mode, alerts will be logged to console but NOT actually sent.
To send them but remain verbose, use --verbose instead.
WARNING:elasticsearch:POST https://localhost:9200/syslog-20221104/_search?ignore_unavailable=true&size=1 [status:400 request:0.048s]
Error running your filter:
RequestError(400, 'search_phase_execution_exception', {'error': {'root_cause': [{'type': 'query_shard_exception', 'reason': 'No mapping found for [#timestamp] in order to sort on', 'index': 'syslog-20221104', 'index_uuid': 'BG6MQmmYRUyLBY3tEFykEQ'}], 'type': 'search_phase_execution_exception', 'reason': 'all shards failed', 'phase': 'query', 'grouped': True, 'failed_shards': [{'shard': 0, 'index': 'syslog-20221104', 'node': '5spTsU7-QienT8Jn064MMA', 'reason': {'type': 'query_shard_exception', 'reason': 'No mapping found for [#timestamp] in order to sort on', 'index': 'syslog-20221104', 'index_uuid': 'BG6MQmmYRUyLBY3tEFykEQ'}}]}, 'status': 400})
INFO:elastalert:Note: In debug mode, alerts will be logged to console but NOT actually sent.
To send them but remain verbose, use --verbose instead.
INFO:elastalert:1 rules loaded
INFO:apscheduler.scheduler:Adding job tentatively -- it will be properly scheduled when the scheduler starts
WARNING:elasticsearch:POST https://localhost:9200/syslog-20221104/_search?_source_includes=%40timestamp%2C%2A&ignore_unavailable=true&scroll=30s&size=10000 [status:400 request:0.039s]
ERROR:elastalert:Error running query: RequestError(400, 'search_phase_execution_exception', 'No mapping found for [#timestamp] in order to sort on')
{"writeback": {"elastalert_error": {"message": "Error running query: RequestError(400, 'search_phase_execution_exception', 'No mapping found for [#timestamp] in order to sort on')", "traceback": ["Traceback (most recent call last):", " File \"/usr/local/lib/python3.11/dist-packages/elastalert2-2.8.0-py3.11.egg/elastalert/elastalert.py\", line 370, in get_hits", " res = self.thread_data.current_es.search(", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", " File \"/usr/local/lib/python3.11/dist-packages/elasticsearch/client/utils.py\", line 152, in _wrapped", " return func(*args, params=params, headers=headers, **kwargs)", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", " File \"/usr/local/lib/python3.11/dist-packages/elasticsearch/client/__init__.py\", line 1658, in search", " return self.transport.perform_request(", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", " File \"/usr/local/lib/python3.11/dist-packages/elasticsearch/transport.py\", line 392, in perform_request", " raise e", " File \"/usr/local/lib/python3.11/dist-packages/elasticsearch/transport.py\", line 358, in perform_request", " status, headers_response, data = connection.perform_request(", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", " File \"/usr/local/lib/python3.11/dist-packages/elasticsearch/connection/http_requests.py\", line 199, in perform_request", " self._raise_error(response.status_code, raw_data)", " File \"/usr/local/lib/python3.11/dist-packages/elasticsearch/connection/base.py\", line 315, in _raise_error", " raise HTTP_EXCEPTIONS.get(status_code, TransportError)(", "elasticsearch.exceptions.RequestError: RequestError(400, 'search_phase_execution_exception', 'No mapping found for [#timestamp] in order to sort on')"], "data": {"rule": "Loopdetect", "query": {"query": {"bool": {"filter": {"bool": {"must": [{"range": {"#timestamp": {"gt": "2022-11-03T12:12:39.618168Z", "lte": "2022-11-03T12:27:39.618168Z"}}}, {"query_string": {"query": "message: *loop*"}}]}}}}, "sort": [{"#timestamp": {"order": "asc"}}]}}}}}
But if I fetch the data with curl, it works:
curl -X GET 'https://localhost:9200/syslog-20221104/_search?ignore_unavailable=true&size=1' -u 'admin:password' --insecure
{"took":4,"timed_out":false,"_shards":{"total":1,"successful":1,"skipped":0,"failed":0},"hits":{"total":{"value":10000,"relation":"gte"},"max_score":1.0,"hits":[{"_index":"syslog-20221104","_id":"_bSKQYQB_cpiH2g_hgvj","_score":1.0,"_source":{"host":"10.53.0.35","hostname":"10.53.0.35","message":"Port 2 link up, 100Mbps FULL duplex","source_ip":"91.195.230.4","source_type":"syslog","timestamp":"2022-11-04T07:28:27Z"}}]}}
Please help me understand what I'm doing wrong. Thanks.
I added timestamp_field: timestamp to the rule, and everything works fine now!
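For reference, a minimal sketch of the relevant rule lines (the field name comes from the curl output above, where documents carry timestamp rather than the default @timestamp):

# rules/loopdetect_alert.yaml (excerpt)
name: Loopdetect
type: any
index: syslog-20221104
timestamp_field: timestamp   # documents use "timestamp", not the default "@timestamp"
filter:
- query:
    query_string:
      query: "message: *loop*"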
I'm working on automating a HashiCorp Vault process, and I need to repeatedly run the vault operator init command for trial-and-error testing. I tried uninstalling Vault and installing it again, but that doesn't seem to remove the previously generated unseal keys and root token. How can I do this?
I read somewhere that I needed to delete my storage "file" path, which I already did, but it's not working (my /opt/vault/data/ directory is actually empty). Here is my vault.hcl file:
# Full configuration options can be found at
# https://www.vaultproject.io/docs/configuration
ui = true
#mlock = true
#disable_mlock = true
storage "file" {
path = "/opt/vault/data"
}
#storage "consul" {
# address = "127.0.0.1:8500"
# path = "vault"
#}
# HTTP listener
#listener "tcp" {
# address = "127.0.0.1:8200"
# tls_disable = 1
#}
# HTTPS listener
listener "tcp" {
address = "0.0.0.0:8200"
tls_cert_file = "/opt/vault/tls/tls.crt"
tls_key_file = "/opt/vault/tls/tls.key"
}
# Enterprise license_path
# This will be required for enterprise as of v1.8
#license_path = "/etc/vault.d/vault.hclic"
# Example AWS KMS auto unseal
#seal "awskms" {
# region = "us-east-1"
# kms_key_id = "REPLACE-ME"
#}
# Example HSM auto unseal
#seal "pkcs11" {
# lib = "/usr/vault/lib/libCryptoki2_64.so"
# slot = "0"
# pin = "AAAA-BBBB-CCCC-DDDD"
# key_label = "vault-hsm-key"
# hmac_key_label = "vault-hsm-hmac-key"
#}
Best practice for this type of setup is actually Terraform or Chef or any other stateful configuration tool. That way you can bring the environment to an ideal state (terraform apply) and easily remove it (terraform destroy).
To re-init Vault, you can bring it down, delete the data folder ("/opt/vault/data" in your case), and bring up another instance.
Delete /opt/vault/data
Reboot your computer
(You may also need to delete the file located at ~/.vault-token)
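Put together, the reset sequence might look like this (a sketch, assuming Vault runs as a systemd service named vault and uses the file storage backend from the question):

sudo systemctl stop vault
sudo rm -rf /opt/vault/data/*   # wipe the file storage backend, including the old keyring
rm -f ~/.vault-token            # drop the cached root token, if any
sudo systemctl start vault
vault operator init             # produces fresh unseal keys and a new root token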
If you only want to do testing, why don't you use Vault in dev mode?
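Dev mode uses in-memory storage and starts auto-unsealed, so every restart is a clean slate. A usage sketch:

vault server -dev
# or pin a predictable root token for test scripts:
vault server -dev -dev-root-token-id="root"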
So, I just installed GitLab on my server. I'm running the bundled nginx on port 256, and I've set up HTTPS using Let's Encrypt. There's still a small problem: you can access it through a plain http address, which throws an nginx error, since my external address is https://example.com:256. So I set the redirect_http_to_https setting, and now all requests just time out... Any ideas?
My gitlab.rb config:
## Url on which GitLab will be reachable.
## For more details on configuring external_url see:
## https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/config$
external_url 'https://example.com:256'
#####################
# GitLab Web server #
#####################
## see: https://gitlab.com/gitlab-org/omnibus-gitlab/tree/master/doc/settings/nginx.md#using-a-non-bundled-web-server
## When bundled nginx is disabled we need to add the external webserver user to the GitLab webserver group.
# web_server['external_users'] = []
# web_server['username'] = 'gitlab-www'
# web_server['group'] = 'gitlab-www'
# web_server['uid'] = nil
# web_server['gid'] = nil
# web_server['shell'] = '/bin/false'
# web_server['home'] = '/var/opt/gitlab/nginx'
################
# GitLab Nginx #
################
## see: https://gitlab.com/gitlab-org/omnibus-gitlab/tree/master/doc/settings/nginx.md
nginx['enable'] = true
# nginx['client_max_body_size'] = '250m'
nginx['redirect_http_to_https'] = true
# nginx['redirect_http_to_https_port'] = 8080
# nginx['ssl_client_certificate'] = "/etc/gitlab/ssl/ca.crt" # Most root CA's are included by default
# nginx['ssl_verify_client'] = "off" # enable/disable 2-way SSL client authentication
# nginx['ssl_verify_depth'] = "1" # if ssl_verify_client on, verification depth in the client certificates chain
nginx['ssl_certificate'] = "/etc/letsencrypt/live/example.com-0001/fullchain.pem"
nginx['ssl_certificate_key'] = "/etc/letsencrypt/live/example.com-0001/privkey.pem"
# nginx['ssl_ciphers'] = "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256"
# nginx['ssl_prefer_server_ciphers'] = "on"
# nginx['ssl_protocols'] = "TLSv1 TLSv1.1 TLSv1.2" # recommended by https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html & https://cipherli.st/
# nginx['ssl_session_cache'] = "builtin:1000 shared:SSL:10m" # recommended in http://nginx.org/en/docs/http/ngx_http_ssl_module.html
# nginx['ssl_session_timeout'] = "5m" # default according to http://nginx.org/en/docs/http/ngx_http_ssl_module.html
# nginx['ssl_dhparam'] = nil # Path to dhparams.pem, eg. /etc/gitlab/ssl/dhparams.pem
# nginx['listen_addresses'] = ['*']
# nginx['listen_port'] = nil # override only if you use a reverse proxy: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/nginx.md#setting-the-nginx-listen-port
# nginx['listen_https'] = nil # override only if your reverse proxy internally communicates over HTTP: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/nginx.md#supporting-proxied-ssl
nginx['custom_gitlab_server_config'] = "location ^~ /.well-known { root /var/www/letsencrypt; }"
# nginx['custom_nginx_config'] = "include /etc/nginx/conf.d/example.conf;"
# nginx['proxy_read_timeout'] = 3600
# nginx['proxy_connect_timeout'] = 300
# nginx['proxy_set_headers'] = {
# "Host" => "$http_host",
# "X-Real-IP" => "$remote_addr",
# "X-Forwarded-For" => "$proxy_add_x_forwarded_for",
# "X-Forwarded-Proto" => "https",
# "X-Forwarded-Ssl" => "on"
# }
# nginx['proxy_cache_path'] = 'proxy_cache keys_zone=gitlab:10m max_size=1g levels=1:2'
# nginx['proxy_cache'] = 'gitlab'
# nginx['http2_enabled'] = true
# nginx['real_ip_trusted_addresses'] = []
# nginx['real_ip_header'] = nil
# nginx['real_ip_recursive'] = nil
Uncomment this line:
# nginx['redirect_http_to_https_port'] = 8080
and set it to port 80, like the following:
nginx['redirect_http_to_https_port'] = 80
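After editing /etc/gitlab/gitlab.rb, the change only takes effect once Omnibus regenerates its nginx config:

sudo gitlab-ctl reconfigure

Note this assumes port 80 is open in your firewall and not already taken by another service; otherwise the redirect listener cannot start.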
So I'm trying to use Compass and Asset Pack in a Sinatra app. My setup is the following:
require 'sinatra'
require 'bundler'
require 'sinatra/assetpack'
require 'sinatra/support'
require 'compass'
class Application < Sinatra::Base
  register Sinatra::AssetPack
  register Sinatra::CompassSupport
  register Sinatra::SimpleNavigation

  set :static, true
  set :root, File.dirname(__FILE__)
  set :public_folder, File.dirname(__FILE__) + '/public'
  set :scss, Compass.sass_engine_options
  set :scss, { :load_paths => [ "#{Application.root}/assets/css" ] }

  Compass.configuration do |config|
    config.sass_dir = "assets/css"
    config.project_path = root
    config.images_dir = "assets/images"
    config.http_generated_images_path = "/images"
    config.fonts_dir = "assets/fonts"
  end

  assets {
    serve '/fonts', from: 'assets/fonts'
    serve '/js', from: 'assets/js'         # Default
    serve '/css', from: 'assets/css'       # Default
    serve '/images', from: 'assets/images' # Default

    # The second parameter defines where the compressed version will be served.
    # (Note: that parameter is optional, AssetPack will figure it out.)
    js :app, '/js/app.js', [
      '/js/*',
    ]

    css :application, '/css/application.css', [
      '/css/style.css'
    ]

    js_compression :jsmin # :jsmin | :yui | :closure | :uglify
    css_compression :sass # :simple | :sass | :yui | :sqwish
  }

  # Routes and things here
end
With the line set :scss, Compass.sass_engine_options it seems to enable Compass, but this overrides the next line, and I then can't use SCSS partials because it doesn't know the load path. Having them on the same line has the same effect.
TL;DR: I can't get Compass and SCSS partials to work together with AssetPack.
Try merging the :scss options? It looks like you're overriding with your second set call:
set :scss, Compass.sass_engine_options.merge({ :load_paths => [ "#{Application.root}/assets/css" ] })
Also, the Compass configuration and set calls should be inside a configure block.
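A sketch of both suggestions combined (untested; concatenating the load paths is my assumption, to keep Compass's own import paths intact):

class Application < Sinatra::Base
  configure do
    Compass.configuration do |config|
      config.project_path = root
      config.sass_dir     = "assets/css"
    end

    # merge rather than override, so Compass's engine options survive
    compass_opts = Compass.sass_engine_options
    set :scss, compass_opts.merge(
      :load_paths => (compass_opts[:load_paths] || []) + ["#{root}/assets/css"]
    )
  end
end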
My app seems to be deploying correctly, but I'm getting this error:
* executing "cd /home/deploy/tomahawk/releases/20120208222225 && bundle exec rake RAILS_ENV=production RAILS_GROUPS=assets assets:precompile"
servers: ["ip_address"]
[ip_address] executing command
*** [err :: ip_address] /opt/ruby/bin/ruby /opt/ruby/bin/rake assets:precompile:nondigest RAILS_ENV=production RAILS_GROUPS=assets
I've tried solutions here for trying to compile assets: http://lassebunk.dk/2011/09/03/getting-your-assets-to-work-when-upgrading-to-rails-3-1/
And Here: http://railsmonkey.net/2011/08/deploying-rails-3-1-applications-with-capistrano/
And here: http://dev.af83.com/2011/09/30/capistrano-rails-3-1-assets-can-be-tricky.html
Here is my deploy.rb:
require "bundler/capistrano"
load 'deploy/assets'
set :default_environment, {
  'PATH' => "/opt/ruby/bin/:$PATH"
}
set :application, "tomahawk"
set :repository, "repo_goes_here"
set :deploy_to, "/home/deploy/#{application}"
set :rails_env, 'production'
set :branch, "master"
set :scm, :git
set :user, "deploy"
set :runner, "deploy"
set :use_sudo, true
role :web, "my_ip"
role :app, "my_ip"
role :db, "my_ip", :primary => true
set :normalize_asset_timestamps, false
after "deploy", "deploy:cleanup"
namespace :deploy do
  desc "Restarting mod_rails with restart.txt"
  task :restart, :roles => :app, :except => { :no_release => true } do
    run "touch #{current_path}/tmp/restart.txt"
  end

  [:start, :stop].each do |t|
    desc "#{t} task is a no-op with mod_rails"
    task t, :roles => :domain do ; end
  end
end

task :after_update_code do
  run "ln -nfs #{deploy_to}/shared/config/database.yml #{release_path}/config/database.yml"
end
First, don't forget to add the gems below:
group :production do
  gem 'therubyracer'
  gem 'execjs'
end
Then in your cap file, just add this line to your after_update_code task:
run "cd #{release_path}; rake assets:precompile RAILS_ENV=production"
This worked fine for me ;)
Cheers,
Gregory HORION
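For context, a sketch of how that line could be wired in as its own task (Capistrano 2 syntax; the task name is made up):

namespace :deploy do
  desc "Precompile assets on the server"
  task :precompile_assets, :roles => :web do
    run "cd #{release_path}; rake assets:precompile RAILS_ENV=production"
  end
end
after "deploy:update_code", "deploy:precompile_assets"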
I have the same problem. I added this to my deploy.rb (to add the '--trace' option):
namespace :deploy do
  namespace :assets do
    task :precompile, :roles => :web, :except => { :no_release => true } do
      run "cd #{current_path} && #{rake} RAILS_ENV=#{rails_env} RAILS_GROUPS=assets assets:precompile --trace"
    end
  end
end
And the error seems to be just a notice:
*** [err :: my-server] ** Invoke assets:precompile (first_time)
...
I later noticed that Capistrano wasn't able to delete old releases; I got this error:
*** [err :: ip_address] sudo: no tty present and no askpass program specified
I found this link regarding this error:
http://www.mail-archive.com/capistrano@googlegroups.com/msg07323.html
I had to add this line to my deploy file:
default_run_options[:pty] = true
This also solved the weird error I was getting above.
The official explanation, which I don't understand :)
No default PTY. Prior to 2.1, Capistrano would request a pseudo-tty for each command that it executed. This had the side-effect of causing the profile scripts for the user to not be loaded. Well, no more! As of 2.1, Capistrano no longer requests a pty on each command, which means your .profile (or .bashrc, or whatever) will be properly loaded on each command! Note, however, that some have reported on some systems, when a pty is not allocated, some commands will go into non-interactive mode automatically. If you’re not seeing commands prompt like they used to, like svn or passwd, you can return to the previous behavior by adding the following line to your capfile: default_run_options[:pty] = true
Here's what worked for me:
1) Add rvm-capistrano to your Gemfile
2) In config/deploy.rb, add the lines:
require 'rvm/capistrano'
set :rvm_ruby_string, '1.9.2' # Set to your version number
3) You may also need to set :rvm_type and :rvm_bin_path; see this Ninjahideout blog post that goes into more detail. (A sketch of these settings follows after this list.)
4) apt-get/yum install nodejs on your server
(See my reply to this related Stack Overflow question.)
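For step 3, the optional settings might look like this (both values are assumptions about how RVM is installed on the server):

set :rvm_type, :user             # assumption: per-user RVM install; use :system for system-wide
set :rvm_bin_path, "~/.rvm/bin"  # assumption: default rvm binary location for a user install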
The message you see is the output of rake assets:precompile.
When you run rake assets:precompile, how do you avoid the default output? The solution is to add -q behind your command.
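For example (a usage sketch; bundle exec is my assumption and optional):

bundle exec rake assets:precompile RAILS_ENV=production -q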
The analysis is below, if you want to see it:
# :gem_path/actionpack/lib/sprockets/assets.rake
namespace :assets do
  # task entry; it will call invoke_or_reboot_rake_task
  task :precompile do
    invoke_or_reboot_rake_task "assets:precompile:all"
  end

  # it will call ruby_rake_task
  def invoke_or_reboot_rake_task(task)
    ruby_rake_task task
  end

  # it will call ruby
  def ruby_rake_task(task, fork = true)
    env    = ENV['RAILS_ENV'] || 'production'
    groups = ENV['RAILS_GROUPS'] || 'assets'
    args   = [$0, task, "RAILS_ENV=#{env}", "RAILS_GROUPS=#{groups}"]
    ruby(*args)
  end
end

# :gem_path/rake/file_utils.rb
module FileUtils
  # it will call sh
  def ruby(*args, &block)
    options = (Hash === args.last) ? args.pop : {}
    sh(*([RUBY] + args + [options]), &block)
  end

  # it will call set_verbose_option;
  # cmd is echoed whenever options[:verbose] is truthy,
  # and the default of options[:verbose] is an object, which is truthy
  def sh(*cmd, &block)
    # ...
    set_verbose_option(options)
    # ...
    Rake.rake_output_message cmd.join(" ") if options[:verbose]
    # ...
  end

  # the default of options[:verbose] is Rake::FileUtilsExt::DEFAULT, which is an object
  def set_verbose_option(options) # :nodoc:
    unless options.key? :verbose
      options[:verbose] =
        Rake::FileUtilsExt.verbose_flag == Rake::FileUtilsExt::DEFAULT ||
        Rake::FileUtilsExt.verbose_flag
    end
  end
end

# :gem_path/rake/file_utils_ext.rb
module Rake
  module FileUtilsExt
    DEFAULT = Object.new
  end
end

# :gem_path/rake/application.rb
# the only way to suppress this output when running `rake assets:precompile`
# is to add the `-q` option:
['--quiet', '-q',
 "Do not log messages to standard output.",
 lambda { |value| Rake.verbose(false) }
],
['--verbose', '-v',
 "Log message to standard output.",
 lambda { |value| Rake.verbose(true) }
],