converting capistrano v2 deploy.rb to capistrano v3 deploy.rb

Trying to convert this block of code to version 3 of Capistrano, to no avail. Everything has changed so much. Can anyone help me get on the right track to convert it?
Old version 2 deploy.rb:
#require 'new_relic/recipes'
require 'bundler/capistrano'
require './config/boot'
require 'whenever/capistrano'
require 'rvm/capistrano'
require 'bundler/capistrano'
require 'puma/capistrano'
set :application, "books"
set :whenever_command, "bundle exec whenever"
set :rvm_type, :system
set :rvm_ruby_string, ENV['GEM_HOME'].gsub(/.*\//,'')
set :scm, :git
set :repository, "/srv/books.git"
set :bundle_flags, "--deployment --binstubs"
set (:bundle_cmd) { "#{release_path}/bin/bundle" }
set :branch, "master"
set :migrate_target, :current
set :ssh_options, {:forward_agent => true}
set :rails_env, "production"
set :deploy_to, "/srv/books"
set :normalize_asset_timestamps, false
set :keep_releases, 5
after "deploy:update", "deploy:cleanup"
#, "deploy:eye:stop", "deploy:eye:start"
set :user, "root"
set :group, ""
set :use_sudo, false
default_run_options[:pty] = true
#set :port, 5984
#ssh_options[:port] = 5984
set :port, 22
ssh_options[:port] = 22
role :web, "x"
role :app, "x"
role :db, "x", :primary => true
set(:latest_release) { fetch(:current_path) }
set(:release_path) { fetch(:current_path) }
set(:current_release) { fetch(:current_path) }
set(:current_revision) { capture("cd #{current_path}; git rev-parse --short HEAD").strip }
set(:latest_revision) { capture("cd #{current_path}; git rev-parse --short HEAD").strip }
set(:previous_revision) { capture("cd #{current_path}; git rev-parse --short HEAD@{1}").strip }
default_environment["RAILS_ENV"] = 'production'
#before :deploy, 'pgsql:backup'#, "deploy:rvm:trust_rvmrc"
load 'config/recipes/asset_pipeline.cap'
load 'config/recipes/database.cap'
load 'config/recipes/misc'
#load 'config/recipes/performance'
load 'config/recipes/rvm'
load 'config/recipes/web.cap'
load 'config/recipes/eye.cap'
#new relic
#after "deploy", "newrelic:notice_deployment"
#after "deploy:update", "newrelic:notice_deployment"
#after "deploy:migrations", "newrelic:notice_deployment"
#after "deploy:cold", "newrelic:notice_deployment"
namespace :deploy do
  desc "Deploy your application"
  task :default do
    cleanlog
    update
    migrate
    #sitemap
    restart_nginx
  end

  desc "Setup your git-based deployment app."
  task :setup, :except => {:no_release => true} do
    dirs = [deploy_to, shared_path]
    dirs += shared_children.map { |d| File.join(shared_path, d) }
    run "#{try_sudo} mkdir -p #{dirs.join(' ')} && #{try_sudo} chmod g+w #{dirs.join(' ')}"
    run "git clone #{repository} #{current_path}"
  end

  task :cold do
    update
    migrate
  end

  desc "Update the deployed code."
  task :update_code, :except => {:no_release => true} do
    run "cd #{current_path}; git fetch origin; git reset --hard #{branch}"
    finalize_update
  end

  desc "Update the database (overwritten to avoid symlink)"
  task :migrations do
    transaction do
      update_code
    end
    migrate
    restart
  end

  task :finalize_update, :except => {:no_release => true} do
    run "chmod -R g+w #{latest_release}" if fetch(:group_writable, true)
    # mkdir -p is making sure that the directories are there for some SCMs that don't
    # save empty folders
    # ln -sf #{shared_path}/database.yml #{latest_release}/config/database.yml
    run <<-CMD
      rm -rf #{latest_release}/log #{latest_release}/public/system #{latest_release}/tmp/pids &&
      mkdir -p #{latest_release}/public &&
      mkdir -p #{latest_release}/tmp &&
      ln -s #{shared_path}/log #{latest_release}/log &&
      ln -s #{shared_path}/system #{latest_release}/public/system &&
      ln -s #{shared_path}/pids #{latest_release}/tmp/pids
    CMD
    if fetch(:normalize_asset_timestamps, true)
      stamp = Time.now.utc.strftime("%Y%m%d%H%M.%S")
      asset_paths = fetch(:public_children, %w(images stylesheets javascripts)).map { |p| "#{latest_release}/public/#{p}" }.join(" ")
      run "find #{asset_paths} -exec touch -t #{stamp} {} ';'; true", :env => {"TZ" => "UTC"}
    end
  end

  desc "Zero-downtime restart of Unicorn"
  task :restart, :except => {:no_release => true} do
    run "#{try_sudo} touch #{File.join(current_path, 'tmp', 'restart.txt')}"
    run "kill -s USR2 `cat /srv/books/shared/tmp/pids/unicorn.pid`"
    deploy.eye.stop
    deploy.eye.start
  end

  desc "Reload the database with seed data"
  task :seed do
    run "cd #{current_path}; bundle exec rake db:seed RAILS_ENV=#{rails_env}"
  end

  namespace :rollback do
    desc "Moves the repo back to the previous version of HEAD"
    task :repo, :except => {:no_release => true} do
      set :branch, "HEAD@{1}"
      deploy.default
    end

    desc "Rewrite reflog so HEAD@{1} will continue to point at the next previous release."
    task :cleanup, :except => {:no_release => true} do
      run "cd #{current_path}; git reflog delete --rewrite HEAD@{1}; git reflog delete --rewrite HEAD@{1}"
    end

    desc "Rolls back to the previously deployed version."
    task :default do
      rollback.repo
      rollback.cleanup
    end
  end
end

def run_rake(cmd)
  run "cd #{current_path}; #{rake} #{cmd}"
end
New version 3 deploy.rb:
set :application, 'books'
set :repo_url, 'root@prod:srv/books.git'
# ask :branch, proc { `git rev-parse --abbrev-ref HEAD`.chomp }
# set :deploy_to, '/var/www/my_app'
# set :scm, :git
# set :format, :pretty
# set :log_level, :debug
# set :pty, true
# set :linked_files, %w{config/database.yml}
# set :linked_dirs, %w{bin log tmp/pids tmp/cache tmp/sockets vendor/bundle public/system}
# set :default_env, { path: "/opt/ruby/bin:$PATH" }
# set :keep_releases, 5
namespace :deploy do
  desc 'Restart application'
  task :restart do
    on roles(:app), in: :sequence, wait: 5 do
      # Your restart mechanism here, for example:
      # execute :touch, release_path.join('tmp/restart.txt')
    end
  end

  after :restart, :clear_cache do
    on roles(:web), in: :groups, limit: 3, wait: 10 do
      # Here we can do anything such as:
      # within release_path do
      #   execute :rake, 'cache:clear'
      # end
    end
  end

  after :finishing, 'deploy:cleanup'

  ## CUSTOM NEEDS UPDATE
  desc "Clearing the production log"
  task :cleanlog do
    run "cd #{current_path}; rake log:clear"
  end

  desc "Refresh the sitemap."
  task :sitemap do
    run "cd #{current_path}; rake sitemap:refresh"
  end

  task :update do
    transaction do
      update_code
    end
  end
end

Within every task you need to add the role, like this:
task :update do
  on roles(:web) do
    transaction do
      update_code
    end
  end
end
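For the custom tasks, the v2-style run strings also need to become execute calls inside the role block. A minimal sketch of the cleanlog task converted to the v3 DSL (the role choice and rails_env handling here are assumptions; adjust to your setup):
namespace :deploy do
  desc "Clear the production log"
  task :cleanlog do
    on roles(:app) do
      # v3 replaces `run "cd ...; rake ..."` with within/execute
      within current_path do
        with rails_env: fetch(:rails_env, 'production') do
          execute :rake, 'log:clear'
        end
      end
    end
  end
end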

Related

CircleCI run failed on delete k8s resource

I have CircleCI set up and running fine normally; it helps with creating deployments for me. Today I suddenly had an issue with the deployment-creation step, due to an error related to Kubernetes.
My config.yml follows the doc from https://circleci.com/developer/orbs/orb/circleci/kubernetes
Here is my version of setup in the config file:
version: 2.1
orbs:
  kube-orb: circleci/kubernetes@1.3.0

commands:
  docker-check:
    steps:
      - docker/check:
          docker-username: MY_USERNAME
          docker-password: MY_PASS
          registry: $DOCKER_REGISTRY

jobs:
  create-deployment:
    executor: aws-eks/python3
    parameters:
      cluster-name:
        description: Name of the EKS cluster
        type: string
    steps:
      - checkout
      # It failed on this step
      - kube-orb/delete-resource:
          now: true
          resource-names: my-frontend-deployment
          resource-types: deployments
          wait: true
Below is a copy of the error log
#!/bin/bash -eo pipefail
#!/bin/bash
RESOURCE_FILE_PATH=$(eval echo "$PARAM_RESOURCE_FILE_PATH")
RESOURCE_TYPES=$(eval echo "$PARAM_RESOURCE_TYPES")
RESOURCE_NAMES=$(eval echo "$PARAM_RESOURCE_NAMES")
LABEL_SELECTOR=$(eval echo "$PARAM_LABEL_SELECTOR")
ALL=$(eval echo "$PARAM_ALL")
CASCADE=$(eval echo "$PARAM_CASCADE")
FORCE=$(eval echo "$PARAM_FORCE")
GRACE_PERIOD=$(eval echo "$PARAM_GRACE_PERIOD")
IGNORE_NOT_FOUND=$(eval echo "$PARAM_IGNORE_NOT_FOUND")
NOW=$(eval echo "$PARAM_NOW")
WAIT=$(eval echo "$PARAM_WAIT")
NAMESPACE=$(eval echo "$PARAM_NAMESPACE")
DRY_RUN=$(eval echo "$PARAM_DRY_RUN")
KUSTOMIZE=$(eval echo "$PARAM_KUSTOMIZE")
if [ -n "${RESOURCE_FILE_PATH}" ]; then
  if [ "${KUSTOMIZE}" == "1" ]; then
    set -- "$@" -k
  else
    set -- "$@" -f
  fi
  set -- "$@" "${RESOURCE_FILE_PATH}"
elif [ -n "${RESOURCE_TYPES}" ]; then
  set -- "$@" "${RESOURCE_TYPES}"
  if [ -n "${RESOURCE_NAMES}" ]; then
    set -- "$@" "${RESOURCE_NAMES}"
  elif [ -n "${LABEL_SELECTOR}" ]; then
    set -- "$@" -l
    set -- "$@" "${LABEL_SELECTOR}"
  fi
fi
if [ "${ALL}" == "true" ]; then
  set -- "$@" --all=true
fi
if [ "${FORCE}" == "true" ]; then
  set -- "$@" --force=true
fi
if [ "${GRACE_PERIOD}" != "-1" ]; then
  set -- "$@" --grace-period="${GRACE_PERIOD}"
fi
if [ "${IGNORE_NOT_FOUND}" == "true" ]; then
  set -- "$@" --ignore-not-found=true
fi
if [ "${NOW}" == "true" ]; then
  set -- "$@" --now=true
fi
if [ -n "${NAMESPACE}" ]; then
  set -- "$@" --namespace="${NAMESPACE}"
fi
if [ -n "${DRY_RUN}" ]; then
  set -- "$@" --dry-run="${DRY_RUN}"
fi
set -- "$@" --wait="${WAIT}"
set -- "$@" --cascade="${CASCADE}"
if [ "$SHOW_EKSCTL_COMMAND" == "1" ]; then
  set -x
fi
kubectl delete "$@"
if [ "$SHOW_EKSCTL_COMMAND" == "1" ]; then
  set +x
fi
error: exec plugin: invalid apiVersion "client.authentication.k8s.io/v1alpha1"
Exited with code exit status 1
CircleCI received exit code 1
Does anyone have an idea what is wrong with it? I'm not sure whether the issue is happening on the CircleCI side or the Kubernetes side.
I had been facing the exact same issue since yesterday morning (16 hours ago). Then, taking @Gavy's advice, I simply added this in my config.yml:
steps:
  - checkout
  # !!! HERE !!!
  - kubernetes/install-kubectl:
      kubectl-version: v1.23.5
  - run:
And now it works. (Pinning kubectl to v1.23.x avoids the problem: the client.authentication.k8s.io/v1alpha1 exec-plugin API that the generated kubeconfig uses was removed in kubectl 1.24.) Hope it helps.

Terraform Azure Pipeline is not getting the workspace

I have a pipeline created in Azure DevOps that uses the Terraform module. I have been able to run my pipeline, but I'm having issues getting it to detect the created workspace.
The pipeline tasks are described below:
The bash script creates the workspace in case it doesn't exist, here you can see the script:
#!/bin/bash
echo "*************************************************************"
echo "* Create or select workspace *"
echo "*************************************************************"
if [ $(terraform workspace list | grep -c "$1") -eq 0 ]; then
  echo "** Create new workspace $1 **"
  terraform workspace new "$1" -no-color
else
  echo "** Switch to workspace $1 **"
  terraform workspace select "$1" -no-color
fi
I'm certain that the workspace has been created but the terraform subsequent tasks are not picking up the workspace.
You can see it is using default instead of development. This is in the terraform plan task:
2021-03-12T18:13:48.0424826Z   # azurerm_resource_group.k8s will be created
2021-03-12T18:13:48.0426216Z   + resource "azurerm_resource_group" "k8s" {
2021-03-12T18:13:48.0427763Z       + id       = (known after apply)
2021-03-12T18:13:48.0428525Z       + location = "eastus"
2021-03-12T18:13:48.0429278Z       + name     = "default-k8s"
2021-03-12T18:13:48.0430000Z       + tags     = {
2021-03-12T18:13:48.0430713Z           + "environment" = "default"
2021-03-12T18:13:48.0431181Z         }
2021-03-12T18:13:48.0431534Z     }
Has someone faced this issue before? If so, any advice on having the Terraform tasks detect the workspace that was created in the bash script?
I was missing a key detail in the bash script task: the working directory where I wanted my scripts to be executed.
That setting is in the Advanced section of the task. Without that path, the scripts were running in the wrong place.
As a result I have the development workspace and the resource group development-k8s.
2021-03-12T19:40:13.7898170Z   # azurerm_resource_group.k8s will be created
2021-03-12T19:40:13.7898875Z   + resource "azurerm_resource_group" "k8s" {
2021-03-12T19:40:13.7928911Z       + id       = (known after apply)
2021-03-12T19:40:13.7930291Z       + location = "eastus"
2021-03-12T19:40:13.7954850Z       + name     = "development-k8s"
2021-03-12T19:40:13.7955573Z       + tags     = {
2021-03-12T19:40:13.7956351Z           + "environment" = "development"
2021-03-12T19:40:13.7956951Z         }
2021-03-12T19:40:13.7957351Z     }
I hope it saves you the several hours I spent going back and forth through the process :)

Capistrano deployment is not happening after Server IP Change

Problem: Recently we changed the IP address of the stage server. We are using Capistrano for deploying a Rails application. After changing the server IP address, when we run the command cap develop deploy (develop is the branch name), it does not work. Please find the config files below.
deploy.rb
# config valid for current version and patch releases of Capistrano
lock "~> 3.10.0"
set :application, "app_name"
set :repo_url, "git@bitbucket.org:repo.git"
set :branch, :develop
set :deploy_to, '/home/deploy/app_name'
set :pty, true
set :linked_files, %w{config/mongoid.yml config/application.yml}
set :linked_dirs, %w{ bin log tmp/pids tmp/cache tmp/sockets vendor/bundle public/system public/uploads}
set :keep_releases, 5
set :rvm_type, :user
set :rvm_ruby_version, 'ruby-2.3.1' # Edit this if you are using MRI Ruby
set :bundle_binstubs, nil
set :puma_rackup, -> { File.join(current_path, 'config.ru') }
set :puma_state, "#{shared_path}/tmp/pids/puma.state"
set :puma_pid, "#{shared_path}/tmp/pids/puma.pid"
set :puma_bind, "unix://#{shared_path}/tmp/sockets/puma.sock" #accept array for multi-bind
set :puma_conf, "#{shared_path}/puma.rb"
set :puma_access_log, "#{shared_path}/log/puma_error.log"
set :puma_error_log, "#{shared_path}/log/puma_access.log"
set :puma_role, :app
set :puma_env, fetch(:rack_env, fetch(:rails_env, 'production'))
set :puma_threads, [0, 8]
set :puma_workers, 0
set :puma_worker_timeout, nil
set :puma_init_active_record, false
set :puma_preload_app, false
# Default branch is :master
# ask :branch, `git rev-parse --abbrev-ref HEAD`.chomp
# Default deploy_to directory is /var/www/my_app_name
# set :deploy_to, "/var/www/my_app_name"
# Default value for :format is :airbrussh.
# set :format, :airbrussh
# You can configure the Airbrussh format using :format_options.
# These are the defaults.
# set :format_options, command_output: true, log_file: "log/capistrano.log", color: :auto, truncate: :auto
# Default value for :pty is false
# set :pty, true
# Default value for :linked_files is []
# append :linked_files, "config/database.yml", "config/secrets.yml"
# Default value for linked_dirs is []
# append :linked_dirs, "log", "tmp/pids", "tmp/cache", "tmp/sockets", "public/system"
# Default value for default_env is {}
# set :default_env, { path: "/opt/ruby/bin:$PATH" }
# Default value for local_user is ENV['USER']
# set :local_user, -> { `git config user.name`.chomp }
# Default value for keep_releases is 5
# set :keep_releases, 5
# Uncomment the following to require manually verifying the host key before first deploy.
# set :ssh_options, verify_host_key: :secure
namespace :deploy do
  after :restart, :clear_cache do
    on roles(:web), in: :groups, limit: 3, wait: 10 do
      # Here we can do anything such as:
      # within release_path do
      #   execute :rake, 'cache:clear'
      # end
    end
  end
end
config/deploy/develop.rb
# server-based syntax
# ======================
# Defines a single server with a list of roles and multiple properties.
# You can define all roles on a single server, or split them:
# server "example.com", user: "deploy", roles: %w{app db web}, my_property: :my_value
# server "example.com", user: "deploy", roles: %w{app web}, other_property: :other_value
# server "db.example.com", user: "deploy", roles: %w{db}
server '<new_ip>', user: 'deploy', roles: %w{web app db}
# role-based syntax
# ==================
# Defines a role with one or multiple servers. The primary server in each
# group is considered to be the first unless any hosts have the primary
# property set. Specify the username and a domain or IP for the server.
# Don't use `:all`, it's a meta role.
# role :app, %w{deploy@example.com}, my_property: :my_value
# role :web, %w{user1@primary.com user2@additional.com}, other_property: :other_value
# role :db, %w{deploy@example.com}
# Configuration
# =============
# You can set any configuration variable like in config/deploy.rb
# These variables are then only loaded and set in this stage.
# For available Capistrano configuration variables see the documentation page.
# http://capistranorb.com/documentation/getting-started/configuration/
# Feel free to add new variables to customise your setup.
# Custom SSH Options
# ==================
# You may pass any option but keep in mind that net/ssh understands a
# limited set of options, consult the Net::SSH documentation.
# http://net-ssh.github.io/net-ssh/classes/Net/SSH.html#method-c-start
#
# Global options
# --------------
# set :ssh_options, {
# keys: %w(/home/rlisowski/.ssh/id_rsa),
# forward_agent: false,
# auth_methods: %w(password)
# }
#
# The server-based syntax can be used to override options:
# ------------------------------------
# server "example.com",
# user: "user_name",
# roles: %w{web app},
# ssh_options: {
# user: "user_name", # overrides user setting above
# keys: %w(/home/user_name/.ssh/id_rsa),
# forward_agent: false,
# auth_methods: %w(publickey password)
# # password: "please use keys"
# }
Not sure what we are missing; any help would be appreciated.

chef service start_command not working

I'm trying to launch a node process as a service using forever, but the configuration is not working correctly. What's wrong with it?
execute "npm install -g forever"
restart_command_string = "forever restart /#{studio_server_folder}/#{studio_server_script}"
reload_command_string = "forever restart /#{studio_server_folder}/#{studio_server_script}"
start_command_string = "forever start /#{studio_server_folder}/#{studio_server_script}"
stop_command_string = "forever stop /#{studio_server_folder}/#{studio_server_script}"
status_command_string = "if [ $(forever list | grep -c \"studio-server\") -gt 0 ]; then echo 1; else echo 0; fi"
# execute "if [ $(forever list | grep -c \"studio-server\") -gt 0 ]; then #{restart_command_string}; else #{start_command}; fi"
service 'studio-server' do
  supports :status => true, :restart => true, :reload => true
  start_command start_command_string
  reload_command reload_command_string
  stop_command stop_command_string
  status_command status_command_string
  restart_command restart_command_string
  action [:start]
end
execute 'service --status-all >> /servicestatus'
That status command isn't a command, it is a fragment of bash script and thus is unlikely to work. In general I would highly recommend using a real service manager like supervisord or systemd.
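For example, a minimal sketch of the systemd route using Chef's built-in systemd_unit resource (the unit name, paths, and user below are assumptions; adjust them to your app):
# Hypothetical unit for the studio-server process; paths and user are assumptions.
systemd_unit 'studio-server.service' do
  content <<~UNIT
    [Unit]
    Description=studio-server node process
    After=network.target

    [Service]
    ExecStart=/usr/bin/node /srv/studio-server/server.js
    Restart=always
    User=deploy

    [Install]
    WantedBy=multi-user.target
  UNIT
  action [:create, :enable, :start]
end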

Capistrano compile assets error - assets:precompile:nondigest?

My App seems to be deploying correctly but I'm getting this error:
* executing "cd /home/deploy/tomahawk/releases/20120208222225 && bundle exec rake RAILS_ENV=production RAILS_GROUPS=assets assets:precompile"
servers: ["ip_address"]
[ip_address] executing command
*** [err :: ip_address] /opt/ruby/bin/ruby /opt/ruby/bin/rake assets:precompile:nondigest RAILS_ENV=production RAILS_GROUPS=assets
I've tried solutions here for trying to compile assets: http://lassebunk.dk/2011/09/03/getting-your-assets-to-work-when-upgrading-to-rails-3-1/
And Here: http://railsmonkey.net/2011/08/deploying-rails-3-1-applications-with-capistrano/
And here: http://dev.af83.com/2011/09/30/capistrano-rails-3-1-assets-can-be-tricky.html
Here is my deploy.rb :
require "bundler/capistrano"
load 'deploy/assets'
set :default_environment, {
  'PATH' => "/opt/ruby/bin/:$PATH"
}
set :application, "tomahawk"
set :repository, "repo_goes_here"
set :deploy_to, "/home/deploy/#{application}"
set :rails_env, 'production'
set :branch, "master"
set :scm, :git
set :user, "deploy"
set :runner, "deploy"
set :use_sudo, true
role :web, "my_ip"
role :app, "my_ip"
role :db, "my_ip", :primary => true
set :normalize_asset_timestamps, false
after "deploy", "deploy:cleanup"
namespace :deploy do
  desc "Restarting mod_rails with restart.txt"
  task :restart, :roles => :app, :except => { :no_release => true } do
    run "touch #{current_path}/tmp/restart.txt"
  end

  [:start, :stop].each do |t|
    desc "#{t} task is a no-op with mod_rails"
    task t, :roles => :domain do ; end
  end
end

task :after_update_code do
  run "ln -nfs #{deploy_to}/shared/config/database.yml #{release_path}/config/database.yml"
end
First, don't forget to add the gems below:
group :production do
  gem 'therubyracer'
  gem 'execjs'
end
Then, in your cap file, just add this line to your after_update_code task:
run "cd #{release_path}; rake assets:precompile RAILS_ENV=production"
this worked fine for me ;)
cheers,
Gregory HORION
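Putting that together with the after_update_code task from the question above, the result would look like this (a sketch; the paths are taken from the question's deploy.rb):
task :after_update_code do
  # symlink the shared database.yml, then precompile assets in the new release
  run "ln -nfs #{deploy_to}/shared/config/database.yml #{release_path}/config/database.yml"
  run "cd #{release_path}; rake assets:precompile RAILS_ENV=production"
end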
I had the same problem. I added this to my deploy.rb (to pass the '--trace' option):
namespace :deploy do
  namespace :assets do
    task :precompile, :roles => :web, :except => { :no_release => true } do
      run "cd #{current_path} && #{rake} RAILS_ENV=#{rails_env} RAILS_GROUPS=assets assets:precompile --trace"
    end
  end
end
And the error seems to be just a notice:
*** [err :: my-server] ** Invoke assets:precompile (first_time)
...
I later noticed that Capistrano wasn't able to delete old releases; I got an error:
*** [err :: ip_address] sudo: no tty present and no askpass program specified
I found this link regarding this error:
http://www.mail-archive.com/capistrano@googlegroups.com/msg07323.html
I had to add this line to my deploy file:
default_run_options[:pty] = true
This also solved the weird error I was getting above.
The official explanation, which I don't understand :)
No default PTY. Prior to 2.1, Capistrano would request a pseudo-tty for each command that it executed. This had the side-effect of causing the profile scripts for the user to not be loaded. Well, no more! As of 2.1, Capistrano no longer requests a pty on each command, which means your .profile (or .bashrc, or whatever) will be properly loaded on each command! Note, however, that some have reported on some systems, when a pty is not allocated, some commands will go into non-interactive mode automatically. If you’re not seeing commands prompt like they used to, like svn or passwd, you can return to the previous behavior by adding the following line to your capfile: default_run_options[:pty] = true
Here's what worked for me:
1) Add rvm-capistrano to your Gemfile
2) In config/deploy.rb, add the lines:
require 'rvm/capistrano'
set :rvm_ruby_string, '1.9.2' # Set to your version number
3) You may also need to set :rvm_type and :rvm_bin_path. See this Ninjahideout blog that goes into more detail.
4) apt-get/yum install nodejs on your server
(See my reply to this related Stackoverflow question.)
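A sketch of those settings together (the version string and the system-wide path below are assumptions; point them at your own RVM install):
# config/deploy.rb
require 'rvm/capistrano'
set :rvm_ruby_string, '1.9.2'            # set to your version number
# Only needed if RVM is installed system-wide rather than per-user:
set :rvm_type, :system
set :rvm_bin_path, '/usr/local/rvm/bin'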
The message you see is the output of rake assets:precompile.
To avoid the default output when you run rake assets:precompile, the solution is to add -q after your command (for example: rake assets:precompile -q).
The analysis is below, if you want to see it:
# :gem_path/actionpack/lib/sprockets/assets.rake
namespace :assets do
  # task entry, it will call invoke_or_reboot_rake_task
  task :precompile do
    invoke_or_reboot_rake_task "assets:precompile:all"
  end

  # it will call ruby_rake_task
  def invoke_or_reboot_rake_task(task)
    ruby_rake_task task
  end

  # it will call ruby
  def ruby_rake_task(task, fork = true)
    env    = ENV['RAILS_ENV'] || 'production'
    groups = ENV['RAILS_GROUPS'] || 'assets'
    args   = [$0, task, "RAILS_ENV=#{env}", "RAILS_GROUPS=#{groups}"]
    ruby(*args)
  end
end
# :gem_path/rake/file_utils.rb
module FileUtils
  # it will call sh
  def ruby(*args, &block)
    options = (Hash === args.last) ? args.pop : {}
    sh(*([RUBY] + args + [options]), &block)
  end

  # it will call set_verbose_option;
  # if options[:verbose] == true, it does not output cmd,
  # but the default of options[:verbose] is an object
  def sh(*cmd, &block)
    # ...
    set_verbose_option(options)
    # ...
    Rake.rake_output_message cmd.join(" ") if options[:verbose]
    # ...
  end

  # default of options[:verbose] is Rake::FileUtilsExt::DEFAULT, which is an object
  def set_verbose_option(options) # :nodoc:
    unless options.key? :verbose
      options[:verbose] =
        Rake::FileUtilsExt.verbose_flag == Rake::FileUtilsExt::DEFAULT ||
        Rake::FileUtilsExt.verbose_flag
    end
  end
end
# :gem_path/rake/file_utils_ext.rb
module Rake
  module FileUtilsExt
    DEFAULT = Object.new
  end
end

# :gem_path/rake/application.rb
# the only way to avoid the disgusting output when running `rake assets:precompile`
# is to add a `-q` option.
['--quiet', '-q',
 "Do not log messages to standard output.",
 lambda { |value| Rake.verbose(false) }
],
['--verbose', '-v',
 "Log message to standard output.",
 lambda { |value| Rake.verbose(true) }
],
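If you prefer to silence it from code rather than the command line, the same flag can be flipped in your Rakefile (a sketch using the same Rake.verbose call that the -q option invokes above):
# Rakefile -- equivalent to passing -q on the command line
Rake.verbose(false)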