Destination /etc/default/kubelet does not exist - kubernetes

I am trying to install a Kubernetes cluster with Vagrant and Ansible and it does not work.
This is the error message I get:
TASK [Configure node ip] *******************************************************
fatal: [k8s-node-3]: FAILED! => {"changed": false, "msg": "Destination /etc/default/kubelet does not exist !", "rc": 257}
RUNNING HANDLER [docker status] ************************************************
PLAY RECAP *********************************************************************
k8s-node-3 : ok=10 changed=8 unreachable=0 failed=1 skipped=1 rescued=0 ignored=0
Ansible failed to complete successfully. Any error output should be
visible above. Please fix these errors and try again.
The Vagrantfile:
IMAGE_NAME = "ubuntu/bionic64"
Nodes = 3
Vagrant.configure("2") do |config|
config.ssh.insert_key = false
config.vm.provider "virtualbox" do |v|
v.memory = 1024
v.cpus = 2
end
config.vm.define "k8s-master" do |master|
master.vm.box = IMAGE_NAME
master.vm.network "private_network", ip: "192.168.99.100", name: "vboxnet0", adapter: 2
master.vm.hostname = "k8s-master"
master.vm.provision "ansible" do |ansible|
ansible.playbook = "k8s-setup/master-playbook.yml"
ansible.extra_vars = {
node_ip: "192.168.99.100",
}
end
end
(1..Nodes).each do |i|
config.vm.define "k8s-node-#{i}" do |node|
node.vm.box = IMAGE_NAME
node.vm.network "private_network", ip: "192.168.99.#{100 + i}", name: "vboxnet0", adapter: 2
node.vm.hostname = "k8s-node-#{i}"
node.vm.provision "ansible" do |ansible|
ansible.playbook = "k8s-setup/node-playbook.yml"
ansible.extra_vars = {
node_ip: "192.168.99.#{100 + i}",
}
end
end
end
end
and the master-playbook.yml file
---
- hosts: all
become: true
tasks:
- name: Install packages that allow apt to be used over HTTPS
apt:
name: "{{ packages }}"
state: present
update_cache: yes
vars:
packages:
- apt-transport-https
- ca-certificates
- curl
- gnupg-agent
- software-properties-common
- name: Add an apt signing key for Docker
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
- name: Add apt repository for stable version
apt_repository:
repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable
state: present
- name: Install docker and its dependencies
apt:
name: "{{ packages }}"
state: present
update_cache: yes
vars:
packages:
- docker-ce
- docker-ce-cli
- containerd.io
notify:
- docker status
- name: Add vagrant user to docker group
user:
name: vagrant
group: docker
- name: Remove swapfile from /etc/fstab
mount:
name: "{{ item }}"
fstype: swap
state: absent
with_items:
- swap
- none
- name: Disable swap
command: swapoff -a
when: ansible_swaptotal_mb > 0
- name: Add an apt signing key for Kubernetes
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present
- name: Adding apt repository for Kubernetes
apt_repository:
repo: deb https://apt.kubernetes.io/ kubernetes-xenial main
state: present
filename: kubernetes.list
- name: Install Kubernetes binaries
apt:
name: "{{ packages }}"
state: present
update_cache: yes
vars:
packages:
- kubelet
- kubeadm
- kubectl
- name: Configure node ip
lineinfile:
path: /etc/default/kubelet
line: KUBELET_EXTRA_ARGS=--node-ip={{ node_ip }}
- name: Restart kubelet
service:
name: kubelet
daemon_reload: yes
state: restarted
- name: Initialize the Kubernetes cluster using kubeadm
command: kubeadm init --apiserver-advertise-address="192.168.99.100" --apiserver-cert-extra-sans="192.168.99.100" --node-name k8s-master --pod-network-cidr=192.168.0.0/16
- name: Setup kubeconfig for vagrant user
command: "{{ item }}"
with_items:
- mkdir -p /home/vagrant/.kube
- cp -i /etc/kubernetes/admin.conf /home/vagrant/.kube/config
- chown vagrant:vagrant /home/vagrant/.kube/config
- name: Install calico pod network
become: false
command: kubectl create -f https://docs.projectcalico.org/v3.4/getting-started/kubernetes/installation/hosted/calico.yaml
- name: Generate join command
command: kubeadm token create --print-join-command
register: join_command
- name: Copy join command to local file
local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command"
handlers:
- name: docker status
service: name=docker state=started
and the node-playbook.yml
---
- hosts: all
become: true
tasks:
- name: Install packages that allow apt to be used over HTTPS
apt:
name: "{{ packages }}"
state: present
update_cache: yes
vars:
packages:
- apt-transport-https
- ca-certificates
- curl
- gnupg-agent
- software-properties-common
- name: Add an apt signing key for Docker
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
- name: Add apt repository for stable version
apt_repository:
repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable
state: present
- name: Install docker and its dependencies
apt:
name: "{{ packages }}"
state: present
update_cache: yes
vars:
packages:
- docker-ce
- docker-ce-cli
- containerd.io
notify:
- docker status
- name: Add vagrant user to docker group
user:
name: vagrant
group: docker
- name: Remove swapfile from /etc/fstab
mount:
name: "{{ item }}"
fstype: swap
state: absent
with_items:
- swap
- none
- name: Disable swap
command: swapoff -a
when: ansible_swaptotal_mb > 0
- name: Add an apt signing key for Kubernetes
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present
- name: Adding apt repository for Kubernetes
apt_repository:
repo: deb https://apt.kubernetes.io/ kubernetes-xenial main
state: present
filename: kubernetes.list
- name: Install Kubernetes binaries
apt:
name: "{{ packages }}"
state: present
update_cache: yes
vars:
packages:
- kubelet
- kubeadm
- kubectl
- name: Configure node ip
lineinfile:
path: /etc/default/kubelet
line: KUBELET_EXTRA_ARGS=--node-ip={{ node_ip }}
- name: Restart kubelet
service:
name: kubelet
daemon_reload: yes
state: restarted
- name: Copy the join command to server location
copy: src=join-command dest=/tmp/join-command.sh mode=0777
- name: Join the node to cluster
command: sh /tmp/join-command.sh
handlers:
- name: docker status
service: name=docker state=started
What is wrong? Why can the kubelet file not be found?

The error occurs because /etc/default/kubelet does not exist on the VMs. Add create: yes to the "Configure node ip" tasks in master-playbook.yml and node-playbook.yml, so that they look like this:
- name: Configure node ip
lineinfile:
path: /etc/default/kubelet
line: KUBELET_EXTRA_ARGS=--node-ip={{ node_ip }}
create: yes
This way, the file will be created if it does not exist.
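An equivalent approach (just a sketch, not from the original answer) is to make sure the file exists before the lineinfile task runs, for example with the copy module and force: no so an already existing file is never overwritten:
- name: Ensure /etc/default/kubelet exists
  copy:
    content: ""          # create an empty file if it is missing
    dest: /etc/default/kubelet
    force: no            # never overwrite an existing file
    owner: root
    group: root
    mode: "0644"
Either way, lineinfile then finds the file and can add the KUBELET_EXTRA_ARGS line.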

I have seen this generic ansible-playbook on git; it generally follows the official manual. Initially, it was created (half a year ago?) for Ubuntu 16.04. I tried to run it (per the instructions from the official manual) against Ubuntu 18 (as you are using bionic), but I should say there is no /etc/default/kubelet installed (after apt install ...).
Update:
And here is why...
P.S.
I would suggest using Kubespray for a local Vagrant/Kubernetes setup, simply because it works out of the box.

You are following the tutorial on kubernetes.io.
I got the same error as you:
"Destination /etc/default/kubelet does not exist".
Just look at the instructions here.
You need to adjust the playbook slightly to match the other instructions:
Change the line kubeadm init --apiserver-advertise-address="192.168.50.10" --apiserver-cert-extra-sans="192.168.50.10" --node-name k8s-master --pod-network-cidr=192.168.0.0/16 according to the other instructions to kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address="192.168.50.10"
The result will be a join command that you need to register and re-use to join the two worker nodes, as sketched below.
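Put together, the adjusted tasks in master-playbook.yml would look roughly like this (a sketch based on the instruction above; the IP addresses are the tutorial's, so adjust them to your own private network):
- name: Initialize the Kubernetes cluster using kubeadm
  command: kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address="192.168.50.10"
- name: Generate join command
  command: kubeadm token create --print-join-command
  register: join_command
- name: Copy join command to local file
  local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command"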

I had the same error last time:
TASK [Configure node ip]
*******************************************************
fatal: [k8s-master]: FAILED! => {"changed": false, "msg": "Destination
/etc/default/kubelet does not exist !", "rc": 257}
So please check your ansible-playbook and verify that /etc/default/kubelet exists after the kubelet is installed. If it does not, please add the create parameter:
create: yes
So in your case, it should look like this:
- name: Configure node ip
lineinfile:
path: /etc/default/kubelet
line: KUBELET_EXTRA_ARGS=--node-ip={{ node_ip }}
create: yes

Related

Why does Ansible not see admin.conf, so that it needs to be manually exported?

Why does Ansible not see admin.conf when creating resources in the cloud?
- name: apply ingress
shell: export KUBECONFIG=/etc/kubernetes/admin.conf && kubectl apply -f /home/ingress.yaml
it works like this and sees everything, but if I do it like this:
- name: apply ingress
shell: kubectl apply -f /home/ingress.yaml
I get this error:
The connection to the server localhost:8080 was refused - did you
specify the right host or port?", "stderr_lines": ["The connection to
the server localhost:8080 was refused - did you specify the right host
or port?"], "stdout": "", "stdout_lines": []}
At the same time, if I log on to the server via SSH, the command works both under the ubuntu user and under the root user, without any exports.
P.S. just in case, I copied admin.conf to the user directory
- name: Create directory for kube config.
become: yes
file:
path: /home/{{ ansible_user }}/.kube
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0755
- name: Copy admin.conf to user's home directory
become_user: root
become_method: sudo
become: true
copy:
src: /etc/kubernetes/admin.conf
dest: "/home/{{ ansible_user }}/.kube/config"
remote_src: yes
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0644
I don't know why, but this is the solution:
- name: apply ingress
  become: true
  become_user: ubuntu
  shell: kubectl apply -f /home/ingress.yaml
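An alternative that avoids exporting the variable in the shell command (a sketch, assuming the task runs as root and /etc/kubernetes/admin.conf is readable by it) is to set KUBECONFIG through the task's environment keyword:
- name: apply ingress
  become: true
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf   # kubectl reads this instead of ~/.kube/config
  shell: kubectl apply -f /home/ingress.yaml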

Add a Kubernetes worker node with Ansible, but it doesn't join

I'm trying to set up a Kubernetes cluster and I'm using Ansible.
These are my playbooks:
hosts:
[masters]
master ansible_host=157.90.96.140 ansible_user=root
[workers]
worker1 ansible_host=157.90.96.138 ansible_user=root
worker2 ansible_host=157.90.96.139 ansible_user=root
[all:vars]
ansible_user=ubuntu
ansible_python_interpreter=/usr/bin/python3
kubelet_cgroup_driver=cgroupfs
ansible_ssh_common_args='-o StrictHostKeyChecking=no'
initial
become: yes
tasks:
- name: create the 'ubuntu' user
user: name=ubuntu append=yes state=present createhome=yes shell=/bin/bash
- name: allow 'ubuntu' to have passwordless sudo
lineinfile:
dest: /etc/sudoers
line: 'ubuntu ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
- name: set up authorized keys for the ubuntu user
authorized_key: user=ubuntu key="{{item}}"
with_file:
- ~/.ssh/id_rsa.pub
kube-dependencies
- hosts: all
become: yes
tasks:
- name: install Docker
apt:
name: docker.io
state: present
update_cache: true
- name: install APT Transport HTTPS
apt:
name: apt-transport-https
state: present
- name: add Kubernetes apt-key
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present
- name: add Kubernetes' APT repository
apt_repository:
repo: deb http://apt.kubernetes.io/ kubernetes-xenial main
state: present
filename: 'kubernetes'
- name: install kubernetes-cni
apt:
name: kubernetes-cni=0.7.5-00
state: present
force: yes
update_cache: true
- name: install kubelet
apt:
name: kubelet=1.14.0-00
state: present
update_cache: true
- name: install kubeadm
apt:
name: kubeadm=1.14.0-00
state: present
- hosts: master
become: yes
tasks:
- name: install kubectl
apt:
name: kubectl=1.14.0-00
state: present
force: yes
master
- hosts: master
become: yes
tasks:
- name: initialize the cluster
shell: kubeadm init --pod-network-cidr=10.244.0.0/16 >> cluster_initialized.txt
args:
chdir: $HOME
creates: cluster_initialized.txt
- name: create .kube directory
become: yes
become_user: ubuntu
file:
path: $HOME/.kube
state: directory
mode: 0755
- name: copy admin.conf to user's kube config
copy:
src: /etc/kubernetes/admin.conf
dest: /home/ubuntu/.kube/config
remote_src: yes
owner: ubuntu
- name: install Pod network
become: yes
become_user: ubuntu
shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml >> pod_network_setup.txt
args:
chdir: $HOME
creates: pod_network_setup.txt
worker
- hosts: master
become: yes
gather_facts: false
tasks:
- name: get join command
shell: kubeadm token create --print-join-command
register: join_command_raw
- name: set join command
set_fact:
join_command: "{{ join_command_raw.stdout_lines[0] }}"
- hosts: workers
become: yes
tasks:
- name: join cluster
shell: "{{ hostvars['master'].join_command }} >> node_joined.txt"
args:
chdir: $HOME
creates: node_joined.txt
The manual I followed for the installation was
https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-ansible-on-ubuntu-16-04
Now I have several questions:
Are these configs right?
In the master playbook I'm using the pod network from the manual and I didn't change it; is that correct?
My main problem is that my workers don't join. What's the problem?
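One quick way to narrow down why the workers do not join (a sketch, not part of the playbooks above) is to print the join command the workers actually receive from the master before running it:
- hosts: workers
  become: yes
  tasks:
    - name: show the join command received from the master
      debug:
        msg: "{{ hostvars['master'].join_command }}"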

write tasks to install nginx and postgresql using ansible-playbook

.score.sh is given as
#!/bin/bash
pass=0;
fail=0;
if [ $? -eq 0 ];then
worker=`ps -eaf|grep nginx|grep worker`
master=`ps -eaf|grep nginx|grep master`
serverup=`curl -Is http://localhost:9090/|grep -i "200 OK"`
serverurl=`curl -Is http://localhost:9090/|grep -io "google.com"`
if [[ ! -z ${worker} ]];then
((pass++))
echo "nginx is running as worker";
else
((fail++))
echo "nginx is not running as worker";
fi;
if [[ ! -z ${master} ]];then
((pass++))
echo "nginx is running as master";
else
((fail++))
echo "nginx is not running as master";
fi;
if [[ ! -z ${serverup} ]];then
((pass++))
echo "Nginx server is up";
else
((fail++))
echo "Nginx server is not up";
fi;
if [[ ! -z ${serverurl} ]];then
((pass++))
echo "Nginx server is redirecting to google.com";
else
((fail++))
echo "Nginx server is not redirecting to google.com ";
fi;
fi;
echo $pass $fail
score=$(( $pass * 25 ))
echo "FS_SCORE:$score%"
I was only able to install nginx and postgresql, but I could not satisfy the conditions given in .score.sh.
Can someone help me install nginx so that it runs both the master and worker processes and redirects to google.com?
#installing nginx and postgresql
name: Updating apt
command: sudo apt-get update
name: Install list of packages
apt:
pkg: ['nginx', 'postgresql']
state: latest
name: Start Nginx service
service:
name: nginx
state: started
name: Start PostgreSQL service
service:
name: postgresql
state: started
If nginx is not started, use
'sudo service nginx restart'
This worked for me and the Fresco course passed for me.
---
#installing nginx and postgresql
- name: Install nginx
apt: name=nginx state=latest
tags: nginx
- name: restart nginx
service:
name: nginx
state: started
- name: Install PostgreSQL
apt: name=postgresql state=latest
tags: PostgreSQL
- name: Start PostgreSQL
service:
name: postgresql
state: started
I have tried the above one and I am getting the error message below:
ERROR! 'apt' is not a valid attribute for a Play
The error appears to be in '/projects/challenge/fresco_nginx/tasks/main.yml': line 3, column 3, but may
be elsewhere in the file depending on the exact syntax problem.
The offending line appears to be:
#installing nginx and postgresql
- name: Install nginx
^ here
All the answers given above work for installing nginx; the problem is that nginx is listening on port 80 while the score script checks port 9090. If you curl using port 80 you will get a response. So you need to find some way to change the nginx conf file to use port 9090.
The code below worked for me:
Define your port number and the site you wish the nginx server to redirect to in a .j2 file in the templates folder under your role.
Include a task in the playbook to place the template at /etc/nginx/sites-enabled/default. Include a notify for the handler defined in the handlers folder (a minimal handler is sketched after the playbook below).
In some cases, if the nginx server doesn't restart, use 'sudo service nginx restart' at the terminal before testing your code.
Ansible-Sibelius (Try it Out- Write a Playbook)
#installing nginx and postgresql
- name: Install nginx
apt: name=nginx state=latest
tags: nginx
- name: restart nginx
service:
name: nginx
state: started
- name: Install PostgreSQL
apt: name=postgresql state=latest
tags: PostgreSQL
- name: Start PostgreSQL
service:
name: postgresql
state: started
- name: Set the configuration for the template file
template:
src: /<path-to-your-roles>/templates/sites-enabled.j2
dest: /etc/nginx/sites-enabled/default
notify: restart nginx
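The notify above only works if a matching handler exists; assuming the tasks above live in a role, a minimal sketch of its handlers/main.yml (the handler name must match the notify string exactly):
- name: restart nginx
  service:
    name: nginx
    state: restarted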
I found the code below useful and it passed the FrescoPlay hands-on; the above-mentioned code also passes it.
- hosts: all
tasks:
- name: ensure nginx is at the latest version
apt: name=nginx state=latest
- name: start nginx
service:
name: nginx
state: started
and the corresponding nginx server block, listening on port 9090 and proxying to google.com:
server {
listen 9090;
root /var/www/your_domain/html;
index index.html;
server_name google.com;
location / {
try_files $uri $uri/ =404;
proxy_pass https://www.google.com;
}
}

Vagrant: running Ansible provisioning after all VMs booted, Ansible cannot connect to all hosts

I'm trying to figure out how to use Ansible with Vagrant the proper way. By default, it seems Vagrant isolates Ansible execution per box and runs the playbook after each box boots, applying it only to that single box in the loop. I find this VERY counterproductive, and I have tried tricking Vagrant into executing a playbook across all of the hosts AFTER all of them have booted, but it seems Ansible, when started from Vagrant, never sees more than a single box at a time.
Edit: these are the versions I am working with:
Vagrant: 2.2.6
Ansible: 2.5.1
Virtualbox: 6.1
The playbook (with the hosts.ini) by itself executes without issues when I run it stand-alone with the ansible-playbook executable after the hosts come up, so the problem is with my Vagrantfile. I just cannot figure it out.
This is the Vagrantfile:
# -*- mode: ruby -*-
# vi: set ft=ruby :
IMAGE_NAME = "ubuntu/bionic64"
Vagrant.configure("2") do |config|
config.ssh.insert_key = false
config.vm.box = IMAGE_NAME
# Virtualbox configuration
config.vm.provider "virtualbox" do |v|
v.memory = 4096
v.cpus = 2
#v.linked_clone = true
end
# master and node definition
boxes = [
{ :name => "k8s-master", :ip => "192.168.50.10" },
{ :name => "k8s-node-1", :ip => "192.168.50.11" }
]
boxes.each do |opts|
config.vm.define opts[:name] do |config|
config.vm.hostname = opts[:name]
config.vm.network :private_network, ip: opts[:ip]
if opts[:name] == "k8s-node-1"
config.vm.provision "ansible_local" do |ansible|
ansible.compatibility_mode = "2.0"
ansible.limit = "all"
ansible.config_file = "ansible.cfg"
ansible.become = true
ansible.playbook = "playbook.yml"
ansible.groups = {
"masters" => ["k8s-master"],
"nodes" => ["k8s-node-1"]
}
end
end
end
end
end
ansible.cfg
[defaults]
connection = smart
timeout = 60
deprecation_warnings = False
host_key_checking = False
inventory = hosts.ini
[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes
hosts.ini
[masters]
k8s-master ansible_host=192.168.50.10 ansible_user=vagrant
[nodes]
k8s-node-1 ansible_host=192.168.50.11 ansible_user=vagrant
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_ssh_user=vagrant
ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key
playbook.yml
- hosts: all
become: yes
tasks:
- name: Update apt cache.
apt: update_cache=yes cache_valid_time=3600
when: ansible_os_family == 'Debian'
- name: Ensure swap is disabled.
mount:
name: swap
fstype: swap
state: absent
- name: Disable swap.
command: swapoff -a
when: ansible_swaptotal_mb > 0
- name: create the 'mobile' user
user: name=mobile append=yes state=present createhome=yes shell=/bin/bash
- name: allow 'mobile' to have passwordless sudo
lineinfile:
dest: /etc/sudoers
line: 'mobile ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
- name: set up authorized keys for the mobile user
authorized_key:
user: mobile
key: "{{ lookup('pipe','cat ssh_keys/*.pub') }}"
state: present
exclusive: yes
- hosts: all
become: yes
tasks:
- name: install Docker
apt:
name: docker.io
state: present
update_cache: true
- name: install APT Transport HTTPS
apt:
name: apt-transport-https
state: present
- name: add Kubernetes apt-key
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present
- name: add Kubernetes' APT repository
apt_repository:
repo: deb http://apt.kubernetes.io/ kubernetes-xenial main
state: present
filename: 'kubernetes'
- name: install kubelet
apt:
name: kubelet=1.17.0-00
state: present
update_cache: true
- name: install kubeadm
apt:
name: kubeadm=1.17.0-00
state: present
- hosts: masters
become: yes
tasks:
- name: install kubectl
apt:
name: kubectl=1.17.0-00
state: present
force: yes
- hosts: k8s-master
become: yes
tasks:
- name: check docker status
systemd:
state: started
name: docker
- name: initialize the cluster
shell: kubeadm init --apiserver-advertise-address 192.168.50.10 --pod-network-cidr=10.244.0.0/16 >> cluster_initialized.txt
args:
chdir: $HOME
creates: cluster_initialized.txt
- name: create .kube directory
become: yes
become_user: mobile
file:
path: $HOME/.kube
state: directory
mode: 0755
- name: copy admin.conf to user's kube config
copy:
src: /etc/kubernetes/admin.conf
dest: /home/mobile/.kube/config
remote_src: yes
owner: mobile
- name: install Pod network
become: yes
become_user: mobile
shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml >> pod_network_setup.txt
args:
chdir: $HOME
creates: pod_network_setup.txt
- hosts: k8s-master
become: yes
gather_facts: false
tasks:
- name: get join command
shell: kubeadm token create --print-join-command 2>/dev/null
register: join_command_raw
- name: set join command
set_fact:
join_command: "{{ join_command_raw.stdout_lines[0] }}"
- hosts: nodes
become: yes
tasks:
- name: check docker status
systemd:
state: started
name: docker
- name: join cluster
shell: "{{ hostvars['k8s-master'].join_command }} >> node_joined.txt"
args:
chdir: $HOME
creates: node_joined.txt
The moment the playbook tries to execute against k8s-master, it fails like this:
fatal: [k8s-master]: UNREACHABLE! => {"changed": false, "msg": "Failed to connect to the host via ssh: ssh: Could not resolve hostname k8s-master: Temporary failure in name resolution", "unreachable": true}
The host is up. SSH works.
Who can help me sort this out?
Thanks!
I have managed to use Ansible inside of Vagrant.
Here is what I did to make it work:
Steps to reproduce:
Install Vagrant, Virtualbox
Create all the necessary files and directories
ansible.cfg
playbook.yml
hosts
insecure_private_key
Vagrant file
Test
Install Vagrant, Virtualbox
Follow installation guides at appropriate sites:
Vagrant
Virtualbox
Create all the necessary files and directories
This example is based on the original poster's files.
Create vagrant and ansible folders to store all the configuration files and directories. The structure could look like this:
vagrant - directory
Vagrantfile - file with main configuration
ansible - directory
ansible.cfg - configuration file of Ansible
playbook.yml - file with steps for Ansible to execute
hosts - file with information about hosts
insecure_private_key - private key of created machines
The ansible folder is a separate directory that will be copied to k8s-node-1.
By default Vagrant shares the vagrant folder with permissions of 777, which gives the owner, group and others full access to everything inside it.
Logging in to the virtual machine manually and running the ansible-playbook command inside the vagrant directory will produce permission errors, rendering ansible.cfg and insecure_private_key useless.
Ansible.cfg
Ansible.cfg is Ansible's configuration file. Example used below:
[defaults]
connection = smart
timeout = 60
deprecation_warnings = False
host_key_checking = False
[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes
Create ansible.cfg inside ansible directory.
Playbook.yml
Example playbook.yml is a file with steps for Ansible to execute.
It will check connections and test if groups are configured correctly:
- name: Check all connections
hosts: all
tasks:
- name: Ping
ping:
- name: Check specific connection to masters
hosts: masters
tasks:
- name: Ping
ping:
- name: Check specific connection to nodes
hosts: nodes
tasks:
- name: Ping
ping:
Create playbook.yml inside ansible directory.
Insecure_private_key
To successfully connect to the virtual machines you will need insecure_private_key. You can create it by invoking the command $ vagrant init inside the vagrant directory.
It will create insecure_private_key on your physical machine in HOME_DIRECTORY/.vagrant.d.
Copy it to the ansible folder.
Hosts
The hosts file below is responsible for passing the information about hosts to Ansible:
[masters]
k8s-master ansible_host=192.168.50.10 ansible_user=vagrant
[nodes]
k8s-node-1 ansible_host=192.168.50.11 ansible_user=vagrant
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_ssh_user=vagrant
ansible_ssh_private_key_file=/ansible/insecure_private_key
Create hosts file inside ansible directory.
Please take a specific look at: ansible_ssh_private_key_file=/ansible/insecure_private_key
This is the declaration that tells Ansible to use the key mentioned earlier.
Vagrant
The Vagrantfile is the main configuration file:
# -*- mode: ruby -*-
# vi: set ft=ruby :
IMAGE_NAME = "ubuntu/bionic64"
Vagrant.configure("2") do |config|
config.ssh.insert_key = false
config.vm.box = IMAGE_NAME
# Virtualbox configuration
config.vm.provider "virtualbox" do |v|
v.memory = 4096
v.cpus = 2
#v.linked_clone = true
end
# master and node definition
boxes = [
{ :name => "k8s-master", :ip => "192.168.50.10" },
{ :name => "k8s-node-1", :ip => "192.168.50.11" }
]
boxes.each do |opts|
config.vm.define opts[:name] do |config|
config.vm.hostname = opts[:name]
config.vm.network :private_network, ip: opts[:ip]
if opts[:name] == "k8s-node-1"
config.vm.synced_folder "../ansible", "/ansible", :mount_options => ["dmode=700", "fmode=700"]
config.vm.provision "ansible_local" do |ansible|
ansible.compatibility_mode = "2.0"
ansible.limit = "all"
ansible.config_file = "/ansible/ansible.cfg"
ansible.become = true
ansible.playbook = "/ansible/playbook.yml"
ansible.inventory_path = "/ansible/hosts"
end
end
end
end
end
Please take a specific look at:
config.vm.synced_folder "../ansible", "/ansible", :mount_options => ["dmode=700", "fmode=700"]
config.vm.synced_folder will copy the ansible directory to k8s-node-1 with all the files inside.
It will set permissions so that only the owner (the vagrant user) has full access.
ansible.inventory_path = "/ansible/hosts"
ansible.inventory_path tells Vagrant to provide the hosts file to Ansible.
Test
To check, run the following command from the vagrant directory:
$ vagrant up
The part of the output responsible for Ansible should look like this:
==> k8s-node-1: Running provisioner: ansible_local...
k8s-node-1: Installing Ansible...
k8s-node-1: Running ansible-playbook...
PLAY [Check all connections] ***************************************************
TASK [Gathering Facts] *********************************************************
ok: [k8s-master]
ok: [k8s-node-1]
TASK [Ping] ********************************************************************
ok: [k8s-master]
ok: [k8s-node-1]
PLAY [Check specific connection to masters] ************************************
TASK [Gathering Facts] *********************************************************
ok: [k8s-master]
TASK [Ping] ********************************************************************
ok: [k8s-master]
PLAY [Check specific connection to nodes] **************************************
TASK [Gathering Facts] *********************************************************
ok: [k8s-node-1]
TASK [Ping] ********************************************************************
ok: [k8s-node-1]
PLAY RECAP *********************************************************************
k8s-master : ok=4 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
k8s-node-1 : ok=4 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0

Some tasks in Ansible aren't executed

I'm using Ansible for some deployment work.
I want to do the following:
Install virtualenv
Activate the installed virtual environment
Check that I'm in the virtual environment
For this purpose I have the following playbook:
---
- hosts: servers
tasks:
- name: update repository
apt: update_cache=yes
sudo: true
tasks:
- name: install git
apt: name=git state=latest
sudo: true
tasks:
- name: install pip
apt: name=python-pip state=latest
sudo: true
tasks:
- name: installing postgres
sudo: true
apt: name=postgresql state=latest
tasks:
- name: installing libpd-dev
sudo: true
apt: name=libpq-dev state=latest
tasks:
- name: installing psycopg
sudo: true
apt: name=python-psycopg2 state=latest
tasks:
- name: configuration of virtual env
sudo: true
pip: name=virtualenvwrapper state=latest
tasks:
- name: create virtualenv
command: virtualenv venv
tasks:
- name: virtualenv activate
shell: . ~/venv/bin/activate
tasks:
- name: "Guard code, so we are more certain we are in a virtualenv"
shell: echo $VIRTUAL_ENV
register: command_result
failed_when: command_result.stdout == ""
The problem is that sometimes some tasks are not executed, even though they should be. For instance, in my case the task:
tasks:
- name: create virtualenv
command: virtualenv venv
is not executed.
But if I comment out the last 2 tasks:
tasks:
- name: virtualenv activate
shell: . ~/venv/bin/activate
tasks:
- name: "Guard code, so we are more certain we are in a virtualenv"
shell: echo $VIRTUAL_ENV
register: command_result
failed_when: command_result.stdout == ""
the previous one works.
I can't figure out what I'm doing wrong. Can somebody give me a hint?
Assuming hosts: servers covers the correct servers, you should only have one tasks entry. Here's an optimized and simplified playbook:
---
- hosts: servers
sudo: yes
tasks:
- name: update repository daily
apt: update_cache=yes cache_valid_time=86400
- name: install development dependencies
apt: name={{item}} state=latest
with_items:
- git
- python-pip
- postgresql
- libpq-dev
- python-psycopg2
- name: configuration of virtual env
pip: name=virtualenvwrapper state=present
- name: create virtualenv
command: virtualenv venv
- name: virtualenv activate
shell: . ~/venv/bin/activate
- name: "Guard code, so we are more certain we are in a virtualenv"
shell: echo $VIRTUAL_ENV
register: command_result
failed_when: command_result.stdout == ""
Note that I've cached the apt call and I've also changed state to present; you likely want to install specific versions rather than rechecking on every run of Ansible.
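Also note that each shell task runs in its own shell, so the virtualenv activation in one task does not carry over to the next. If the guard is meant to check the activated environment, a sketch that combines both steps in a single task (assuming the venv lives at ~/venv) looks like this:
- name: "Guard code, so we are more certain we are in a virtualenv"
  # activate and check in the same shell so $VIRTUAL_ENV is actually set
  shell: . ~/venv/bin/activate && echo $VIRTUAL_ENV
  register: command_result
  failed_when: command_result.stdout == ""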