vendor in recent ansible changes
mapuri committed Jun 2, 2016
2 parents 9394887 + e262540 commit fecc5d9
Showing 28 changed files with 177 additions and 83 deletions.
10 changes: 10 additions & 0 deletions vendor/ansible/Makefile
@@ -0,0 +1,10 @@
.PHONY: test-up test-provision test-cleanup

test-up:
vagrant up

test-provision:
vagrant provision

test-cleanup:
CONTIV_ANSIBLE_PLAYBOOK="./cleanup.yml" CONTIV_ANSIBLE_TAGS="all" vagrant provision
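The new targets are thin wrappers around vagrant for the vendored test environment. A typical flow might look like the following sketch (it assumes Vagrant is installed and that the repository's Vagrantfile honours the CONTIV_ANSIBLE_* variables):

    # bring up the test VMs and run the default provisioning playbook
    make test-up

    # re-run provisioning against the already-running VMs
    make test-provision

    # run cleanup.yml with all tags; equivalent to:
    #   CONTIV_ANSIBLE_PLAYBOOK="./cleanup.yml" CONTIV_ANSIBLE_TAGS="all" vagrant provision
    make test-cleanup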
4 changes: 2 additions & 2 deletions vendor/ansible/cleanup.yml
@@ -4,7 +4,7 @@
# Note: cleanup is not expected to fail, so we set ignore_errors to yes here

- hosts: all
sudo: true
become: true
tasks:
- include_vars: roles/{{ item }}/defaults/main.yml
with_items:
@@ -20,7 +20,7 @@
- contiv_storage
- swarm
- ucp
- docker
- etcd
- ucarp
- docker
ignore_errors: yes
3 changes: 3 additions & 0 deletions vendor/ansible/roles/base/tasks/redhat_tasks.yml
@@ -10,6 +10,7 @@
update_cache: true
state: latest
with_items:
- yum-utils
- ntp
- unzip
- bzip2
@@ -18,6 +19,8 @@
- bash-completion
- kernel #keep kernel up to date
- libselinux-python
- e2fsprogs
- openssh-server

- name: install and start ntp
service: name=ntpd state=started enabled=yes
2 changes: 2 additions & 0 deletions vendor/ansible/roles/base/tasks/ubuntu_tasks.yml
@@ -15,3 +15,5 @@
- python-software-properties
- bash-completion
- python-selinux
- e2fsprogs
- openssh-server
2 changes: 1 addition & 1 deletion vendor/ansible/roles/contiv_cluster/defaults/main.yml
@@ -9,7 +9,7 @@ collins_guest_port: 9000
clusterm_args_file: "clusterm.args"
clusterm_conf_file: "clusterm.conf"

contiv_cluster_version: "v0.1-05-12-2016.08-27-16.UTC"
contiv_cluster_version: "v0.1-05-14-2016.00-33-02.UTC"
contiv_cluster_tar_file: "cluster-{{ contiv_cluster_version }}.tar.bz2"
contiv_cluster_src_file: "https://github.com/contiv/cluster/releases/download/{{ contiv_cluster_version }}/{{ contiv_cluster_tar_file }}"
contiv_cluster_dest_file: "/tmp/{{ contiv_cluster_tar_file }}"
10 changes: 5 additions & 5 deletions vendor/ansible/roles/contiv_cluster/tasks/main.yml
@@ -45,12 +45,12 @@

- name: copy conf files for clusterm
copy:
src: "{{ item }}"
dest: /etc/default/clusterm/{{ item }}
force: yes
src: "{{ item.file }}"
dest: "/etc/default/clusterm/{{ item.file }}"
force: "{{ item.force }}"
with_items:
- "{{ clusterm_args_file }}"
- "{{ clusterm_conf_file }}"
- { file: "{{ clusterm_args_file }}", force: "yes" }
- { file: "{{ clusterm_conf_file }}", force: "no" }

- name: copy systemd units for clusterm
template: src=clusterm.j2 dest=/etc/systemd/system/clusterm.service
11 changes: 6 additions & 5 deletions vendor/ansible/roles/contiv_network/defaults/main.yml
@@ -16,15 +16,16 @@ bgp_port: 179
vxlan_port: 4789
netplugin_rule_comment: "contiv network traffic"

contiv_network_version: "v0.1-05-08-2016.20-28-46.UTC"
contiv_network_version: "v0.1-05-16-2016.08-29-25.UTC"
contiv_network_tar_file: "netplugin-{{ contiv_network_version }}.tar.bz2"
contiv_network_src_file: "https://github.com/contiv/netplugin/releases/download/{{ contiv_network_version }}/{{ contiv_network_tar_file }}"
contiv_network_dest_file: "/tmp/{{ contiv_network_tar_file }}"

contivctl_version: "v0.0.0-03-10-2016.22-13-24.UTC"
contivctl_tar_file: "contivctl-{{ contivctl_version }}.tar.bz2"
contivctl_src_file: "https://github.com/contiv/contivctl/releases/download/{{ contivctl_version }}/{{ contivctl_tar_file }}"
contivctl_dest_file: "/tmp/{{ contivctl_tar_file }}"
contivctl_version_no_v: "0.1-05-26-2016.22-31-22.UTC"
contivctl_version: "v{{ contivctl_version_no_v }}"
contivctl_tar_file: "{{ contivctl_version }}.tar.gz"
contivctl_src_file: "https://github.com/contiv/contivctl/archive/{{ contivctl_tar_file }}"
contivctl_dest_file: "/tmp/contivctl-{{ contivctl_tar_file }}"

apic_epg_bridge_domain: "not_specified"
apic_contracts_unrestricted_mode: "no"
@@ -5,4 +5,5 @@ After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
[Service]
EnvironmentFile=/etc/default/netplugin
ExecStart=/usr/bin/netplugin $NETPLUGIN_ARGS
ExecStopPost=/usr/bin/rm -f /run/docker/plugins/netplugin.sock
KillMode=control-group
2 changes: 1 addition & 1 deletion vendor/ansible/roles/contiv_network/tasks/aci_tasks.yml
@@ -13,4 +13,4 @@
service: name=aci-gw state=started

- name: set aci mode
shell: contivctl net global set --fabric-mode aci
shell: contivctl network global set --fabric-mode aci
2 changes: 1 addition & 1 deletion vendor/ansible/roles/contiv_network/tasks/main.yml
@@ -93,7 +93,7 @@
force: no

- name: install contivctl
shell: tar vxjf {{ contivctl_dest_file }}
shell: tar vxzf {{ contivctl_dest_file }} --strip-components=1 contivctl-{{ contivctl_version_no_v }}/contivctl
args:
chdir: /usr/bin/

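Together with the contivctl_* defaults changed above, contivctl is now fetched as a GitHub source archive (tar.gz) rather than a release tarball (tar.bz2), and only the contivctl script is extracted into /usr/bin. A rough manual equivalent of the download and install tasks, as a sketch (the curl invocation is illustrative; the version and paths come from the defaults above):

    ver="0.1-05-26-2016.22-31-22.UTC"
    curl -L -o "/tmp/contivctl-v${ver}.tar.gz" \
        "https://github.com/contiv/contivctl/archive/v${ver}.tar.gz"
    cd /usr/bin && tar vxzf "/tmp/contivctl-v${ver}.tar.gz" \
        --strip-components=1 "contivctl-${ver}/contivctl"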
6 changes: 3 additions & 3 deletions vendor/ansible/roles/contiv_network/tasks/ovs.yml
@@ -8,15 +8,15 @@
url: "{{ item.url }}"
with_items:
- {
url: "https://cisco.box.com/shared/static/51eo9dcw04qx2y1f14n99y4yt5kug3q4.rpm",
dest: /tmp/openvswitch-2.3.1-1.x86_64.rpm
url: "https://cisco.box.com/shared/static/zzmpe1zesdpf270k9pml40rlm4o8fs56.rpm",
dest: /tmp/openvswitch-2.3.1-2.el7.x86_64.rpm
}
when: ansible_os_family == "RedHat"
tags:
- prebake-for-dev

- name: install ovs (redhat)
yum: name=/tmp/openvswitch-2.3.1-1.x86_64.rpm state=present
yum: name=/tmp/openvswitch-2.3.1-2.el7.x86_64.rpm state=present
when: ansible_os_family == "RedHat"
tags:
- prebake-for-dev
2 changes: 1 addition & 1 deletion vendor/ansible/roles/contiv_storage/defaults/main.yml
@@ -2,7 +2,7 @@

# Role defaults for contiv_storage

contiv_storage_version: "v0.0.0-05-12-2016.08-24-33.UTC"
contiv_storage_version: "v0.0.0-05-12-2016.07-23-53.UTC"
contiv_storage_tar_file: "volplugin-{{ contiv_storage_version }}.tar.bz2"
contiv_storage_src_file: "https://github.com/contiv/volplugin/releases/download/{{ contiv_storage_version }}/{{ contiv_storage_tar_file }}"
contiv_storage_dest_file: "/tmp/{{ contiv_storage_tar_file }}"
@@ -5,4 +5,5 @@ After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
[Service]
EnvironmentFile=/etc/default/volplugin
ExecStart=/usr/bin/volplugin $VOLPLUGIN_ARGS
ExecStopPost=/usr/bin/rm -f /run/docker/plugins/volplugin.sock
KillMode=control-group
2 changes: 1 addition & 1 deletion vendor/ansible/roles/dev/meta/main.yml
@@ -14,8 +14,8 @@
dependencies:
- { role: ceph-install, tags: 'prebake-for-dev' }
- { role: ansible, tags: 'prebake-for-dev' }
- { role: etcd }
- { role: docker }
- { role: etcd }
- { role: swarm }
- { role: ucp }
- { role: contiv_cluster }
19 changes: 5 additions & 14 deletions vendor/ansible/roles/docker/tasks/main.yml
@@ -9,20 +9,12 @@
tags:
- prebake-for-dev

- name: install docker (debian)
shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}/' | bash
- include: ubuntu_install_tasks.yml
when: (ansible_os_family == "Debian") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
tags:
- prebake-for-dev

- name: remove docker (redhat)
yum: name=docker-engine state=absent
when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
tags:
- prebake-for-dev

- name: install docker (redhat)
shell: curl https://get.docker.com | sed 's/docker-engine/docker-engine-{{ docker_version }}/' | bash
- include: redhat_install_tasks.yml
when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
tags:
- prebake-for-dev
@@ -41,7 +33,6 @@
shell: >
( iptables -L INPUT | grep "{{ docker_rule_comment }} ({{ item }})" ) || \
iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ docker_rule_comment }} ({{ item }})"
become: true
with_items:
- "{{ docker_api_port }}"

@@ -67,7 +58,7 @@

# tcp socket service requires docker service to be started after it
- name: reload systemd configuration
shell: sudo systemctl daemon-reload
shell: systemctl daemon-reload
when: "(docker_tcp_socket | changed) or (docker_tcp_socket_state.stdout != 'Active: active')"

- name: stop docker
@@ -83,7 +74,7 @@
when: "(docker_tcp_socket | changed) or (docker_tcp_socket_state.stdout != 'Active: active')"

- name: check docker service state
shell: sudo systemctl status docker | grep 'Active.*active' -o
shell: systemctl status docker | grep 'Active.*active' -o
ignore_errors: true
register: docker_service_state
tags:
@@ -97,7 +88,7 @@
# https://github.com/ansible/ansible-modules-core/issues/191
- name: reload docker systemd configuration
#service: name=docker state=restarted
shell: sudo systemctl daemon-reload
shell: systemctl daemon-reload
when: "(docker_service_state.stderr | match('.*docker.service changed on disk.*')) or (docker_service_state.stdout != 'Active: active')"
tags:
- prebake-for-dev
22 changes: 22 additions & 0 deletions vendor/ansible/roles/docker/tasks/redhat_install_tasks.yml
@@ -0,0 +1,22 @@
---
# This role contains tasks for installing docker service
#

- name: add docker's public key for CS-engine (redhat)
rpm_key:
key: "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e"
state: present
validate_certs: "{{ validate_certs }}"

- name: add docker CS-engine repos (redhat)
shell: yum-config-manager --add-repo https://packages.docker.com/{{ item }}/yum/repo/main/centos/7
become: true
with_items:
- "1.10"
- "1.11"

- name: remove docker (redhat)
yum: name=docker-engine state=absent

- name: install docker (redhat)
shell: curl https://get.docker.com | sed 's/docker-engine/docker-engine-{{ docker_version }}/' | bash
20 changes: 20 additions & 0 deletions vendor/ansible/roles/docker/tasks/ubuntu_install_tasks.yml
@@ -0,0 +1,20 @@
---
# This role contains tasks for installing docker service
#

- name: add docker's public key for CS-engine (debian)
apt_key:
url: "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e"
state: present
validate_certs: "{{ validate_certs }}"

- name: add docker CS-engine repos (debian)
apt_repository:
repo: "deb https://packages.docker.com/{{ item }}/apt/repo ubuntu-{{ ansible_distribution_release }} main"
state: present
with_items:
- "1.10"
- "1.11"

- name: install docker (debian)
shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}/' | bash
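In both the RedHat and Debian branches the pinned package must end up reporting the expected docker_version, since roles/docker/tasks/main.yml keys off the `docker --version` output. A quick manual sanity check on a provisioned node might be (a sketch; the grep mirrors the role's own service check):

    # version string must match: "Docker version <docker_version>, build ..."
    docker --version
    # service should be active, same check the role performs
    systemctl status docker | grep -o 'Active.*active'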
5 changes: 3 additions & 2 deletions vendor/ansible/roles/etcd/files/etcd.service
@@ -1,9 +1,10 @@
[Unit]
Description=Etcd
After=auditd.service systemd-user-sessions.service time-sync.target
After=auditd.service systemd-user-sessions.service time-sync.target docker.service

[Service]
Restart=on-failure
RestartSec=10s
ExecStart=/usr/bin/etcd.sh start
ExecStop=/usr/bin/etcd.sh stop
KillMode=control-group
ExecStopPost=/usr/bin/etcd.sh post-stop
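With the unit now ordered after docker.service and set to restart on failure, picking up the new file on an already-provisioned node is the usual systemd sequence (a sketch, not part of the role):

    systemctl daemon-reload
    systemctl restart etcd
    journalctl -u etcd -f    # failed starts are retried every 10s (RestartSec=10s)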
9 changes: 7 additions & 2 deletions vendor/ansible/roles/etcd/tasks/main.yml
@@ -1,21 +1,26 @@
---
# This role contains tasks for configuring and starting etcd service

- name: download etcd {{ etcd_version }}
- name: download etcdctl {{ etcd_version }}
get_url:
validate_certs: "{{ validate_certs }}"
url: https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz
dest: /tmp/etcd-{{ etcd_version }}-linux-amd64.tar.gz
tags:
- prebake-for-dev

- name: install etcd
- name: install etcdctl
shell: >
tar vxzf /tmp/etcd-{{ etcd_version }}-linux-amd64.tar.gz && \
mv etcd-{{ etcd_version }}-linux-amd64/etcd* /usr/bin
tags:
- prebake-for-dev

- name: install etcd {{ etcd_version }}
shell: docker pull quay.io/coreos/etcd:{{ etcd_version }}
tags:
- prebake-for-dev

- name: setup iptables for etcd
shell: >
( iptables -L INPUT | grep "{{ etcd_rule_comment }} ({{ item }})" ) || \
33 changes: 26 additions & 7 deletions vendor/ansible/roles/etcd/templates/etcd.j2
@@ -15,7 +15,7 @@ http://{{ addr }}:{{ etcd_client_port1 }},http://{{ addr }}:{{ etcd_client_port2 }}
{%- macro get_peer_addr() -%}
{# we can't use a simple filter as shown, as it needs python 2.8.
# So resorting to loop below to get a peer.
{%- set peer_name=groups[etcd_peers_group]|reject("equalto", node_name)|first -%} #}
#{%- set peer_name=groups[etcd_peers_group]|reject("equalto", node_name)|first -%} #}
{%- set peers=[] -%}
{%- for host in groups[etcd_peers_group] -%}
{%- if host != node_name -%}
@@ -124,6 +124,15 @@ export ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
set -x
case $1 in
start)
# check if docker is running, else fail early.
# this is done instead of adding a 'Requires' dependency for docker in
# unit file to ensure that the etcd service starts as soon as docker starts
# even after a manual restart of docker.
out=$(/usr/bin/docker ps 2>&1 | grep -o "Cannot connect to the Docker daemon")
if [ "${out}" == "Cannot connect to the Docker daemon" ]; then
echo "docker is not running."
exit 1
fi
{% if run_as == "worker" -%}
{{ add_proxy() }}
{% elif etcd_init_cluster -%}
@@ -135,7 +144,20 @@

#start etcd
echo "==> starting etcd with environment:" `env`
/usr/bin/etcd
/usr/bin/docker run -t --rm --net=host --name etcd \
-e ETCD_NAME=${ETCD_NAME} \
-e ETCD_DATA_DIR=${ETCD_DATA_DIR} \
-e ETCD_INITIAL_CLUSTER_TOKEN=${ETCD_INITIAL_CLUSTER_TOKEN} \
-e ETCD_LISTEN_CLIENT_URLS=${ETCD_LISTEN_CLIENT_URLS} \
-e ETCD_ADVERTISE_CLIENT_URLS=${ETCD_ADVERTISE_CLIENT_URLS} \
-e ETCD_INITIAL_ADVERTISE_PEER_URLS=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
-e ETCD_LISTEN_PEER_URLS=${ETCD_LISTEN_PEER_URLS} \
-e ETCD_HEARTBEAT_INTERVAL=${ETCD_HEARTBEAT_INTERVAL} \
-e ETCD_ELECTION_TIMEOUT=${ETCD_ELECTION_TIMEOUT} \
-e ETCD_INITIAL_CLUSTER=${ETCD_INITIAL_CLUSTER} \
-e ETCD_INITIAL_CLUSTER_STATE=${ETCD_INITIAL_CLUSTER_STATE} \
-e ETCD_PROXY=${ETCD_PROXY} \
quay.io/coreos/etcd:{{ etcd_version }}
;;

stop)
@@ -145,18 +167,15 @@
{% set peer_addr=get_peer_addr() -%}
{% if peer_addr == "" -%}
echo "==> no peer found or single member cluster at time of commission"
exit 1
{% else -%}
{{ remove_member(peer_addr=peer_addr) }}
{% endif %}
{% else -%}
{{ remove_member(peer_addr=etcd_master_addr) }}
{% endif -%}
;;

post-stop)
#XXX: is there a case when we should not cleanup the data dir on stop?
rm -rf $ETCD_DATA_DIR
/usr/bin/docker stop etcd
/usr/bin/docker rm etcd
;;

*)
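Since etcd now runs inside a container named "etcd" (while etcdctl is still installed on the host by the tasks above), a rough way to confirm a node came up healthy might be (a sketch; it assumes the default client ports from the template):

    docker ps --filter name=etcd                      # the wrapper starts the container with --name etcd
    etcdctl cluster-health                            # etcdctl was unpacked to /usr/bin by the etcd role
    systemctl status etcd | grep -o 'Active.*active'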
7 changes: 7 additions & 0 deletions vendor/ansible/roles/test/defaults/main.yml
@@ -0,0 +1,7 @@
---
# role variable for the test environment packages

vbox_major_version: "5.0.20"
vbox_version: "5.0-{{ vbox_major_version }}_106931"
vagrant_version: "1.8.1"
packer_version: "0.10.0"
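These defaults pin the tooling for the new test role; the tasks that consume them are not part of this diff. On a host provisioned with them the reported versions would be expected to line up, roughly (a sketch; the exact commands are assumptions):

    vagrant --version       # expect: Vagrant 1.8.1
    packer --version        # expect: 0.10.0
    VBoxManage --version    # expect: 5.0.20r106931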