WIP: Reform packaging-centric playbook for ease of maintenance

Broke apart the giant monolithic list of includes, tasks and plays
into additional discrete roles.  Set up an example Ansible inventory
file to group hosts by purpose.  That permitted moving all runtime
variables out of the global scope and into purpose-based
group_vars/*/files.
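
For orientation, a rough sketch of how the grouping is meant to hang
together; the paths below are illustrative (not an exact listing of this
commit), and the ansible-inventory call is just one way to confirm which
group_vars a host inherits:

    # Illustrative layout (names are examples):
    #   contrib/test/hosts          inventory: [ControlHost] / [SubjectHosts]
    #   contrib/test/group_vars/    purpose-based variables, scoped per group
    #   contrib/test/roles/...      discrete roles split out of the monolith
    # Show the variables a host picks up from its inventory groups:
    ansible-inventory -i <inventory> --host localhost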

Set up additional tags for plays/tasks to reflect the more complete
picture, making a distinction between 'building', 'installing' and
'testing'.  This should permit more intelligent caching of the
setup/config stages, assuming that's needed (for performance).
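
For example (a hypothetical invocation, not part of this commit), a re-run
that already has packages built and installed could select only the test
stages:

    ansible-playbook -i <inventory> <playbook>.yml --tags test --skip-tags build,install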

FIXME: Get 'integration' tests working w/ package instead of source
FIXME: Finish writing `scripts/cri-o_e2e_test.sh`

Signed-off-by: Chris Evich <cevich@redhat.com>
Chris Evich 2017-11-20 14:59:19 -05:00
parent 904cecfbf3
commit a36286de85
22 changed files with 384 additions and 194 deletions


@@ -35,7 +35,7 @@ ask_pass = False
 # implicit - gather by default, turn off with gather_facts: False
 # explicit - do not gather by default, must say gather_facts: True
 #gathering = implicit
-gathering = smart
+gathering = explicit
 # by default retrieve all facts subsets
 # all - gather all subsets

contrib/test/build.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
---
- name: Subjects build packages for testing
  hosts: SubjectHosts
  gather_facts: False  # requires ansible-dependencies
  roles:
    - role: runscript
      execute: 'build.sh'
      basedir: '{{ cri_o_dest_path }}'
      tags:
        - build
        - test


@@ -55,6 +55,9 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-
         '''Return the text to output for a result.'''
         result['_ansible_verbose_always'] = True
+        if '_ansible_no_log' not in result.keys():
+            result['_ansible_no_log'] = False
         save = {}
         for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
             if key in result:
@@ -63,7 +66,7 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-
         output = BASECLASS._dump_results(self, result)  # pylint: disable=protected-access
         for key in ['stdout', 'stderr', 'msg']:
-            if key in save and save[key]:
+            if key in save and save[key] and result['_ansible_no_log'] is False:
                 output += '\n\n%s:\n---\n%s\n---' % (key.upper(), save[key])
         for key, value in save.items():
@@ -74,7 +77,7 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-
     def v2_runner_on_unreachable(self, result):
         self.failed_task = result
-        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+        if self._play.strategy == 'free' and getattr(self, '_last_task_banner', None) != result._task._uuid:
             self._print_task_banner(result._task)
         delegated_vars = result._result.get('_ansible_delegated_vars', None)
@@ -93,7 +96,7 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-
         # Save last failure
         self.failed_task = result
-        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+        if self._play.strategy == 'free' and getattr(self, '_last_task_banner', None) != result._task._uuid:
             self._print_task_banner(result._task)
         delegated_vars = result._result.get('_ansible_delegated_vars', None)


@@ -0,0 +1,7 @@
---
# Absolute path containing subject-subdirectories and top-level artifacts.
collection_dirpath: '{{ playbook_dir }}/artifacts'
# Filename for jUnit xml file combining all subject jUnit xml files of the same name
collection_canonical_junit: "junit_03.xml"


@@ -12,10 +12,6 @@ cri_o_dest_path: "{{ go_path }}/src/github.com/kubernetes-incubator/cri-o"
 # Subject-host, absolute source path for files to be synchronized to control-host
 artifacts: "/tmp/artifacts"  # Base-directory for collection
-# Subject-host, absolute dest. path on control-host where results should be synchronized to.
-collection_dirpath: '{{ lookup("env","WORKSPACE") |
-                        default(playbook_dir, True) }}/artifacts/{{ inventory_hostname }}'
 # List of absolute paths to extra filenames to collect into {{ artifacts }}.
 # Non-existing files and any name-collisions will be skipped.
 extra_artifact_filepaths:
@@ -38,12 +34,10 @@ generated_artifacts:
   customcluster.service: 'journalctl --unit customcluster.service --no-pager --all --lines=all'
   systemd-journald.service: 'journalctl --unit systemd-journald.service --no-pager --all --lines=all'
-# Subject-host, artifacts relative, junit file paths
-# for ``parse2junit.py`` input.
+# Input filenames for parse2junit.py if found within artifacts directory
 parsed_artifacts:
-  - "./testout.txt"
-  - "./junit_01.xml"
-# Subject-host, artifacts relative, jUnit file path for
-# ``parse2junit.py`` output.
-canonical_junit: "./junit_01.xml"
+  - "integration_results.txt"
+  - "junit_01.xml"
+# Output filename for parse2junit.py to combining all parsed, parsed_artifacts files
+canonical_junit: "junit_02.xml"

contrib/test/install.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
---
- name: Subjects install packages
  hosts: SubjectHosts
  gather_facts: False  # requires ansible-dependencies
  roles:
    - role: runscript
      execute: "install.sh"
      basedir: '{{ cri_o_dest_path }}'
      tags:
        - install
        - test


@@ -0,0 +1,14 @@
# Standard Ansible ini-like inventory format. Ref:
# http://docs.ansible.com/ansible/latest/intro_inventory.html#hosts-and-groups

# The host executing the playbook. This will always be localhost, referenced
# here to provide clarity in the playbooks, and apply specific group_vars.
[ControlHost]
localhost

# This group contains all hosts which are being tested. This may or may not
# include the control-host (i.e. localhost). If the control-host is NOT in
# this group, additional results-processing will be performed to gather
# remote results locally.
[SubjectHosts]
localhost


@@ -1,53 +1,7 @@
 ---
-- hosts: '{{ subjects | default("all") }}'
-  gather_facts: False  # requires ansible-dependencies
-  tags:  # FIXME: fudging past origin-ci use of tags
-    - e2e
-    - integration
-  roles:
-    - ansible_dependencies
-- hosts: '{{ subjects | default("all") }}'
-  gather_facts: True
-  gather_subset: network
-  vars_files:
-    - "{{ playbook_dir }}/vars.yml"
-  tags:  # FIXME: fudging past origin-ci use of tags
-    - e2e
-    - integration
-  pre_tasks:
-    - name: Subject's bring in group-vars based on their ansible_distribution
-      group_by:
-        key: "{{ ansible_distribution }}"
-  roles:
-    - role: yumrepos
-    - role: test_subject
-    - role: runscript
-      execute: 'setup.sh'
-    - role: runscript
-      execute: 'build.sh'
-      basedir: '{{ cri_o_dest_path }}'
-    - role: runscript
-      execute: "install.sh"
-      basedir: '{{ cri_o_dest_path }}'
-    - role: runscript
-      execute: "cri-o_smoke_test.sh"
-    - role: runscript
-      execute: "cri-o_integration_test.sh"
-      basedir: '{{ cri_o_dest_path }}'
-  post_tasks:
-    - name: Execute Kubernetes e2e tests
-      include: includes/e2e.yml
+- include: setup.yml
+- include: build.yml
+- include: install.yml
+- include: test.yml
+- include: results.yml


@@ -1,99 +1,16 @@
 ---
-# vim-syntax: ansible
-- hosts: '{{ subjects | default("all") }}'
-  vars_files:
-    - "{{ playbook_dir }}/vars.yml"
-  # FIXME: Make a role out of this task-list
-  tasks:
-    - name: Verify expectations
-      assert:
-        that:
-          # Combined "is defined" and "isn't blank" check
-          - 'artifacts | default("", True) | trim | length'
-          - 'generated_artifacts | default("", True) | trim | length'
-          - 'extra_artifact_filepaths is defined'
-          - 'parsed_artifacts is defined'
-          - 'canonical_junit is defined'
-    - name: artifacts directory exists
-      file:
-        path: "{{ artifacts }}"
-        state: directory
-    - name: Extra artifacts are collected, except missing or with clashing filenames
-      command: 'cp --no-clobber --verbose "{{ item }}" "{{ artifacts }}/"'
-      failed_when: False
-      ignore_errors: True
-      with_items: '{{ extra_artifact_filepaths }}'
-    - name: Generated artifacts directory exists
-      file:
-        path: "{{ artifacts }}/generated"
-        state: directory
-    - name: Generated artifacts are produced
-      shell: '{{ item.value }} &> {{ item.key | basename }}.txt'
-      args:
-        chdir: "{{ artifacts }}/generated"
-        creates: "{{ artifacts }}/generated/{{ item.key | basename }}.txt"
-      failed_when: False
-      ignore_errors: True
-      with_dict: "{{ generated_artifacts }}"
-    - name: Wrapper and script path is buffered
-      set_fact:
-        # Full paths needed b/c command below runs over a list of files in parsed_artifacts
-        result: '{{ cri_o_dest_path }}/contrib/test/venv-cmd.sh {{ cri_o_dest_path }}/contrib/test/parse2junit.py'
-    - name: Subject produces a single canonical jUnit file by combining parsed_artifacts
-      command: '{{ result }} {{ parsed_artifacts | join(" ") }} "{{ canonical_junit }}"'
-      args:
-        chdir: "{{ artifacts }}"
-      environment:
-        ARTIFACTS: "{{ artifacts }}"
-      ignore_errors: True
-    - name: A subdirectory exists for this subject's artifacts
-      file:
-        path: "{{ collection_dirpath }}"
-        state: directory
-      delegate_to: '{{ control_host | default("", True) | trim }}'
-      when: control_host | default("", True) | trim
-    - name: Artifacts are retrieved from subjects
-      synchronize:
-        archive: False  # Don't bother with permissions or times
-        checksum: True  # Don't rely on date/time being in sync
-        copy_links: True  # We want files, not links to files
-        recursive: True
-        mode: "pull"
-        src: '{{ artifacts }}/'
-        dest: '{{ collection_dirpath }}'
-      no_log: True  # super-duper noisy
+- name: Subjects generate and gather their artifacts
+  hosts: SubjectHosts
+  roles:
+    - role: subject_artifacts
+  tags:
+    - results
-- hosts: '{{ control_host | default("NoHost") }}'
-  vars_files:
-    - "{{ playbook_dir }}/vars.yml"
-  tasks:
-    - name: The paths of canonical_junit files from all subjects are found
-      find:
-        paths:
-          - '{{ collection_dirpath }}'
-        patterns: "{{ canonical_junit | basename }}"
-        recurse: True
-      register: result
-    - name: Found paths are joined together into a single string
-      set_fact:
-        result: '{{ result.files | map(attribute="path") | join(" ") }}'
-    - name: The control host produces a top-level junit, combining all subject's canonical_junits
-      script: '{{ playbook_dir }}/parse2junit.py {{ result }} "{{ canonical_junit | basename }}"'
-      args:
-        chdir: "{{ collection_dirpath }}"
-      ignore_errors: True
-      when: result | trim | length
+- name: Non-subject control-host finalizes artifacts for downstream consumption
+  hosts: ControlHost:!SubjectHosts
+  roles:
+    - role: control_artifacts
+  tags:
+    - results


@@ -12,3 +12,8 @@
   raw: $(type -P dnf || type -P yum || echo ) install -y {{ ansible_dependencies }}
   failed_when: False  # Any missing & required packages will show up as failures in setup
   changed_when: True
+
+- name: Subject host's networking facts are gathered
+  setup:
+    # other modules are simply not needed
+    gather_subset: network


@@ -0,0 +1 @@
../../subject_artifacts/files/parse2junit.py


@@ -0,0 +1,36 @@
---
- name: Input expectations are verified
  assert:
    that:
      # Ensure control-host is NOT a subject for this role's tasks
      - 'inventory_hostname in groups["ControlHost"]'
      - 'inventory_hostname not in groups["SubjectHosts"]'
      # Combined "is defined" and "isn't blank" check
      - 'collection_dirpath | default("", True) | trim | length'
      - 'collection_canonical_junit | default("", True) | trim | length'

- name: The collection_dirpath directory exists
  file:
    path: "{{ collection_dirpath }}"
    state: directory

- name: The paths of canonical_junit files from all subjects are found
  find:
    paths:
      - '{{ collection_dirpath }}'
    # Subjects may produce more than one xml file, search exact file names
    patterns: "{{ hostvars | map('extract', hostvars, 'canonical_junit') | list | unique }}"
    recurse: True
  register: result

- name: Located subject canonical_junit files buffered into space-separated string
  set_fact:
    result: '{{ result.files | map(attribute="path") | join(" ") }}'

- name: The control host produces a top-level jUnit xml, combining all subject's canonical_junits
  script: '{{ playbook_dir }}/parse2junit.py {{ result }} "{{ collection_canonical_junit }}"'
  args:
    chdir: "{{ collection_dirpath }}"
  ignore_errors: True
  when: result | trim | length


@@ -0,0 +1,15 @@
---
# Flag set True to reboot the host
needs_reboot: False

# Time to wait for ssh port to close
shutdown_timeout: 30

# Maximum time to wait for system to become available again (in seconds)
bootup_timeout: 300

# Timeout in (integer) seconds to wait for timeouts and retries when
# confirming host is accessible. The default (13) comes from two DNS
# timeouts + one second
wait_for_timeout: 13


@@ -0,0 +1,38 @@
---
- assert:
    that:
      - 'needs_reboot | bool in [True, False]'
      - 'shutdown_timeout | default(0, True) | int > 0'
      - 'bootup_timeout | default(0, True) | int > 0'

- block:
    - name: Reboot System
      shell: sleep 5 && shutdown -r now
      async: 120
      poll: 0
      ignore_errors: true
      changed_when: True

    - name: System started shutting down
      wait_for:
        host: '{{ ansible_host | default(inventory_hostname) }}'
        state: stopped
        timeout: '{{ shutdown_timeout }}'
        connect_timeout: 1
      delegate_to: 'localhost'

    - name: Ansible universal accessibility command is executed
      raw: "/bin/true"
      register: result
      changed_when: result | success
      until: result | success
      retries: 12
      delay: 10
  when: needs_reboot == True

- name: needs_reboot flag is set false
  set_fact:
    needs_reboot: False


@@ -25,30 +25,24 @@
 - debug:
     msg: 'Script {{ execute }} will be sent to subject-host, & run from {{ basedir }}) with arguments: {{ ansible_distribution }} {{ artifacts }}'
-- block:
-    - name: The script is executed
-      script: '{{ playbook_dir }}/scripts/{{ execute }} {{ ansible_distribution }}'
-      args:
-        chdir: "{{ basedir }}"
-        creates: "/var/tmp/{{ execute }}_done"
-      register: result
-    - name: The script's touchstone file is touched
-      file:
-        path: "/var/tmp/{{ execute }}_done"
-        state: touch
-      when: result | success
-  always:
-    - name: The script result is logged
-      blockinfile:
-        path: '{{ artifacts }}/{{ execute }}.txt'
-        marker: '# {mark} execution result of cri-o/contrib/test/scripts/{{ execute }} at {{ ansible_date_time.iso8601 }}'
-        block: '{{ lookup("template", role_path ~ "/templates/result_format.j2") }}'
-        create: true
-      when: '"stdout" in result'
-    - fail:
-      when: result | failed
+- name: The script is executed
+  script: '{{ playbook_dir }}/scripts/{{ execute }} {{ ansible_distribution }}'
+  args:
+    chdir: "{{ basedir }}"
+    creates: "/var/tmp/{{ execute }}_done"
+  ignore_errors: True  # Allow further tasks to execute
+  register: result
+
+- name: The script's touchstone file is touched
+  file:
+    path: "/var/tmp/{{ execute }}_done"
+    state: touch
+  when: result | success
+
+- name: The script result is logged
+  blockinfile:
+    path: '{{ artifacts }}/{{ execute }}.txt'
+    marker: '# {mark} execution result of cri-o/contrib/test/scripts/{{ execute }} at {{ ansible_date_time.iso8601 }}'
+    block: '{{ lookup("template", role_path ~ "/templates/result_format.j2") }}'
+    create: true
+  when: '"stdout" in result'


@@ -0,0 +1,95 @@
---
# vim-syntax: ansible

- name: Input expectations are verified
  assert:
    that:
      # Control-host can be a subject as well
      - 'inventory_hostname in groups.SubjectHosts'
      # Combined "is defined" and "isn't blank" check
      - 'artifacts | default("", True) | trim | length'
      - 'extra_artifact_filepaths is defined'
      - 'generated_artifacts is defined'
      - 'parsed_artifacts is defined'
      - 'canonical_junit is defined'

- name: Top-level artifacts directory exists
  file:
    path: "{{ artifacts }}"
    state: directory

- block:
    - name: Extra artifacts subdirectory exists
      file:
        path: "{{ artifacts }}/extra"
        state: directory
    - name: Extra artifacts are collected, except missing or with clashing filenames
      command: 'cp --no-clobber --verbose "{{ item }}" "{{ artifacts }}/extra/"'
      args:
        creates: "{{ artifacts }}/extra/{{ item }}"
        chdir: "/"
      ignore_errors: True  # Don't abort play because a command fails
      failed_when: False  # Don't make a spectacle of a failed command
      with_items: '{{ extra_artifact_filepaths }}'
  when: extra_artifact_filepaths | length

- block:
    - name: Generated artifacts subdirectory exists
      file:
        path: "{{ artifacts }}/generated"
        state: directory
      when: generated_artifacts | length
    - name: Generated artifacts are produced
      shell: '{{ item.value }} &> {{ item.key | basename }}.txt'
      args:
        chdir: "{{ artifacts }}/generated"
      ignore_errors: True  # Don't abort play because a command fails
      failed_when: False  # Don't make a spectacle of a failed command
      with_dict: "{{ generated_artifacts }}"
  when: generated_artifacts | length

- block:
    - name: Virtual-env wrapper path is buffered
      set_fact:
        result: '{{ cri_o_dest_path }}/contrib/test/venv-cmd.sh'
    - name: jUnit parsing script path is appended to buffer
      set_fact:
        result: '{{ result }} {{ cri_o_dest_path }}/contrib/test/parse2junit.py'
    - name: Subject produces a single canonical jUnit file by combining parsed_artifacts
      command: '{{ result }} {{ parsed_artifacts | join(" ") }} "{{ canonical_junit }}"'
      args:
        chdir: "{{ artifacts }}"
        creates: "{{ canonical_junit }}"
      environment:
        # Needed by venv-cmd.sh for logging
        ARTIFACTS: "{{ artifacts }}"
      ignore_errors: True
  when: parsed_artifacts | length

- name: Non-subject control-host is synchronized with remote subject-host artifacts
  block:
    - name: The complete control-host path for this host's artifacts is buffered
      set_fact:
        result: "{{ hostvars[groups.ControlHost[0]].collection_dirpath }}/{{ inventory_hostname }}"
    - name: A subdirectory exists for this subject's artifacts
      file:
        path: "{{ result }}"
        state: directory
      delegate_to: '{{ groups.ControlHost[0] }}'
    - name: Artifacts are retrieved from subject-hosts, onto control-host
      synchronize:
        archive: False  # Don't bother with permissions or times
        checksum: True  # Don't rely on date/time being in sync
        copy_links: True  # We want files, not links to files
        recursive: True
        mode: "pull"
        src: '{{ artifacts }}/'  # N/B: rsync path format
        dest: '{{ result }}/'
      no_log: True  # super-duper noisy
  when: groups.ControlHost | length and inventory_hostname not in groups['ControlHost']


@@ -0,0 +1,9 @@
#!/bin/bash
set -ex
# Set by 'runscript' role
DISTRO="$1"
ARTIFACTS="$2"
# FIXME: see 6da7193f


@@ -1,11 +1,19 @@
----
-- name: enable and start CRI-O
-  systemd:
-    name: crio
-    state: started
-    enabled: yes
-    daemon_reload: yes
+#!/bin/bash
+set -ex
+
+# Set by 'runscript' role
+DISTRO="$1"
+ARTIFACTS="$2"
+
+# Restarting CRI-O service
+systemctl --no-pager restart cri-o
+# Dump the CRI-O service journal
+journalctl --unit cri-o --no-pager
+# Fail if CRI-O service is not active
+systemctl is-active cri-o || exit $?
 - name: update the server address for the custom cluster
   lineinfile:


@@ -22,7 +22,7 @@ if [ "$DISTRO" == "RedHat" ] || [ "$DISTRO" == "Fedora" ]
 then
 export STORAGE_OPTIONS='--storage-driver=overlay --storage-opt overlay.override_kernel_check=1'
 else
-export export STORAGE_OPTIONS='--storage-driver=overlay'
+export STORAGE_OPTIONS='--storage-driver=overlay'
 fi
 ./test/test_runner.sh | tee /tmp/artifacts/integration_results.txt"

contrib/test/setup.yml (new file, 47 lines)

@@ -0,0 +1,47 @@
---
- name: Non-subject-host Control-host is initialized
  hosts: ControlHost:!SubjectHosts
  tags:
    - setup
  pre_tasks:
    - debug: var=group_names
  tasks:
    - name: Control-host facts are gathered
      setup:
        gather_subset: network  # other facts simply not needed
    - name: Minimum ansible version in use
      assert:
        that: 'ansible_version.string | version_compare("2.4.1", ">=")'

- name: Subject-hosts are initialized and setup
  hosts: SubjectHosts
  tags:
    - setup
  pre_tasks:
    - debug: var=group_names
  roles:
    - role: ansible_dependencies  # also gathers facts
    - role: yumrepos
    - role: test_subject
    - role: runscript
      execute: 'setup.sh'

- name: The control-host is never to be rebooted
  hosts: SubjectHosts:!ControlHost
  tags:
    - setup
  tasks:
    - role: rebooted
      needs_reboot: '{{ needs_reboot | default(False) }}'

contrib/test/test.yml (new file, 29 lines)

@@ -0,0 +1,29 @@
---
- name: Subject-hosts execute tests
  hosts: SubjectHosts
  gather_facts: False  # requires ansible-dependencies
  roles:
    - role: runscript
      execute: "cri-o_smoke_test.sh"
      tags:
        - smoke
        - test
    - role: runscript
      execute: "cri-o_integration_test.sh"
      tags:
        - integration
        - test
    - role: runscript
      execute: "cri-o_cri_test.sh"
      tags:
        - integration
        - test
    - role: runscript
      execute: "cri-o_e2e_test.sh"
      tags:
        - e2e
        - test