Add results playbook and processing script

* parse2junit.py converts integration test output (testout.txt) into jUnit
  format.  It then combines this with the e2e test jUnit output, adds
  identifying information, and packs both into a single jUnit file covering
  all tests.
* results.yml playbook collects testing details from test subjects into a
  central directory.  Some of those details are generated by executing
  commands as specified in vars.yml.  Finally, it uses parse2junit.py itself
  to assemble the final, canonical jUnit output file with results for all
  tests and all hosts.
* vars.yml variables file updated with result production/collection details.

Signed-off-by: Chris Evich <cevich@redhat.com>
parent 4f78cb5c80
commit 904cecfbf3
4 changed files with 455 additions and 1 deletion
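For orientation: parse2junit.py takes one or more input files followed by a single output path ('-' for stdout).  Given the vars.yml defaults below, the per-subject invocation that results.yml ends up performing amounts to ./parse2junit.py ./testout.txt ./junit_01.xml ./junit_01.xml, i.e. the canonical output intentionally lands on top of one of its inputs (hence the -b/--backup option).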
contrib/test/parse2junit.py (new executable file, 313 lines)
@@ -0,0 +1,313 @@
#!/usr/bin/env python3

# encoding: utf-8

# N/B: Assumes script was called from cri-o repository on the test subject,
# with a remote name of 'origin'.  It's executing under the results.yml
# playbook, which in turn was executed by venv-ansible-playbook.sh
# i.e. everything in requirements.txt is already available
#
# Also Requires:
# python 2.7+
# git

import os
import sys
import argparse
import re
import contextlib
import uuid
from socket import gethostname
import subprocess
from tempfile import NamedTemporaryFile
# Ref: https://github.com/gastlygem/junitparser
import junitparser

# Parser function suffixes and regex patterns of supported input filenames
TEST_TYPE_FILE_RE = dict(integration=re.compile(r'testout\.txt'),
                         e2e=re.compile(r'junit_\d+\.xml'))
INTEGRATION_TEST_COUNT_RE = re.compile(r'^(?P<start>\d+)\.\.(?P<end>\d+)')
INTEGRATION_SKIP_RE = re.compile(r'^(?P<stat>ok|not ok) (?P<tno>\d+) # skip'
                                 r' (?P<sreason>\(.+\)) (?P<desc>.+)')
INTEGRATION_RESULT_RE = re.compile(r'^(?P<stat>ok|not ok) (?P<tno>\d+) (?P<desc>.+)')
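# For reference, the bats TAP stream these patterns are written against
# looks roughly like this (illustrative lines, not real test names):
#
#   1..572
#   ok 1 crictl runtimeversion
#   not ok 2 image volume ignore
#   ok 3 # skip (test requires root) some rootful test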
def d(msg):
    if msg:
        try:
            sys.stderr.write('{}\n'.format(msg))
            sys.stderr.flush()
        except IOError:
            pass


@contextlib.contextmanager
def if_match(line, regex):
    # Yields the regex match (or None) so callers can write
    # "with if_match(...) as match: if match:" blocks.
    # __enter__
    match = regex.search(line)
    if match:
        yield match
    else:
        yield None
    # __exit__
    pass  # Do nothing


def if_case_add(suite, line_parser, *parser_args, **parser_dargs):
    case = line_parser(*parser_args, **parser_dargs)
    if case:
        suite.add_testcase(case)
def parse_integration_line(line, classname):
    name_fmt = "[CRI-O] [integration] #{} {}"
    # Skip lines also match INTEGRATION_RESULT_RE, so try the more
    # specific pattern first
    with if_match(line, INTEGRATION_SKIP_RE) as match:
        if match:
            name = name_fmt.format(match.group('tno'), match.group('desc'))
            case = junitparser.TestCase(name)
            case.classname = classname
            case.result = junitparser.Skipped(message=match.group('sreason'))
            case.system_err = match.group('stat')
            return case
    with if_match(line, INTEGRATION_RESULT_RE) as match:
        if match:
            name = name_fmt.format(match.group('tno'), match.group('desc'))
            case = junitparser.TestCase(name)
            case.classname = classname
            case.system_err = match.group('stat')
            if match.group('stat') == 'not ok':
                # Can't think of anything better to put here
                case.result = junitparser.Failed('not ok')
            elif not match.group('stat') == 'ok':
                # Unreachable given the regex; kept as a safety net
                case.result = junitparser.Error(match.group('stat'))
            return case
    return None
# N/B: name suffix corresponds to key in TEST_TYPE_FILE_RE
def parse_integration(input_file_path, hostname):
    suite = junitparser.TestSuite('CRI-O Integration suite')
    suite.hostname = hostname
    suite_stdout = []
    classname = 'CRI-O integration suite'
    n_tests = -1  # No tests ran
    d(" Processing integration results for {}".format(suite.hostname))
    with open(input_file_path) as testout_txt:
        for line in testout_txt:
            line = line.strip()
            suite_stdout.append(line)  # Basically a copy of the file
            # n_tests must come first
            with if_match(line, INTEGRATION_TEST_COUNT_RE) as match:
                if match:
                    n_tests = int(match.group('end')) - int(match.group('start')) + 1
                    d(" Collecting results from {} tests".format(n_tests))
                    break
        if n_tests > 0:
            for line in testout_txt:
                line = line.strip()
                suite_stdout.append(line)
                if_case_add(suite, parse_integration_line,
                            line=line, classname=classname)
        else:
            d(" Uh oh, no results found, skipping.")
            return None
    # TODO: No date/time recorded in file
    #stat = os.stat(input_file_path)
    #test_start = stat.st_mtime
    #test_end = stat.st_atime
    #duration = test_end - test_start
    suite.time = 0
    suite.add_property('stdout', '\n'.join(suite_stdout))

    d(" Parsed {} integration test cases".format(len(suite)))
    return suite
def flatten_testsuites(testsuites):
    # The jUnit format allows nesting testsuites, squash into a list for simplicity
    if isinstance(testsuites, junitparser.TestSuite):
        testsuite = testsuites  # for clarity
        return [testsuite]
    result = []
    for testsuite in testsuites:
        if isinstance(testsuite, junitparser.TestSuite):
            result.append(testsuite)
        elif isinstance(testsuite, junitparser.JUnitXml):
            nested_suites = flatten_testsuites(testsuite)
            if nested_suites:
                result += nested_suites
    return result
def find_k8s_e2e_suite(testsuites):
    testsuites = flatten_testsuites(testsuites)
    for testsuite in testsuites:
        if testsuite.name and 'Kubernetes e2e' in testsuite.name:
            return testsuite
        # Name could be None or wrong, check classnames of all tests
        # (guard against None classnames; all([]) is True, so skip empty suites)
        classnames = ['Kubernetes e2e' in (x.classname or '') for x in testsuite]
        if classnames and all(classnames):
            return testsuite
    return None
# N/B: name suffix corresponds to key in TEST_TYPE_FILE_RE
def parse_e2e(input_file_path, hostname):
    # Load junit_xx.xml file, update contents with more identifying info.
    try:
        testsuites = junitparser.JUnitXml.fromfile(input_file_path)
        suite = find_k8s_e2e_suite(testsuites)
    except junitparser.JUnitXmlError as xcept:
        d(" Error parsing {}, skipping it.: {}".format(input_file_path, xcept))
        return None
    if not suite:
        d(" Failed to find any e2e results in {}".format(input_file_path))
        return None
    if not suite.hostname:
        suite.hostname = hostname
    if not suite.name:
        suite.name = 'Kubernetes e2e suite'
    d(" Processing e2e results for {}".format(suite.hostname))
    for testcase in suite:
        if not testcase.classname:
            d(" Adding missing classname to case {}".format(testcase.name))
            testcase.classname = "Kubernetes e2e suite"
    d(" Parsed {} e2e test cases".format(len(suite)))
    if not suite.time:
        stat = os.stat(input_file_path)
        test_start = stat.st_ctime
        test_end = stat.st_mtime
        duration = test_end - test_start
        if duration:
            suite.time = duration
    return testsuites  # Retain original structure
def parse_test_output(ifps, results_name, hostname):
    time_total = 0
    testsuites = junitparser.JUnitXml(results_name)
    # Cheat, lookup parser function name suffix from global namespace
    _globals = globals()
    for input_file_path in ifps:
        if not os.path.isfile(input_file_path):
            d(" The file {} doesn't appear to exist, skipping it.".format(input_file_path))
            continue
        parser = None
        for tname, regex in TEST_TYPE_FILE_RE.items():
            if regex.search(input_file_path):
                parser = _globals.get('parse_{}'.format(tname))
                break
        else:
            d(" Could not find parser to handle input"
              " file {}, skipping.".format(input_file_path))
            continue

        d(" Parsing {} using {}".format(input_file_path, parser))
        for parsed_testsuite in flatten_testsuites(parser(input_file_path, hostname)):
            d(" Adding {} suite for {}".format(parsed_testsuite.name, parsed_testsuite.hostname))
            testsuites.add_testsuite(parsed_testsuite)
            if parsed_testsuite.time:
                time_total += parsed_testsuite.time
    testsuites.time = time_total
    return testsuites
def make_host_name():
    subject = '{}'.format(gethostname())
    # Origin-CI doesn't use very distinguishable hostnames :(
    if 'openshiftdevel' in subject or 'ip-' in subject:
        try:
            with open('/etc/machine-id') as machineid:
                subject = 'machine-id-{}'.format(machineid.read().strip())
        except IOError:  # Worst-case, but we gotta pick sumpfin
            subject = 'uuid-{}'.format(uuid.uuid4())
    return subject
def make_results_name(argv):
    script_dir = os.path.dirname(argv[0])
    spco = lambda cmd: subprocess.check_output(cmd.split(' '),
                                               stderr=subprocess.STDOUT,
                                               close_fds=True,
                                               cwd=script_dir,
                                               universal_newlines=True)
    pr_no = None
    head_id = None
    try:
        # Strip the trailing newline, otherwise the substring check
        # below can never match
        head_id = spco('git rev-parse HEAD').strip()
        # Each ls-remote line looks like "<commit-id><tab>refs/pull/<number>/head"
        for line in spco('git ls-remote origin refs/pull/[0-9]*/head').strip().splitlines():
            cid, ref = line.strip().split(None, 1)
            if head_id in cid:
                pr_no = ref.strip().split('/')[2]
                break
    except subprocess.CalledProcessError:
        pass

    if pr_no:
        return "CRI-O Pull Request {}".format(pr_no)
    elif head_id:
        return "CRI-O Commit {}".format(head_id[:8])
    else:  # Worst-case, but we gotta pick sumpfin
        return "CRI-O Run ID {}".format(uuid.uuid4())
def main(argv):
    if sys.version_info[0] < 3:
        # Python 2 only: neither call exists (or is needed) on Python 3
        reload(sys)
        sys.setdefaultencoding('utf8')
    parser = argparse.ArgumentParser(epilog='Note: The parent directory of input files is'
                                            ' assumed to be the test suite name')
    parser.add_argument('-f', '--fqdn',
                        help="Alternative hostname to add to results if none present",
                        default=make_host_name())
    parser.add_argument('-b', '--backup', action="store_true",
                        help="If output file name matches any input file, backup with"
                             " 'original_' prefix",
                        default=False)
    parser.add_argument('ifps', nargs='+',
                        help='Input file paths to test output from {}.'
                             ''.format(TEST_TYPE_FILE_RE.keys()))
    parser.add_argument('ofp', nargs=1,
                        default='-',
                        help='Output file path for jUnit XML, or "-" for stdout')
    options = parser.parse_args(argv[1:])
    ofp = options.ofp[0]  # nargs==1 still puts it into a list
    results_name = make_results_name(argv)

    d("Parsing {} to {}".format(options.ifps, ofp))
    d("Using results name: {} and hostname {}".format(results_name, options.fqdn))
    # Parse all results
    new_testsuites = parse_test_output(options.ifps, results_name, options.fqdn)

    if not len(new_testsuites):
        d("Uh oh, doesn't look like anything was processed.  Bailing out")
        return None

    d("Parsed {} suites".format(len(new_testsuites)))

    # etree can't handle files w/o filenames :(
    # Text mode so .read() returns str (Python 2's 'bufsize' argument
    # does not exist on Python 3)
    tmp = NamedTemporaryFile(mode='w+', suffix='.tmp', prefix=results_name)
    new_testsuites.write(tmp.name)
    tmp.seek(0)
    del new_testsuites  # close up any open files
    if ofp == '-':
        sys.stdout.write('\n{}\n'.format(tmp.read()))
    else:
        for ifp in options.ifps:
            if not os.path.isfile(ofp):
                break  # Nothing can clash with a not-yet-existing output file
            if os.path.samefile(ifp, ofp):
                if not options.backup:
                    d("Warning {} will be combined with other input files."
                      "".format(ofp))
                    break
                dirname = os.path.dirname(ofp)
                basename = os.path.basename(ofp)
                origname = 'original_{}'.format(basename)
                os.rename(ofp, os.path.join(dirname, origname))
                break
        with open(ofp, 'w', 1) as output_file:
            output_file.truncate(0)
            output_file.flush()
            d("Writing {}".format(ofp))
            output_file.write(tmp.read())


if __name__ == '__main__':
    main(sys.argv)
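Downstream consumers can read the combined file back with the same junitparser API the script itself uses.  A minimal sketch (file path illustrative):

import junitparser

results = junitparser.JUnitXml.fromfile('junit_01.xml')
for suite in results:  # Iterating a JUnitXml yields TestSuite objects
    print(suite.name, suite.hostname, len(suite))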
requirements.txt
@@ -51,3 +51,9 @@ six==1.11.0 --hash=sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87
virtualenv==15.1.0 --hash=sha256:39d88b533b422825d644087a21e78c45cf5af0ef7a99a1fc9fbb7b481e5c85b0
pip==9.0.1 --hash=sha256:690b762c0a8460c303c089d5d0be034fb15a5ea2b75bdf565f40421f542fefb0

# Needed for results processing
future==0.16.0 --hash=sha256:e39ced1ab767b5936646cedba8bcce582398233d6a627067d4c6a454c90cfedb
junitparser==1.0.0 --hash=sha256:5b0f0ffeef3548878b5ae2cac40b5b128ae18337e2a260a8265f5519b52c907c \
    --hash=sha256:789b99899f1545675ec09957dcc605a6bb88de322ea9d1e5c1dbd288b682835a
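Note: because these requirements carry --hash values, pip installs them in hash-checking mode, keeping the virtualenv contents reproducible and tamper-evident.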
contrib/test/results.yml (new file, 99 lines)
@@ -0,0 +1,99 @@
---
# vim-syntax: ansible

- hosts: '{{ subjects | default("all") }}'
  vars_files:
    - "{{ playbook_dir }}/vars.yml"

  # FIXME: Make a role out of this task-list
  tasks:
    - name: Verify expectations
      assert:
        that:
          # Combined "is defined" and "isn't blank" check
          - 'artifacts | default("", True) | trim | length'
          - 'generated_artifacts | default("", True) | trim | length'
          - 'extra_artifact_filepaths is defined'
          - 'parsed_artifacts is defined'
          - 'canonical_junit is defined'

    - name: artifacts directory exists
      file:
        path: "{{ artifacts }}"
        state: directory

    - name: Extra artifacts are collected, except missing or with clashing filenames
      command: 'cp --no-clobber --verbose "{{ item }}" "{{ artifacts }}/"'
      failed_when: False
      ignore_errors: True
      with_items: '{{ extra_artifact_filepaths }}'

    - name: Generated artifacts directory exists
      file:
        path: "{{ artifacts }}/generated"
        state: directory

    - name: Generated artifacts are produced
      shell: '{{ item.value }} &> {{ item.key | basename }}.txt'
      args:
        chdir: "{{ artifacts }}/generated"
        creates: "{{ artifacts }}/generated/{{ item.key | basename }}.txt"
      failed_when: False
      ignore_errors: True
      with_dict: "{{ generated_artifacts }}"
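    # Each command's combined stdout/stderr lands in
    # {{ artifacts }}/generated/<filename>.txt, e.g. installed_packages.log.txt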
    - name: Wrapper and script path is buffered
      set_fact:
        # Full paths needed b/c command below runs over a list of files in parsed_artifacts
        result: '{{ cri_o_dest_path }}/contrib/test/venv-cmd.sh {{ cri_o_dest_path }}/contrib/test/parse2junit.py'

    - name: Subject produces a single canonical jUnit file by combining parsed_artifacts
      command: '{{ result }} {{ parsed_artifacts | join(" ") }} "{{ canonical_junit }}"'
      args:
        chdir: "{{ artifacts }}"
      environment:
        ARTIFACTS: "{{ artifacts }}"
      ignore_errors: True

    - name: A subdirectory exists for this subject's artifacts
      file:
        path: "{{ collection_dirpath }}"
        state: directory
      delegate_to: '{{ control_host | default("", True) | trim }}'
      when: control_host | default("", True) | trim

    - name: Artifacts are retrieved from subjects
      synchronize:
        archive: False  # Don't bother with permissions or times
        checksum: True  # Don't rely on date/time being in sync
        copy_links: True  # We want files, not links to files
        recursive: True
        mode: "pull"
        src: '{{ artifacts }}/'
        dest: '{{ collection_dirpath }}'
      no_log: True  # super-duper noisy


- hosts: '{{ control_host | default("NoHost") }}'
  vars_files:
    - "{{ playbook_dir }}/vars.yml"

  tasks:
    - name: The paths of canonical_junit files from all subjects are found
      find:
        paths:
          - '{{ collection_dirpath }}'
        patterns: "{{ canonical_junit | basename }}"
        recurse: True
      register: result

    - name: Found paths are joined together into a single string
      set_fact:
        result: '{{ result.files | map(attribute="path") | join(" ") }}'

    - name: The control host produces a top-level junit, combining all subjects' canonical_junits
      script: '{{ playbook_dir }}/parse2junit.py {{ result }} "{{ canonical_junit | basename }}"'
      args:
        chdir: "{{ collection_dirpath }}"
      ignore_errors: True
      when: result | trim | length
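As noted in the parse2junit.py header, this playbook is expected to run under the repository's venv-ansible-playbook.sh wrapper so the Python dependencies are available.  A hypothetical invocation (inventory name and extra-vars illustrative, assuming the wrapper forwards its arguments to ansible-playbook): ./venv-ansible-playbook.sh -i hosts -e control_host=localhost contrib/test/results.yml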
contrib/test/vars.yml
@@ -9,5 +9,41 @@ cri_o_src_path: "{{ playbook_dir }}/../../"

# Absolute path on subject-host where cri-o source is expected
cri_o_dest_path: "{{ go_path }}/src/github.com/kubernetes-incubator/cri-o"

# For results.yml.  Paths use rsync 'source' conventions.
# Subject-host, absolute source path for files to be synchronized to control-host.
artifacts: "/tmp/artifacts"  # Base-directory for collection

# Subject-host, absolute dest. path on control-host where results should be synchronized to.
collection_dirpath: '{{ lookup("env","WORKSPACE") |
                        default(playbook_dir, True) }}/artifacts/{{ inventory_hostname }}'

# List of absolute paths to extra filenames to collect into {{ artifacts }}.
# Non-existing files and any name-collisions will be skipped.
extra_artifact_filepaths:
  - "/go/src/k8s.io/kubernetes/e2e.log"
  - "/tmp/kubelet.log"
  - "/tmp/kube-apiserver.log"
  - "/tmp/kube-controller-manager.log"
  - "/tmp/kube-proxy.log"
  - "/tmp/kube-proxy.yaml"
  - "/tmp/kube-scheduler.log"

# Mapping of generated artifact filenames and their commands.  All
# are relative to {{ artifacts }}/generated/
generated_artifacts:
  installed_packages.log: '$(type -P dnf || type -P yum) list installed'
  avc_denials.log: 'ausearch -m AVC -m SELINUX_ERR -m USER_AVC'
  filesystem.info: 'df -h && sudo pvs && sudo vgs && sudo lvs'
  pid1.journal: 'journalctl _PID=1 --no-pager --all --lines=all'
  crio.service: 'journalctl --unit crio.service --no-pager --all --lines=all'
  customcluster.service: 'journalctl --unit customcluster.service --no-pager --all --lines=all'
  systemd-journald.service: 'journalctl --unit systemd-journald.service --no-pager --all --lines=all'

# Subject-host, artifacts-relative, junit file paths
# for ``parse2junit.py`` input.
parsed_artifacts:
  - "./testout.txt"
  - "./junit_01.xml"

# Subject-host, artifacts-relative, jUnit file path for
# ``parse2junit.py`` output.
canonical_junit: "./junit_01.xml"
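Note: collection_dirpath consults the WORKSPACE environment variable (a Jenkins convention), so under CI each subject's artifacts land in $WORKSPACE/artifacts/<inventory_hostname>; when WORKSPACE is unset, the playbook directory is used as the fallback.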