code | package | path | filename
---|---|---|---|
test | zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/fixtures/config/regex-project/git/org_project1/README | README |
test | zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/fixtures/config/regex-project/git/org_project2/README | README |
# Steps used to create our certs
# Generate CA cert
openssl req -new -newkey rsa:2048 -nodes -keyout root-ca.key -x509 -days 3650 -out root-ca.pem -subj "/C=US/ST=Texas/L=Austin/O=OpenStack Foundation/CN=fingergw-ca"
# Generate server keys
CLIENT='fingergw'
openssl req -new -newkey rsa:2048 -nodes -keyout $CLIENT.key -out $CLIENT.csr -subj "/C=US/ST=Texas/L=Austin/O=OpenStack Foundation/CN=fingergw"
openssl x509 -req -days 3650 -in $CLIENT.csr -out $CLIENT.pem -CA root-ca.pem -CAkey root-ca.key -CAcreateserial
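# Optional sanity check (not part of the original steps; standard
# openssl usage): confirm the signed cert chains back to the CA
openssl verify -CAfile root-ca.pem $CLIENT.pem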
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/fixtures/fingergw/README.rst | README.rst |
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import json
import logging
import os
import re
import textwrap
from datetime import datetime, timedelta
from tests.base import AnsibleZuulTestCase
class FunctionalZuulStreamMixIn:
tenant_config_file = 'config/remote-zuul-stream/main.yaml'
# This should be overridden in child classes.
ansible_version = '6'
def _setUp(self):
self.log_console_port = 19000 + int(
self.ansible_core_version.split('.')[1])
self.executor_server.log_console_port = self.log_console_port
self.wait_timeout = 180
self.fake_nodepool.remote_ansible = True
# This catches the Ansible output, rather than the callback
# output captured in the job log. For example, if the callback
# fails, there will be error output in this stream.
self.logger = logging.getLogger('zuul.AnsibleJob')
self.console_output = io.StringIO()
self.logger.addHandler(logging.StreamHandler(self.console_output))
ansible_remote = os.environ.get('ZUUL_REMOTE_IPV4')
self.assertIsNotNone(ansible_remote)
def _run_job(self, job_name, create=True, split='false'):
# Keep the jobdir around so we can inspect contents if an
# assert fails. It will be cleaned up anyway as it is contained
# in a tmp dir which gets cleaned up after the test.
self.executor_server.keep_jobdir = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
if create:
conf = textwrap.dedent(
"""
- job:
name: {job_name}
run: playbooks/{job_name}.yaml
ansible-version: {version}
ansible-split-streams: {split}
vars:
test_console_port: {console_port}
roles:
- zuul: org/common-config
nodeset:
nodes:
- name: compute1
label: whatever
- name: controller
label: whatever
- project:
check:
jobs:
- {job_name}
""".format(
job_name=job_name,
version=self.ansible_version,
split=split,
console_port=self.log_console_port))
else:
conf = textwrap.dedent(
"""
- project:
check:
jobs:
- {job_name}
""".format(job_name=job_name))
file_dict = {'zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
job = self.getJobFromHistory(job_name)
return job
def _get_job_output(self, build):
path = os.path.join(self.jobdir_root, build.uuid,
'work', 'logs', 'job-output.txt')
with open(path) as f:
return f.read()
def _get_job_json(self, build):
path = os.path.join(self.jobdir_root, build.uuid,
'work', 'logs', 'job-output.json')
with open(path) as f:
return json.loads(f.read())
def _assertLogLine(self, line, log, full_match=True):
pattern = (r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d\:\d\d\.\d\d\d\d\d\d \| %s%s'
% (line, '$' if full_match else ''))
log_re = re.compile(pattern, re.MULTILINE)
m = log_re.search(log)
if m is None:
raise Exception("'%s' not found in log" % (line,))
def assertLogLineStartsWith(self, line, log):
self._assertLogLine(line, log, full_match=False)
def assertLogLine(self, line, log):
self._assertLogLine(line, log, full_match=True)
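# For reference (illustrative sample, not taken from a fixture log): a
# line satisfying assertLogLine(r'controller \| command test one', log)
# looks like
#   2022-08-24 01:02:03.123456 | controller | command test one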
def _getLogTime(self, line, log):
pattern = (r'^(\d\d\d\d-\d\d-\d\d \d\d:\d\d\:\d\d\.\d\d\d\d\d\d)'
r' \| %s\n'
r'(\d\d\d\d-\d\d-\d\d \d\d:\d\d\:\d\d\.\d\d\d\d\d\d)'
% line)
log_re = re.compile(pattern, re.MULTILINE)
m = log_re.search(log)
if m is None:
raise Exception("'%s' not found in log" % (line,))
else:
date1 = datetime.strptime(m.group(1), "%Y-%m-%d %H:%M:%S.%f")
date2 = datetime.strptime(m.group(2), "%Y-%m-%d %H:%M:%S.%f")
return (date1, date2)
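# The two timestamps are those of the matched line and of the line
# immediately following it; callers (see test_command below) use the
# difference to bound how long the surrounding task appeared to run.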
def test_command(self):
job = self._run_job('command')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'SUCCESS')
console_output = self.console_output.getvalue()
# This should be generic enough to match any callback
# plugin failures, which look something like
#
# [WARNING]: Failure using method (v2_runner_on_ok) in \
# callback plugin
# (<ansible.plugins.callback.zuul_stream.CallbackModule object at'
# 0x7f89f72a20b0>): 'dict' object has no attribute 'startswith'"
# Callback Exception:
# ...
#
self.assertNotIn('[WARNING]: Failure using method', console_output)
text = self._get_job_output(build)
data = self._get_job_json(build)
token_stdout = "Standard output test {}".format(
self.history[0].jobdir.src_root)
token_stderr = "Standard error test {}".format(
self.history[0].jobdir.src_root)
result = data[0]['plays'][1]['tasks'][2]['hosts']['compute1']
self.assertEqual("\n".join((token_stdout, token_stderr)),
result['stdout'])
self.assertEqual("", result['stderr'])
self.assertLogLine(
r'RUN START: \[untrusted : review.example.com/org/project/'
r'playbooks/command.yaml@master\]', text)
self.assertLogLine(r'PLAY \[all\]', text)
self.assertLogLine(
r'Ansible version={}'.format(self.ansible_core_version), text)
self.assertLogLine(r'TASK \[Show contents of first file\]', text)
self.assertLogLine(r'controller \| command test one', text)
self.assertLogLine(
r'controller \| ok: Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine(r'TASK \[Show contents of second file\]', text)
self.assertLogLine(r'compute1 \| command test two', text)
self.assertLogLine(r'controller \| command test two', text)
self.assertLogLine(r'compute1 \| This is a rescue task', text)
self.assertLogLine(r'controller \| This is a rescue task', text)
self.assertLogLine(r'compute1 \| This is an always task', text)
self.assertLogLine(r'controller \| This is an always task', text)
self.assertLogLine(r'compute1 \| This is a handler', text)
self.assertLogLine(r'controller \| This is a handler', text)
self.assertLogLine(r'controller \| First free task', text)
self.assertLogLine(r'controller \| Second free task', text)
self.assertLogLine(r'controller \| This is a shell task after an '
'included role', text)
self.assertLogLine(r'compute1 \| This is a shell task after an '
'included role', text)
self.assertLogLine(r'controller \| This is a command task after '
'an included role', text)
self.assertLogLine(r'compute1 \| This is a command task after an '
'included role', text)
self.assertLogLine(r'controller \| This is a shell task with '
'delegate compute1', text)
self.assertLogLine(r'controller \| This is a shell task with '
'delegate controller', text)
self.assertLogLine(r'compute1 \| item_in_loop1', text)
self.assertLogLine(r'compute1 \| ok: Item: item_in_loop1 '
r'Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine(r'compute1 \| item_in_loop2', text)
self.assertLogLine(r'compute1 \| ok: Item: item_in_loop2 '
r'Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine(r'compute1 \| failed_in_loop1', text)
self.assertLogLine(r'compute1 \| ok: Item: failed_in_loop1 '
r'Result: 1', text)
self.assertLogLine(r'compute1 \| failed_in_loop2', text)
self.assertLogLine(r'compute1 \| ok: Item: failed_in_loop2 '
r'Result: 1', text)
self.assertLogLine(r'compute1 \| transitive-one', text)
self.assertLogLine(r'compute1 \| transitive-two', text)
self.assertLogLine(r'compute1 \| transitive-three', text)
self.assertLogLine(r'compute1 \| transitive-four', text)
self.assertLogLine(
r'controller \| ok: Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine('PLAY RECAP', text)
self.assertLogLine(
r'controller \| ok: \d+ changed: \d+ unreachable: 0 failed: 0 '
'skipped: 0 rescued: 1 ignored: 0', text)
self.assertLogLine(
r'RUN END RESULT_NORMAL: \[untrusted : review.example.com/'
r'org/project/playbooks/command.yaml@master]', text)
time1, time2 = self._getLogTime(r'TASK \[Command Not Found\]',
text)
self.assertLess((time2 - time1) / timedelta(milliseconds=1),
9000)
# This is from the debug: msg='{{ ansible_version }}'
# testing raw variable output. To make it version
# agnostic, match just the start of
# compute1 | ok: {'string': '2.9.27'...
# NOTE(ianw) 2022-08-24 : I don't know why the callback
# for debug: msg= doesn't put the hostname first like
# other output. Undetermined if bug or feature.
self.assertLogLineStartsWith(
r"""\{'string': '\d.""", text)
# ... handling loops is a different path, and that does
self.assertLogLineStartsWith(
r"""compute1 \| ok: \{'string': '\d.""", text)
def test_command_split_streams(self):
job = self._run_job('command', split='true')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'SUCCESS')
console_output = self.console_output.getvalue()
# This should be generic enough to match any callback
# plugin failures, which look something like
#
# [WARNING]: Failure using method (v2_runner_on_ok) in \
# callback plugin
# (<ansible.plugins.callback.zuul_stream.CallbackModule object at'
# 0x7f89f72a20b0>): 'dict' object has no attribute 'startswith'"
# Callback Exception:
# ...
#
self.assertNotIn('[WARNING]: Failure using method', console_output)
text = self._get_job_output(build)
data = self._get_job_json(build)
token_stdout = "Standard output test {}".format(
self.history[0].jobdir.src_root)
token_stderr = "Standard error test {}".format(
self.history[0].jobdir.src_root)
result = data[0]['plays'][1]['tasks'][2]['hosts']['compute1']
self.assertEqual(token_stdout, result['stdout'])
self.assertEqual(token_stderr, result['stderr'])
self.assertLogLine(
r'RUN START: \[untrusted : review.example.com/org/project/'
r'playbooks/command.yaml@master\]', text)
self.assertLogLine(r'PLAY \[all\]', text)
self.assertLogLine(
r'Ansible version={}'.format(self.ansible_core_version), text)
self.assertLogLine(r'TASK \[Show contents of first file\]', text)
self.assertLogLine(r'controller \| command test one', text)
self.assertLogLine(
r'controller \| ok: Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine(r'TASK \[Show contents of second file\]', text)
self.assertLogLine(r'compute1 \| command test two', text)
self.assertLogLine(r'controller \| command test two', text)
self.assertLogLine(r'compute1 \| This is a rescue task', text)
self.assertLogLine(r'controller \| This is a rescue task', text)
self.assertLogLine(r'compute1 \| This is an always task', text)
self.assertLogLine(r'controller \| This is an always task', text)
self.assertLogLine(r'compute1 \| This is a handler', text)
self.assertLogLine(r'controller \| This is a handler', text)
self.assertLogLine(r'controller \| First free task', text)
self.assertLogLine(r'controller \| Second free task', text)
self.assertLogLine(r'controller \| This is a shell task after an '
'included role', text)
self.assertLogLine(r'compute1 \| This is a shell task after an '
'included role', text)
self.assertLogLine(r'controller \| This is a command task after '
'an included role', text)
self.assertLogLine(r'compute1 \| This is a command task after an '
'included role', text)
self.assertLogLine(r'controller \| This is a shell task with '
'delegate compute1', text)
self.assertLogLine(r'controller \| This is a shell task with '
'delegate controller', text)
self.assertLogLine(r'compute1 \| item_in_loop1', text)
self.assertLogLine(r'compute1 \| ok: Item: item_in_loop1 '
r'Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine(r'compute1 \| item_in_loop2', text)
self.assertLogLine(r'compute1 \| ok: Item: item_in_loop2 '
r'Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine(r'compute1 \| failed_in_loop1', text)
self.assertLogLine(r'compute1 \| ok: Item: failed_in_loop1 '
r'Result: 1', text)
self.assertLogLine(r'compute1 \| failed_in_loop2', text)
self.assertLogLine(r'compute1 \| ok: Item: failed_in_loop2 '
r'Result: 1', text)
self.assertLogLine(r'compute1 \| transitive-one', text)
self.assertLogLine(r'compute1 \| transitive-two', text)
self.assertLogLine(r'compute1 \| transitive-three', text)
self.assertLogLine(r'compute1 \| transitive-four', text)
self.assertLogLine(
r'controller \| ok: Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine('PLAY RECAP', text)
self.assertLogLine(
r'controller \| ok: \d+ changed: \d+ unreachable: 0 failed: 0 '
'skipped: 0 rescued: 1 ignored: 0', text)
self.assertLogLine(
r'RUN END RESULT_NORMAL: \[untrusted : review.example.com/'
r'org/project/playbooks/command.yaml@master]', text)
time1, time2 = self._getLogTime(r'TASK \[Command Not Found\]',
text)
self.assertLess((time2 - time1) / timedelta(milliseconds=1),
9000)
# This is from the debug: msg='{{ ansible_version }}'
# testing raw variable output. To make it version
# agnostic, match just the start of
# compute1 | ok: {'string': '2.9.27'...
# NOTE(ianw) 2022-08-24 : I don't know why the callback
# for debug: msg= doesn't put the hostname first like
# other output. Undetermined if bug or feature.
self.assertLogLineStartsWith(
r"""\{'string': '\d.""", text)
# ... handling loops is a different path, and that does
self.assertLogLineStartsWith(
r"""compute1 \| ok: \{'string': '\d.""", text)
def test_module_exception(self):
job = self._run_job('module_failure_exception')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'FAILURE')
text = self._get_job_output(build)
self.assertLogLine(r'TASK \[Module failure\]', text)
self.assertLogLine(
r'controller \| MODULE FAILURE:', text)
self.assertLogLine(
r'controller \| Exception: This module is broken', text)
def test_module_no_result(self):
job = self._run_job('module_failure_no_result')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'FAILURE')
text = self._get_job_output(build)
self.assertLogLine(r'TASK \[Module failure\]', text)
regex = r'controller \| "msg": "New-style module did not ' \
r'handle its own exit"'
self.assertLogLine(regex, text)
class TestZuulStream6(AnsibleZuulTestCase, FunctionalZuulStreamMixIn):
ansible_version = '6'
ansible_core_version = '2.13'
def setUp(self):
super().setUp()
self._setUp()
class TestZuulStream8(AnsibleZuulTestCase, FunctionalZuulStreamMixIn):
ansible_version = '8'
ansible_core_version = '2.15'
def setUp(self):
super().setUp()
self._setUp()
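# Sketch (not part of the original file) of how this mixin pattern would
# extend to another Ansible release; the version pair below is
# hypothetical and must match what the executor actually ships:
#
# class TestZuulStream9(AnsibleZuulTestCase, FunctionalZuulStreamMixIn):
#     ansible_version = '9'
#     ansible_core_version = '2.16'
#
#     def setUp(self):
#         super().setUp()
#         self._setUp()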
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/remote/test_remote_zuul_stream.py | test_remote_zuul_stream.py |
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import textwrap
from tests.base import AnsibleZuulTestCase
class FunctionalActionModulesMixIn:
tenant_config_file = 'config/remote-action-modules/main.yaml'
# This should be overridden in child classes.
ansible_version = '6'
wait_timeout = 120
def _setUp(self):
self.fake_nodepool.remote_ansible = True
ansible_remote = os.environ.get('ZUUL_REMOTE_IPV4')
self.assertIsNotNone(ansible_remote)
def _run_job(self, job_name, result, expect_error=None):
# Keep the jobdir around so we can inspect contents if an
# assert fails. It will be cleaned up anyway as it is contained
# in a tmp dir which gets cleaned up after the test.
self.executor_server.keep_jobdir = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
conf = textwrap.dedent(
"""
- job:
name: {job_name}
run: playbooks/{job_name}.yaml
ansible-version: {version}
roles:
- zuul: org/common-config
nodeset:
nodes:
- name: controller
label: whatever
- project:
check:
jobs:
- {job_name}
""".format(job_name=job_name, version=self.ansible_version))
file_dict = {'zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
job = self.getJobFromHistory(job_name)
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, result)
if expect_error:
path = os.path.join(self.jobdir_root, build.uuid,
'work', 'logs', 'job-output.txt')
with open(path, 'r') as f:
self.assertIn(expect_error, f.read())
def test_command_module(self):
self._run_job('command-good', 'SUCCESS')
def test_zuul_return_module(self):
self._run_job('zuul_return-good', 'SUCCESS')
def test_zuul_return_module_delegate_to(self):
self._run_job('zuul_return-good-delegate', 'SUCCESS')
def test_shell_module(self):
self._run_job('shell-good', 'SUCCESS')
class TestActionModules6(AnsibleZuulTestCase, FunctionalActionModulesMixIn):
ansible_version = '6'
def setUp(self):
super().setUp()
self._setUp()
class TestActionModules8(AnsibleZuulTestCase, FunctionalActionModulesMixIn):
ansible_version = '8'
def setUp(self):
super().setUp()
self._setUp()
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/remote/test_remote_action_modules.py | test_remote_action_modules.py |
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import dateutil.parser
import json
import os
import textwrap
from tests.base import AnsibleZuulTestCase
class FunctionalZuulJSONMixIn:
tenant_config_file = 'config/remote-zuul-json/main.yaml'
# This should be overridden in child classes.
ansible_version = '2.6'
def _setUp(self):
self.fake_nodepool.remote_ansible = True
ansible_remote = os.environ.get('ZUUL_REMOTE_IPV4')
self.assertIsNotNone(ansible_remote)
def _run_job(self, job_name):
# Keep the jobdir around so we can inspect contents if an
# assert fails. It will be cleaned up anyway as it is contained
# in a tmp dir which gets cleaned up after the test.
self.executor_server.keep_jobdir = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
conf = textwrap.dedent(
"""
- job:
name: {job_name}
run: playbooks/{job_name}.yaml
ansible-version: {version}
roles:
- zuul: org/common-config
nodeset:
nodes:
- name: controller
label: whatever
- project:
check:
jobs:
- {job_name}
""".format(job_name=job_name, version=self.ansible_version))
file_dict = {'zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
job = self.getJobFromHistory(job_name)
return job
def _get_json_as_text(self, build):
path = os.path.join(self.jobdir_root, build.uuid,
'work', 'logs', 'job-output.json')
with open(path) as f:
return f.read()
def test_no_log(self):
job = self._run_job('no-log')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'SUCCESS')
text = self._get_json_as_text(build)
self.assertIn('rosebud', text)
self.assertNotIn('setec', text)
def test_json_task_action(self):
job = self._run_job('no-log')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'SUCCESS')
text = self._get_json_as_text(build)
json_result = json.loads(text)
tasks = json_result[0]['plays'][0]['tasks']
expected_actions = [
'debug', 'debug', 'debug', 'copy', 'find',
'stat', 'debug'
]
for i, expected in enumerate(expected_actions):
host_result = tasks[i]['hosts']['controller']
self.assertEqual(expected, host_result['action'])
def test_json_role_log(self):
job = self._run_job('json-role')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'SUCCESS')
text = self._get_json_as_text(build)
self.assertIn('json-role', text)
json_result = json.loads(text)
role_name = json_result[0]['plays'][0]['tasks'][0]['role']['name']
self.assertEqual('json-role', role_name)
role_path = json_result[0]['plays'][0]['tasks'][0]['role']['path']
self.assertEqual('json-role', os.path.basename(role_path))
def test_json_time_log(self):
job = self._run_job('no-log')
with self.jobLog(job):
build = self.history[-1]
self.assertEqual(build.result, 'SUCCESS')
text = self._get_json_as_text(build)
# Assert that 'start' and 'end' are part of the result at all
self.assertIn('start', text)
self.assertIn('end', text)
json_result = json.loads(text)
# Assert that the start and end timestamps are present at the
# right place in the dictionary
task = json_result[0]['plays'][0]['tasks'][0]['task']
task_start_time = task['duration']['start']
task_end_time = task['duration']['end']
play = json_result[0]['plays'][0]['play']
play_start_time = play['duration']['start']
play_end_time = play['duration']['end']
# Assert that the start and end timestamps are valid dates
dateutil.parser.parse(task_start_time)
dateutil.parser.parse(task_end_time)
dateutil.parser.parse(play_start_time)
dateutil.parser.parse(play_end_time)
class TestZuulJSON6(AnsibleZuulTestCase, FunctionalZuulJSONMixIn):
ansible_version = '6'
def setUp(self):
super().setUp()
self._setUp()
class TestZuulJSON8(AnsibleZuulTestCase, FunctionalZuulJSONMixIn):
ansible_version = '8'
def setUp(self):
super().setUp()
self._setUp()
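# Shape of job-output.json as implied by the lookups above (abridged
# reconstruction for reference, not an authoritative schema):
#
# [
#   {"plays": [
#     {"play":  {"duration": {"start": "...", "end": "..."}},
#      "tasks": [
#        {"task":  {"duration": {"start": "...", "end": "..."}},
#         "role":  {"name": "...", "path": "..."},
#         "hosts": {"controller": {"action": "...", "...": "..."}}}
#      ]}
#   ]}
# ]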
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/remote/test_remote_zuul_json.py | test_remote_zuul_json.py |
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import json
import os
import queue
import time
import confluent_kafka as kafka
import tests.base
from tests.base import (
ZuulTestCase,
iterate_timeout,
simple_layout,
)
FIXTURE_DIR = os.path.join(tests.base.FIXTURE_DIR, 'gerrit')
class FakeKafkaMessage:
def __init__(self, topic, offset, value, error=None):
self._topic = topic
self._error = error
self._offset = offset
if error:
self._value = None
else:
self._value = value
def error(self):
return self._error
def value(self):
return self._value
def partition(self):
return 0
def topic(self):
return self._topic
def offset(self):
return self._offset
class FakeKafkaConsumer:
def __init__(self, config, logger):
self.config = config
self.logger = logger
self.topics = None
self._queue = queue.Queue()
self.closed = 0
self._offset = 0
def put(self, data):
self._queue.put(data)
def subscribe(self, topics):
self.topics = topics
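# poll() mirrors the confluent_kafka.Consumer.poll() contract that the
# driver relies on: return None if nothing arrives within the timeout,
# otherwise a message whose error() is checked before value() is used.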
def poll(self, timeout=0):
try:
data = self._queue.get(timeout=timeout)
self._queue.task_done()
if isinstance(data, kafka.KafkaError):
return FakeKafkaMessage(
'gerrit', self._offset, None, error=data)
self._offset += 1
return FakeKafkaMessage('gerrit', self._offset - 1, data)
except queue.Empty:
return None
def close(self):
self.closed += 1
def serialize(event):
return json.dumps(event).encode('utf8')
class TestGerritEventSourceKafka(ZuulTestCase):
config_file = 'zuul-gerrit-kafka.conf'
def setUp(self):
self.useFixture(fixtures.MonkeyPatch(
'zuul.driver.gerrit.gerriteventkafka.kafka.Consumer',
FakeKafkaConsumer))
super().setUp()
@simple_layout('layouts/simple.yaml')
def test_kafka(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
consumer = self.fake_gerrit.event_thread.consumer
self.fake_gerrit.event_thread.RECONNECTION_DELAY = 1
# Assert we passed the required config entries
self.assertIsInstance(consumer.config, dict)
self.assertIn('bootstrap.servers', consumer.config)
self.assertIn('group.id', consumer.config)
# Exercise error handling
err = kafka.KafkaError(kafka.KafkaError._PARTITION_EOF)
consumer.put(err)
# Exercise reconnection
err = kafka.KafkaError(kafka.KafkaError.NETWORK_EXCEPTION)
consumer.put(err)
for _ in iterate_timeout(60, 'wait for reconnect'):
if consumer is not self.fake_gerrit.event_thread.consumer:
break
time.sleep(0.2)
consumer = self.fake_gerrit.event_thread.consumer
self.additional_event_queues.append(consumer._queue)
consumer.put(serialize(A.getPatchsetCreatedEvent(1)))
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1')
])
self.assertEqual(A.reported, 1, "A should be reported")
self.assertTrue(consumer._queue.empty())
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_gerrit_kafka.py | test_gerrit_kafka.py |
# Copyright 2020 OpenStack Foundation
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
import os.path
import jwt
from io import StringIO
import time
from zuul.driver import auth
from tests.base import BaseTestCase, FIXTURE_DIR
with open(os.path.join(FIXTURE_DIR,
'auth/openid-configuration.json'), 'r') as well_known:
FAKE_WELL_KNOWN_CONFIG = json.loads(well_known.read())
algo = jwt.algorithms.RSAAlgorithm(jwt.algorithms.RSAAlgorithm.SHA256)
with open(os.path.join(FIXTURE_DIR,
'auth/oidc-key'), 'r') as k:
OIDC_PRIVATE_KEY = algo.prepare_key(k.read())
with open(os.path.join(FIXTURE_DIR,
'auth/oidc-key.pub'), 'r') as k:
pub_key = algo.prepare_key(k.read())
pub_jwk = algo.to_jwk(pub_key)
key = {
"kid": "OwO",
"use": "sig",
"alg": "RS256"
}
key.update(json.loads(pub_jwk))
# not present in a Keycloak JWKS
if "key_ops" in key:
del key["key_ops"]
FAKE_CERTS = {
"keys": [
key
]
}
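# FAKE_CERTS mimics the JWKS document a provider serves from its certs
# endpoint: {"keys": [{"kid": ..., "use": "sig", "alg": ..., "kty": ...,
# "n": ..., "e": ...}]} (the RSA members come from the jwk prepared above).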
class FakeResponse:
def __init__(self, json_dict):
self._json = json_dict
def json(self):
return self._json
def mock_get(url, params=None, **kwargs):
if url == ("https://my.oidc.provider/auth/realms/realm-one/"
".well-known/openid-configuration"):
return FakeResponse(FAKE_WELL_KNOWN_CONFIG)
else:
raise Exception("Unknown URL %s" % url)
def mock_urlopen(url, *args, **kwargs):
if hasattr(url, 'full_url'):
# Looks like a urllib.request.Request object
url = url.full_url
if url == ("https://my.oidc.provider/auth/realms/realm-one/"
"protocol/openid-connect/certs"):
io = StringIO()
json.dump(FAKE_CERTS, io)
io.seek(0)
return io
else:
raise Exception("Unknown URL %s" % url)
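# PyJWKClient fetches the JWKS with urllib.request.urlopen and reads the
# response as JSON, so a file-like StringIO is all the fake needs to
# return (see the mock.patch of urllib.request.urlopen in the test below).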
class TestOpenIDConnectAuthenticator(BaseTestCase):
def test_decodeToken(self):
"""Test the decoding workflow"""
config = {
'issuer_id': FAKE_WELL_KNOWN_CONFIG['issuer'],
'client_id': 'zuul-app',
'realm': 'realm-one',
}
OIDCAuth = auth.jwt.OpenIDConnectAuthenticator(**config)
payload = {
'iss': FAKE_WELL_KNOWN_CONFIG['issuer'],
'aud': config['client_id'],
'exp': int(time.time()) + 3600,
'sub': 'someone'
}
token = jwt.encode(
payload,
OIDC_PRIVATE_KEY,
algorithm='RS256',
headers={'kid': 'OwO'})
with mock.patch('requests.get', side_effect=mock_get):
# patching call in PyJWKClient's fetch_data
with mock.patch('urllib.request.urlopen',
side_effect=mock_urlopen):
decoded = OIDCAuth.decodeToken(token)
for claim in payload.keys():
self.assertEqual(payload[claim], decoded[claim])
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_auth.py | test_auth.py |
# Copyright 2015 GoodData
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import re
from testtools.matchers import MatchesRegex, Not, StartsWith
import urllib
import socket
import threading
import time
import textwrap
from concurrent.futures import ThreadPoolExecutor
from unittest import mock, skip
import git
import gitdb
import github3.exceptions
from tests.fakegithub import FakeFile, FakeGithubEnterpriseClient
from zuul.driver.github.githubconnection import GithubShaCache
from zuul.zk.layout import LayoutState
from zuul.lib import strings
from zuul.merger.merger import Repo
from zuul.model import MergeRequest, EnqueueEvent, DequeueEvent
from zuul.zk.change_cache import ChangeKey
from tests.base import (AnsibleZuulTestCase, BaseTestCase,
ZuulGithubAppTestCase, ZuulTestCase,
simple_layout, random_sha1, iterate_timeout)
from tests.base import ZuulWebFixture
EMPTY_LAYOUT_STATE = LayoutState("", "", 0, None, {}, -1)
class TestGithubDriver(ZuulTestCase):
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_pull_event(self):
self.executor_server.hold_jobs_in_build = True
body = "This is the\nPR body."
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A',
body=body)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
job = self.getJobFromHistory('project-test2')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.head_sha), zuulvars['patchset'])
self.assertEqual(str(A.head_sha), zuulvars['commit_id'])
self.assertEqual('master', zuulvars['branch'])
self.assertEqual('https://github.com/org/project/pull/1',
zuulvars['items'][0]['change_url'])
expected = "A\n\n{}".format(body)
self.assertEqual(zuulvars["message"],
strings.b64encode(expected))
self.assertEqual(1, len(A.comments))
self.assertThat(
A.comments[0],
MatchesRegex(r'.*\[project-test1 \]\(.*\).*', re.DOTALL))
self.assertThat(
A.comments[0],
MatchesRegex(r'.*\[project-test2 \]\(.*\).*', re.DOTALL))
self.assertEqual(2, len(self.history))
# test_pull_unmatched_branch_event
self.create_branch('org/project', 'unmatched_branch')
B = self.fake_github.openFakePullRequest(
'org/project', 'unmatched_branch', 'B')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
# now emit closed event without merging
self.fake_github.emitEvent(A.getPullRequestClosedEvent())
self.waitUntilSettled()
# nothing should have happened due to the merged requirement
self.assertEqual(2, len(self.history))
# now merge the PR and emit the event again
A.setMerged('merged')
self.fake_github.emitEvent(A.getPullRequestClosedEvent())
self.waitUntilSettled()
# post job must be run
self.assertEqual(3, len(self.history))
@simple_layout('layouts/files-github.yaml', driver='github')
def test_pull_matched_file_event(self):
A = self.fake_github.openFakePullRequest(
'org/project', 'master', 'A',
files={'random.txt': 'test', 'build-requires': 'test'})
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
# test_pull_unmatched_file_event
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B',
files={'random.txt': 'test2'})
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/files-github.yaml', driver='github')
def test_pull_changed_files_length_mismatch(self):
files = {'{:03d}.txt'.format(n): 'test' for n in range(300)}
# Add a 301st file; it will not appear in the PR's file list, since
# GitHub returns at most 300 files, in alphabetical order.
files["foobar-requires"] = "test"
A = self.fake_github.openFakePullRequest(
'org/project', 'master', 'A', files=files)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/files-github.yaml', driver='github')
def test_pull_changed_files_length_mismatch_reenqueue(self):
# Hold jobs so we can trigger a reconfiguration while the item is in
# the pipeline
self.executor_server.hold_jobs_in_build = True
files = {'{:03d}.txt'.format(n): 'test' for n in range(300)}
# Add a 301st file; it will not appear in the PR's file list, since
# GitHub returns at most 300 files, in alphabetical order.
files["foobar-requires"] = "test"
A = self.fake_github.openFakePullRequest(
'org/project', 'master', 'A', files=files)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Comment on the pull request to trigger updateChange
self.fake_github.emitEvent(A.getCommentAddedEvent('casual comment'))
self.waitUntilSettled()
# Trigger reconfig to enforce a reenqueue of the item
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Now we can release all jobs
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# There must be exactly one successful job in the history. If there is
# an aborted job in the history the reenqueue failed.
self.assertHistory([
dict(name='project-test1', result='SUCCESS',
changes="%s,%s" % (A.number, A.head_sha)),
])
@simple_layout('layouts/files-github.yaml', driver='github')
def test_changed_file_match_filter(self):
path = os.path.join(self.upstream_root, 'org/project')
base_sha = git.Repo(path).head.object.hexsha
files = {'{:03d}.txt'.format(n): 'test' for n in range(300)}
files["foobar-requires"] = "test"
files["to-be-removed"] = "test"
A = self.fake_github.openFakePullRequest(
'org/project', 'master', 'A', files=files, base_sha=base_sha)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# project-test1 and project-test2 should be run
self.assertEqual(2, len(self.history))
@simple_layout('layouts/files-github.yaml', driver='github')
def test_changed_and_reverted_file_not_match_filter(self):
path = os.path.join(self.upstream_root, 'org/project')
base_sha = git.Repo(path).head.object.hexsha
files = {'{:03d}.txt'.format(n): 'test' for n in range(300)}
files["foobar-requires"] = "test"
files["to-be-removed"] = "test"
A = self.fake_github.openFakePullRequest(
'org/project', 'master', 'A', files=files, base_sha=base_sha)
A.addCommit(delete_files=['to-be-removed'])
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Only project-test1 should be run, because the file to-be-removed
# is reverted and not in changed files to trigger project-test2
self.assertEqual(1, len(self.history))
@simple_layout('layouts/files-github.yaml', driver='github')
def test_pull_file_rename(self):
A = self.fake_github.openFakePullRequest(
'org/project', 'master', 'A', files={
FakeFile("moved", previous_filename="foobar-requires"): "test"
})
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_pull_github_files_error(self):
A = self.fake_github.openFakePullRequest(
'org/project', 'master', 'A')
with mock.patch("tests.fakegithub.FakePull.files") as files_mock:
files_mock.side_effect = github3.exceptions.ServerError(
mock.MagicMock())
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, files_mock.call_count)
self.assertEqual(2, len(self.history))
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_comment_event(self):
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getCommentAddedEvent('test me'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
# Test an unmatched comment, history should remain the same
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
self.fake_github.emitEvent(B.getCommentAddedEvent('casual comment'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
# Test a comment on a non-PR issue; history should remain the same
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
self.fake_github.emitEvent(
C.getIssueCommentAddedEvent('a non-PR issue comment'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
@simple_layout('layouts/push-tag-github.yaml', driver='github')
def test_tag_event(self):
self.executor_server.hold_jobs_in_build = True
self.create_branch('org/project', 'tagbranch')
files = {'README.txt': 'test'}
self.addCommitToRepo('org/project', 'test tag',
files, branch='tagbranch', tag='newtag')
path = os.path.join(self.upstream_root, 'org/project')
repo = git.Repo(path)
tag = repo.tags['newtag']
sha = tag.commit.hexsha
del repo
# Notify zuul about the new branch to load the config
self.fake_github.emitEvent(
self.fake_github.getPushEvent(
'org/project',
ref='refs/heads/%s' % 'tagbranch'))
self.waitUntilSettled()
# Record previous tenant reconfiguration time
before = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.fake_github.emitEvent(
self.fake_github.getPushEvent('org/project', 'refs/tags/newtag',
new_rev=sha))
self.waitUntilSettled()
# Make sure the tenant hasn't been reconfigured due to the new tag
after = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.assertEqual(before, after)
build_params = self.builds[0].parameters
self.assertEqual('refs/tags/newtag', build_params['zuul']['ref'])
self.assertNotIn('oldrev', build_params['zuul'])
self.assertEqual(sha, build_params['zuul']['newrev'])
self.assertEqual(sha, build_params['zuul']['commit_id'])
self.assertEqual(
'https://github.com/org/project/releases/tag/newtag',
build_params['zuul']['change_url'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-tag').result)
@simple_layout('layouts/push-tag-github.yaml', driver='github')
def test_push_event(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
old_sha = '0' * 40
new_sha = A.head_sha
A.setMerged("merging A")
pevent = self.fake_github.getPushEvent(project='org/project',
ref='refs/heads/master',
old_rev=old_sha,
new_rev=new_sha)
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
build_params = self.builds[0].parameters
self.assertEqual('refs/heads/master', build_params['zuul']['ref'])
self.assertNotIn('oldrev', build_params['zuul'])
self.assertEqual(new_sha, build_params['zuul']['newrev'])
self.assertEqual(
'https://github.com/org/project/commit/%s' % new_sha,
build_params['zuul']['change_url'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-post').result)
self.assertEqual(1, len(self.history))
# test unmatched push event
old_sha = random_sha1()
new_sha = random_sha1()
self.fake_github.emitEvent(
self.fake_github.getPushEvent('org/project',
'refs/heads/unmatched_branch',
old_sha, new_sha))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/labeling-github.yaml', driver='github')
def test_labels(self):
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.addLabel('test'))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual('project-labels', self.history[0].name)
self.assertEqual(['tests passed'], A.labels)
# test label removed
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
B.addLabel('do not test')
self.fake_github.emitEvent(B.removeLabel('do not test'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.assertEqual('project-labels', self.history[1].name)
self.assertEqual(['tests passed'], B.labels)
# test unmatched label
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
self.fake_github.emitEvent(C.addLabel('other label'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.assertEqual(['other label'], C.labels)
@simple_layout('layouts/reviews-github.yaml', driver='github')
def test_reviews(self):
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getReviewAddedEvent('approve'))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual('project-reviews', self.history[0].name)
self.assertEqual(['tests passed'], A.labels)
# test_review_unmatched_event
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
self.fake_github.emitEvent(B.getReviewAddedEvent('comment'))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
# test sending reviews
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
self.fake_github.emitEvent(C.getCommentAddedEvent(
"I solemnly swear that I am up to no good"))
self.waitUntilSettled()
self.assertEqual('project-reviews', self.history[0].name)
self.assertEqual(1, len(C.reviews))
self.assertEqual('APPROVE', C.reviews[0].as_dict()['state'])
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_timer_event(self):
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('org/common-config',
'layouts/timer-github.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
time.sleep(2)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = False
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('org/common-config',
'layouts/no-timer-github.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-bitrot', result='SUCCESS',
ref='refs/heads/master'),
], ordered=False)
@simple_layout('layouts/dequeue-github.yaml', driver='github')
def test_dequeue_pull_synchronized(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(
'org/one-job-project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# The event update stamp has a resolution of one second; wait so
# that the later event gets a newer timestamp.
time.sleep(1)
# On a push to a PR Github may emit a pull_request_review event with
# the old head so send that right before the synchronized event.
review_event = A.getReviewAddedEvent('dismissed')
A.addCommit()
self.fake_github.emitEvent(review_event)
self.fake_github.emitEvent(A.getPullRequestSynchronizeEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.assertEqual(1, self.countJobResults(self.history, 'ABORTED'))
@simple_layout('layouts/dequeue-github.yaml', driver='github')
def test_dequeue_pull_abandoned(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(
'org/one-job-project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.fake_github.emitEvent(A.getPullRequestClosedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(1, self.countJobResults(self.history, 'ABORTED'))
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_git_https_url(self):
"""Test that git_ssh option gives git url with ssh"""
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project')
url = self.fake_github.real_getGitUrl(project)
self.assertEqual('https://github.com/org/project', url)
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_git_ssh_url(self):
"""Test that git_ssh option gives git url with ssh"""
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project')
url = self.fake_github_ssh.real_getGitUrl(project)
self.assertEqual('ssh://[email protected]/org/project.git', url)
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_git_enterprise_url(self):
"""Test that git_url option gives git url with proper host"""
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project')
url = self.fake_github_ent.real_getGitUrl(project)
self.assertEqual('ssh://[email protected]/org/project.git', url)
@simple_layout('layouts/reporting-github.yaml', driver='github')
def test_reporting(self):
project = 'org/project'
github = self.fake_github.getGithubClient(None)
# pipeline reports pull status both on start and success
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have a status container for the head sha
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys())
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
# We should only have one status for the head sha
self.assertEqual(1, len(statuses))
check_status = statuses[0]
check_url = (
'http://zuul.example.com/t/tenant-one/status/change/%s,%s' %
(A.number, A.head_sha))
self.assertEqual('tenant-one/check', check_status['context'])
self.assertEqual('check status: pending',
check_status['description'])
self.assertEqual('pending', check_status['state'])
self.assertEqual(check_url, check_status['url'])
self.assertEqual(0, len(A.comments))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# We should only have two statuses for the head sha
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
self.assertEqual(2, len(statuses))
check_status = statuses[0]
check_url = 'http://zuul.example.com/t/tenant-one/buildset/'
self.assertEqual('tenant-one/check', check_status['context'])
self.assertEqual('check status: success',
check_status['description'])
self.assertEqual('success', check_status['state'])
self.assertThat(check_status['url'], StartsWith(check_url))
self.assertEqual(1, len(A.comments))
self.assertThat(A.comments[0],
MatchesRegex(r'.*Build succeeded.*', re.DOTALL))
# pipeline does not report any status but does comment
self.executor_server.hold_jobs_in_build = True
self.fake_github.emitEvent(
A.getCommentAddedEvent('reporting check'))
self.waitUntilSettled()
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
self.assertEqual(2, len(statuses))
# comments increased by one for the start message
self.assertEqual(2, len(A.comments))
self.assertThat(A.comments[1],
MatchesRegex(r'.*Starting reporting jobs.*',
re.DOTALL))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# pipeline reports success status
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
self.assertEqual(3, len(statuses))
report_status = statuses[0]
self.assertEqual('tenant-one/reporting', report_status['context'])
self.assertEqual('reporting status: success',
report_status['description'])
self.assertEqual('success', report_status['state'])
self.assertEqual(2, len(A.comments))
base = 'http://zuul.example.com/t/tenant-one/buildset/'
# Deconstructing the URL because we don't save the BuildSet UUID
# anywhere to do a direct comparison and doing regexp matches on a full
# URL is painful.
# The first part of the URL matches the easy base string
self.assertThat(report_status['url'], StartsWith(base))
# The rest of the URL is a UUID
self.assertThat(report_status['url'][len(base):],
MatchesRegex(r'^[a-fA-F0-9]{32}$'))
@simple_layout('layouts/reporting-github.yaml', driver='github')
def test_truncated_status_description(self):
project = 'org/project'
# pipeline reports pull status both on start and success
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
self.fake_github.emitEvent(
A.getCommentAddedEvent('long pipeline'))
self.waitUntilSettled()
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
self.assertEqual(1, len(statuses))
check_status = statuses[0]
# Status is truncated due to long pipeline name
self.assertEqual('status: pending',
check_status['description'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# We should only have two statuses for the head sha
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
self.assertEqual(2, len(statuses))
check_status = statuses[0]
# Status is truncated due to long pipeline name
self.assertEqual('status: success',
check_status['description'])
@simple_layout('layouts/reporting-github.yaml', driver='github')
def test_push_reporting(self):
project = 'org/project2'
# pipeline reports pull status both on start and success
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
old_sha = '0' * 40
new_sha = A.head_sha
A.setMerged("merging A")
pevent = self.fake_github.getPushEvent(project=project,
ref='refs/heads/master',
old_rev=old_sha,
new_rev=new_sha)
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
# there should only be one report, a status
self.assertEqual(1, len(self.fake_github.github_data.reports))
# Verify the user/context/state of the status
status = ('zuul', 'tenant-one/push-reporting', 'pending')
self.assertEqual(status, self.fake_github.github_data.reports[0][-1])
# free the executor, allow the build to finish
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# Now there should be a second report, the success of the build
self.assertEqual(2, len(self.fake_github.github_data.reports))
# Verify the user/context/state of the status
status = ('zuul', 'tenant-one/push-reporting', 'success')
self.assertEqual(status, self.fake_github.github_data.reports[-1][-1])
# now make a PR which should also comment
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Now there should be four reports: a new comment
# and a new status
self.assertEqual(4, len(self.fake_github.github_data.reports))
self.executor_server.release()
self.waitUntilSettled()
@simple_layout("layouts/reporting-github.yaml", driver="github")
def test_reporting_checks_api_unauthorized(self):
# Using the checks API only works with app authentication. As all tests
# within the TestGithubDriver class are executed without app
# authentication, the checks API won't work here.
project = "org/project3"
github = self.fake_github.getGithubClient(None)
# The pipeline reports pull request status both on start and success.
# As we are not authenticated as app, this won't create or update any
# check runs, but should post two comments (start, success) informing
# the user about the missing authentication.
A = self.fake_github.openFakePullRequest(project, "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys()
)
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(0, len(check_runs))
expected_warning = (
"Unable to create or update check tenant-one/checks-api-reporting."
" Must be authenticated as app integration."
)
self.assertEqual(2, len(A.comments))
self.assertIn(expected_warning, A.comments[0])
self.assertIn(expected_warning, A.comments[1])
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_report_pull_merge(self):
# pipeline merges the pull request on success
A = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title',
body='I shouldnt be seen',
body_text='PR body')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertThat(A.merge_message,
MatchesRegex(r'.*PR title\n\nPR body.*', re.DOTALL))
self.assertThat(A.merge_message,
Not(MatchesRegex(
r'.*I shouldnt be seen.*',
re.DOTALL)))
self.assertEqual(len(A.comments), 0)
# pipeline merges the pull request on success after failure
self.fake_github.merge_failure = True
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
self.fake_github.emitEvent(B.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertFalse(B.is_merged)
self.assertEqual(len(B.comments), 1)
self.assertEqual(B.comments[0],
'Pull request merge failed: Unknown merge failure')
self.fake_github.merge_failure = False
# pipeline merges the pull request on second run of merge
# first merge failed on 405 Method Not Allowed error
self.fake_github.merge_not_allowed_count = 1
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
self.fake_github.emitEvent(C.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertTrue(C.is_merged)
# pipeline does not merge the pull request
# merge failed on 405 Method Not Allowed error - twice
self.fake_github.merge_not_allowed_count = 2
D = self.fake_github.openFakePullRequest('org/project', 'master', 'D')
self.fake_github.emitEvent(D.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertFalse(D.is_merged)
self.assertEqual(len(D.comments), 1)
# Validate that the merge failure comment contains the message github
# returned
self.assertEqual(D.comments[0],
'Pull request merge failed: Merge not allowed '
'because of fake reason')
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_report_pull_merge_message_reviewed_by(self):
# pipeline merges the pull request on success
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertTrue(A.is_merged)
# assert that no 'Reviewed-By' is in merge commit message
self.assertThat(A.merge_message,
Not(MatchesRegex(r'.*Reviewed-By.*', re.DOTALL)))
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
B.addReview('derp', 'APPROVED')
self.fake_github.emitEvent(B.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertTrue(B.is_merged)
# assert that single 'Reviewed-By' is in merge commit message
self.assertThat(B.merge_message,
MatchesRegex(
r'.*Reviewed-by: derp <[email protected]>.*',
re.DOTALL))
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
C.addReview('derp', 'APPROVED')
C.addReview('herp', 'COMMENTED')
self.fake_github.emitEvent(C.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertTrue(C.is_merged)
# assert that multiple 'Reviewed-By's are in merge commit message
self.assertThat(C.merge_message,
MatchesRegex(
r'.*Reviewed-by: derp <[email protected]>.*',
re.DOTALL))
self.assertThat(C.merge_message,
MatchesRegex(
r'.*Reviewed-by: herp <[email protected]>.*',
re.DOTALL))
@simple_layout('layouts/dependent-github.yaml', driver='github')
def test_draft_pr(self):
# pipeline merges the pull request on success
A = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title', draft=True)
self.fake_github.emitEvent(A.addLabel('merge'))
self.waitUntilSettled()
# A draft pull request must not enter the gate
self.assertFalse(A.is_merged)
self.assertHistory([])
@simple_layout('layouts/dependent-github.yaml', driver='github')
def test_non_mergeable_pr(self):
# pipeline merges the pull request on success
A = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title', mergeable=False)
self.fake_github.emitEvent(A.addLabel('merge'))
self.waitUntilSettled()
        # A non-mergeable pull request must not enter the gate
self.assertFalse(A.is_merged)
self.assertHistory([])
@simple_layout('layouts/reporting-multiple-github.yaml', driver='github')
def test_reporting_multiple_github(self):
project = 'org/project1'
github = self.fake_github.getGithubClient(None)
# pipeline reports pull status both on start and success
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
        # open one on B as well, which should not affect A's reporting
B = self.fake_github.openFakePullRequest('org/project2', 'master',
'B')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have a status container for the head sha
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys())
# We should only have one status for the head sha
self.assertEqual(1, len(statuses))
check_status = statuses[0]
check_url = (
'http://zuul.example.com/t/tenant-one/status/change/%s,%s' %
(A.number, A.head_sha))
self.assertEqual('tenant-one/check', check_status['context'])
self.assertEqual('check status: pending', check_status['description'])
self.assertEqual('pending', check_status['state'])
self.assertEqual(check_url, check_status['url'])
self.assertEqual(0, len(A.comments))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# We should only have two statuses for the head sha
statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
self.assertEqual(2, len(statuses))
check_status = statuses[0]
check_url = 'http://zuul.example.com/t/tenant-one/buildset/'
self.assertEqual('tenant-one/check', check_status['context'])
self.assertEqual('success', check_status['state'])
self.assertEqual('check status: success', check_status['description'])
self.assertThat(check_status['url'], StartsWith(check_url))
self.assertEqual(1, len(A.comments))
self.assertThat(A.comments[0],
MatchesRegex(r'.*Build succeeded.*', re.DOTALL))
@simple_layout('layouts/dependent-github.yaml', driver='github')
def test_parallel_changes(self):
"Test that changes are tested in parallel and merged in series"
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
self.fake_github.emitEvent(A.addLabel('merge'))
self.fake_github.emitEvent(B.addLabel('merge'))
self.fake_github.emitEvent(C.addLabel('merge'))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.assertTrue(self.builds[0].hasChanges(A))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertTrue(self.builds[1].hasChanges(A))
self.assertEqual(self.builds[2].name, 'project-merge')
self.assertTrue(self.builds[2].hasChanges(A, B))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 5)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertTrue(self.builds[1].hasChanges(A))
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertTrue(self.builds[2].hasChanges(A))
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertTrue(self.builds[3].hasChanges(A, B))
self.assertEqual(self.builds[4].name, 'project-merge')
self.assertTrue(self.builds[4].hasChanges(A, B, C))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertTrue(self.builds[1].hasChanges(A))
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertTrue(self.builds[2].hasChanges(A, B))
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertTrue(self.builds[3].hasChanges(A, B))
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertTrue(self.builds[4].hasChanges(A, B, C))
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertTrue(self.builds[5].hasChanges(A, B, C))
all_builds = self.builds[:]
self.release(all_builds[2])
self.release(all_builds[3])
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
self.assertFalse(C.is_merged)
self.release(all_builds[0])
self.release(all_builds[1])
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
self.assertFalse(C.is_merged)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 9)
self.assertTrue(C.is_merged)
self.assertNotIn('merge', A.labels)
self.assertNotIn('merge', B.labels)
self.assertNotIn('merge', C.labels)
@simple_layout('layouts/dependent-github.yaml', driver='github')
def test_failed_changes(self):
"Test that a change behind a failed change is retested"
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
self.executor_server.failJob('project-test1', A)
self.fake_github.emitEvent(A.addLabel('merge'))
self.fake_github.emitEvent(B.addLabel('merge'))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# It's certain that the merge job for change 2 will run, but
# the test1 and test2 jobs may or may not run.
self.assertTrue(len(self.history) > 6)
self.assertFalse(A.is_merged)
self.assertTrue(B.is_merged)
self.assertNotIn('merge', A.labels)
self.assertNotIn('merge', B.labels)
@simple_layout('layouts/dependent-github.yaml', driver='github')
def test_failed_change_at_head(self):
"Test that if a change at the head fails, jobs behind it are canceled"
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
B = self.fake_github.openFakePullRequest('org/project', 'master', 'B')
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
self.executor_server.failJob('project-test1', A)
self.fake_github.emitEvent(A.addLabel('merge'))
self.fake_github.emitEvent(B.addLabel('merge'))
self.fake_github.emitEvent(C.addLabel('merge'))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.assertTrue(self.builds[0].hasChanges(A))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.release(self.builds[0])
self.waitUntilSettled()
# project-test2, project-merge for B
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 4)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 15)
self.assertFalse(A.is_merged)
self.assertTrue(B.is_merged)
self.assertTrue(C.is_merged)
self.assertNotIn('merge', A.labels)
self.assertNotIn('merge', B.labels)
self.assertNotIn('merge', C.labels)
def _test_push_event_reconfigure(self, project, branch,
expect_reconfigure=False,
old_sha=None, new_sha=None,
modified_files=None,
removed_files=None,
expected_cat_jobs=None):
pevent = self.fake_github.getPushEvent(
project=project,
ref='refs/heads/%s' % branch,
old_rev=old_sha,
new_rev=new_sha,
modified_files=modified_files,
removed_files=removed_files)
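        # The fake event approximates GitHub's push webhook payload; a
        # rough sketch (field names inferred from the kwargs above, not
        # guaranteed to match the fake exactly):
        #
        #   {'ref': 'refs/heads/<branch>',
        #    'before': old_sha, 'after': new_sha,
        #    'commits': [{'modified': [...], 'removed': [...]}]}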
# record previous tenant reconfiguration time, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
if expected_cat_jobs is not None:
# clear the merge jobs history so we can count the cat jobs
# issued during reconfiguration
del self.merge_job_history
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
if expect_reconfigure:
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
else:
# Timestamps should be equal as no reconfiguration shall happen
self.assertEqual(old, new)
if expected_cat_jobs is not None:
# Check the expected number of cat jobs here as the (empty) config
# of org/project should be cached.
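            # ('cat' jobs are the merger requests Zuul uses to read a
            # project's config files; fewer cat jobs means more of the
            # config came from the cache.)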
            cat_jobs = self.merge_job_history.get(MergeRequest.CAT, [])
self.assertEqual(expected_cat_jobs, len(cat_jobs), cat_jobs)
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_push_event_reconfigure(self):
self._test_push_event_reconfigure('org/common-config', 'master',
modified_files=['zuul.yaml'],
old_sha='0' * 40,
expect_reconfigure=True,
expected_cat_jobs=1)
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_push_event_reconfigure_complex_branch(self):
branch = 'feature/somefeature'
project = 'org/common-config'
# prepare an existing branch
self.create_branch(project, branch)
github = self.fake_github.getGithubClient()
repo = github.repo_from_project(project)
repo._create_branch(branch)
repo._set_branch_protection(branch, False)
self.fake_github.emitEvent(
self.fake_github.getPushEvent(
project,
ref='refs/heads/%s' % branch))
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest(project, branch, 'A')
old_sha = A.head_sha
A.setMerged("merging A")
new_sha = random_sha1()
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=True,
old_sha=old_sha,
new_sha=new_sha,
modified_files=['zuul.yaml'],
expected_cat_jobs=1)
        # exclude-unprotected-branches is not set in this test class, so a
        # reconfiguration should occur
repo._delete_branch(branch)
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=True,
old_sha=new_sha,
new_sha='0' * 40,
removed_files=['zuul.yaml'])
# TODO(jlk): Make this a more generic test for unknown project
@skip("Skipped for rewrite of webhook handler")
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_ping_event(self):
# Test valid ping
pevent = {'repository': {'full_name': 'org/project'}}
resp = self.fake_github.emitEvent(('ping', pevent))
self.assertEqual(resp.status_code, 200, "Ping event didn't succeed")
# Test invalid ping
pevent = {'repository': {'full_name': 'unknown-project'}}
self.assertRaises(
urllib.error.HTTPError,
self.fake_github.emitEvent,
('ping', pevent),
)
@simple_layout('layouts/gate-github.yaml', driver='github')
def test_status_checks(self):
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', contexts=['tenant-one/check', 'tenant-one/gate'])
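        # These required status contexts act as merge requirements: the
        # gate pipeline will not enqueue the PR until all of them report
        # success, which is what the steps below exercise.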
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
        # since the required status 'tenant-one/check' is not fulfilled,
        # no job is expected
self.assertEqual(0, len(self.history))
# now set a failing status 'tenant-one/check'
repo = github.repo_from_project('org/project')
repo.create_status(A.head_sha, 'failed', 'example.com', 'description',
'tenant-one/check')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
# now set a successful status followed by a failing status to check
# that the later failed status wins
repo.create_status(A.head_sha, 'success', 'example.com', 'description',
'tenant-one/check')
repo.create_status(A.head_sha, 'failed', 'example.com', 'description',
'tenant-one/check')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
# now set the required status 'tenant-one/check'
repo.create_status(A.head_sha, 'success', 'example.com', 'description',
'tenant-one/check')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# the change should have entered the gate
self.assertEqual(2, len(self.history))
@simple_layout('layouts/gate-github.yaml', driver='github')
def test_status_checks_removal(self):
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', contexts=['something/check', 'tenant-one/gate'])
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
# Since the required status 'something/check' is not fulfilled,
# no job is expected
self.assertEqual(0, len(self.builds))
self.assertEqual(0, len(self.history))
# Set the required status 'something/check'
repo.create_status(A.head_sha, 'success', 'example.com', 'description',
'something/check')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.builds))
self.assertEqual(0, len(self.history))
# Remove it and verify the change is dequeued.
repo.create_status(A.head_sha, 'failed', 'example.com', 'description',
'something/check')
self.fake_github.emitEvent(A.getCommitStatusEvent('something/check',
state='failed',
user='foo'))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The change should be dequeued.
self.assertHistory([
dict(name='project-test1', result='ABORTED'),
dict(name='project-test2', result='ABORTED'),
], ordered=False)
self.assertEqual(1, len(A.comments))
self.assertFalse(A.is_merged)
self.assertIn('This change is unable to merge '
'due to a missing merge requirement.',
A.comments[0])
    # This test case verifies that no reconfiguration happens if a branch
    # that didn't contain configuration was deleted.
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_no_reconfigure_on_non_config_branch_delete(self):
branch = 'feature/somefeature'
project = 'org/common-config'
# prepare an existing branch
self.create_branch(project, branch)
github = self.fake_github.getGithubClient()
repo = github.repo_from_project(project)
repo._create_branch(branch)
repo._set_branch_protection(branch, False)
A = self.fake_github.openFakePullRequest(project, branch, 'A')
old_sha = A.head_sha
A.setMerged("merging A")
new_sha = random_sha1()
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=False,
old_sha=old_sha,
new_sha=new_sha,
modified_files=['README.md'])
# Check if deleting that branch is ignored as well
repo._delete_branch(branch)
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=False,
old_sha=new_sha,
new_sha='0' * 40,
modified_files=['README.md'])
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_direct_dequeue_change_github(self):
"Test direct dequeue of a github pull request"
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
event = DequeueEvent('tenant-one', 'check',
'github.com', 'org/project',
change='{},{}'.format(A.number, A.head_sha),
ref=None, oldrev=None, newrev=None)
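        # Github changes are addressed as '<PR number>,<head sha>', as
        # built by the format string above.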
self.scheds.first.sched.pipeline_management_events['tenant-one'][
'check'].put(event)
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(check_pipeline.getAllItems(), [])
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 2)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_direct_enqueue_change_github(self):
"Test direct enqueue of a pull request"
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
event = EnqueueEvent('tenant-one', 'check',
'github.com', 'org/project',
change='{},{}'.format(A.number, A.head_sha),
ref=None, oldrev=None, newrev=None)
self.scheds.first.sched.pipeline_management_events['tenant-one'][
'check'].put(event)
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
# check that change_url is correct
job1_params = self.getJobFromHistory('project-test1').parameters
job2_params = self.getJobFromHistory('project-test2').parameters
        self.assertEqual('https://github.com/org/project/pull/1',
                         job1_params['zuul']['items'][0]['change_url'])
        self.assertEqual('https://github.com/org/project/pull/1',
                         job2_params['zuul']['items'][0]['change_url'])
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_pull_commit_race(self):
"""Test graceful handling of delayed availability of commits"""
github = self.fake_github.getGithubClient('org/project')
repo = github.repo_from_project('org/project')
repo.fail_not_found = 1
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
job = self.getJobFromHistory('project-test2')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.head_sha), zuulvars['patchset'])
self.assertEqual('master', zuulvars['branch'])
self.assertEqual(1, len(A.comments))
self.assertThat(
A.comments[0],
MatchesRegex(r'.*\[project-test1 \]\(.*\).*', re.DOTALL))
self.assertThat(
A.comments[0],
MatchesRegex(r'.*\[project-test2 \]\(.*\).*', re.DOTALL))
self.assertEqual(2, len(self.history))
@simple_layout('layouts/gate-github-cherry-pick.yaml', driver='github')
def test_merge_method_cherry_pick(self):
"""
Tests that the merge mode gets forwarded to the reporter and the
merge fails because cherry-pick is not supported by github.
"""
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', contexts=['tenant-one/check', 'tenant-one/gate'])
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
repo = github.repo_from_project('org/project')
repo.create_status(A.head_sha, 'success', 'example.com', 'description',
'tenant-one/check')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# the change should have entered the gate
self.assertEqual(2, len(self.history))
# Merge should have failed because cherry-pick is not supported
self.assertEqual(2, len(A.comments))
self.assertFalse(A.is_merged)
        self.assertEqual(A.comments[1],
                         'Merge mode cherry-pick not supported by Github')
@simple_layout('layouts/gate-github-rebase.yaml', driver='github')
def test_merge_method_rebase(self):
"""
Tests that the merge mode gets forwarded to the reporter and the
PR was rebased.
"""
self.executor_server.keep_jobdir = True
self.executor_server.hold_jobs_in_build = True
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', contexts=['tenant-one/check', 'tenant-one/gate'])
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
repo.create_status(A.head_sha, 'success', 'example.com', 'description',
'tenant-one/check')
# Create a second commit on master to verify rebase behavior
self.create_commit('org/project', message="Test rebase commit")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
build = self.builds[-1]
path = os.path.join(build.jobdir.src_root, 'github.com/org/project')
repo = git.Repo(path)
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
expected = [
'initial commit',
'initial commit', # simple_layout adds a second "initial commit"
'Test rebase commit',
'A-1',
]
self.assertEqual(expected, repo_messages)
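        # With the rebase merge mode, the PR commit (A-1) is replayed on
        # top of the new master commit, which is why it appears last in
        # the expected history above.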
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# the change should have entered the gate
self.assertEqual(2, len(self.history))
# now check if the merge was done via rebase
merges = [report for report in self.fake_github.github_data.reports
if report[2] == 'merge']
        self.assertEqual(1, len(merges))
        self.assertEqual('rebase', merges[0][3])
@simple_layout('layouts/gate-github-squash-merge.yaml', driver='github')
def test_merge_method_squash_merge(self):
"""
Tests that the merge mode gets forwarded to the reporter and the
PR was squashed.
"""
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', contexts=['tenant-one/check', 'tenant-one/gate'])
pr_description = "PR description"
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A',
body_text=pr_description)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
repo = github.repo_from_project('org/project')
repo.create_status(A.head_sha, 'success', 'example.com', 'description',
'tenant-one/check')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# the change should have entered the gate
self.assertEqual(2, len(self.history))
# now check if the merge was done via squash
merges = [report for report in self.fake_github.github_data.reports
if report[2] == 'merge']
        self.assertEqual(1, len(merges))
        self.assertEqual('squash', merges[0][3])
# Assert that we won't duplicate the PR title in the merge
# message description.
self.assertEqual(A.merge_message, pr_description)
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_invalid_event(self):
# Regression test to make sure the event forwarder thread continues
# running in case the event from the GithubEventProcessor is None.
self.fake_github.emitEvent(("pull_request", "invalid"))
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest('org/project', 'master',
'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
@simple_layout('layouts/github-merge-mode.yaml', driver='github')
def test_merge_method_syntax_check(self):
"""
Tests that the merge mode gets forwarded to the reporter and the
PR was rebased.
"""
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._repodata['allow_rebase_merge'] = False
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
loading_errors = tenant.layout.loading_errors
        self.assertEqual(
            len(loading_errors), 1,
            "An error should have been stored")
self.assertIn(
"rebase not supported",
str(loading_errors[0].error))
@simple_layout("layouts/basic-github.yaml", driver="github")
def test_concurrent_get_change(self):
"""
Test that getting a change concurrently returns the same
object from the cache.
"""
conn = self.scheds.first.sched.connections.connections["github"]
# Create a new change object and remove it from the cache so
# the concurrent call will try to create a new change object.
A = self.fake_github.openFakePullRequest("org/project", "master", "A")
change_key = ChangeKey(conn.connection_name, "org/project",
"PullRequest", str(A.number), str(A.head_sha))
change = conn.getChange(change_key, refresh=True)
conn._change_cache.delete(change_key)
        # Acquire the update lock so the concurrent get task has to
        # wait for the lock to be released.
lock = conn._change_update_lock.setdefault(change_key,
threading.Lock())
lock.acquire()
try:
executor = ThreadPoolExecutor(max_workers=1)
task = executor.submit(conn.getChange, change_key, refresh=True)
for _ in iterate_timeout(5, "task to be running"):
if task.running():
break
# Add the change back so the waiting task can get the
# change from the cache.
conn._change_cache.set(change_key, change)
finally:
lock.release()
executor.shutdown()
other_change = task.result()
self.assertIsNotNone(other_change.cache_stat)
self.assertIs(change, other_change)
class TestMultiGithubDriver(ZuulTestCase):
config_file = 'zuul-multi-github.conf'
tenant_config_file = 'config/multi-github/main.yaml'
scheduler_count = 1
def test_multi_app(self):
"""Test that we can handle multiple app."""
A = self.fake_github_ro.openFakePullRequest(
'org/project', 'master', 'A')
self.fake_github_ro.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-test').result)
class TestGithubUnprotectedBranches(ZuulTestCase):
config_file = 'zuul-github-driver.conf'
tenant_config_file = 'config/unprotected-branches/main.yaml'
scheduler_count = 1
def test_unprotected_branches(self):
tenant = self.scheds.first.sched.abide.tenants\
.get('tenant-one')
project1 = tenant.untrusted_projects[0]
project2 = tenant.untrusted_projects[1]
tpc1 = tenant.project_configs[project1.canonical_name]
tpc2 = tenant.project_configs[project2.canonical_name]
# project1 should have parsed master
self.assertIn('master', tpc1.parsed_branch_config.keys())
# project2 should have no parsed branch
self.assertEqual(0, len(tpc2.parsed_branch_config.keys()))
# now enable branch protection and trigger reload
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
repo._set_branch_protection('master', True)
pevent = self.fake_github.getPushEvent(project='org/project2',
ref='refs/heads/master')
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
tpc1 = tenant.project_configs[project1.canonical_name]
tpc2 = tenant.project_configs[project2.canonical_name]
# project1 and project2 should have parsed master now
self.assertIn('master', tpc1.parsed_branch_config.keys())
self.assertIn('master', tpc2.parsed_branch_config.keys())
def test_filtered_branches_in_build(self):
"""
        Tests that unprotected branches are filtered out of builds if excluded
"""
self.executor_server.keep_jobdir = True
# Enable branch protection on org/project2@master
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
self.create_branch('org/project2', 'feat-x')
repo._set_branch_protection('master', True)
# Enable branch protection on org/project3@stable. We'll use a PR on
# this branch as a depends-on to validate that the stable branch
# which is not protected in org/project2 is not filtered out.
repo = github.repo_from_project('org/project3')
self.create_branch('org/project3', 'stable')
repo._set_branch_protection('stable', True)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest('org/project3', 'stable', 'A')
msg = "Depends-On: https://github.com/org/project1/pull/%s" % A.number
B = self.fake_github.openFakePullRequest('org/project2', 'master', 'B',
body=msg)
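        # The Depends-On footer in B's body creates a cross-project
        # dependency on A, so A is also part of B's build (see the
        # assertHistory check below).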
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
build = self.history[0]
path = os.path.join(
build.jobdir.src_root, 'github.com', 'org/project2')
build_repo = git.Repo(path)
branches = [x.name for x in build_repo.branches]
self.assertNotIn('feat-x', branches)
self.assertHistory([
dict(name='used-job', result='SUCCESS',
changes="%s,%s %s,%s" % (A.number, A.head_sha,
B.number, B.head_sha)),
])
def test_unfiltered_branches_in_build(self):
"""
        Tests that unprotected branches are kept in builds if not excluded
"""
self.executor_server.keep_jobdir = True
# Enable branch protection on org/project1@master
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project1')
self.create_branch('org/project1', 'feat-x')
repo._set_branch_protection('master', True)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
build = self.history[0]
path = os.path.join(
build.jobdir.src_root, 'github.com', 'org/project1')
build_repo = git.Repo(path)
branches = [x.name for x in build_repo.branches]
self.assertIn('feat-x', branches)
self.assertHistory([
dict(name='project-test', result='SUCCESS',
changes="%s,%s" % (A.number, A.head_sha)),
])
def test_unprotected_push(self):
"""Test that unprotected pushes don't cause tenant reconfigurations"""
# Prepare repo with an initial commit
A = self.fake_github.openFakePullRequest('org/project2', 'master', 'A')
A.setMerged("merging A")
# Do a push on top of A
pevent = self.fake_github.getPushEvent(project='org/project2',
old_rev=A.head_sha,
ref='refs/heads/master',
modified_files=['zuul.yaml'])
# record previous tenant reconfiguration time, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# We don't expect a reconfiguration because the push was to an
# unprotected branch
self.assertEqual(old, new)
# now enable branch protection and trigger the push event again
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
repo._set_branch_protection('master', True)
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# We now expect that zuul reconfigured itself
self.assertLess(old, new)
def test_protected_branch_delete(self):
"""Test that protected branch deletes trigger a tenant reconfig"""
# Prepare repo with an initial commit and enable branch protection
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
repo._set_branch_protection('master', True)
self.fake_github.emitEvent(
self.fake_github.getPushEvent(
project='org/project2', ref='refs/heads/master'))
A = self.fake_github.openFakePullRequest('org/project2', 'master', 'A')
A.setMerged("merging A")
# add a spare branch so that the project is not empty after master gets
# deleted.
repo._create_branch('feat-x')
self.fake_github.emitEvent(
self.fake_github.getPushEvent(
project='org/project2', ref='refs/heads/feat-x'))
self.waitUntilSettled()
# record previous tenant reconfiguration time, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
# Delete the branch
repo._delete_branch('master')
pevent = self.fake_github.getPushEvent(project='org/project2',
old_rev=A.head_sha,
new_rev='0' * 40,
ref='refs/heads/master',
modified_files=['zuul.yaml'])
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# We now expect that zuul reconfigured itself as we deleted a protected
# branch
self.assertLess(old, new)
def test_base_branch_updated(self):
self.create_branch('org/project2', 'feature')
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
repo._set_branch_protection('master', True)
# Make sure Zuul picked up and cached the configured branches
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
github_connection = self.scheds.first.connections.connections['github']
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
project = github_connection.source.getProject('org/project2')
# Verify that only the master branch is considered protected
branches = github_connection.getProjectBranches(project, tenant)
self.assertEqual(branches, ["master"])
A = self.fake_github.openFakePullRequest('org/project2', 'master',
'A')
# Fake an event from a pull-request that changed the base
# branch from "feature" to "master". The PR is already
# using "master" as base, but the event still references
# the old "feature" branch.
event = A.getPullRequestOpenedEvent()
event[1]["pull_request"]["base"]["ref"] = "feature"
self.fake_github.emitEvent(event)
self.waitUntilSettled()
# Make sure we are still only considering "master" to be
# protected.
branches = github_connection.getProjectBranches(project, tenant)
self.assertEqual(branches, ["master"])
    # This test verifies that a PR is considered in case it was created for
    # a branch that was set to protected just before a tenant
    # reconfiguration took place.
def test_reconfigure_on_pr_to_new_protected_branch(self):
self.create_branch('org/project2', 'release')
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
repo._set_branch_protection('master', True)
repo._create_branch('release')
repo._create_branch('feature')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
repo._set_branch_protection('release', True)
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(
'org/project2', 'release', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('used-job').result)
job = self.getJobFromHistory('used-job')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.head_sha), zuulvars['patchset'])
self.assertEqual('release', zuulvars['branch'])
self.assertEqual(1, len(self.history))
def _test_push_event_reconfigure(self, project, branch,
expect_reconfigure=False,
old_sha=None, new_sha=None,
modified_files=None,
removed_files=None,
expected_cat_jobs=None):
pevent = self.fake_github.getPushEvent(
project=project,
ref='refs/heads/%s' % branch,
old_rev=old_sha,
new_rev=new_sha,
modified_files=modified_files,
removed_files=removed_files)
# record previous tenant reconfiguration time, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
if expected_cat_jobs is not None:
# clear the merge jobs history so we can count the cat jobs
# issued during reconfiguration
del self.merge_job_history
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
if expect_reconfigure:
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
else:
# Timestamps should be equal as no reconfiguration shall happen
self.assertEqual(old, new)
if expected_cat_jobs is not None:
# Check the expected number of cat jobs here as the (empty) config
# of org/project should be cached.
cat_jobs = self.merge_job_history.get(MergeRequest.CAT, [])
self.assertEqual(expected_cat_jobs, len(cat_jobs), cat_jobs)
def test_push_event_reconfigure_complex_branch(self):
branch = 'feature/somefeature'
project = 'org/project2'
# prepare an existing branch
self.create_branch(project, branch)
github = self.fake_github.getGithubClient()
repo = github.repo_from_project(project)
repo._create_branch(branch)
repo._set_branch_protection(branch, False)
self.fake_github.emitEvent(
self.fake_github.getPushEvent(
project,
ref='refs/heads/%s' % branch))
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest(project, branch, 'A')
old_sha = A.head_sha
A.setMerged("merging A")
new_sha = random_sha1()
        # branch is not protected, so no reconfiguration even though a
        # config file was modified
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=False,
old_sha=old_sha,
new_sha=new_sha,
modified_files=['zuul.yaml'],
expected_cat_jobs=0)
# branch is not protected: no reconfiguration
repo._delete_branch(branch)
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=False,
old_sha=new_sha,
new_sha='0' * 40,
removed_files=['zuul.yaml'])
def test_branch_protection_rule_update(self):
"""Test the branch_protection_rule event"""
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
github_connection = self.scheds.first.connections.connections['github']
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
project = github_connection.source.getProject('org/project2')
# The repo starts without branch protection, and Zuul is
# configured to exclude unprotected branches, so we should see
# no branches.
branches = github_connection.getProjectBranches(project, tenant)
self.assertEqual(branches, [])
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
prev_layout = tenant.layout.uuid
# Add a rule to the master branch
repo._set_branch_protection('master', True)
self.fake_github.emitEvent(
self.fake_github.getBranchProtectionRuleEvent(
'org/project2', 'created'))
self.waitUntilSettled()
# Verify that it shows up
branches = github_connection.getProjectBranches(project, tenant)
self.assertEqual(branches, ['master'])
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
new_layout = tenant.layout.uuid
self.assertNotEqual(new_layout, prev_layout)
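        # A changed layout UUID shows the tenant really was reconfigured
        # in response to the branch protection event.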
prev_layout = new_layout
# Remove the rule
repo._set_branch_protection('master', False)
self.fake_github.emitEvent(
self.fake_github.getBranchProtectionRuleEvent(
'org/project2', 'deleted'))
self.waitUntilSettled()
# Verify it's gone again
branches = github_connection.getProjectBranches(project, tenant)
self.assertEqual(branches, [])
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
new_layout = tenant.layout.uuid
self.assertNotEqual(new_layout, prev_layout)
prev_layout = new_layout
class TestGithubWebhook(ZuulTestCase):
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
def setUp(self):
        super().setUp()
# Start the web server
self.web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
host = '127.0.0.1'
# Wait until web server is started
while True:
port = self.web.port
try:
with socket.create_connection((host, port)):
break
except ConnectionRefusedError:
pass
self.fake_github.setZuulWebPort(port)
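        # With the port registered, emitEvent(..., use_zuulweb=True)
        # delivers webhooks through zuul-web rather than directly to the
        # scheduler (in a real deployment the GitHub webhook endpoint is
        # typically /api/connection/<name>/payload).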
def tearDown(self):
        super().tearDown()
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_webhook(self):
"""Test that we can get github events via zuul-web."""
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent(),
use_zuulweb=True)
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
job = self.getJobFromHistory('project-test2')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.head_sha), zuulvars['patchset'])
self.assertEqual('master', zuulvars['branch'])
self.assertEqual(1, len(A.comments))
self.assertThat(
A.comments[0],
MatchesRegex(r'.*\[project-test1 \]\(.*\).*', re.DOTALL))
self.assertThat(
A.comments[0],
MatchesRegex(r'.*\[project-test2 \]\(.*\).*', re.DOTALL))
self.assertEqual(2, len(self.history))
        # Formerly test_pull_unmatched_branch_event: a pull request on an
        # unmatched branch must not trigger any additional jobs.
self.create_branch('org/project', 'unmatched_branch')
B = self.fake_github.openFakePullRequest(
'org/project', 'unmatched_branch', 'B')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent(),
use_zuulweb=True)
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
class TestGithubShaCache(BaseTestCase):
scheduler_count = 1
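    # GithubShaCache appears to map (project, sha) -> set of PR numbers
    # whose head is that sha; testRemoval below suggests an LRU of roughly
    # 4096 entries per project.  Typical use (sketch):
    #
    #   cache.update('org/repo', pr_dict)
    #   cache.get('org/repo', sha)  # -> {pr_number, ...}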
def testInsert(self):
cache = GithubShaCache()
pr_dict = {
'head': {
'sha': '123456',
},
'number': 1,
'state': 'open',
}
cache.update('foo/bar', pr_dict)
self.assertEqual(cache.get('foo/bar', '123456'), set({1}))
def testRemoval(self):
cache = GithubShaCache()
pr_dict = {
'head': {
'sha': '123456',
},
'number': 1,
'state': 'open',
}
cache.update('foo/bar', pr_dict)
self.assertEqual(cache.get('foo/bar', '123456'), set({1}))
# Create 4096 entries so original falls off.
for x in range(0, 4096):
pr_dict['head']['sha'] = str(x)
cache.update('foo/bar', pr_dict)
cache.get('foo/bar', str(x))
self.assertEqual(cache.get('foo/bar', '123456'), set())
def testMultiInsert(self):
cache = GithubShaCache()
pr_dict = {
'head': {
'sha': '123456',
},
'number': 1,
'state': 'open',
}
cache.update('foo/bar', pr_dict)
self.assertEqual(cache.get('foo/bar', '123456'), set({1}))
pr_dict['number'] = 2
cache.update('foo/bar', pr_dict)
self.assertEqual(cache.get('foo/bar', '123456'), set({1, 2}))
def testMultiProjectInsert(self):
cache = GithubShaCache()
pr_dict = {
'head': {
'sha': '123456',
},
'number': 1,
'state': 'open',
}
cache.update('foo/bar', pr_dict)
self.assertEqual(cache.get('foo/bar', '123456'), set({1}))
cache.update('foo/baz', pr_dict)
self.assertEqual(cache.get('foo/baz', '123456'), set({1}))
def testNoMatch(self):
cache = GithubShaCache()
pr_dict = {
'head': {
'sha': '123456',
},
'number': 1,
'state': 'open',
}
cache.update('foo/bar', pr_dict)
self.assertEqual(cache.get('bar/foo', '789'), set())
self.assertEqual(cache.get('foo/bar', '789'), set())
def testClosedPRRemains(self):
cache = GithubShaCache()
pr_dict = {
'head': {
'sha': '123456',
},
'number': 1,
'state': 'closed',
}
cache.update('foo/bar', pr_dict)
self.assertEqual(cache.get('foo/bar', '123456'), set({1}))
class TestGithubAppDriver(ZuulGithubAppTestCase):
"""Inheriting from ZuulGithubAppTestCase will enable app authentication"""
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
@simple_layout("layouts/reporting-github.yaml", driver="github")
def test_reporting_checks_api(self):
"""Using the checks API only works with app authentication"""
project = "org/project3"
github = self.fake_github.getGithubClient(None)
repo = github.repo_from_project('org/project3')
repo._set_branch_protection(
'master', contexts=['tenant-one/checks-api-reporting',
'tenant-one/gate'])
# pipeline reports pull request status both on start and success
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have a pending check for the head sha
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys())
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual("tenant-one/checks-api-reporting", check_run["name"])
self.assertEqual("in_progress", check_run["status"])
self.assertThat(
check_run["output"]["summary"],
MatchesRegex(r'.*Starting checks-api-reporting jobs.*', re.DOTALL)
)
# The external id should be a json-string containing all relevant
# information to uniquely identify this change.
self.assertEqual(
json.dumps(
{
"tenant": "tenant-one",
"pipeline": "checks-api-reporting",
"change": 1
}
),
check_run["external_id"],
)
# A running check run should provide a custom abort action
self.assertEqual(1, len(check_run["actions"]))
self.assertEqual(
{
"identifier": "abort",
"description": "Abort this check run",
"label": "Abort",
},
check_run["actions"][0],
)
# TODO (felix): How can we test if the details_url was set correctly?
# How can the details_url be configured on the test case?
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# We should now have an updated status for the head sha
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual("tenant-one/checks-api-reporting", check_run["name"])
self.assertEqual("completed", check_run["status"])
self.assertEqual("success", check_run["conclusion"])
self.assertThat(
check_run["output"]["summary"],
MatchesRegex(r'.*Build succeeded.*', re.DOTALL)
)
self.assertIsNotNone(check_run["completed_at"])
# A completed check run should not provide any custom actions
self.assertEqual(0, len(check_run["actions"]))
# Tell gate to merge to test checks requirements
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertTrue(A.is_merged)
@simple_layout("layouts/reporting-github.yaml", driver="github")
def test_reporting_checks_api_dequeue(self):
"Test that a dequeued change will be reported back to the check run"
project = "org/project4"
github = self.fake_github.getGithubClient(None)
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have a pending check for the head sha
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys())
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual(
"tenant-one/checks-api-reporting-skipped", check_run["name"])
self.assertEqual("in_progress", check_run["status"])
self.assertThat(
check_run["output"]["summary"],
MatchesRegex(
r'.*Starting checks-api-reporting-skipped jobs.*', re.DOTALL)
)
# Dequeue the pending change
event = DequeueEvent('tenant-one', 'checks-api-reporting-skipped',
'github.com', 'org/project4',
change='{},{}'.format(A.number, A.head_sha),
ref=None, oldrev=None, newrev=None)
self.scheds.first.sched.pipeline_management_events['tenant-one'][
'checks-api-reporting-skipped'].put(event)
self.waitUntilSettled()
# We should now have a skipped check run for the head sha
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual(
"tenant-one/checks-api-reporting-skipped", check_run["name"])
self.assertEqual("completed", check_run["status"])
self.assertEqual("skipped", check_run["conclusion"])
self.assertThat(
check_run["output"]["summary"],
MatchesRegex(r'.*Build canceled.*', re.DOTALL)
)
self.assertIsNotNone(check_run["completed_at"])
@simple_layout("layouts/reporting-github.yaml", driver="github")
def test_update_non_existing_check_run(self):
project = "org/project3"
github = self.fake_github.getGithubClient(None)
# Make check run creation fail
github._data.fail_check_run_creation = True
# pipeline reports pull request status both on start and success
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have no pending check for the head sha
commit = github.repo_from_project(project)._commits.get(A.head_sha)
check_runs = commit.check_runs()
self.assertEqual(0, len(check_runs))
# Make check run creation work again
github._data.fail_check_run_creation = False
# Now run the build and check if the update of the check_run could
# still be accomplished.
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual("tenant-one/checks-api-reporting", check_run["name"])
self.assertEqual("completed", check_run["status"])
self.assertEqual("success", check_run["conclusion"])
self.assertThat(
check_run["output"]["summary"],
MatchesRegex(r'.*Build succeeded.*', re.DOTALL)
)
self.assertIsNotNone(check_run["completed_at"])
# A completed check run should not provide any custom actions
self.assertEqual(0, len(check_run["actions"]))
@simple_layout("layouts/reporting-github.yaml", driver="github")
def test_update_check_run_missing_permissions(self):
project = "org/project3"
github = self.fake_github.getGithubClient(None)
repo = github.repo_from_project(project)
repo._set_permission("checks", False)
A = self.fake_github.openFakePullRequest(project, "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
        # Although we are authenticated as a github app, we lack the
        # necessary "checks" permissions for the test repository. Thus, the
        # check run creation/update should fail and we end up with two
        # comments posted to the PR containing appropriate warnings.
commit = github.repo_from_project(project)._commits.get(A.head_sha)
check_runs = commit.check_runs()
self.assertEqual(0, len(check_runs))
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys()
)
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(0, len(check_runs))
expected_warning = (
"Failed to create check run tenant-one/checks-api-reporting: "
"403 Resource not accessible by integration"
)
self.assertEqual(2, len(A.comments))
self.assertIn(expected_warning, A.comments[0])
self.assertIn(expected_warning, A.comments[1])
@simple_layout("layouts/reporting-github.yaml", driver="github")
def test_abort_check_run(self):
"Test that we can dequeue a change by aborting the related check run"
project = "org/project3"
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest(project, "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have a pending check for the head sha that provides an
# abort action.
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual("tenant-one/checks-api-reporting", check_run["name"])
self.assertEqual("in_progress", check_run["status"])
self.assertEqual(1, len(check_run["actions"]))
self.assertEqual("abort", check_run["actions"][0]["identifier"])
self.assertEqual(
{
"tenant": "tenant-one",
"pipeline": "checks-api-reporting",
"change": 1
},
json.loads(check_run["external_id"])
)
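        # The external_id payload is what lets Zuul map the incoming
        # check_run action webhook back to the right tenant/pipeline/change
        # when handling the abort below.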
# Simulate a click on the "Abort" button in Github by faking a webhook
# event with our custom abort action.
# Handling this event should dequeue the related change
self.fake_github.emitEvent(A.getCheckRunAbortEvent(check_run))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
check_pipeline = tenant.layout.pipelines["check"]
self.assertEqual(0, len(check_pipeline.getAllItems()))
self.assertEqual(1, self.countJobResults(self.history, "ABORTED"))
# The buildset was already dequeued, so there shouldn't be anything to
# release.
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
        # Since the change/buildset was dequeued, the check run should be
        # reported as cancelled and should not provide any further actions.
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
aborted_check_run = check_runs[0]
self.assertEqual(
"tenant-one/checks-api-reporting", aborted_check_run["name"]
)
self.assertEqual("completed", aborted_check_run["status"])
self.assertEqual("cancelled", aborted_check_run["conclusion"])
self.assertEqual(0, len(aborted_check_run["actions"]))
class TestCheckRunAnnotations(ZuulGithubAppTestCase, AnsibleZuulTestCase):
"""We need Github app authentication and be able to run Ansible jobs"""
config_file = 'zuul-github-driver.conf'
tenant_config_file = "config/github-file-comments/main.yaml"
scheduler_count = 1
def test_file_comments(self):
project = "org/project"
github = self.fake_github.getGithubClient(None)
        # The README file must be part of this PR to make the comment function
        # work. Thus we change its content to provide some more text.
files_dict = {
"README": textwrap.dedent(
"""
section one
===========
here is some text
and some more text
and a last line of text
section two
===========
here is another section
with even more text
and the end of the section
"""
),
}
A = self.fake_github.openFakePullRequest(
project, "master", "A", files=files_dict
)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have a pending check for the head sha
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys())
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual("tenant-one/check", check_run["name"])
self.assertEqual("completed", check_run["status"])
self.assertThat(
check_run["output"]["summary"],
MatchesRegex(r'.*Build succeeded.*', re.DOTALL)
)
annotations = check_run["output"]["annotations"]
self.assertEqual(6, len(annotations))
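        # Each dict below follows GitHub's check-run annotation schema:
        # path, annotation_level, message, start_line/end_line, plus
        # optional start_column/end_column when the range stays on a
        # single line.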
self.assertEqual(annotations[0], {
"path": "README",
"annotation_level": "warning",
"message": "Simple line annotation",
"start_line": 1,
"end_line": 1,
})
self.assertEqual(annotations[1], {
"path": "README",
"annotation_level": "warning",
"message": "Line annotation with level",
"start_line": 2,
"end_line": 2,
})
# As the columns are not part of the same line, they are ignored in the
# annotation. Otherwise Github will complain about the request.
self.assertEqual(annotations[2], {
"path": "README",
"annotation_level": "notice",
"message": "simple range annotation",
"start_line": 4,
"end_line": 6,
})
self.assertEqual(annotations[3], {
"path": "README",
"annotation_level": "failure",
"message": "Columns must be part of the same line",
"start_line": 7,
"end_line": 7,
"start_column": 13,
"end_column": 26,
})
# From the invalid/error file comments, only the "line out of file"
# should remain. All others are excluded as they would result in
# invalid Github requests, making the whole check run update fail.
self.assertEqual(annotations[4], {
"path": "README",
"annotation_level": "warning",
"message": "Line is not part of the file",
"end_line": 9999,
"start_line": 9999
})
self.assertEqual(annotations[5], {
"path": "README",
"annotation_level": "warning",
"message": "Invalid level will fall back to warning",
"start_line": 3,
"end_line": 3,
})
def test_many_file_comments(self):
# Test that we only send 50 comments to github
project = "org/project"
github = self.fake_github.getGithubClient(None)
        # The file must be part of this PR to make the comment function
        # work. Thus we change its content to provide some more text.
files_dict = {
"bigfile": textwrap.dedent(
"""
section one
===========
here is some text
"""
),
}
A = self.fake_github.openFakePullRequest(
project, "master", "A", files=files_dict
)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# We should have a pending check for the head sha
self.assertIn(
A.head_sha, github.repo_from_project(project)._commits.keys())
check_runs = self.fake_github.getCommitChecks(project, A.head_sha)
self.assertEqual(1, len(check_runs))
check_run = check_runs[0]
self.assertEqual("tenant-one/check", check_run["name"])
self.assertEqual("completed", check_run["status"])
self.assertThat(
check_run["output"]["summary"],
MatchesRegex(r'.*Build succeeded.*', re.DOTALL)
)
annotations = check_run["output"]["annotations"]
self.assertEqual(50, len(annotations))
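        # GitHub's checks API accepts at most 50 annotations per update,
        # so Zuul ranks the file comments and sends only the top 50.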
# Comments are sorted by uniqueness, so our most unique
# comment is first.
self.assertEqual(annotations[0], {
"path": "bigfile",
"annotation_level": "warning",
"message": "Insightful comment",
"start_line": 2,
"end_line": 2,
})
# This comment appears 3 times.
self.assertEqual(annotations[1], {
"path": "bigfile",
"annotation_level": "warning",
"message": "Useful comment",
"start_line": 1,
"end_line": 1,
})
# The rest.
self.assertEqual(annotations[4], {
"path": "bigfile",
"annotation_level": "warning",
"message": "Annoying comment",
"start_line": 1,
"end_line": 1,
})
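# A minimal sketch of the annotation handling the two tests above assert,
# assuming comments are plain dicts; this is illustrative only, not zuul's
# reporter code: unknown levels fall back to "warning", column information
# only survives when the range stays on a single line, and at most 50
# annotations are kept, the rarest messages first.
def _normalize_annotations_sketch(comments, limit=50):
    from collections import Counter
    levels = {'notice', 'warning', 'failure'}
    annotations = []
    for c in comments:
        annotation = {
            'path': c['path'],
            'message': c['message'],
            'start_line': c['start_line'],
            'end_line': c['end_line'],
            # Invalid levels fall back to "warning".
            'annotation_level': c.get('level')
            if c.get('level') in levels else 'warning',
        }
        # GitHub rejects column info unless the range is on a single line.
        if 'start_column' in c and c['start_line'] == c['end_line']:
            annotation['start_column'] = c['start_column']
            annotation['end_column'] = c['end_column']
        annotations.append(annotation)
    # Sort so that the rarest (most informative) messages survive the cap.
    counts = Counter(a['message'] for a in annotations)
    annotations.sort(key=lambda a: counts[a['message']])
    return annotations[:limit]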
class TestGithubDriverEnterprise(ZuulGithubAppTestCase):
config_file = 'zuul-github-driver-enterprise.conf'
scheduler_count = 1
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_report_pull_merge(self):
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', require_review=True)
# pipeline merges the pull request on success
A = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title',
body='I shouldnt be seen',
body_text='PR body')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
# Since the PR was not approved it should not be merged
self.assertFalse(A.is_merged)
A.addReview('derp', 'APPROVED')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
# After approval it should be merged
self.assertTrue(A.is_merged)
self.assertThat(A.merge_message,
MatchesRegex(r'.*PR title\n\nPR body.*', re.DOTALL))
self.assertThat(A.merge_message,
Not(MatchesRegex(
r'.*I shouldnt be seen.*',
re.DOTALL)))
self.assertEqual(len(A.comments), 0)
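# The test above relies on zuul consulting the PR's reviewDecision when
# branch protection requires review; the legacy variant below fakes an old
# GHE release that does not expose reviewDecision, so that check is skipped.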
class TestGithubDriverEnterpriseLegacy(ZuulGithubAppTestCase):
config_file = 'zuul-github-driver-enterprise.conf'
scheduler_count = 1
def setUp(self):
self.old_version = FakeGithubEnterpriseClient.version
FakeGithubEnterpriseClient.version = '2.19.0'
super().setUp()
def tearDown(self):
super().tearDown()
FakeGithubEnterpriseClient.version = self.old_version
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_report_pull_merge(self):
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', require_review=True)
# pipeline merges the pull request on success
A = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title',
body='I shouldnt be seen',
body_text='PR body')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
        # Note: the PR was not approved, but old GitHub does not support
        # reviewDecision, so the requirement is ignored and zuul merges
        # nevertheless
self.assertTrue(A.is_merged)
self.assertThat(A.merge_message,
MatchesRegex(r'.*PR title\n\nPR body.*', re.DOTALL))
self.assertThat(A.merge_message,
Not(MatchesRegex(
r'.*I shouldnt be seen.*',
re.DOTALL)))
self.assertEqual(len(A.comments), 0)
class TestGithubDriverEnterpriseCache(ZuulGithubAppTestCase):
config_file = 'zuul-github-driver-enterprise.conf'
scheduler_count = 1
def setup_config(self, config_file):
self.upstream_cache_root = self.upstream_root + '-cache'
config = super().setup_config(config_file)
# This adds the GHE repository cache feature
config.set('connection github', 'repo_cache', self.upstream_cache_root)
config.set('connection github', 'repo_retry_timeout', '30')
# Synchronize the upstream repos to the upstream repo cache
self.synchronize_repo('org/common-config')
self.synchronize_repo('org/project')
return config
def init_repo(self, project, tag=None):
super().init_repo(project, tag)
# After creating the upstream repo, also create the empty
# cache repo (but unsynchronized for now)
parts = project.split('/')
path = os.path.join(self.upstream_cache_root, *parts[:-1])
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(self.upstream_cache_root, project)
repo = git.Repo.init(path)
with repo.config_writer() as config_writer:
config_writer.set_value('user', 'email', '[email protected]')
config_writer.set_value('user', 'name', 'User Name')
def synchronize_repo(self, project):
# Synchronize the upstream repo to the cache
upstream_path = os.path.join(self.upstream_root, project)
upstream = git.Repo(upstream_path)
cache_path = os.path.join(self.upstream_cache_root, project)
cache = git.Repo(cache_path)
refs = upstream.git.for_each_ref(
'--format=%(objectname) %(refname)'
)
for ref in refs.splitlines():
parts = ref.split(" ")
if len(parts) == 2:
commit, ref = parts
else:
continue
self.log.debug("Synchronize ref %s: %s", ref, commit)
cache.git.fetch(upstream_path, ref)
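            # The fetch stores the objects in the cache's object store
            # without updating any refs, so convert the hex sha to binary,
            # look up the object, and force the matching ref to point at it.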
binsha = gitdb.util.to_bin_sha(commit)
obj = git.objects.Object.new_from_sha(cache, binsha)
git.refs.Reference.create(cache, ref, obj, force=True)
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_github_repo_cache(self):
# Test that we fetch and configure retries correctly when
# using a github enterprise repo cache (the cache can be
# slightly out of sync).
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection('master', require_review=True)
# Make sure we have correctly overridden the retry attempts
merger = self.executor_server.merger
repo = merger.getRepo('github', 'org/project')
self.assertEqual(repo.retry_attempts, 1)
# Our initial attempt should fail; make it happen quickly
self.patch(Repo, 'retry_interval', 1)
# pipeline merges the pull request on success
A = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title',
body='I shouldnt be seen',
body_text='PR body')
A.addReview('user', 'APPROVED')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled('initial failed attempt')
self.assertFalse(A.is_merged)
# Now synchronize the upstream repo to the cache and try again
self.synchronize_repo('org/project')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled('second successful attempt')
self.assertTrue(A.is_merged)
self.assertThat(A.merge_message,
MatchesRegex(r'.*PR title\n\nPR body.*', re.DOTALL))
self.assertThat(A.merge_message,
Not(MatchesRegex(
r'.*I shouldnt be seen.*',
re.DOTALL)))
self.assertEqual(len(A.comments), 0)
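# A minimal sketch of the retry behaviour the cache test above relies on
# (assumed shape, not zuul's merger code): when the cache repo is behind,
# the initial fetch fails and is retried after retry_interval seconds.
def _fetch_with_retry_sketch(fetch, retry_attempts=1, retry_interval=1):
    import time
    for attempt in range(retry_attempts + 1):
        try:
            return fetch()
        except Exception:
            if attempt == retry_attempts:
                raise
            time.sleep(retry_interval)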
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_github_driver.py
|
test_github_driver.py
|
# Copyright 2021 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(ianw): 2021-08-30 we are testing whitespace things that trigger
# flake8; for now there is no way to turn off specific tests on a
# per-file basis.
# flake8: noqa
import textwrap
from zuul.lib.dependson import find_dependency_headers
from tests.base import BaseTestCase
class TestDependsOnParsing(BaseTestCase):
def test_depends_on_parsing(self):
msg = textwrap.dedent('''\
This is a sample review subject
Review text
Depends-On:https://this.is.a.url/1
Depends-On: https://this.is.a.url/2
Depends-On: https://this.is.a.url/3
Depends-On: https://this.is.a.url/4
''')
r = find_dependency_headers(msg)
self.assertListEqual(r,
['https://this.is.a.url/1',
'https://this.is.a.url/2',
'https://this.is.a.url/3',
'https://this.is.a.url/4'])
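# An illustrative re-implementation of the header matching exercised above
# (assumed form; see zuul.lib.dependson for the real one): a "Depends-On:"
# prefix with optional whitespace before the URL, one header per line.
def _find_dependency_headers_sketch(message):
    import re
    pattern = re.compile(r'^Depends-On:\s*(\S+)\s*$',
                         re.IGNORECASE | re.MULTILINE)
    return pattern.findall(message)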
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_dependson.py
|
test_dependson.py
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zuul import change_matcher as cm
from zuul import model
from tests.base import BaseTestCase
class BaseTestMatcher(BaseTestCase):
project = 'project'
def setUp(self):
super(BaseTestMatcher, self).setUp()
self.change = model.Change(self.project)
class TestAbstractChangeMatcher(BaseTestMatcher):
def test_str(self):
matcher = cm.ProjectMatcher(self.project)
self.assertEqual(str(matcher), '{ProjectMatcher:project}')
def test_repr(self):
matcher = cm.ProjectMatcher(self.project)
self.assertEqual(repr(matcher), '<ProjectMatcher project>')
class TestProjectMatcher(BaseTestMatcher):
def test_matches_returns_true(self):
matcher = cm.ProjectMatcher(self.project)
self.assertTrue(matcher.matches(self.change))
def test_matches_returns_false(self):
matcher = cm.ProjectMatcher('not_project')
self.assertFalse(matcher.matches(self.change))
class TestBranchMatcher(BaseTestMatcher):
def setUp(self):
super(TestBranchMatcher, self).setUp()
self.matcher = cm.BranchMatcher('foo')
def test_matches_returns_true_on_matching_branch(self):
self.change.branch = 'foo'
self.assertTrue(self.matcher.matches(self.change))
def test_matches_returns_true_on_matching_ref(self):
delattr(self.change, 'branch')
self.change.ref = 'foo'
self.assertTrue(self.matcher.matches(self.change))
def test_matches_returns_false_for_no_match(self):
self.change.branch = 'bar'
self.change.ref = 'baz'
self.assertFalse(self.matcher.matches(self.change))
class TestAbstractMatcherCollection(BaseTestMatcher):
def test_str(self):
matcher = cm.MatchAll([cm.FileMatcher('foo')])
self.assertEqual(str(matcher), '{MatchAll:{FileMatcher:foo}}')
def test_repr(self):
matcher = cm.MatchAll([])
self.assertEqual(repr(matcher), '<MatchAll>')
class BaseTestFilesMatcher(BaseTestMatcher):
def _test_matches(self, expected, files=None):
if files is not None:
self.change.files = files
self.assertEqual(expected, self.matcher.matches(self.change))
class TestMatchAllFiles(BaseTestFilesMatcher):
def setUp(self):
super(TestMatchAllFiles, self).setUp()
self.matcher = cm.MatchAllFiles([cm.FileMatcher('^docs/.*$')])
def test_matches_returns_false_when_files_attr_missing(self):
delattr(self.change, 'files')
self._test_matches(False)
def test_matches_returns_false_when_no_files(self):
self._test_matches(False)
def test_matches_returns_false_when_not_all_files_match(self):
self._test_matches(False, files=['/COMMIT_MSG', 'docs/foo', 'foo/bar'])
def test_matches_returns_true_when_single_file_does_not_match(self):
self._test_matches(True, files=['docs/foo'])
def test_matches_returns_false_when_commit_message_matches(self):
self._test_matches(False, files=['/COMMIT_MSG'])
def test_matches_returns_true_when_all_files_match(self):
self._test_matches(True, files=['/COMMIT_MSG', 'docs/foo'])
def test_matches_returns_true_when_single_file_matches(self):
self._test_matches(True, files=['docs/foo'])
class TestMatchAnyFiles(BaseTestFilesMatcher):
def setUp(self):
super(TestMatchAnyFiles, self).setUp()
self.matcher = cm.MatchAnyFiles([cm.FileMatcher('^docs/.*$')])
def test_matches_returns_true_when_files_attr_missing(self):
delattr(self.change, 'files')
self._test_matches(True)
def test_matches_returns_true_when_no_files(self):
self._test_matches(True)
def test_matches_returns_true_when_only_commit_message(self):
self._test_matches(True, files=['/COMMIT_MSG'])
def test_matches_returns_true_when_some_files_match(self):
self._test_matches(True, files=['/COMMIT_MSG', 'docs/foo', 'foo/bar'])
def test_matches_returns_true_when_single_file_matches(self):
self._test_matches(True, files=['docs/foo'])
def test_matches_returns_false_when_no_matching_files(self):
self._test_matches(False, files=['/COMMIT_MSG', 'foo/bar'])
class TestMatchAll(BaseTestMatcher):
def test_matches_returns_true(self):
matcher = cm.MatchAll([cm.ProjectMatcher(self.project)])
self.assertTrue(matcher.matches(self.change))
def test_matches_returns_false_for_missing_matcher(self):
matcher = cm.MatchAll([cm.ProjectMatcher('not_project')])
self.assertFalse(matcher.matches(self.change))
class TestMatchAny(BaseTestMatcher):
def test_matches_returns_true(self):
matcher = cm.MatchAny([cm.ProjectMatcher(self.project)])
self.assertTrue(matcher.matches(self.change))
def test_matches_returns_false(self):
matcher = cm.MatchAny([cm.ProjectMatcher('not_project')])
self.assertFalse(matcher.matches(self.change))
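# The collection matchers asserted above compose like Python's all()/any().
# A minimal sketch of the semantics (illustrative, not zuul's code):
def _match_all_sketch(matchers, change):
    # MatchAll: every sub-matcher must accept the change.
    return all(m.matches(change) for m in matchers)
def _match_any_sketch(matchers, change):
    # MatchAny: at least one sub-matcher must accept the change.
    return any(m.matches(change) for m in matchers)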
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_change_matcher.py
|
test_change_matcher.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tests.base import AnsibleZuulTestCase
class TestOpenStack(AnsibleZuulTestCase):
# A temporary class to experiment with how openstack can use
# Zuulv3
tenant_config_file = 'config/openstack/main.yaml'
def test_nova_master(self):
A = self.fake_gerrit.addFakeChange('openstack/nova', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('python27').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('python35').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertEqual(self.getJobFromHistory('python27').node,
'ubuntu-xenial')
def test_nova_mitaka(self):
self.create_branch('openstack/nova', 'stable/mitaka')
A = self.fake_gerrit.addFakeChange('openstack/nova',
'stable/mitaka', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('python27').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('python35').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertEqual(self.getJobFromHistory('python27').node,
'ubuntu-trusty')
def test_dsvm_keystone_repo(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('openstack/nova', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='dsvm', result='SUCCESS', changes='1,1')])
build = self.getJobFromHistory('dsvm')
# Check that a change to nova triggered a keystone clone
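        # (the executor's repo cache stores each project under its
        # URL-quoted name, hence the 'openstack%2Fkeystone' path below)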
executor_git_dir = os.path.join(self.executor_src_root,
'review.example.com',
'openstack', 'openstack%2Fkeystone',
'.git')
self.assertTrue(os.path.exists(executor_git_dir),
msg='openstack/keystone should be cloned.')
jobdir_git_dir = os.path.join(build.jobdir.src_root,
'review.example.com',
'openstack', 'keystone', '.git')
self.assertTrue(os.path.exists(jobdir_git_dir),
msg='openstack/keystone should be cloned.')
def test_dsvm_nova_repo(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('openstack/keystone', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='dsvm', result='SUCCESS', changes='1,1')])
build = self.getJobFromHistory('dsvm')
# Check that a change to keystone triggered a nova clone
executor_git_dir = os.path.join(self.executor_src_root,
'review.example.com',
'openstack', 'openstack%2Fnova',
'.git')
self.assertTrue(os.path.exists(executor_git_dir),
msg='openstack/nova should be cloned.')
jobdir_git_dir = os.path.join(build.jobdir.src_root,
'review.example.com',
'openstack', 'nova', '.git')
self.assertTrue(os.path.exists(jobdir_git_dir),
msg='openstack/nova should be cloned.')
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_openstack.py
|
test_openstack.py
|
# Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tests.base import ZuulTestCase, simple_layout
class TestRequirementsApprovalNewerThan(ZuulTestCase):
"""Requirements with a newer-than comment requirement"""
tenant_config_file = 'config/requirements/newer-than/main.yaml'
def test_pipeline_require_approval_newer_than(self):
"Test pipeline requirement: approval newer than"
return self._test_require_approval_newer_than('org/project1',
'project1-job')
def test_trigger_require_approval_newer_than(self):
"Test trigger requirement: approval newer than"
return self._test_require_approval_newer_than('org/project2',
'project2-job')
def _test_require_approval_newer_than(self, project, job):
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.addApproval('Code-Review', 2, username='nobody')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
# No +1 from Jenkins so should not be enqueued
self.assertEqual(len(self.history), 0)
# Add a too-old +1, should not be enqueued
A.addApproval('Verified', 1, username='jenkins',
granted_on=time.time() - 72 * 60 * 60)
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Add a recent +1
self.fake_gerrit.addEvent(A.addApproval('Verified', 1,
username='jenkins'))
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
class TestRequirementsApprovalOlderThan(ZuulTestCase):
"""Requirements with a older-than comment requirement"""
tenant_config_file = 'config/requirements/older-than/main.yaml'
def test_pipeline_require_approval_older_than(self):
"Test pipeline requirement: approval older than"
return self._test_require_approval_older_than('org/project1',
'project1-job')
def test_trigger_require_approval_older_than(self):
"Test trigger requirement: approval older than"
return self._test_require_approval_older_than('org/project2',
'project2-job')
def _test_require_approval_older_than(self, project, job):
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.addApproval('Code-Review', 2, username='nobody')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
# No +1 from Jenkins so should not be enqueued
self.assertEqual(len(self.history), 0)
# Add a recent +1 which should not be enqueued
A.addApproval('Verified', 1)
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Add an old +1 which should be enqueued
A.addApproval('Verified', 1, username='jenkins',
granted_on=time.time() - 72 * 60 * 60)
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
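# Both requirement classes above compare an approval's grant time against a
# configured age; a minimal sketch of the assumed comparison (the fixtures
# configure the actual cutoff, and the tests use a 72-hour-old vote):
def _approval_is_newer_than_sketch(granted_on, max_age_hours, now):
    return (now - granted_on) < max_age_hours * 3600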
class TestRequirementsUserName(ZuulTestCase):
"""Requirements with a username requirement"""
tenant_config_file = 'config/requirements/username/main.yaml'
def test_pipeline_require_approval_username(self):
"Test pipeline requirement: approval username"
return self._test_require_approval_username('org/project1',
'project1-job')
def test_trigger_require_approval_username(self):
"Test trigger requirement: approval username"
return self._test_require_approval_username('org/project2',
'project2-job')
def _test_require_approval_username(self, project, job):
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.addApproval('Code-Review', 2, username='nobody')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
# No approval from Jenkins so should not be enqueued
self.assertEqual(len(self.history), 0)
# Add an approval from Jenkins
A.addApproval('Verified', 1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
class TestRequirementsEmail(ZuulTestCase):
"""Requirements with a email requirement"""
tenant_config_file = 'config/requirements/email/main.yaml'
def test_pipeline_require_approval_email(self):
"Test pipeline requirement: approval email"
return self._test_require_approval_email('org/project1',
'project1-job')
def test_trigger_require_approval_email(self):
"Test trigger requirement: approval email"
return self._test_require_approval_email('org/project2',
'project2-job')
def _test_require_approval_email(self, project, job):
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.addApproval('Code-Review', 2, username='nobody')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
# No approval from Jenkins so should not be enqueued
self.assertEqual(len(self.history), 0)
# Add an approval from Jenkins
A.addApproval('Verified', 1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
class TestRequirementsVote1(ZuulTestCase):
"""Requirements with a voting requirement"""
tenant_config_file = 'config/requirements/vote1/main.yaml'
def test_pipeline_require_approval_vote1(self):
"Test pipeline requirement: approval vote with one value"
return self._test_require_approval_vote1('org/project1',
'project1-job')
def test_trigger_require_approval_vote1(self):
"Test trigger requirement: approval vote with one value"
return self._test_require_approval_vote1('org/project2',
'project2-job')
def _test_require_approval_vote1(self, project, job):
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.addApproval('Code-Review', 2, username='nobody')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
# No approval from Jenkins so should not be enqueued
self.assertEqual(len(self.history), 0)
# A -1 from jenkins should not cause it to be enqueued
A.addApproval('Verified', -1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A +1 should allow it to be enqueued
A.addApproval('Verified', 1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.loading_errors), 1)
self.assertEqual(tenant.layout.loading_errors[0].name,
'Gerrit require-approval Deprecation')
self.assertEqual(tenant.layout.loading_errors[0].severity,
'warning')
self.assertIn('require-approval',
tenant.layout.loading_errors[0].short_error)
self.assertIn('require-approval',
tenant.layout.loading_errors[0].error)
self.assertIsNotNone(tenant.layout.loading_errors[0].key.context)
self.assertIsNotNone(tenant.layout.loading_errors[0].key.mark)
self.assertIsNotNone(tenant.layout.loading_errors[0].key.error_text)
class TestRequirementsVote2(ZuulTestCase):
"""Requirements with a voting requirement"""
tenant_config_file = 'config/requirements/vote2/main.yaml'
def test_pipeline_require_approval_vote2(self):
"Test pipeline requirement: approval vote with two values"
return self._test_require_approval_vote2('org/project1',
'project1-job')
def test_trigger_require_approval_vote2(self):
"Test trigger requirement: approval vote with two values"
return self._test_require_approval_vote2('org/project2',
'project2-job')
def _test_require_approval_vote2(self, project, job):
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.addApproval('Code-Review', 2, username='nobody')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
# No approval from Jenkins so should not be enqueued
self.assertEqual(len(self.history), 0)
# A -1 from jenkins should not cause it to be enqueued
A.addApproval('Verified', -1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A -2 from jenkins should not cause it to be enqueued
A.addApproval('Verified', -2, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A +1 from jenkins should allow it to be enqueued
A.addApproval('Verified', 1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
# A +2 from nobody should not cause it to be enqueued
B = self.fake_gerrit.addFakeChange(project, 'master', 'B')
# A comment event that we will keep submitting to trigger
comment = B.addApproval('Code-Review', 2, username='nobody')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# A +2 from jenkins should allow it to be enqueued
B.addApproval('Verified', 2, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, job)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.loading_errors), 1)
self.assertEqual(tenant.layout.loading_errors[0].name,
'Gerrit require-approval Deprecation')
self.assertEqual(tenant.layout.loading_errors[0].severity,
'warning')
self.assertIn('require-approval',
tenant.layout.loading_errors[0].short_error)
self.assertIn('require-approval',
tenant.layout.loading_errors[0].error)
self.assertIsNotNone(tenant.layout.loading_errors[0].key.context)
self.assertIsNotNone(tenant.layout.loading_errors[0].key.mark)
self.assertIsNotNone(tenant.layout.loading_errors[0].key.error_text)
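# Both vote tests above also assert the deprecation warning that the config
# loader records for the legacy `require-approval` trigger attribute; newer
# configurations express the same condition with a `require` block.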
class TestRequirementsState(ZuulTestCase):
"""Requirements with simple state requirement"""
tenant_config_file = 'config/requirements/state/main.yaml'
def test_pipeline_require_current_patchset(self):
# Create two patchsets and let their tests settle out. Then
# comment on first patchset and check that no additional
# jobs are run.
A = self.fake_gerrit.addFakeChange('current-project', 'master', 'A')
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 1))
self.waitUntilSettled()
A.addPatchset()
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2) # one job for each ps
self.fake_gerrit.addEvent(A.getChangeCommentEvent(1))
self.waitUntilSettled()
# Assert no new jobs ran after event for old patchset.
self.assertEqual(len(self.history), 2)
# Make sure the same event on a new PS will trigger
self.fake_gerrit.addEvent(A.getChangeCommentEvent(2))
self.waitUntilSettled()
self.assertEqual(len(self.history), 3)
def test_pipeline_require_open(self):
A = self.fake_gerrit.addFakeChange('open-project', 'master', 'A',
status='MERGED')
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
B = self.fake_gerrit.addFakeChange('open-project', 'master', 'B')
self.fake_gerrit.addEvent(B.addApproval('Code-Review', 2))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
def test_pipeline_require_status(self):
A = self.fake_gerrit.addFakeChange('status-project', 'master', 'A',
status='MERGED')
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
B = self.fake_gerrit.addFakeChange('status-project', 'master', 'B')
self.fake_gerrit.addEvent(B.addApproval('Code-Review', 2))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
def test_pipeline_require_wip(self):
A = self.fake_gerrit.addFakeChange('wip-project', 'master', 'A')
A.setWorkInProgress(True)
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
B = self.fake_gerrit.addFakeChange('wip-project', 'master', 'B')
self.fake_gerrit.addEvent(B.addApproval('Code-Review', 2))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
class TestRequirementsRejectUsername(ZuulTestCase):
"""Requirements with reject username requirement"""
tenant_config_file = 'config/requirements/reject-username/main.yaml'
def _test_require_reject_username(self, project, job):
"Test negative username's match"
# Should only trigger if Jenkins hasn't voted.
# add in a change with no comments
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# add in a comment that will trigger
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 1,
username='reviewer'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
# add in a comment from jenkins user which shouldn't trigger
self.fake_gerrit.addEvent(A.addApproval('Verified', 1,
username='jenkins'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# Check future reviews also won't trigger as a 'jenkins' user has
# commented previously
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 1,
username='reviewer'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
def test_pipeline_reject_username(self):
"Test negative pipeline requirement: no comment from jenkins"
return self._test_require_reject_username('org/project1',
'project1-job')
def test_trigger_reject_username(self):
"Test negative trigger requirement: no comment from jenkins"
return self._test_require_reject_username('org/project2',
'project2-job')
class TestRequirementsReject(ZuulTestCase):
"""Requirements with reject requirement"""
tenant_config_file = 'config/requirements/reject/main.yaml'
def _test_require_reject(self, project, job):
"Test no approval matches a reject param"
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# First positive vote should not queue until jenkins has +1'd
comment = A.addApproval('Verified', 1, username='reviewer_a')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Jenkins should put in a +1 which will also queue
comment = A.addApproval('Verified', 1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, job)
# Negative vote should not queue
comment = A.addApproval('Verified', -1, username='reviewer_b')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# Future approvals should do nothing
comment = A.addApproval('Verified', 1, username='reviewer_c')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
        # Changing the negative vote to a positive one should queue
comment = A.addApproval('Verified', 1, username='reviewer_b')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, job)
# Future approvals should also queue
comment = A.addApproval('Verified', 1, username='reviewer_d')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 3)
self.assertEqual(self.history[2].name, job)
def test_pipeline_require_reject(self):
"Test pipeline requirement: rejections absent"
return self._test_require_reject('org/project1', 'project1-job')
def test_trigger_require_reject(self):
"Test trigger requirement: rejections absent"
return self._test_require_reject('org/project2', 'project2-job')
def test_pipeline_requirement_reject_unrelated(self):
"Test if reject is obeyed if another unrelated approval is present"
# Having no approvals whatsoever shall not reject the change
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
self.fake_gerrit.addEvent(A.getChangeCommentEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# Setting another unrelated approval shall not change the behavior of
# the configured reject.
comment = A.addApproval('Approved', 1, username='reviewer_e')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
# Setting the approval 'Verified' to a rejected value shall not lead to
# a build.
comment = A.addApproval('Verified', -1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
# Setting the approval 'Verified' to an accepted value shall lead to
# a build.
comment = A.addApproval('Verified', 1, username='jenkins')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 3)
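# A minimal sketch of the reject logic exercised above (assumed semantics
# and hypothetical dict keys, not zuul's implementation): a change is
# blocked while any approval matches a configured reject filter, and a
# later re-vote by the same reviewer replaces the earlier value.
def _change_is_rejected_sketch(approvals, rejected_values,
                               category='Verified'):
    return any(a['category'] == category and a['value'] in rejected_values
               for a in approvals)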
class TestRequirementsTrustedCheck(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
tenant_config_file = "config/requirements/trusted-check/main.yaml"
def test_non_live_requirements(self):
# Test that pipeline requirements are applied to non-live
# changes.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1 2,1')],
ordered=False)
def test_other_connections(self):
# Test allow-other-connections: False
A = self.fake_github.openFakePullRequest("gh/project", "master", "A")
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.url,
)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
class TestGerritTriggerRequirements(ZuulTestCase):
scheduler_count = 1
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_require_open(self):
# Test trigger require-open
jobname = 'require-open'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's open, so it should be enqueued
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
# Not open, so should be ignored
A.setMerged()
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_reject_open(self):
# Test trigger reject-open
jobname = 'reject-open'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's open, so it should not be enqueued
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Not open, so should be enqueued
A.setMerged()
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_require_wip(self):
# Test trigger require-wip
jobname = 'require-wip'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's not WIP, so it should be ignored
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# WIP, so should be enqueued
A.setWorkInProgress(True)
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_reject_wip(self):
# Test trigger reject-wip
jobname = 'reject-wip'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's not WIP, so it should be enqueued
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
# WIP, so should be ignored
A.setWorkInProgress(True)
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_require_current_patchset(self):
        # Test trigger require-current-patchset
jobname = 'require-current-patchset'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's current, so it should be enqueued
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
# Not current, so should be ignored
A.addPatchset()
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_reject_current_patchset(self):
        # Test trigger reject-current-patchset
jobname = 'reject-current-patchset'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's current, so it should be ignored
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Not current, so should be enqueued
A.addPatchset()
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_require_status(self):
# Test trigger require-status
jobname = 'require-status'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's not merged, so it should be ignored
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Merged, so should be enqueued
A.setMerged()
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_reject_status(self):
# Test trigger reject-status
jobname = 'reject-status'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# It's not merged, so it should be enqueued
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
# Merged, so should be ignored
A.setMerged()
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_require_approval(self):
# Test trigger require-approval
jobname = 'require-approval'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# Missing approval, so it should be ignored
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Has approval, so it should be enqueued
A.addApproval('Verified', 1, username='zuul')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/gerrit-trigger-requirements.yaml')
def test_reject_approval(self):
# Test trigger reject-approval
jobname = 'reject-approval'
project = 'org/project'
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getChangeCommentEvent(1, f'test {jobname}')
# Missing approval, so it should be enqueued
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
# Has approval, so it should be ignored
A.addApproval('Verified', 1, username='zuul')
self.fake_gerrit.addEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
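# The require-*/reject-* pairs above are duals of one another. A minimal
# sketch of the assumed filtering, where `require` and `reject` are
# hypothetical predicates over the change (not zuul's implementation):
def _trigger_filter_sketch(change, require=None, reject=None):
    if require is not None and not require(change):
        return False
    if reject is not None and reject(change):
        return False
    return True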
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_requirements.py
|
test_requirements.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import json
import logging
import os
import sys
import textwrap
import gc
import re
from time import sleep
from unittest import mock, skip, skipIf
from zuul.lib import yamlutil
import fixtures
import git
import paramiko
import zuul.configloader
from zuul.lib import yamlutil as yaml
from zuul.model import MergeRequest
from zuul.zk.blob_store import BlobStore
from tests.base import (
AnsibleZuulTestCase,
ZuulTestCase,
FIXTURE_DIR,
simple_layout,
iterate_timeout,
skipIfMultiScheduler,
)
class TestMultipleTenants(AnsibleZuulTestCase):
# A temporary class to hold new tests while others are disabled
tenant_config_file = 'config/multi-tenant/main.yaml'
def test_multiple_tenants(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project1-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('python27').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertIn('tenant-one-gate', A.messages[1],
"A should transit tenant-one gate")
self.assertNotIn('tenant-two-gate', A.messages[1],
"A should *not* transit tenant-two gate")
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('python27',
'org/project2').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project2-test1').result,
'SUCCESS')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2,
"B should report start and success")
self.assertIn('tenant-two-gate', B.messages[1],
"B should transit tenant-two gate")
self.assertNotIn('tenant-one-gate', B.messages[1],
"B should *not* transit tenant-one gate")
self.assertEqual(A.reported, 2, "Activity in tenant two should"
"not affect tenant one")
class TestProtected(ZuulTestCase):
tenant_config_file = 'config/protected/main.yaml'
def test_protected_ok(self):
        # test clean usage of a protected parent job
in_repo_conf = textwrap.dedent(
"""
- job:
name: job-protected
protected: true
run: playbooks/job-protected.yaml
- project:
name: org/project
check:
jobs:
- job-child-ok
- job:
name: job-child-ok
parent: job-protected
- project:
name: org/project
check:
jobs:
- job-child-ok
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
def test_protected_reset(self):
# try to reset protected flag
in_repo_conf = textwrap.dedent(
"""
- job:
name: job-protected
protected: true
run: playbooks/job-protected.yaml
- job:
name: job-child-reset-protected
parent: job-protected
protected: false
- project:
name: org/project
check:
jobs:
- job-child-reset-protected
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # The change tried to reset the protected flag.
        # Thus it should fail.
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertIn('Unable to reset protected attribute', A.messages[0])
def test_protected_inherit_not_ok(self):
# try to inherit from a protected job in different project
in_repo_conf = textwrap.dedent(
"""
- job:
name: job-child-notok
run: playbooks/job-child-notok.yaml
parent: job-protected
- project:
name: org/project1
check:
jobs:
- job-child-notok
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertIn("is a protected job in a different project",
A.messages[0])
class TestAbstract(ZuulTestCase):
tenant_config_file = 'config/abstract/main.yaml'
def test_abstract_fail(self):
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- job-abstract
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertIn('may not be directly run', A.messages[0])
def test_child_of_abstract(self):
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- job-child
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
class TestIntermediate(ZuulTestCase):
tenant_config_file = 'config/intermediate/main.yaml'
def test_intermediate_fail(self):
        # a non-abstract job cannot directly inherit from an intermediate job
in_repo_conf = textwrap.dedent(
"""
- job:
name: job-instantiate-intermediate
parent: job-abstract-intermediate
- project:
check:
jobs:
- job-instantiate-intermediate
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertIn('is not abstract', A.messages[0])
def test_intermediate_config_fail(self):
# an intermediate job must also be abstract
in_repo_conf = textwrap.dedent(
"""
- job:
name: job-intermediate-but-not-abstract
intermediate: true
abstract: false
- project:
check:
jobs:
- job-intermediate-but-not-abstract
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertIn('An intermediate job must also be abstract',
A.messages[0])
def test_intermediate_several(self):
# test passing through several intermediate jobs
in_repo_conf = textwrap.dedent(
"""
- project:
name: org/project
check:
jobs:
- job-actual
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
class TestFinal(ZuulTestCase):
tenant_config_file = 'config/final/main.yaml'
def test_final_variant_ok(self):
# test clean usage of final parent job
in_repo_conf = textwrap.dedent(
"""
- project:
name: org/project
check:
jobs:
- job-final
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
def test_final_variant_error(self):
# test misuse of final parent job
in_repo_conf = textwrap.dedent(
"""
- project:
name: org/project
check:
jobs:
- job-final:
vars:
dont_override_this: bar
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # The change tried to override one of the final job's variables.
        # Thus it should fail.
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertIn('Unable to modify final job', A.messages[0])
class TestBranchCreation(ZuulTestCase):
tenant_config_file = 'config/one-project/main.yaml'
def test_missed_branch_create(self):
# Test that if we miss a branch creation event, we can recover
# by issuing a full-reconfiguration.
self.create_branch('org/project', 'stable/yoga')
# We do not emit the gerrit event, thus simulating a missed event;
# verify that nothing happens
A = self.fake_gerrit.addFakeChange('org/project', 'stable/yoga', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 0)
self.assertHistory([])
# Correct the situation with a full reconfiguration
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1')])
class TestBranchDeletion(ZuulTestCase):
tenant_config_file = 'config/branch-deletion/main.yaml'
def test_branch_delete(self):
# This tests a tenant reconfiguration on deleting a branch
# *after* an earlier failed tenant reconfiguration. This
# ensures that cached data are appropriately removed, even if
# we are recovering from an invalid config.
self.create_branch('org/project', 'stable/queens')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/queens'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- nonexistent-job
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'stable/queens', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
self.delete_branch('org/project', 'stable/queens')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchDeletedEvent(
'org/project', 'stable/queens'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- base
""")
file_dict = {'zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='base', result='SUCCESS', changes='2,1')])
def test_branch_delete_full_reconfiguration(self):
# This tests a full configuration after deleting a branch
# *after* an earlier failed tenant reconfiguration. This
# ensures that cached data are appropriately removed, even if
# we are recovering from an invalid config.
self.create_branch('org/project', 'stable/queens')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/queens'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- nonexistent-job
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'stable/queens', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
self.delete_branch('org/project', 'stable/queens')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- base
""")
file_dict = {'zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='base', result='SUCCESS', changes='2,1')])
self.scheds.first.sched.merger.merger_api.cleanup(0)
class TestBranchTag(ZuulTestCase):
tenant_config_file = 'config/branch-tag/main.yaml'
def test_no_branch_match(self):
# Test that tag jobs run with no explicit branch matchers
event = self.fake_gerrit.addFakeTag('org/project', 'master', 'foo')
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
self.assertHistory([
dict(name='central-job', result='SUCCESS', ref='refs/tags/foo'),
dict(name='test-job', result='SUCCESS', ref='refs/tags/foo')],
ordered=False)
def test_no_branch_match_multi_branch(self):
# Test that tag jobs run with no explicit branch matchers in a
# multi-branch project (where jobs generally get implied
# branch matchers)
self.create_branch('org/project', 'stable/pike')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/pike'))
self.waitUntilSettled()
event = self.fake_gerrit.addFakeTag('org/project', 'master', 'foo')
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
# test-job does run in this case because it is defined in a
# branched repo with implied branch matchers, and the tagged
# commit is in both branches.
self.assertHistory([
dict(name='central-job', result='SUCCESS', ref='refs/tags/foo'),
dict(name='test-job', result='SUCCESS', ref='refs/tags/foo')],
ordered=False)
def test_no_branch_match_divergent_multi_branch(self):
# Test that tag jobs from divergent branches run different job
# variants.
self.create_branch('org/project', 'stable/pike')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/pike'))
self.waitUntilSettled()
# Add a new job to master
in_repo_conf = textwrap.dedent(
"""
- job:
name: test2-job
run: playbooks/test-job.yaml
- project:
name: org/project
tag:
jobs:
- central-job
- test2-job
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
event = self.fake_gerrit.addFakeTag(
'org/project', 'stable/pike', 'foo')
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
        # test-job runs because we tagged stable/pike, but test2-job does
        # not, since it only applies to master.
self.assertHistory([
dict(name='central-job', result='SUCCESS', ref='refs/tags/foo'),
dict(name='test-job', result='SUCCESS', ref='refs/tags/foo')],
ordered=False)
event = self.fake_gerrit.addFakeTag('org/project', 'master', 'bar')
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
        # test2-job runs because we tagged master, but test-job does
        # not; it applies only to stable/pike.
self.assertHistory([
dict(name='central-job', result='SUCCESS', ref='refs/tags/foo'),
dict(name='test-job', result='SUCCESS', ref='refs/tags/foo'),
dict(name='central-job', result='SUCCESS', ref='refs/tags/bar'),
dict(name='test2-job', result='SUCCESS', ref='refs/tags/bar')],
ordered=False)


class TestBranchNegative(ZuulTestCase):
    tenant_config_file = 'config/branch-negative/main.yaml'

    def test_negative_branch_match(self):
# Test that a negative branch matcher works with implied branches.
self.create_branch('org/project', 'stable/pike')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/pike'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        B = self.fake_gerrit.addFakeChange('org/project', 'stable/pike', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1')])


class TestBranchTemplates(ZuulTestCase):
    tenant_config_file = 'config/branch-templates/main.yaml'

    def test_template_removal_from_branch(self):
# Test that a template can be removed from one branch but not
# another.
# This creates a new branch with a copy of the config in master
self.create_branch('puppet-integration', 'stable/newton')
self.create_branch('puppet-integration', 'stable/ocata')
self.create_branch('puppet-tripleo', 'stable/newton')
self.create_branch('puppet-tripleo', 'stable/ocata')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-integration', 'stable/newton'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-integration', 'stable/ocata'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-tripleo', 'stable/newton'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-tripleo', 'stable/ocata'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
name: puppet-tripleo
check:
jobs:
- puppet-something
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('puppet-tripleo', 'stable/newton',
'A', files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='puppet-something', result='SUCCESS', changes='1,1')])
def test_template_change_on_branch(self):
# Test that the contents of a template can be changed on one
# branch without affecting another.
# This creates a new branch with a copy of the config in master
self.create_branch('puppet-integration', 'stable/newton')
self.create_branch('puppet-integration', 'stable/ocata')
self.create_branch('puppet-tripleo', 'stable/newton')
self.create_branch('puppet-tripleo', 'stable/ocata')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-integration', 'stable/newton'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-integration', 'stable/ocata'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-tripleo', 'stable/newton'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-tripleo', 'stable/ocata'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent("""
- job:
name: puppet-unit-base
run: playbooks/run-unit-tests.yaml
- job:
name: puppet-unit-3.8
parent: puppet-unit-base
branches: ^(stable/(newton|ocata)).*$
vars:
puppet_gem_version: 3.8
- job:
name: puppet-something
run: playbooks/run-unit-tests.yaml
- project-template:
name: puppet-unit
check:
jobs:
- puppet-something
- project:
name: puppet-integration
templates:
- puppet-unit
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('puppet-integration',
'stable/newton',
'A', files=file_dict)
B = self.fake_gerrit.addFakeChange('puppet-tripleo',
'stable/newton',
'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='puppet-something', result='SUCCESS',
changes='1,1 2,1')])


class TestBranchVariants(ZuulTestCase):
    tenant_config_file = 'config/branch-variants/main.yaml'

    def test_branch_variants(self):
# Test branch variants of jobs with inheritance
self.executor_server.hold_jobs_in_build = True
# This creates a new branch with a copy of the config in master
self.create_branch('puppet-integration', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-integration', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('puppet-integration', 'stable', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
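        # The frozen job should have accumulated three pre-run
        # playbooks through inheritance across its branch variants.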
self.assertEqual(len(self.builds[0].job.pre_run), 3)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_branch_variants_reconfigure(self):
# Test branch variants of jobs with inheritance
self.executor_server.hold_jobs_in_build = True
# This creates a new branch with a copy of the config in master
self.create_branch('puppet-integration', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-integration', 'stable'))
self.waitUntilSettled()
with open(os.path.join(FIXTURE_DIR,
'config/branch-variants/git/',
'puppet-integration/.zuul.yaml')) as f:
config = f.read()
# Push a change that triggers a dynamic reconfiguration
file_dict = {'.zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('puppet-integration', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
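        # The _inheritance_path zuul variable records each job variant
        # that was applied while freezing the job; log it to aid
        # debugging before checking the expected number of entries.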
ipath = self.builds[0].parameters['zuul']['_inheritance_path']
for i in ipath:
self.log.debug("inheritance path %s", i)
self.assertEqual(len(ipath), 5)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_branch_variants_divergent(self):
# Test branches can diverge and become independent
self.executor_server.hold_jobs_in_build = True
# This creates a new branch with a copy of the config in master
self.create_branch('puppet-integration', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'puppet-integration', 'stable'))
self.waitUntilSettled()
with open(os.path.join(FIXTURE_DIR,
'config/branch-variants/git/',
'puppet-integration/stable.zuul.yaml')) as f:
config = f.read()
file_dict = {'.zuul.yaml': config}
C = self.fake_gerrit.addFakeChange('puppet-integration', 'stable', 'C',
files=file_dict)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(C.getChangeMergedEvent())
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('puppet-integration', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('puppet-integration', 'stable', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
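        # builds[0] belongs to A (master) and builds[1] to B (stable);
        # each should carry the jobtags from its own, now divergent,
        # branch configuration.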
self.assertEqual(self.builds[0].parameters['zuul']['jobtags'],
['master'])
self.assertEqual(self.builds[1].parameters['zuul']['jobtags'],
['stable'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()


class TestBranchMismatch(ZuulTestCase):
    tenant_config_file = 'config/branch-mismatch/main.yaml'

    def test_job_override_branch(self):
"Test that override-checkout overrides branch matchers as well"
# Make sure the parent job repo is branched, so it gets
# implied branch matchers.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
# The child job repo should have a branch which does not exist
# in the parent job repo.
self.create_branch('org/project2', 'devel')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'devel'))
self.waitUntilSettled()
# A job in a repo with a weird branch name should use the
# parent job from the parent job's master (default) branch.
A = self.fake_gerrit.addFakeChange('org/project2', 'devel', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# project-test2 should run because it inherits from
# project-test1 and we will use the fallback branch to find
# project-test1 variants, but project-test1 itself, even
# though it is in the project-pipeline config, should not run
# because it doesn't directly match.
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_implied_branch_matcher_regex(self):
# Test that branch names that look like regexes aren't treated
# as such for implied branch matchers.
# Make sure the parent job repo is branched, so it gets
# implied branch matchers.
# The '+' in the branch name would cause the change not to
# match if it is treated as a regex.
self.create_branch('org/project1', 'feature/foo-0.1.12+bar')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'feature/foo-0.1.12+bar'))
A = self.fake_gerrit.addFakeChange(
'org/project1', 'feature/foo-0.1.12+bar', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_implied_branch_matcher_pragma_syntax_error(self):
# Test that syntax errors are reported if the implied branch
# matcher pragma is set. This catches potential errors when
# serializing configuration errors since the pragma causes
# extra information to be added to the error source context.
self.create_branch('org/project1', 'feature/test')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'feature/test'))
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
nodeset: bar
- pragma:
implied-branches:
- master
- feature/r1
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('nodeset "bar" was not found', A.messages[0],
"A should have a syntax error reported")
def _updateConfig(self, config, branch):
file_dict = {'zuul.yaml': config}
C = self.fake_gerrit.addFakeChange('org/project1', branch, 'C',
files=file_dict)
C.setMerged()
self.fake_gerrit.addEvent(C.getChangeMergedEvent())
self.waitUntilSettled()
def test_implied_branch_matcher_similar(self):
# Test that we perform a full-text match with implied branch
# matchers.
self.create_branch('org/project1', 'testbranch')
self.create_branch('org/project1', 'testbranch2')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'testbranch'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'testbranch2'))
config = textwrap.dedent(
"""
- job:
name: testjob
vars:
this_branch: testbranch
testbranch: true
- project:
check: {jobs: [testjob]}
""")
self._updateConfig(config, 'testbranch')
config = textwrap.dedent(
"""
- job:
name: testjob
vars:
this_branch: testbranch2
testbranch2: true
- project:
check: {jobs: [testjob]}
""")
self._updateConfig(config, 'testbranch2')
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange(
'org/project1', 'testbranch', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
{'testbranch': True, 'this_branch': 'testbranch'},
self.builds[0].job.combined_variables)
self.executor_server.release()
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange(
'org/project1', 'testbranch2', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# The two jobs should have distinct variables. Notably,
# testbranch2 should not pick up vars from testbranch.
self.assertEqual(
{'testbranch2': True, 'this_branch': 'testbranch2'},
self.builds[0].job.combined_variables)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='testjob', result='SUCCESS', changes='3,1'),
dict(name='testjob', result='SUCCESS', changes='4,1'),
], ordered=False)
def test_implied_branch_matcher_similar_override_checkout(self):
# Overriding a checkout has some branch matching implications.
# Make sure that we are performing a full-text match on
# branches when we override a checkout.
self.create_branch('org/project1', 'testbranch')
self.create_branch('org/project1', 'testbranch2')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'testbranch'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'testbranch2'))
config = textwrap.dedent(
"""
- job:
name: testjob
vars:
this_branch: testbranch
testbranch: true
- project:
check: {jobs: [testjob]}
""")
self._updateConfig(config, 'testbranch')
config = textwrap.dedent(
"""
- job:
name: testjob
vars:
this_branch: testbranch2
testbranch2: true
- project:
check: {jobs: [testjob]}
""")
self._updateConfig(config, 'testbranch2')
self.executor_server.hold_jobs_in_build = True
config = textwrap.dedent(
"""
- job:
name: project-test1
- job:
name: testjob-testbranch
parent: testjob
override-checkout: testbranch
- job:
name: testjob-testbranch2
parent: testjob
override-checkout: testbranch2
- project:
check: {jobs: [testjob-testbranch, testjob-testbranch2]}
""")
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange(
'org/project1', 'master', 'A', files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
{'testbranch': True, 'this_branch': 'testbranch'},
self.builds[0].job.combined_variables)
        # The two jobs should have distinct variables (notably, the
        # variant on testbranch2 should not pick up vars from
        # testbranch).
self.assertEqual(
{'testbranch2': True, 'this_branch': 'testbranch2'},
self.builds[1].job.combined_variables)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='testjob-testbranch', result='SUCCESS', changes='3,1'),
dict(name='testjob-testbranch2', result='SUCCESS', changes='3,1'),
], ordered=False)


class TestBranchRef(ZuulTestCase):
    tenant_config_file = 'config/branch-ref/main.yaml'

    def test_ref_match(self):
        # Test that branch matchers for explicit refs work as expected.
        # First, make a branch with another job so we can examine
        # different branches.
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
tag:
jobs:
- other-job:
branches: "^refs/tags/tag1-.*$"
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'stable', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# We're going to tag master, which is still at the branch
# point for stable, so the tagged commit will appear in both
        # branches. This should cause test-job-1 (from the project
        # config on master) and other-job (from the project config on
        # stable) to run.
event = self.fake_gerrit.addFakeTag('org/project', 'master', 'tag1-a')
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
self.assertHistory([
dict(name='other-job', result='SUCCESS', ref='refs/tags/tag1-a'),
dict(name='test-job-1', result='SUCCESS', ref='refs/tags/tag1-a')],
ordered=False)
# Next, merge a noop change to master so that we can tag a
# commit that's unique to master.
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setMerged()
self.fake_gerrit.addEvent(B.getChangeMergedEvent())
self.waitUntilSettled()
        # This tag should only run test-job-1: since the commit doesn't
        # appear in stable, the stable project config doesn't apply.
event = self.fake_gerrit.addFakeTag('org/project', 'master', 'tag1-b')
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
self.assertHistory([
dict(name='other-job', result='SUCCESS', ref='refs/tags/tag1-a'),
dict(name='test-job-1', result='SUCCESS', ref='refs/tags/tag1-a'),
dict(name='test-job-1', result='SUCCESS', ref='refs/tags/tag1-b')],
ordered=False)
# Now tag the same commit with the other format; we should get
# only test-job-2 added.
event = self.fake_gerrit.addFakeTag('org/project', 'master', 'tag2-a')
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
self.assertHistory([
dict(name='other-job', result='SUCCESS', ref='refs/tags/tag1-a'),
dict(name='test-job-1', result='SUCCESS', ref='refs/tags/tag1-a'),
dict(name='test-job-1', result='SUCCESS', ref='refs/tags/tag1-b'),
dict(name='test-job-2', result='SUCCESS', ref='refs/tags/tag2-a')],
ordered=False)


class TestAllowedProjects(ZuulTestCase):
    tenant_config_file = 'config/allowed-projects/main.yaml'

    def test_allowed_projects(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn('Build succeeded', A.messages[0])
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1)
self.assertIn('Project org/project2 is not allowed '
'to run job test-project2', B.messages[0])
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(C.reported, 1)
self.assertIn('Project org/project3 is not allowed '
'to run job restricted-job', C.messages[0])
self.assertHistory([
dict(name='test-project1', result='SUCCESS', changes='1,1'),
dict(name='restricted-job', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_allowed_projects_dynamic_config(self):
# It is possible to circumvent allowed-projects with a
# depends-on.
in_repo_conf2 = textwrap.dedent(
"""
- job:
name: test-project2b
parent: restricted-job
allowed-projects:
- org/project1
""")
in_repo_conf1 = textwrap.dedent(
"""
- project:
check:
jobs:
- test-project2b
""")
file_dict = {'zuul.yaml': in_repo_conf2}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
file_dict = {'zuul.yaml': in_repo_conf1}
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
files=file_dict)
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
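        # Because B depends on A, Zuul evaluates B with A's proposed
        # config applied; that config adds org/project1 to
        # allowed-projects, so the job is permitted to run.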
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-project2b', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
def test_allowed_projects_dynamic_config_secret(self):
# It is not possible to circumvent allowed-projects with a
# depends-on if there is a secret involved.
in_repo_conf2 = textwrap.dedent(
"""
- secret:
name: project2_secret
data: {}
- job:
name: test-project2b
parent: restricted-job
secrets: project2_secret
allowed-projects:
- org/project1
""")
in_repo_conf1 = textwrap.dedent(
"""
- project:
check:
jobs:
- test-project2b
""")
file_dict = {'zuul.yaml': in_repo_conf2}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
file_dict = {'zuul.yaml': in_repo_conf1}
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
files=file_dict)
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(B.reported, 1)
self.assertIn('Project org/project1 is not allowed '
'to run job test-project2b', B.messages[0])


class TestAllowedProjectsTrusted(ZuulTestCase):
    tenant_config_file = 'config/allowed-projects-trusted/main.yaml'

    def test_allowed_projects_secret_trusted(self):
# Test that an untrusted job defined in project1 can be used
# in project2, but only if attached by a config project.
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn('Build succeeded', A.messages[0])
self.assertHistory([
dict(name='test-project1', result='SUCCESS', changes='1,1'),
], ordered=False)


class TestCentralJobs(ZuulTestCase):
    tenant_config_file = 'config/central-jobs/main.yaml'

    def setUp(self):
super(TestCentralJobs, self).setUp()
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.waitUntilSettled()
def _updateConfig(self, config, branch):
file_dict = {'.zuul.yaml': config}
C = self.fake_gerrit.addFakeChange('org/project', branch, 'C',
files=file_dict)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(C.getChangeMergedEvent())
self.waitUntilSettled()
def _test_central_job_on_branch(self, branch, other_branch):
        # Test that a job defined in a branchless repo only runs on
        # the branch to which it is applied
config = textwrap.dedent(
"""
- project:
name: org/project
check:
jobs:
- central-job
""")
self._updateConfig(config, branch)
A = self.fake_gerrit.addFakeChange('org/project', branch, 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
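        # 'changes' uses Gerrit's <number>,<patchset> notation; the
        # config update merged in _updateConfig was change 1, so A is
        # change 2.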
self.assertHistory([
dict(name='central-job', result='SUCCESS', changes='2,1')])
# No jobs should run for this change.
B = self.fake_gerrit.addFakeChange('org/project', other_branch, 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='central-job', result='SUCCESS', changes='2,1')])
def test_central_job_on_stable(self):
self._test_central_job_on_branch('master', 'stable')
def test_central_job_on_master(self):
self._test_central_job_on_branch('stable', 'master')
def _test_central_template_on_branch(self, branch, other_branch):
        # Test that a project-template defined in a branchless repo
        # only runs on the branch to which it is applied
config = textwrap.dedent(
"""
- project:
name: org/project
templates: ['central-jobs']
""")
self._updateConfig(config, branch)
A = self.fake_gerrit.addFakeChange('org/project', branch, 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='central-job', result='SUCCESS', changes='2,1')])
# No jobs should run for this change.
B = self.fake_gerrit.addFakeChange('org/project', other_branch, 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='central-job', result='SUCCESS', changes='2,1')])
def test_central_template_on_stable(self):
self._test_central_template_on_branch('master', 'stable')
def test_central_template_on_master(self):
self._test_central_template_on_branch('stable', 'master')


class TestEmptyConfigFile(ZuulTestCase):
    tenant_config_file = 'config/empty-config-file/main.yaml'

    def test_empty_config_file(self):
# Tests that a config file with only comments does not cause
# an error.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEquals(
len(tenant.layout.loading_errors), 0,
"No error should have been accumulated")


class TestInRepoConfig(ZuulTestCase):
    # A temporary class to hold new tests while others are disabled

    config_file = 'zuul-connections-gerrit-and-github.conf'
    tenant_config_file = 'config/in-repo/main.yaml'

    def test_in_repo_config(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertIn('tenant-one-gate', A.messages[1],
"A should transit tenant-one gate")
@skip("This test is useful, but not reliable")
def test_full_and_dynamic_reconfig(self):
self.executor_server.hold_jobs_in_build = True
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project:
name: org/project
tenant-one-gate:
jobs:
- project-test1
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
gc.collect()
pipelines = [obj for obj in gc.get_objects()
if isinstance(obj, zuul.model.Pipeline)]
self.assertEqual(len(pipelines), 4)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_dynamic_config(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- job:
name: project-test2
run: playbooks/project-test2.yaml
- job:
name: project-test3
run: playbooks/project-test2.yaml
# add a job by the short project name
- project:
name: org/project
tenant-one-gate:
jobs:
- project-test2
# add a job by the canonical project name
- project:
name: review.example.com/org/project
tenant-one-gate:
jobs:
- project-test3
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertIn('tenant-one-gate', A.messages[1],
"A should transit tenant-one gate")
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test3', result='SUCCESS', changes='1,1'),
], ordered=False)
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# Now that the config change is landed, it should be live for
# subsequent changes.
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test3', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1'),
dict(name='project-test3', result='SUCCESS', changes='2,1'),
], ordered=False)
# Catch time / monotonic errors
val = self.assertReportedStat('zuul.tenant.tenant-one.pipeline.'
'tenant-one-gate.layout_generation_time',
kind='ms')
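        # The stat is reported in milliseconds; a value outside
        # (0, 60s) would suggest a wall-clock / monotonic mixup.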
self.assertTrue(0.0 < float(val) < 60000.0)
def test_dynamic_template(self):
# Tests that a project can't update a template in another
# project.
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project-template:
name: common-config-template
check:
jobs:
- project-test1
- project:
name: org/project
templates: [common-config-template]
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Project template common-config-template '
'is already defined',
A.messages[0],
"A should have failed the check pipeline")
def test_dynamic_config_errors_not_accumulated(self):
"""Test that requesting broken dynamic configs
does not appear in tenant layout error accumulator"""
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project:
name: org/project
check:
jobs:
- non-existent-job
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEquals(
len(tenant.layout.loading_errors), 0,
"No error should have been accumulated")
self.assertHistory([])
def test_dynamic_config_non_existing_job(self):
"""Test that requesting a non existent job fails"""
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project:
name: org/project
check:
jobs:
- non-existent-job
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Job non-existent-job not defined', A.messages[0],
"A should have failed the check pipeline")
self.assertHistory([])
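        # Zuul should also leave an inline file comment pointing at
        # the offending section of .zuul.yaml.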
self.assertEqual(len(A.comments), 1)
comments = sorted(A.comments, key=lambda x: x['line'])
self.assertEqual(comments[0],
{'file': '.zuul.yaml',
'line': 9,
'message': 'Job non-existent-job not defined',
'reviewer': {'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'},
'range': {'end_character': 0,
'end_line': 9,
'start_character': 2,
'start_line': 5},
})
def test_dynamic_config_job_anchors(self):
# Test the use of anchors in job configuration. This is a
# regression test designed to catch a failure where we freeze
# the first job and in doing so, mutate the vars dict. The
# intended behavior is that the two jobs end up with two
# separate python objects for their vars dicts.
in_repo_conf = textwrap.dedent(
"""
- job:
name: myvars
vars: &anchor
plugins:
foo: bar
- job:
name: project-test1
timeout: 999999999999
vars: *anchor
- project:
name: org/project
check:
jobs:
- project-test1
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('max-job-timeout', A.messages[0])
self.assertHistory([])
def test_dynamic_config_non_existing_job_in_template(self):
"""Test that requesting a non existent job fails"""
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project-template:
name: test-template
check:
jobs:
- non-existent-job
- project:
name: org/project
templates:
- test-template
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Job non-existent-job not defined', A.messages[0],
"A should have failed the check pipeline")
self.assertHistory([])
def test_dynamic_nonexistent_job_dependency(self):
# Tests that a reference to a nonexistent job dependency is an
# error.
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- project:
name: org/project
check:
jobs:
- project-test1:
dependencies:
- name: non-existent-job
soft: true
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Job non-existent-job not defined', A.messages[0],
"A should have failed the check pipeline")
self.assertNotIn('freezing', A.messages[0])
self.assertHistory([])
def test_dynamic_config_new_patchset(self):
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- job:
name: project-test2
run: playbooks/project-test2.yaml
- project:
name: org/project
check:
jobs:
- project-test2
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
items = check_pipeline.getAllItems()
self.assertEqual(items[0].change.number, '1')
self.assertEqual(items[0].change.patchset, '1')
self.assertTrue(items[0].live)
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- job:
name: project-test2
run: playbooks/project-test2.yaml
- project:
name: org/project
check:
jobs:
- project-test1
- project-test2
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A.addPatchset(files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
items = check_pipeline.getAllItems()
self.assertEqual(items[0].change.number, '1')
self.assertEqual(items[0].change.patchset, '2')
self.assertTrue(items[0].live)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release('project-test1')
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
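        # The patchset-1 build should have been aborted when patchset
        # 2 superseded it; both jobs then run against patchset 2.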
self.assertHistory([
dict(name='project-test2', result='ABORTED', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,2'),
dict(name='project-test2', result='SUCCESS', changes='1,2')])
def test_in_repo_branch(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- job:
name: project-test2
run: playbooks/project-test2.yaml
- project:
name: org/project
tenant-one-gate:
jobs:
- project-test2
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'stable', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertIn('tenant-one-gate', A.messages[1],
"A should transit tenant-one gate")
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='1,1')])
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# The config change should not affect master.
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1')])
# The config change should be live for further changes on
# stable.
C = self.fake_gerrit.addFakeChange('org/project', 'stable', 'C')
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='3,1')])
def test_crd_dynamic_config_branch(self):
        # Test that we can create a job in one repo and use it from
        # a different branch of a different repo.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- job:
name: project-test2
run: playbooks/project-test2.yaml
- project:
name: org/project
check:
jobs:
- project-test2
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
second_repo_conf = textwrap.dedent(
"""
- project:
name: org/project1
check:
jobs:
- project-test2
""")
second_file_dict = {'.zuul.yaml': second_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project1', 'stable', 'B',
files=second_file_dict)
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1, "A should report")
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
])
def test_yaml_list_error(self):
in_repo_conf = textwrap.dedent(
"""
job: foo
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('not a list', A.messages[0],
"A should have a syntax error reported")
self.assertIn('job: foo', A.messages[0],
"A should display the failing list")
def test_yaml_dict_error(self):
in_repo_conf = textwrap.dedent(
"""
- job_not_a_dict
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('not a dictionary', A.messages[0],
"A should have a syntax error reported")
self.assertIn('job_not_a_dict', A.messages[0],
"A should list the bad key")
def test_yaml_dict_error2(self):
in_repo_conf = textwrap.dedent(
"""
- foo: {{ not_a_dict }}
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('while constructing a mapping', A.messages[0],
"A should have a syntax error reported")
def test_yaml_dict_error3(self):
in_repo_conf = textwrap.dedent(
"""
- job:
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('is not a dictionary', A.messages[0],
"A should have a syntax error reported")
def test_yaml_duplicate_key_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: foo
name: bar
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('appears more than once', A.messages[0],
"A should have a syntax error reported")
def test_yaml_key_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('has more than one key', A.messages[0],
"A should have a syntax error reported")
self.assertIn("job: null\n name: project-test2", A.messages[0],
"A should have the failing section displayed")
    # This is non-deterministic without guaranteed dict ordering,
    # which arrived with Python 3.7.
@skipIf(sys.version_info < (3, 7), "non-deterministic on < 3.7")
def test_yaml_error_truncation_message(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
this: is
a: long
set: of
keys: that
should: be
truncated: ok
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('has more than one key', A.messages[0],
"A should have a syntax error reported")
self.assertIn("job: null\n name: project-test2", A.messages[0],
"A should have the failing section displayed")
self.assertIn("...", A.messages[0],
"A should have the failing section truncated")
def test_yaml_unknown_error(self):
in_repo_conf = textwrap.dedent(
"""
- foobar:
foo: bar
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('not recognized', A.messages[0],
"A should have a syntax error reported")
self.assertIn('foobar:\n foo: bar', A.messages[0],
"A should report the bad keys")
def test_invalid_job_secret_var_name(self):
in_repo_conf = textwrap.dedent(
"""
- secret:
name: foo-bar
data:
dummy: value
- job:
name: foobar
secrets:
- name: foo-bar
secret: foo-bar
""")
file_dict = {".zuul.yaml": in_repo_conf}
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files=file_dict)
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn("Ansible variable name 'foo-bar'", A.messages[0],
"A should have a syntax error reported")
def test_invalid_job_vars(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: foobar
vars:
foo-bar: value
""")
file_dict = {".zuul.yaml": in_repo_conf}
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files=file_dict)
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn("Ansible variable name 'foo-bar'", A.messages[0],
"A should have a syntax error reported")
def test_invalid_job_extra_vars(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: foobar
extra-vars:
foo-bar: value
""")
file_dict = {".zuul.yaml": in_repo_conf}
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files=file_dict)
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn("Ansible variable name 'foo-bar'", A.messages[0],
"A should have a syntax error reported")
def test_invalid_job_host_vars(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: foobar
host-vars:
host-name:
foo-bar: value
""")
file_dict = {".zuul.yaml": in_repo_conf}
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files=file_dict)
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn("Ansible variable name 'foo-bar'", A.messages[0],
"A should have a syntax error reported")
def test_invalid_job_group_vars(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: foobar
group-vars:
group-name:
foo-bar: value
""")
file_dict = {".zuul.yaml": in_repo_conf}
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files=file_dict)
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn("Ansible variable name 'foo-bar'", A.messages[0],
"A should have a syntax error reported")
def test_untrusted_syntax_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
foo: error
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('syntax error', A.messages[0],
"A should have a syntax error reported")
def test_trusted_syntax_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
foo: error
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('syntax error', A.messages[0],
"A should have a syntax error reported")
def test_untrusted_yaml_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
foo: error
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('syntax error', A.messages[0],
"A should have a syntax error reported")
def test_untrusted_shadow_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: common-config-test
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('not permitted to shadow', A.messages[0],
"A should have a syntax error reported")
def test_untrusted_pipeline_error(self):
in_repo_conf = textwrap.dedent(
"""
- pipeline:
name: test
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('Pipelines may not be defined', A.messages[0],
"A should have a syntax error reported")
def test_untrusted_project_error(self):
in_repo_conf = textwrap.dedent(
"""
- project:
name: org/project1
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('the only project definition permitted', A.messages[0],
"A should have a syntax error reported")
def test_untrusted_depends_on_trusted(self):
with open(os.path.join(FIXTURE_DIR,
'config/in-repo/git/',
'common-config/zuul.yaml')) as f:
common_config = f.read()
common_config += textwrap.dedent(
"""
- job:
name: project-test9
""")
file_dict = {'zuul.yaml': common_config}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project:
name: org/project
check:
jobs:
- project-test9
""")
file_dict = {'zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 1,
"B should report failure")
        self.assertIn('depends on a change to a config project',
                      B.messages[0],
                      "B should have the dependency error reported")
def test_duplicate_node_error(self):
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: duplicate
nodes:
- name: compute
label: foo
- name: compute
label: foo
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('appears multiple times', A.messages[0],
"A should have a syntax error reported")
def test_duplicate_group_error(self):
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: duplicate
nodes:
- name: compute
label: foo
groups:
- name: group
nodes: compute
- name: group
nodes: compute
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('appears multiple times', A.messages[0],
"A should have a syntax error reported")
def test_group_in_job_with_invalid_node(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test job
nodeset:
nodes: []
groups:
- name: a_group
nodes:
- a_node_that_does_not_exist
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('which is not defined in the nodeset', A.messages[0],
"A should have a syntax error reported")
def test_duplicate_group_in_job(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test job
nodeset:
nodes:
- name: controller
label: ubuntu-focal
groups:
- name: a_duplicate_group
nodes:
- controller
- name: a_duplicate_group
nodes:
- controller
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn(
'Group names must be unique within a nodeset.',
A.messages[0], "A should have a syntax error reported")
def test_secret_not_found_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
secrets: does-not-exist
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('secret "does-not-exist" was not found', A.messages[0],
"A should have a syntax error reported")
def test_nodeset_not_found_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test
nodeset: does-not-exist
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('nodeset "does-not-exist" was not found', A.messages[0],
"A should have a syntax error reported")
def test_required_project_not_found_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- job:
name: test
required-projects:
- does-not-exist
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('Unknown projects: does-not-exist', A.messages[0],
"A should have a syntax error reported")
def test_required_project_not_found_multiple_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- job:
name: test
required-projects:
- does-not-exist
- also-does-not-exist
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('Unknown projects: does-not-exist, also-does-not-exist',
A.messages[0], "A should have a syntax error reported")
def test_template_not_found_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project:
name: org/project
templates:
- does-not-exist
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('project template "does-not-exist" was not found',
A.messages[0],
"A should have a syntax error reported")
def test_job_list_in_project_template_not_dict_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project-template:
name: some-jobs
check:
jobs:
- project-test1:
- required-projects:
org/project2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('expected str for dictionary value',
A.messages[0], "A should have a syntax error reported")
def test_job_list_in_project_not_dict_error(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- project:
name: org/project1
check:
jobs:
- project-test1:
- required-projects:
org/project2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('expected str for dictionary value',
A.messages[0], "A should have a syntax error reported")
def test_project_template(self):
# Tests that a project template is not modified when used, and
# can therefore be used in subsequent reconfigurations.
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- project-template:
name: some-jobs
tenant-one-gate:
jobs:
- project-test1:
required-projects:
- org/project1
- project:
name: org/project
templates:
- some-jobs
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
name: org/project1
templates:
- some-jobs
""")
file_dict = {'.zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
files=file_dict)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'MERGED')
def test_job_remove_add(self):
# Tests that a job can be removed from one repo and added in another.
# First, remove the current config for project1 since it
# references the job we want to remove.
file_dict = {'.zuul.yaml': None}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# Then propose a change to delete the job from one repo...
file_dict = {'.zuul.yaml': None}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# ...and a second that depends on it that adds it to another repo.
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- project:
name: org/project1
check:
jobs:
- project-test1
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test1.yaml': in_repo_playbook}
C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C',
files=file_dict,
parent='refs/changes/01/1/1')
C.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
C.subject, B.data['id'])
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='2,1 3,1'),
], ordered=False)
@skipIfMultiScheduler()
    # This test fails depending on which scheduler completes the
    # tenant reconfiguration first.  As the assertions are done with
    # the objects on scheduler-0, they will fail if scheduler-1
    # completed the reconfiguration first.
def test_multi_repo(self):
downstream_repo_conf = textwrap.dedent(
"""
- project:
name: org/project1
tenant-one-gate:
jobs:
- project-test1
- job:
name: project1-test1
parent: project-test1
""")
file_dict = {'.zuul.yaml': downstream_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
upstream_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- job:
name: project-test2
- project:
name: org/project
tenant-one-gate:
jobs:
- project-test1
""")
file_dict = {'.zuul.yaml': upstream_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'MERGED')
self.fake_gerrit.addEvent(B.getChangeMergedEvent())
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
        # Ensure the latest change is reflected in the config; if it
        # isn't, this will raise an exception.
tenant.layout.getJob('project-test2')
def test_pipeline_error(self):
with open(os.path.join(FIXTURE_DIR,
'config/in-repo/git/',
'common-config/zuul.yaml')) as f:
base_common_config = f.read()
in_repo_conf_A = textwrap.dedent(
"""
- pipeline:
name: periodic
foo: error
""")
file_dict = {'zuul.yaml': None,
'zuul.d/main.yaml': base_common_config,
'zuul.d/test1.yaml': in_repo_conf_A}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('syntax error',
A.messages[0],
"A should have an error reported")
def test_pipeline_supercedes_error(self):
with open(os.path.join(FIXTURE_DIR,
'config/in-repo/git/',
'common-config/zuul.yaml')) as f:
base_common_config = f.read()
in_repo_conf_A = textwrap.dedent(
"""
- pipeline:
name: periodic
manager: independent
supercedes: doesnotexist
trigger: {}
""")
file_dict = {'zuul.yaml': None,
'zuul.d/main.yaml': base_common_config,
'zuul.d/test1.yaml': in_repo_conf_A}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertIn('supercedes an unknown',
A.messages[0],
"A should have an error reported")
def test_change_series_error(self):
with open(os.path.join(FIXTURE_DIR,
'config/in-repo/git/',
'common-config/zuul.yaml')) as f:
base_common_config = f.read()
in_repo_conf_A = textwrap.dedent(
"""
- pipeline:
name: periodic
foo: error
""")
file_dict = {'zuul.yaml': None,
'zuul.d/main.yaml': base_common_config,
'zuul.d/test1.yaml': in_repo_conf_A}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
in_repo_conf_B = textwrap.dedent(
"""
- job:
name: project-test2
foo: error
""")
file_dict = {'zuul.yaml': None,
'zuul.d/main.yaml': base_common_config,
'zuul.d/test1.yaml': in_repo_conf_A,
'zuul.d/test2.yaml': in_repo_conf_B}
B = self.fake_gerrit.addFakeChange('common-config', 'master', 'B',
files=file_dict)
B.setDependsOn(A, 1)
C = self.fake_gerrit.addFakeChange('common-config', 'master', 'C')
C.setDependsOn(B, 1)
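        # The series is now C -> B -> A; C itself is valid, but both
        # changes it depends on carry configuration errors.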
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(C.reported, 1,
"C should report failure")
self.assertIn('This change depends on a change '
'with an invalid configuration.',
C.messages[0],
"C should have an error reported")
def test_pipeline_debug(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- project:
name: org/project
check:
debug: True
jobs:
- project-test1
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1,
"A should report success")
self.assertIn('Debug information:',
A.messages[0], "A should have debug info")
def test_nodeset_alternates_cycle(self):
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: red
alternatives: [blue]
- nodeset:
name: blue
alternatives: [red]
- job:
name: project-test1
run: playbooks/project-test1.yaml
nodeset: blue
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn("cycle detected", A.messages[0])
def test_nodeset_alternates_missing_from_nodeset(self):
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: red
alternatives: [blue]
- job:
name: project-test1
run: playbooks/project-test1.yaml
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn('nodeset "blue" was not found', A.messages[0])
def test_nodeset_alternates_missing_from_job(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
nodeset:
alternatives: [red]
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn('nodeset "red" was not found', A.messages[0])
@skipIfMultiScheduler()
# See comment in TestInRepoConfigDir.scheduler_count for further
# details.
    # As this is the only test within this test class that doesn't
    # work with multiple schedulers, we skip it rather than setting
    # scheduler_count to 1 for the whole test class.
def test_file_move(self):
# Tests that a zuul config file can be renamed
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
parent: project-test1
- project:
check:
jobs:
- project-test2
""")
file_dict = {'.zuul.yaml': None,
'.zuul.d/newfile.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='2,1'),
], ordered=True)
self.scheds[0].sched.stop()
self.scheds[0].sched.join()
del self.scheds[0]
self.log.debug("Restarting scheduler")
self.createScheduler()
self.scheds[0].start(self.validate_tenants)
self.waitUntilSettled()
# The fake gerrit was lost with the scheduler shutdown;
# restore the state we care about:
self.fake_gerrit.change_number = 2
C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='3,1'),
], ordered=True)
@simple_layout('layouts/empty-check.yaml')
def test_merge_commit(self):
# Test a .zuul.yaml content change in a merge commit
self.create_branch('org/project', 'stable/queens')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/queens'))
self.waitUntilSettled()
conf = textwrap.dedent(
"""
- job:
name: test-job
- project:
name: org/project
check:
jobs:
- test-job
""")
file_dict = {'.zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'stable/queens', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
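        # Record the current tip of master so it can be used as the
        # first parent of the merge commit constructed below.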
upstream_path = os.path.join(self.upstream_root, 'org/project')
upstream_repo = git.Repo(upstream_path)
master_sha = upstream_repo.heads.master.commit.hexsha
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C',
merge_parents=[
master_sha,
A.patchsets[-1]['revision'],
],
merge_files=['.zuul.yaml'])
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='3,1'),
], ordered=True)
def test_final_parent(self):
# If all variants of the parent are final, it is an error.
# This doesn't catch all possibilities (that is handled during
# job freezing) but this may catch most errors earlier.
in_repo_conf = textwrap.dedent(
"""
- job:
name: parent
final: true
- job:
name: project-test1
parent: parent
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn('is final and can not act as a parent', A.messages[0])
def test_intermediate_parent(self):
# If all variants of the parent are intermediate and this job
# is not abstract, it is an error.
# This doesn't catch all possibilities (that is handled during
# job freezing) but this may catch most errors earlier.
in_repo_conf = textwrap.dedent(
"""
- job:
name: parent
intermediate: true
abstract: true
- job:
name: project-test1
parent: parent
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn('is not abstract', A.messages[0])
@simple_layout('layouts/protected-parent.yaml')
def test_protected_parent(self):
# If a parent is protected, it may only be used by a child in
# the same project.
# This doesn't catch all possibilities (that is handled during
# job freezing) but this may catch most errors earlier.
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
parent: protected-job
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertIn('protected job in a different project', A.messages[0])
class TestInRepoConfigSOS(ZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/in-repo/main.yaml'
    # These tests exercise specific interactions between multiple
    # schedulers.  They create additional schedulers as necessary and
    # start or stop them individually to test specific interactions.
    # Using scheduler_count to create even more schedulers doesn't
    # make sense for these tests.
scheduler_count = 1
def test_cross_scheduler_config_update(self):
# This is a regression test. We observed duplicate entries in
# the TPC config cache when a second scheduler updates its
# layout. This test performs a reconfiguration on one
# scheduler, then allows the second scheduler to process the
# change.
# Create the second scheduler.
self.waitUntilSettled()
self.createScheduler()
self.scheds[1].start()
self.waitUntilSettled()
# Create a change which will trigger a tenant configuration
# update.
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: test-nodeset
nodes: []
""")
file_dict = {'.zuul.yaml': in_repo_conf}
X = self.fake_gerrit.addFakeChange('org/project1', 'master', 'X',
files=file_dict)
X.setMerged()
# Let the first scheduler process the reconfiguration.
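        # Holding the second scheduler's run handler lock keeps it
        # from processing events until we release it.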
with self.scheds[1].sched.run_handler_lock:
self.fake_gerrit.addEvent(X.getChangeMergedEvent())
self.waitUntilSettled(matcher=[self.scheds[0]])
# Wait for the second scheduler to update its config to match.
self.waitUntilSettled()
# Do the same process again.
X = self.fake_gerrit.addFakeChange('org/project1', 'master', 'X',
files=file_dict)
X.setMerged()
with self.scheds[1].sched.run_handler_lock:
self.fake_gerrit.addEvent(X.getChangeMergedEvent())
self.waitUntilSettled(matcher=[self.scheds[0]])
# And wait for the second scheduler again. If we're re-using
# cache objects, we will have created duplicates at this
# point.
self.waitUntilSettled()
# Create a change which will perform a dynamic config update.
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-testx
parent: common-config-test
- project:
check:
jobs:
- project-testx
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
with self.scheds[0].sched.run_handler_lock:
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[self.scheds[1]])
self.waitUntilSettled()
self.assertHistory([
dict(name='project-testx', result='SUCCESS', changes='3,1'),
], ordered=False)
class TestInRepoConfigDir(ZuulTestCase):
# Like TestInRepoConfig, but the fixture test files are in zuul.d
tenant_config_file = 'config/in-repo-dir/main.yaml'
    # These tests fiddle around with the list of schedulers used in
    # the test.  They delete the existing scheduler and replace it
    # with a new one.  This wouldn't work with multiple schedulers as
    # the new scheduler wouldn't replace the one at self.scheds[0],
    # but any of the other schedulers used within a multi-scheduler
    # setup.  As a result, starting self.scheds[0] would fail because
    # it is already running and threads can only be started once.
scheduler_count = 1
def test_file_move(self):
# Tests that a zuul config file can be renamed
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
- project:
name: org/project
check:
jobs:
- project-test2
""")
file_dict = {'zuul.d/project.yaml': None,
'zuul.d/newfile.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='2,1'),
], ordered=True)
self.scheds[0].sched.stop()
self.scheds[0].sched.join()
del self.scheds[0]
self.log.debug("Restarting scheduler")
self.createScheduler()
self.scheds[0].start(self.validate_tenants)
self.waitUntilSettled()
# The fake gerrit was lost with the scheduler shutdown;
# restore the state we care about:
self.fake_gerrit.change_number = 2
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='3,1'),
], ordered=True)
def test_extra_config_move(self):
        # Tests that an extra config file can be renamed
in_repo_conf = textwrap.dedent(
"""
- job:
name: project1-test2
- project:
name: org/project1
check:
jobs:
- project1-test2
""")
# Wait until settled so that we process both tenant reconfig
# events in one pass through the scheduler loop.
self.waitUntilSettled()
# Add an empty zuul.yaml here so we are triggering a tenant
# reconfig for both tenants as the extra config dir is only
# considered for tenant-two.
file_dict = {'zuul.yaml': '',
'extra.d/project.yaml': None,
'extra.d/newfile.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project1-test2', result='SUCCESS', changes='2,1'),
], ordered=True)
self.scheds[0].sched.stop()
self.scheds[0].sched.join()
del self.scheds[0]
self.log.debug("Restarting scheduler")
self.createScheduler()
self.scheds[0].start(self.validate_tenants)
self.waitUntilSettled()
# The fake gerrit was lost with the scheduler shutdown;
# restore the state we care about:
self.fake_gerrit.change_number = 2
C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project1-test2', result='SUCCESS', changes='2,1'),
dict(name='project1-test2', result='SUCCESS', changes='3,1'),
], ordered=True)
class TestExtraConfigInDependent(ZuulTestCase):
# in org/project2, jobs are defined in extra config paths, while
# project is defined in .zuul.yaml
tenant_config_file = 'config/in-repo-dir/main.yaml'
scheduler_count = 1
def test_extra_config_in_dependent_change(self):
        # Test that when jobs are defined in extra-config-paths in a
        # repo, and another change depends on a change in that repo,
        # the jobs are still loaded.
# Add an empty zuul.yaml here so we are triggering dynamic layout load
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files={'zuul.yaml': ''})
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B',
files={'zuul.yaml': ''})
        # A Depends-On B, which has private jobs defined in
        # extra-config-paths
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # Jobs in both changes should succeed
self.assertHistory([
dict(name='project2-private-extra-file', result='SUCCESS',
changes='2,1'),
dict(name='project2-private-extra-dir', result='SUCCESS',
changes='2,1'),
dict(name='project-test1', result='SUCCESS',
changes='2,1 1,1'),
], ordered=False)
def test_extra_config_in_bundle_change(self):
        # Test that jobs defined in extra-config-paths in a repo are
        # loaded in a bundle with changes from different repos.
# Add an empty zuul.yaml here so we are triggering dynamic layout load
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files={'zuul.yaml': ''})
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B',
files={'zuul.yaml': ''})
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C',
files={'zuul.yaml': ''})
        # A and B form a bundle, and A depends on C
A.data['commitMessage'] = '%s\n\nDepends-On: %s\nDepends-On: %s\n' % (
A.subject, B.data['url'], C.data['url'])
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['url'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # Jobs in all changes should succeed
self.assertHistory([
dict(name='project2-private-extra-file', result='SUCCESS',
changes='3,1 1,1 2,1'),
dict(name='project2-private-extra-dir', result='SUCCESS',
changes='3,1 1,1 2,1'),
dict(name='project-test1', result='SUCCESS',
changes='3,1 2,1 1,1'),
dict(name='project3-private-extra-file', result='SUCCESS',
changes='3,1'),
dict(name='project3-private-extra-dir', result='SUCCESS',
changes='3,1'),
], ordered=False)
class TestGlobalRepoState(AnsibleZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/global-repo-state/main.yaml'
def test_inherited_playbooks(self):
# Test that the repo state is restored globally for the whole buildset
# including inherited projects not in the dependency chain.
self.executor_server.hold_jobs_in_start = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
for _ in iterate_timeout(30, 'Wait for build to be in starting phase'):
if self.executor_server.job_workers:
sleep(1)
break
# The build test1 is running while test2 is waiting for test1.
self.assertEqual(len(self.builds), 1)
        # Now merge a change to the playbook out of band.  This will
        # break test2 if it updates common-config to latest master.
        # However, due to the buildset-global repo state, test2 must
        # not be broken afterwards.
playbook = textwrap.dedent(
"""
- hosts: localhost
tasks:
- name: fail
fail:
msg: foobar
""")
file_dict = {'playbooks/test2.yaml': playbook}
B = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
self.log.info('Merge test change on common-config')
B.setMerged()
# Reset repo to ensure the cached repo has the failing commit. This
# is needed to ensure that the repo state has been restored.
repo = self.executor_server.merger.getRepo('gerrit', 'common-config')
repo.update()
repo.reset()
self.executor_server.hold_jobs_in_start = False
self.waitUntilSettled()
self.assertHistory([
dict(name='test1', result='SUCCESS', changes='1,1'),
dict(name='test2', result='SUCCESS', changes='1,1'),
])
def test_inherited_implicit_roles(self):
# Test that the repo state is restored globally for the whole buildset
# including inherited projects not in the dependency chain.
self.executor_server.hold_jobs_in_start = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
for _ in iterate_timeout(30, 'Wait for build to be in starting phase'):
if self.executor_server.job_workers:
sleep(1)
break
# The build test1 is running while test2 is waiting for test1.
self.assertEqual(len(self.builds), 1)
        # Now merge a change to the role out of band.  This will break
        # test2 if it updates org/implicit-role to latest master.
        # However, due to the buildset-global repo state, test2 must
        # not be broken afterwards.
playbook = textwrap.dedent(
"""
- name: fail
fail:
msg: foobar
""")
file_dict = {'roles/implicit-role/tasks/main.yaml': playbook}
B = self.fake_gerrit.addFakeChange('org/implicit-role', 'master', 'A',
files=file_dict)
self.log.info('Merge test change on org/implicit-role')
B.setMerged()
# Reset repo to ensure the cached repo has the failing commit. This
# is needed to ensure that the repo state has been restored.
repo = self.executor_server.merger.getRepo(
'gerrit', 'org/implicit-role')
repo.update()
repo.reset()
self.executor_server.hold_jobs_in_start = False
self.waitUntilSettled()
self.assertHistory([
dict(name='test1', result='SUCCESS', changes='1,1'),
dict(name='test2', result='SUCCESS', changes='1,1'),
])
def test_required_projects_unprotected_override_checkout(self):
# Setup branch protection for master on org/requiringproject-github
github = self.fake_github.getGithubClient()
github.repo_from_project(
'org/requiringproject-github')._set_branch_protection(
'master', True)
self.fake_github.emitEvent(self.fake_github.getPushEvent(
'org/requiringproject-github', ref='refs/heads/master'))
# Create unprotected branch feat-x. This branch will be the target
# of override-checkout
repo = github.repo_from_project('org/requiredproject-github')
repo._set_branch_protection('master', True)
repo._create_branch('feat-x')
self.create_branch('org/requiredproject-github', 'feat-x')
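        # The branch has to exist both in the fake Github API and in
        # the upstream test git repo.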
self.fake_github.emitEvent(self.fake_github.getPushEvent(
'org/requiredproject-github', ref='refs/heads/feat-x'))
# Wait until Zuul has processed the push events and knows about
# the branch protection
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest(
'org/requiringproject-github', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Job must be successful
self.assertHistory([
dict(name='require-test1-github', result='SUCCESS'),
])
def test_required_projects_branch_old_cache(self):
self.create_branch('org/requiringproject', 'feat-x')
self.create_branch('org/requiredproject', 'feat-x')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/requiringproject', 'feat-x'))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_start = True
B = self.fake_gerrit.addFakeChange('org/requiringproject', 'feat-x',
'A')
B.addApproval('Approved', 1)
self.fake_gerrit.addEvent(B.addApproval('Code-Review', 2))
for _ in iterate_timeout(30, 'Wait for build to be in starting phase'):
if self.executor_server.job_workers:
sleep(1)
break
# Delete local feat-x from org/requiredproject on the executor cache
repo = self.executor_server.merger.getRepo(
'gerrit', 'org/requiredproject')
repo.deleteRef('refs/heads/feat-x')
# Let the job continue to the build phase
self.executor_server.hold_jobs_in_build = True
self.executor_server.hold_jobs_in_start = False
self.waitUntilSettled()
# Assert that feat-x has been checked out in the job workspace
path = os.path.join(self.builds[0].jobdir.src_root,
'review.example.com/org/requiredproject')
repo = git.Repo(path)
self.assertEqual(str(repo.active_branch), 'feat-x')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='require-test1', result='SUCCESS', changes='1,1'),
dict(name='require-test2', result='SUCCESS', changes='1,1'),
])
def test_required_projects(self):
# Test that the repo state is restored globally for the whole buildset
# including required projects not in the dependency chain.
self.executor_server.hold_jobs_in_start = True
A = self.fake_gerrit.addFakeChange('org/requiringproject', 'master',
'A')
A.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
for _ in iterate_timeout(30, 'Wait for build to be in starting phase'):
if self.executor_server.job_workers:
sleep(1)
break
# The build require-test1 is running,
# require-test2 is waiting for require-test1.
self.assertEqual(len(self.builds), 1)
        # Now merge a change to the test script out of band.
        # This will break require-test2 if it updates requiredproject
        # to latest master.  However, due to the buildset-global repo
        # state, require-test2 must not be broken afterwards.
runscript = textwrap.dedent(
"""
#!/bin/bash
exit 1
""")
file_dict = {'script.sh': runscript}
B = self.fake_gerrit.addFakeChange('org/requiredproject', 'master',
'A', files=file_dict)
        self.log.info('Merge test change on org/requiredproject')
B.setMerged()
# Reset repo to ensure the cached repo has the failing commit. This
# is needed to ensure that the repo state has been restored.
repo = self.executor_server.merger.getRepo(
'gerrit', 'org/requiredproject')
repo.update()
repo.reset()
self.executor_server.hold_jobs_in_start = False
self.waitUntilSettled()
self.assertHistory([
dict(name='require-test1', result='SUCCESS', changes='1,1'),
dict(name='require-test2', result='SUCCESS', changes='1,1'),
])
def test_dependent_project(self):
# Test that the repo state is restored globally for the whole buildset
# including dependent projects.
self.executor_server.hold_jobs_in_start = True
B = self.fake_gerrit.addFakeChange('org/requiredproject', 'master',
'B')
A = self.fake_gerrit.addFakeChange('org/dependentproject', 'master',
'A')
A.setDependsOn(B, 1)
A.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
for _ in iterate_timeout(30, 'Wait for build to be in starting phase'):
if self.executor_server.job_workers:
sleep(1)
break
# The build dependent-test1 is running,
# dependent-test2 is waiting for dependent-test1.
self.assertEqual(len(self.builds), 1)
# Now merge a change to the test script out of band.
# This will break dependent-test2 if it updates requiredproject
# to latest master. However, due to the buildset-global repo state,
# dependent-test2 must not be broken afterwards.
runscript = textwrap.dedent(
"""
#!/bin/bash
exit 1
""")
file_dict = {'script.sh': runscript}
C = self.fake_gerrit.addFakeChange('org/requiredproject', 'master',
'C', files=file_dict)
        self.log.info('Merge test change on org/requiredproject')
C.setMerged()
# Reset repo to ensure the cached repo has the failing commit. This
# is needed to ensure that the repo state has been restored.
repo = self.executor_server.merger.getRepo(
'gerrit', 'org/requiredproject')
repo.reset()
self.executor_server.hold_jobs_in_start = False
self.waitUntilSettled()
self.assertHistory([
dict(name='dependent-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='dependent-test2', result='SUCCESS', changes='1,1 2,1'),
])
class TestNonLiveMerges(ZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/in-repo/main.yaml'
def test_non_live_merges_with_config_updates(self):
"""
This test checks that we do merges for non-live queue items with
config updates.
* Simple dependency chain:
A -> B -> C
"""
in_repo_conf_a = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test.yaml
- project:
name: org/project
check:
jobs:
- project-test1
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict_a = {'.zuul.yaml': in_repo_conf_a,
'playbooks/project-test.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict_a)
in_repo_conf_b = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test.yaml
- job:
name: project-test2
run: playbooks/project-test.yaml
- project:
name: org/project
check:
jobs:
- project-test1
- project-test2
""")
file_dict_b = {'.zuul.yaml': in_repo_conf_b}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict_b,
parent=A.patchsets[0]['ref'])
B.setDependsOn(A, 1)
in_repo_conf_c = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test.yaml
- job:
name: project-test2
run: playbooks/project-test.yaml
- job:
name: project-test3
run: playbooks/project-test.yaml
- project:
name: org/project
check:
jobs:
- project-test1
- project-test2
- project-test3
""")
file_dict_c = {'.zuul.yaml': in_repo_conf_c}
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C',
files=file_dict_c,
parent=B.patchsets[0]['ref'])
C.setDependsOn(B, 1)
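        # Enqueue all three changes; each live item will also carry
        # its non-live dependencies ahead of it.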
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1, "A should report")
self.assertEqual(B.reported, 1, "B should report")
self.assertEqual(C.reported, 1, "C should report")
self.assertIn('Build succeeded', A.messages[0])
self.assertIn('Build succeeded', B.messages[0])
self.assertIn('Build succeeded', C.messages[0])
self.assertHistory([
# Change A
dict(name='project-test1', result='SUCCESS', changes='1,1'),
# Change B
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
# Change C
dict(name='project-test1', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='project-test2', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='project-test3', result='SUCCESS',
changes='1,1 2,1 3,1'),
], ordered=False)
# We expect one merge call per live change, plus one call for
# each non-live change with a config update (which is all of them).
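        # That is 3 merges for the live items plus 3 for the non-live
        # items (A behind B, and A and B behind C), for 6 in total.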
merge_jobs = self.merge_job_history.get(MergeRequest.MERGE)
self.assertEqual(len(merge_jobs), 6)
def test_non_live_merges(self):
"""
This test checks that we don't do merges for non-live queue items.
* Simple dependency chain:
A -> B -> C
"""
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.setDependsOn(B, 1)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# We expect one merge call per live change.
merge_jobs = self.merge_job_history.get(MergeRequest.MERGE)
self.assertEqual(len(merge_jobs), 3)
class TestJobContamination(AnsibleZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/zuul-job-contamination/main.yaml'
    # These tests also use the fake Github implementation, which means
    # that every scheduler gets a different fake Github instance.
    # Thus, assertions might fail depending on which scheduler did the
    # interaction with Github.
scheduler_count = 1
def test_job_contamination_playbooks(self):
conf = textwrap.dedent(
"""
- job:
name: base
post-run:
- playbooks/something-new.yaml
parent: null
vars:
basevar: basejob
""")
file_dict = {'zuul.d/jobs.yaml': conf}
A = self.fake_github.openFakePullRequest(
'org/global-config', 'master', 'A', files=file_dict)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
B = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
statuses_b = self.fake_github.getCommitStatuses(
'org/project1', B.head_sha)
self.assertEqual(len(statuses_b), 1)
# B should not be affected by the A PR
self.assertEqual('success', statuses_b[0]['state'])
def test_job_contamination_vars(self):
conf = textwrap.dedent(
"""
- job:
name: base
parent: null
vars:
basevar: basejob-modified
""")
file_dict = {'zuul.d/jobs.yaml': conf}
A = self.fake_github.openFakePullRequest(
'org/global-config', 'master', 'A', files=file_dict)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
B = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
statuses_b = self.fake_github.getCommitStatuses(
'org/project1', B.head_sha)
self.assertEqual(len(statuses_b), 1)
# B should not be affected by the A PR
self.assertEqual('success', statuses_b[0]['state'])
class TestInRepoJoin(ZuulTestCase):
# In this config, org/project is not a member of any pipelines, so
# that we may test the changes that cause it to join them.
tenant_config_file = 'config/in-repo-join/main.yaml'
def test_dynamic_dependent_pipeline(self):
# Test dynamically adding a project to a
# dependent pipeline for the first time
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
gate_pipeline = tenant.layout.pipelines['gate']
self.assertEqual(gate_pipeline.queues, [])
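        # org/project has not joined the gate pipeline yet, so there
        # are no change queues.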
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- job:
name: project-test2
run: playbooks/project-test2.yaml
- project:
name: org/project
gate:
jobs:
- project-test2
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
items = gate_pipeline.getAllItems()
self.assertEqual(items[0].change.number, '1')
self.assertEqual(items[0].change.patchset, '1')
self.assertTrue(items[0].live)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# Make sure the dynamic queue got cleaned up
self.assertEqual(gate_pipeline.queues, [])
def test_dynamic_dependent_pipeline_failure(self):
# Test that a change behind a failing change adding a project
# to a dependent pipeline is dequeued.
self.executor_server.hold_jobs_in_build = True
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- project:
name: org/project
gate:
jobs:
- project-test1
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.executor_server.failJob('project-test1', A)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
self.assertEqual(A.reported, 2,
"A should report start and failure")
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.reported, 1,
"B should report start")
self.assertHistory([
dict(name='project-test1', result='FAILURE', changes='1,1'),
dict(name='project-test1', result='ABORTED', changes='1,1 2,1'),
], ordered=False)
def test_dynamic_failure_with_reconfig(self):
# Test that a reconfig in the middle of adding a change to a
# pipeline works.
self.executor_server.hold_jobs_in_build = True
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- project:
name: org/project
gate:
jobs:
- project-test1
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.executor_server.failJob('project-test1', A)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
# Execute a reconfig here which will clear the cached layout
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
self.assertEqual(A.reported, 2,
"A should report start and failure")
self.assertEqual(A.data['status'], 'NEW')
self.assertHistory([
dict(name='project-test1', result='FAILURE', changes='1,1'),
], ordered=False)
def test_dynamic_dependent_pipeline_merge_failure(self):
# Test that a merge failure behind a change adding a project
# to a dependent pipeline is correctly reported.
self.executor_server.hold_jobs_in_build = True
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
run: playbooks/project-test1.yaml
- project:
name: org/project
gate:
jobs:
- project-test1
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
run: playbooks/project-test1.yaml
- project:
name: org/project
gate:
jobs:
- project-test2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.reported, 1,
"B should report merge failure")
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_dynamic_dependent_pipeline_absent(self):
# Test that a series of dependent changes don't report merge
# failures to a pipeline they aren't in.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
A.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
B.addApproval('Code-Review', 2)
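        # Only B's approval generates an event; since org/project is
        # not in the gate pipeline, neither change should be enqueued.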
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 0,
"A should not report")
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.reported, 0,
"B should not report")
self.assertEqual(B.data['status'], 'NEW')
self.assertHistory([])
class FunctionalAnsibleMixIn(object):
# A temporary class to hold new tests while others are disabled
# These should be overridden in child classes.
tenant_config_file = 'config/ansible/main.yaml'
ansible_major_minor = 'X.Y'
def test_playbook(self):
# This test runs a bit long and needs extra time.
self.wait_timeout = 300
# Keep the jobdir around so we can inspect contents if an
# assert fails.
self.executor_server.keep_jobdir = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
# Add a site variables file, used by check-vars
path = os.path.join(FIXTURE_DIR, 'config', 'ansible',
'variables.yaml')
self.config.set('executor', 'variables', path)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
build_timeout = self.getJobFromHistory('timeout', result='TIMED_OUT')
with self.jobLog(build_timeout):
post_flag_path = os.path.join(
self.jobdir_root, build_timeout.uuid + '.post.flag')
self.assertTrue(os.path.exists(post_flag_path))
build_post_timeout = self.getJobFromHistory('post-timeout')
with self.jobLog(build_post_timeout):
self.assertEqual(build_post_timeout.result, 'POST_FAILURE')
build_faillocal = self.getJobFromHistory('faillocal')
with self.jobLog(build_faillocal):
self.assertEqual(build_faillocal.result, 'FAILURE')
build_failpost = self.getJobFromHistory('failpost')
with self.jobLog(build_failpost):
self.assertEqual(build_failpost.result, 'POST_FAILURE')
build_check_vars = self.getJobFromHistory('check-vars')
with self.jobLog(build_check_vars):
self.assertEqual(build_check_vars.result, 'SUCCESS')
build_check_hostvars = self.getJobFromHistory('check-hostvars')
with self.jobLog(build_check_hostvars):
self.assertEqual(build_check_hostvars.result, 'SUCCESS')
build_check_secret_names = self.getJobFromHistory('check-secret-names')
with self.jobLog(build_check_secret_names):
self.assertEqual(build_check_secret_names.result, 'SUCCESS')
build_hello = self.getJobFromHistory('hello-world')
with self.jobLog(build_hello):
self.assertEqual(build_hello.result, 'SUCCESS')
build_add_host = self.getJobFromHistory('add-host')
with self.jobLog(build_add_host):
self.assertEqual(build_add_host.result, 'SUCCESS')
build_multiple_child = self.getJobFromHistory('multiple-child')
with self.jobLog(build_multiple_child):
self.assertEqual(build_multiple_child.result, 'SUCCESS')
build_multiple_child_no_run = self.getJobFromHistory(
'multiple-child-no-run')
with self.jobLog(build_multiple_child_no_run):
self.assertEqual(build_multiple_child_no_run.result, 'SUCCESS')
build_multiple_run = self.getJobFromHistory('multiple-run')
with self.jobLog(build_multiple_run):
self.assertEqual(build_multiple_run.result, 'SUCCESS')
build_multiple_run_failure = self.getJobFromHistory(
'multiple-run-failure')
with self.jobLog(build_multiple_run_failure):
self.assertEqual(build_multiple_run_failure.result, 'FAILURE')
build_python27 = self.getJobFromHistory('python27')
with self.jobLog(build_python27):
self.assertEqual(build_python27.result, 'SUCCESS')
flag_path = os.path.join(self.jobdir_root,
build_python27.uuid + '.flag')
self.assertTrue(os.path.exists(flag_path))
copied_path = os.path.join(self.jobdir_root, build_python27.uuid +
'.copied')
self.assertTrue(os.path.exists(copied_path))
failed_path = os.path.join(self.jobdir_root, build_python27.uuid +
'.failed')
self.assertFalse(os.path.exists(failed_path))
pre_flag_path = os.path.join(
self.jobdir_root, build_python27.uuid + '.pre.flag')
self.assertTrue(os.path.exists(pre_flag_path))
post_flag_path = os.path.join(
self.jobdir_root, build_python27.uuid + '.post.flag')
self.assertTrue(os.path.exists(post_flag_path))
bare_role_flag_path = os.path.join(self.jobdir_root,
build_python27.uuid +
'.bare-role.flag')
self.assertTrue(os.path.exists(bare_role_flag_path))
secrets_path = os.path.join(self.jobdir_root,
build_python27.uuid + '.secrets')
with open(secrets_path) as f:
self.assertEqual(f.read(), "test-username test-password")
build_bubblewrap = self.getJobFromHistory('bubblewrap')
with self.jobLog(build_bubblewrap):
self.assertEqual(build_bubblewrap.result, 'SUCCESS')
def test_repo_ansible(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/ansible', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report success")
self.assertHistory([
dict(name='hello-ansible', result='SUCCESS', changes='1,1'),
])
build = self.getJobFromHistory('hello-ansible', result='SUCCESS')
with open(build.jobdir.job_output_file) as f:
output = f.read()
self.assertIn(f'Ansible version={self.ansible_major_minor}',
output)
class TestAnsible6(AnsibleZuulTestCase, FunctionalAnsibleMixIn):
tenant_config_file = 'config/ansible/main6.yaml'
ansible_major_minor = '2.13'
class TestAnsible8(AnsibleZuulTestCase, FunctionalAnsibleMixIn):
tenant_config_file = 'config/ansible/main8.yaml'
ansible_major_minor = '2.15'
class TestPrePlaybooks(AnsibleZuulTestCase):
# A temporary class to hold new tests while others are disabled
tenant_config_file = 'config/pre-playbook/main.yaml'
def test_pre_playbook_fail(self):
# Test that we run the post playbooks (but not the actual
# playbook) when a pre-playbook fails.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
build = self.getJobFromHistory('python27')
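        # The pre-playbook failure causes retries; an individual
        # retried build has no final result of its own.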
self.assertIsNone(build.result)
self.assertIn('RETRY_LIMIT', A.messages[0])
flag_path = os.path.join(self.test_root, build.uuid +
'.main.flag')
self.assertFalse(os.path.exists(flag_path))
pre_flag_path = os.path.join(self.test_root, build.uuid +
'.pre.flag')
self.assertFalse(os.path.exists(pre_flag_path))
post_flag_path = os.path.join(
self.jobdir_root, build.uuid + '.post.flag')
self.assertTrue(os.path.exists(post_flag_path),
"The file %s should exist" % post_flag_path)
def test_post_playbook_fail_autohold(self):
self.addAutohold('tenant-one', 'review.example.com/org/project3',
'python27-node-post', '.*', 'reason text', 1, 600)
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
build = self.getJobFromHistory('python27-node-post')
self.assertEqual(build.result, 'POST_FAILURE')
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNotNone(held_node)
# Validate node has recorded the failed job
self.assertEqual(
held_node['hold_job'],
" ".join(['tenant-one',
'review.example.com/org/project3',
'python27-node-post', '.*'])
)
self.assertEqual(held_node['comment'], "reason text")
def test_pre_playbook_fail_autohold(self):
self.addAutohold('tenant-one', 'review.example.com/org/project2',
'python27-node', '.*', 'reason text', 1, 600)
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
build = self.getJobFromHistory('python27-node')
self.assertIsNone(build.result)
self.assertIn('RETRY_LIMIT', A.messages[0])
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNotNone(held_node)
# Validate node has recorded the failed job
self.assertEqual(
held_node['hold_job'],
" ".join(['tenant-one',
'review.example.com/org/project2',
'python27-node', '.*'])
)
self.assertEqual(held_node['comment'], "reason text")
class TestPostPlaybooks(AnsibleZuulTestCase):
tenant_config_file = 'config/post-playbook/main.yaml'
def test_post_playbook_abort(self):
# Test that when we abort a job in the post playbook, that we
# don't send back POST_FAILURE.
self.executor_server.verbose = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(60, 'job started'):
if len(self.builds):
break
build = self.builds[0]
post_start = os.path.join(self.jobdir_root, build.uuid +
'.post_start.flag')
for _ in iterate_timeout(60, 'job post running'):
if os.path.exists(post_start):
break
# The post playbook has started, abort the job
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
build = self.getJobFromHistory('python27')
self.assertEqual('ABORTED', build.result)
post_end = os.path.join(self.jobdir_root, build.uuid +
'.post_end.flag')
self.assertTrue(os.path.exists(post_start))
self.assertFalse(os.path.exists(post_end))
class TestCleanupPlaybooks(AnsibleZuulTestCase):
tenant_config_file = 'config/cleanup-playbook/main.yaml'
def test_cleanup_playbook_success(self):
# Test that the cleanup run is performed
self.executor_server.verbose = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(60, 'job started'):
if len(self.builds):
break
build = self.builds[0]
post_start = os.path.join(self.jobdir_root, build.uuid +
'.post_start.flag')
for _ in iterate_timeout(60, 'job post running'):
if os.path.exists(post_start):
break
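        # Signal the job's wait loop to continue so the build can
        # finish normally.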
with open(os.path.join(self.jobdir_root, build.uuid, 'test_wait'),
"w") as of:
of.write("continue")
self.waitUntilSettled()
build = self.getJobFromHistory('python27')
self.assertEqual('SUCCESS', build.result)
cleanup_flag = os.path.join(self.jobdir_root, build.uuid +
'.cleanup.flag')
self.assertTrue(os.path.exists(cleanup_flag))
with open(cleanup_flag) as f:
self.assertEqual('True', f.readline())
def test_cleanup_playbook_failure(self):
# Test that the cleanup run is performed
self.executor_server.verbose = True
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- python27-failure
""")
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files={'.zuul.yaml': in_repo_conf})
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(60, 'job started'):
if len(self.builds):
break
self.waitUntilSettled()
build = self.getJobFromHistory('python27-failure')
self.assertEqual('FAILURE', build.result)
cleanup_flag = os.path.join(self.jobdir_root, build.uuid +
'.cleanup.flag')
self.assertTrue(os.path.exists(cleanup_flag))
with open(cleanup_flag) as f:
self.assertEqual('False', f.readline())
def test_cleanup_playbook_abort(self):
# Test that when we abort a job the cleanup run is performed
self.executor_server.verbose = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(60, 'job started'):
if len(self.builds):
break
build = self.builds[0]
post_start = os.path.join(self.jobdir_root, build.uuid +
'.post_start.flag')
for _ in iterate_timeout(60, 'job post running'):
if os.path.exists(post_start):
break
# The post playbook has started, abort the job
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
build = self.getJobFromHistory('python27')
self.assertEqual('ABORTED', build.result)
post_end = os.path.join(self.jobdir_root, build.uuid +
'.post_end.flag')
cleanup_flag = os.path.join(self.jobdir_root, build.uuid +
'.cleanup.flag')
self.assertTrue(os.path.exists(cleanup_flag))
self.assertTrue(os.path.exists(post_start))
self.assertFalse(os.path.exists(post_end))
@mock.patch("zuul.executor.server.CLEANUP_TIMEOUT", 5)
def test_cleanup_playbook_timeout(self):
# Test that when the cleanup runs into a timeout, the job
# still completes.
self.executor_server.verbose = True
# Change the zuul config to run the python27-cleanup-timeout job
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- python27-cleanup-timeout
""")
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files={".zuul.yaml": in_repo_conf})
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name="python27-cleanup-timeout", result="SUCCESS",
changes="1,1")])
class TestPlaybookSemaphore(AnsibleZuulTestCase):
tenant_config_file = 'config/playbook-semaphore/main.yaml'
def test_playbook_semaphore(self):
self.executor_server.verbose = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(60, 'job started'):
if len(self.builds) == 1:
break
build1 = self.builds[0]
# Wait for the first job to be running the mutexed playbook
run1_start = os.path.join(self.jobdir_root, build1.uuid +
'.run_start.flag')
for _ in iterate_timeout(60, 'job1 running'):
if os.path.exists(run1_start):
break
# Start a second build which should wait for the playbook
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
# Wait until we are waiting for the playbook
for _ in iterate_timeout(60, 'job2 waiting for semaphore'):
found = False
if len(self.builds) == 2:
build2 = self.builds[1]
for job_worker in self.executor_server.job_workers.values():
if job_worker.build_request.uuid == build2.uuid:
if job_worker.waiting_for_semaphores:
found = True
if found:
break
        # Release build1 so it can finish
with open(os.path.join(self.jobdir_root, build1.uuid, 'test_wait'),
"w") as of:
of.write("continue")
# Wait for the second job to be running the mutexed playbook
run2_start = os.path.join(self.jobdir_root, build2.uuid +
'.run_start.flag')
for _ in iterate_timeout(60, 'job2 running'):
if os.path.exists(run2_start):
break
# Release build2 and wait to finish
with open(os.path.join(self.jobdir_root, build2.uuid, 'test_wait'),
"w") as of:
of.write("continue")
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1'),
dict(name='test-job', result='SUCCESS', changes='2,1'),
])
def test_playbook_and_job_semaphore_runtime(self):
# Test that a playbook does not specify the same semaphore as
# the job. Test via inheritance which is a runtime check.
in_repo_conf = textwrap.dedent(
"""
- job:
name: test-job2
parent: test-job
semaphore: test-semaphore
- project:
check:
jobs:
- test-job2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('both job and playbook', A.messages[0])
def test_playbook_and_job_semaphore_def(self):
        # Test that a playbook may not specify the same semaphore as
        # the job.  Tested via a static configuration check.
in_repo_conf = textwrap.dedent(
"""
- job:
name: test-job2
semaphore: test-semaphore
run:
- name: playbooks/run.yaml
semaphores: test-semaphore
- project:
check:
jobs:
- test-job2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(A.reported, 1)
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('both job and playbook', A.messages[0])
def test_playbook_semaphore_timeout(self):
self.wait_timeout = 300
self.executor_server.verbose = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(60, 'job started'):
if len(self.builds) == 1:
break
build1 = self.builds[0]
# Wait for the first job to be running the mutexed playbook
run1_start = os.path.join(self.jobdir_root, build1.uuid +
'.run_start.flag')
for _ in iterate_timeout(60, 'job1 running'):
if os.path.exists(run1_start):
break
# Start a second build which should wait for the playbook
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- test-job:
timeout: 20
""")
file_dict = {'.zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
# Wait until we are waiting for the playbook
for _ in iterate_timeout(60, 'job2 waiting for semaphore'):
found = False
if len(self.builds) == 2:
build2 = self.builds[1]
for job_worker in self.executor_server.job_workers.values():
if job_worker.build_request.uuid == build2.uuid:
if job_worker.waiting_for_semaphores:
found = True
if found:
break
        # Wait for the second build to time out waiting for the semaphore
for _ in iterate_timeout(60, 'build timed out'):
if len(self.builds) == 1:
break
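        # Build2 hit its 20 second job timeout while still waiting for
        # the semaphore, so it is reported as TIMED_OUT without ever
        # running the mutexed playbook.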
        # Release build1 so it can finish
with open(os.path.join(self.jobdir_root, build1.uuid, 'test_wait'),
"w") as of:
of.write("continue")
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='TIMED_OUT', changes='2,1'),
dict(name='test-job', result='SUCCESS', changes='1,1'),
])
def test_playbook_semaphore_abort(self):
self.wait_timeout = 300
self.executor_server.verbose = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(60, 'job started'):
if len(self.builds) == 1:
break
build1 = self.builds[0]
# Wait for the first job to be running the mutexed playbook
run1_start = os.path.join(self.jobdir_root, build1.uuid +
'.run_start.flag')
for _ in iterate_timeout(60, 'job1 running'):
if os.path.exists(run1_start):
break
# Start a second build which should wait for the playbook
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
# Wait until we are waiting for the playbook
for _ in iterate_timeout(60, 'job2 waiting for semaphore'):
found = False
if len(self.builds) == 2:
build2 = self.builds[1]
for job_worker in self.executor_server.job_workers.values():
if job_worker.build_request.uuid == build2.uuid:
if job_worker.waiting_for_semaphores:
found = True
if found:
break
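        # Push a new patchset to B that drops the job; the build
        # waiting on the playbook semaphore should be dequeued and
        # aborted.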
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs: []
""")
file_dict = {'.zuul.yaml': in_repo_conf}
B.addPatchset(files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
for _ in iterate_timeout(60, 'build aborted'):
if len(self.builds) == 1:
break
        # Release build1 so it can finish
with open(os.path.join(self.jobdir_root, build1.uuid, 'test_wait'),
"w") as of:
of.write("continue")
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='ABORTED', changes='2,1'),
dict(name='test-job', result='SUCCESS', changes='1,1'),
])
class TestBrokenTrustedConfig(ZuulTestCase):
    # Test that we can deal with a broken config involving only trusted
    # projects.  This differs from TestBrokenConfig in that it does not
    # have a missing repo error.
tenant_config_file = 'config/broken-trusted/main.yaml'
def test_broken_config_on_startup(self):
        # Verify that we get the errors at the tenant level.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
loading_errors = tenant.layout.loading_errors
        self.assertEqual(
len(tenant.layout.loading_errors), 1,
"An error should have been stored")
self.assertIn(
"Zuul encountered a syntax error",
str(loading_errors[0].error))
def test_trusted_broken_tenant_config(self):
"""
        Tests that we cannot speculatively modify a config project by
        replacing its check jobs with noop.
"""
in_repo_conf = textwrap.dedent(
"""
- pipeline:
name: check
manager: independent
trigger:
gerrit:
- event: patchset-created
success:
gerrit:
Verified: 1
failure:
gerrit:
Verified: -1
- job:
name: base
parent: null
- project:
name: common-config
check:
jobs:
- noop
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
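        # The proposed noop replacement must not take effect; the
        # statically configured gate-noop job runs instead.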
self.assertHistory([
dict(name='gate-noop', result='SUCCESS', changes='1,1')])
class TestBrokenConfig(ZuulTestCase):
# Test we can deal with a broken config
tenant_config_file = 'config/broken/main.yaml'
def test_broken_config_on_startup(self):
        # Verify that we get the errors at the tenant level.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-broken')
loading_errors = tenant.layout.loading_errors
        self.assertEqual(
            len(tenant.layout.loading_errors), 2,
            "Two errors should have been stored")
self.assertIn(
"Zuul encountered an error while accessing the repo org/project3",
str(loading_errors[0].error))
self.assertIn(
"Zuul encountered a syntax error",
str(loading_errors[1].error))
@simple_layout('layouts/broken-template.yaml')
def test_broken_config_on_startup_template(self):
# Verify that a missing project-template doesn't break gate
# pipeline construction.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
        self.assertEqual(
len(tenant.layout.loading_errors), 1,
"An error should have been stored")
self.assertIn(
"Zuul encountered a syntax error",
str(tenant.layout.loading_errors[0].error))
@simple_layout('layouts/broken-double-gate.yaml')
def test_broken_config_on_startup_double_gate(self):
# Verify that duplicated pipeline definitions raise config errors
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
        self.assertEqual(
len(tenant.layout.loading_errors), 1,
"An error should have been stored")
self.assertIn(
"Zuul encountered a syntax error",
str(tenant.layout.loading_errors[0].error))
@simple_layout('layouts/broken-warnings.yaml')
def test_broken_config_on_startup_warnings(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
        self.assertEqual(
len(tenant.layout.loading_errors), 1,
"An error should have been stored")
self.assertIn(
"Zuul encountered a deprecated syntax",
str(tenant.layout.loading_errors[0].error))
def test_dynamic_ignore(self):
# Verify dynamic config behaviors inside a tenant broken config
tenant = self.scheds.first.sched.abide.tenants.get('tenant-broken')
# There is a configuration error
        self.assertEqual(
            len(tenant.layout.loading_errors), 2,
            "Two errors should have been stored")
        # Inside a broken tenant configuration environment, send a
        # valid config to an "unbroken" project and verify that the
        # tenant configuration has been validated and the job executed
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test
run: playbooks/project-test.yaml
- project:
check:
jobs:
- project-test
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "1")
self.assertHistory([
dict(name='project-test', result='SUCCESS', changes='1,1')])
def test_dynamic_fail_unbroken(self):
# Verify dynamic config behaviors inside a tenant broken config
tenant = self.scheds.first.sched.abide.tenants.get('tenant-broken')
# There is a configuration error
        self.assertEqual(
            len(tenant.layout.loading_errors), 2,
            "Two errors should have been stored")
        # Inside a broken tenant configuration environment, send an
        # invalid config to an "unbroken" project and verify that the
        # tenant configuration has not been validated
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test
run: playbooks/project-test.yaml
- project:
check:
jobs:
- non-existent-job
""")
file_dict = {'.zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1,
"A should report failure")
self.assertEqual(B.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Job non-existent-job not defined', B.messages[0],
"A should have failed the check pipeline")
def test_dynamic_fail_broken(self):
# Verify dynamic config behaviors inside a tenant broken config
tenant = self.scheds.first.sched.abide.tenants.get('tenant-broken')
# There is a configuration error
        self.assertEqual(
            len(tenant.layout.loading_errors), 2,
            "Two errors should have been stored")
        # Inside a broken tenant configuration environment, send an
        # invalid config to a "broken" project and verify that the
        # tenant configuration has not been validated
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test
run: playbooks/project-test.yaml
- project:
check:
jobs:
- non-existent-job
""")
file_dict = {'.zuul.yaml': in_repo_conf}
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C',
files=file_dict)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(C.reported, 1,
"A should report failure")
self.assertEqual(C.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Job non-existent-job not defined', C.messages[0],
"A should have failed the check pipeline")
def test_dynamic_fix_broken(self):
# Verify dynamic config behaviors inside a tenant broken config
tenant = self.scheds.first.sched.abide.tenants.get('tenant-broken')
# There is a configuration error
        self.assertEqual(
            len(tenant.layout.loading_errors), 2,
            "Two errors should have been stored")
        # Inside a broken tenant configuration environment, send a
        # valid config to a "broken" project and verify that the
        # tenant configuration has been validated and the job executed
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test2
run: playbooks/project-test.yaml
- project:
check:
jobs:
- project-test2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D',
files=file_dict)
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(D.patchsets[0]['approvals'][0]['value'], "1")
self.assertHistory([
dict(name='project-test2', result='SUCCESS', changes='1,1')])
def test_dynamic_fail_cross_repo(self):
# Verify dynamic config behaviors inside a tenant broken config
tenant = self.scheds.first.sched.abide.tenants.get('tenant-broken')
# There is a configuration error
        self.assertEqual(
            len(tenant.layout.loading_errors), 2,
            "Two errors should have been stored")
# Inside a broken tenant configuration environment, remove a
# job used in another repo and verify that an error is
# reported despite the error being in a repo other than the
# change.
in_repo_conf = textwrap.dedent(
"""
- pipeline:
name: check
manager: independent
trigger:
gerrit:
- event: patchset-created
success:
gerrit:
Verified: 1
failure:
gerrit:
Verified: -1
- job:
name: base
parent: null
- project:
name: common-config
check:
jobs:
- noop
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Job central-test not defined', A.messages[0],
"A should have failed the check pipeline")
class TestBrokenMultiTenantConfig(ZuulTestCase):
# Test we can deal with a broken multi-tenant config
tenant_config_file = 'config/broken-multi-tenant/main.yaml'
def test_loading_errors(self):
# This regression test came about when we discovered the following:
# * We cache configuration objects if they load without error
# in their first tenant; that means that they can show up as
# errors in later tenants, but as long as those other
# tenants aren't proposing changes to that repo (which is
# unlikely in this situation; this usually arises if the
# tenant just wants to use some foreign jobs), users won't
# be blocked by the error.
#
# * If a merge job for a dynamic config change arrives out of
# order, we will build the new configuration and if there
# are errors, we will compare it to the previous
# configuration to determine if they are relevant, but that
# caused an error since the previous layout had not been
# calculated yet. It's pretty hard to end up with
# irrelevant errors except by virtue of the first point
# above, which is why this test relies on a second tenant.
        # This test has two tenants. The first loads project2 and
        # project3 without errors, and all config objects are cached.
# The second tenant loads only project1 and project2.
# Project2 references a job that is defined in project3, so
# the tenant loads with an error, but proceeds.
# Don't run any merge jobs, so we can run them out of order.
self.hold_merge_jobs_in_queue = True
# Create a first change which modifies the config (and
# therefore will require a merge job).
in_repo_conf = textwrap.dedent(
"""
- job: {'name': 'foo'}
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
# Create a second change which also modifies the config.
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
files=file_dict)
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# There should be a merge job for each change.
self.assertEqual(len(list(self.merger_api.all())), 2)
jobs = list(self.merger_api.queued())
# Release the second merge job.
self.merger_api.release(jobs[-1])
self.waitUntilSettled()
# At this point we should still be waiting on the first
# change's merge job.
self.assertHistory([])
# Proceed.
self.hold_merge_jobs_in_queue = False
self.merger_api.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='base', result='SUCCESS', changes='1,1 2,1'),
])
class TestProjectKeys(ZuulTestCase):
# Test that we can generate project keys
# Normally the test infrastructure copies a static key in place
# for each project before starting tests. This saves time because
# Zuul's automatic key-generation on startup can be slow. To make
# sure we exercise that code, in this test we allow Zuul to create
# keys for the project on startup.
create_project_keys = True
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/in-repo/main.yaml'
def test_key_generation(self):
test_keys = []
key_fns = ['private.pem', 'ssh.pem']
for fn in key_fns:
with open(os.path.join(FIXTURE_DIR, fn)) as i:
test_keys.append(i.read())
keystore = self.scheds.first.sched.keystore
private_secrets_key, public_secrets_key = (
keystore.getProjectSecretsKeys("gerrit", "org/project")
)
# Make sure that we didn't just end up with the static fixture
# key
        self.assertNotIn(private_secrets_key, test_keys)
# Make sure it's the right length
self.assertEqual(4096, private_secrets_key.key_size)
# Make sure that a proper key was created on startup
private_ssh_key, public_ssh_key = (
keystore.getProjectSSHKeys("gerrit", "org/project")
)
# Make sure that we didn't just end up with the static fixture
# key
        self.assertNotIn(private_ssh_key, test_keys)
with io.StringIO(private_ssh_key) as o:
ssh_key = paramiko.RSAKey.from_private_key(
o, password=keystore.password)
# Make sure it's the right length
self.assertEqual(2048, ssh_key.get_bits())
class TestValidateAllBroken(ZuulTestCase):
# Test we fail while validating all tenants with one broken tenant
validate_tenants = []
tenant_config_file = 'config/broken/main.yaml'
# This test raises a config error during the startup of the test
# case which makes the first scheduler fail during its startup.
# The second (or any additional) scheduler won't even run as the
# startup is serialized in tests/base.py.
# Thus it doesn't make sense to execute this test with multiple
# schedulers.
scheduler_count = 1
def setUp(self):
self.assertRaises(zuul.configloader.ConfigurationSyntaxError,
super().setUp)
def test_validate_all_tenants_broken(self):
        # If we reach this point we successfully caught the config exception.
# There is nothing more to test here.
pass
class TestValidateBroken(ZuulTestCase):
# Test we fail while validating a broken tenant
validate_tenants = ['tenant-broken']
tenant_config_file = 'config/broken/main.yaml'
# This test raises a config error during the startup of the test
# case which makes the first scheduler fail during its startup.
# The second (or any additional) scheduler won't even run as the
# startup is serialized in tests/base.py.
# Thus it doesn't make sense to execute this test with multiple
# schedulers.
scheduler_count = 1
def setUp(self):
self.assertRaises(zuul.configloader.ConfigurationSyntaxError,
super().setUp)
def test_validate_tenant_broken(self):
        # If we reach this point we successfully caught the config exception.
# There is nothing more to test here.
pass
class TestValidateGood(ZuulTestCase):
    # Test we don't fail while validating a good tenant in a multi-tenant
# setup that contains a broken tenant.
validate_tenants = ['tenant-good']
tenant_config_file = 'config/broken/main.yaml'
def test_validate_tenant_good(self):
# If we reach this point we successfully validated the good tenant.
# There is nothing more to test here.
pass
class TestValidateWarnings(ZuulTestCase):
# Test we don't fail when we only have configuration warnings
validate_tenants = ['tenant-one']
tenant_config_file = 'config/broken/main.yaml'
def setUp(self):
with self.assertLogs('zuul.ConfigLoader', level='DEBUG') as full_logs:
super().setUp()
self.assertRegexInList('Zuul encountered a deprecated syntax',
full_logs.output)
@simple_layout('layouts/broken-warnings.yaml')
def test_validate_warnings(self):
pass
class RoleTestCase(ZuulTestCase):
def _getRolesPaths(self, build, playbook):
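        # Parse the ansible.cfg generated for the given playbook and
        # collect any roles_path lines it contains.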
path = os.path.join(self.jobdir_root, build.uuid,
'ansible', playbook, 'ansible.cfg')
roles_paths = []
with open(path) as f:
for line in f:
if line.startswith('roles_path'):
roles_paths.append(line)
return roles_paths
def _assertRolePath(self, build, playbook, content):
roles_paths = self._getRolesPaths(build, playbook)
if content:
self.assertEqual(len(roles_paths), 1,
"Should have one roles_path line in %s" %
(playbook,))
self.assertIn(content, roles_paths[0])
else:
self.assertEqual(len(roles_paths), 0,
"Should have no roles_path line in %s" %
(playbook,))
def _assertInRolePath(self, build, playbook, files):
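        # The roles_path line has the form "roles_path = dir1:dir2:...";
        # verify that every expected role name appears in at least one
        # of those directories.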
roles_paths = self._getRolesPaths(build, playbook)[0]
roles_paths = roles_paths.split('=')[-1].strip()
roles_paths = roles_paths.split(':')
files = set(files)
matches = set()
for rpath in roles_paths:
for rolename in os.listdir(rpath):
if rolename in files:
matches.add(rolename)
self.assertEqual(files, matches)
class TestRoleBranches(RoleTestCase):
tenant_config_file = 'config/role-branches/main.yaml'
def _addRole(self, project, branch, role, parent=None):
data = textwrap.dedent("""
- name: %s
debug:
msg: %s
""" % (role, role))
file_dict = {'roles/%s/tasks/main.yaml' % role: data}
A = self.fake_gerrit.addFakeChange(project, branch,
'add %s' % role,
files=file_dict,
parent=parent)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
return A.patchsets[-1]['ref']
def _addPlaybook(self, project, branch, playbook, role, parent=None):
data = textwrap.dedent("""
- hosts: all
roles:
- %s
""" % role)
file_dict = {'playbooks/%s.yaml' % playbook: data}
A = self.fake_gerrit.addFakeChange(project, branch,
'add %s' % playbook,
files=file_dict,
parent=parent)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
return A.patchsets[-1]['ref']
def _assertInFile(self, path, content):
with open(path) as f:
self.assertIn(content, f.read())
def test_playbook_role_branches(self):
# This tests that the correct branch of a repo which contains
# a playbook or a role is checked out. Most of the action
# happens on project1, which holds a parent job, so that we
# can test the behavior of a project which is not in the
# dependency chain.
# First we create some branch-specific content in project1:
self.create_branch('project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'project1', 'stable'))
self.waitUntilSettled()
# A pre-playbook with unique stable branch content.
p = self._addPlaybook('project1', 'stable',
'parent-job-pre', 'parent-stable-role')
# A role that only exists on the stable branch.
self._addRole('project1', 'stable', 'stable-role', parent=p)
# The same for the master branch.
p = self._addPlaybook('project1', 'master',
'parent-job-pre', 'parent-master-role')
self._addRole('project1', 'master', 'master-role', parent=p)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Push a change to project2 which will run 3 jobs which
# inherit from project1.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
# This job should use the master branch since that's the
# zuul.branch for this change.
build = self.getBuildByName('child-job')
self._assertInRolePath(build, 'playbook_0', ['master-role'])
self._assertInFile(build.jobdir.pre_playbooks[1].path,
'parent-master-role')
# The main playbook is on the master branch of project2, but
# there is a job-level branch override, so the project1 role
# should be from the stable branch. The job-level override
# will cause Zuul to select the project1 pre-playbook from the
# stable branch as well, so we should see it using the stable
# role.
build = self.getBuildByName('child-job-override')
self._assertInRolePath(build, 'playbook_0', ['stable-role'])
self._assertInFile(build.jobdir.pre_playbooks[1].path,
'parent-stable-role')
# The same, but using a required-projects override.
build = self.getBuildByName('child-job-project-override')
self._assertInRolePath(build, 'playbook_0', ['stable-role'])
self._assertInFile(build.jobdir.pre_playbooks[1].path,
'parent-stable-role')
inventory = self.getBuildInventory('child-job-override')
zuul = inventory['all']['vars']['zuul']
expected = {
'playbook_projects': {
'trusted/project_0/review.example.com/common-config': {
'canonical_name': 'review.example.com/common-config',
'checkout': 'master',
'commit': self.getCheckout(
build,
'trusted/project_0/review.example.com/common-config')},
'untrusted/project_0/review.example.com/project1': {
'canonical_name': 'review.example.com/project1',
'checkout': 'stable',
'commit': self.getCheckout(
build,
'untrusted/project_0/review.example.com/project1')},
'untrusted/project_1/review.example.com/common-config': {
'canonical_name': 'review.example.com/common-config',
'checkout': 'master',
'commit': self.getCheckout(
build,
'untrusted/project_1/review.example.com/common-config'
)},
'untrusted/project_2/review.example.com/project2': {
'canonical_name': 'review.example.com/project2',
'checkout': 'master',
'commit': self.getCheckout(
build,
'untrusted/project_2/review.example.com/project2')}},
'playbooks': [
{'path': 'untrusted/project_2/review.example.com/'
'project2/playbooks/child-job.yaml',
'roles': [
{'checkout': 'stable',
'checkout_description': 'job override ref',
'link_name': 'ansible/playbook_0/role_1/project1',
'link_target': 'untrusted/project_0/'
'review.example.com/project1',
'role_path': 'ansible/playbook_0/role_1/project1/roles'
},
{'checkout': 'master',
'checkout_description': 'zuul branch',
'link_name': 'ansible/playbook_0/role_2/common-config',
'link_target': 'untrusted/project_1/'
'review.example.com/common-config',
'role_path': 'ansible/playbook_0/role_2/'
'common-config/roles'
}
]}
]
}
self.assertEqual(expected, zuul['playbook_context'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def getBuildInventory(self, name):
build = self.getBuildByName(name)
inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')
with open(inv_path, 'r') as f:
inventory = yaml.safe_load(f)
return inventory
def getCheckout(self, build, path):
root = os.path.join(build.jobdir.root, path)
repo = git.Repo(root)
return repo.head.commit.hexsha
class TestRoles(RoleTestCase):
tenant_config_file = 'config/roles/main.yaml'
def test_role(self):
# This exercises a proposed change to a role being checked out
# and used.
A = self.fake_gerrit.addFakeChange('bare-role', 'master', 'A')
        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test', result='SUCCESS', changes='1,1 2,1'),
])
def test_role_inheritance(self):
self.executor_server.hold_jobs_in_build = True
conf = textwrap.dedent(
"""
- job:
name: parent
roles:
- zuul: bare-role
pre-run: playbooks/parent-pre.yaml
post-run: playbooks/parent-post.yaml
- job:
name: project-test
parent: parent
run: playbooks/project-test.yaml
roles:
- zuul: org/project
- project:
name: org/project
check:
jobs:
- project-test
""")
file_dict = {'.zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
build = self.getBuildByName('project-test')
self._assertRolePath(build, 'pre_playbook_0', 'role_0')
self._assertRolePath(build, 'playbook_0', 'role_0')
self._assertRolePath(build, 'playbook_0', 'role_1')
self._assertRolePath(build, 'post_playbook_0', 'role_0')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test', result='SUCCESS', changes='1,1'),
])
def test_role_error(self):
conf = textwrap.dedent(
"""
- job:
name: project-test
run: playbooks/project-test.yaml
roles:
- zuul: common-config
- project:
name: org/project
check:
jobs:
- project-test
""")
file_dict = {'.zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        self.assertRegex(
            A.messages[-1],
            '- project-test .* ERROR Unable to find role')
class TestImplicitRoles(RoleTestCase):
tenant_config_file = 'config/implicit-roles/main.yaml'
def test_missing_roles(self):
# Test implicit and explicit roles for a project which does
# not have roles. The implicit role should be silently
# ignored since the project doesn't supply roles, but if a
# user declares an explicit role, it should error.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/norole-project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
build = self.getBuildByName('implicit-role-fail')
self._assertRolePath(build, 'playbook_0', None)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
        # The explicit-role-fail build ends in RETRY_LIMIT, which
        # doesn't get recorded in the history
self.assertHistory([
dict(name='implicit-role-fail', result='SUCCESS', changes='1,1'),
])
def test_roles(self):
# Test implicit and explicit roles for a project which does
# have roles. In both cases, we should end up with the role
# in the path. In the explicit case, ensure we end up with
# the name we specified.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/role-project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
build = self.getBuildByName('implicit-role-ok')
self._assertRolePath(build, 'playbook_0', 'role_0')
build = self.getBuildByName('explicit-role-ok')
self._assertRolePath(build, 'playbook_0', 'role_0')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='implicit-role-ok', result='SUCCESS', changes='1,1'),
dict(name='explicit-role-ok', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestShadow(ZuulTestCase):
tenant_config_file = 'config/shadow/main.yaml'
def test_shadow(self):
# Test that a repo is allowed to shadow another's job definitions.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test1', result='SUCCESS', changes='1,1'),
dict(name='test2', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestDataReturn(AnsibleZuulTestCase):
tenant_config_file = 'config/data-return/main.yaml'
def test_data_return(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='data-return', result='SUCCESS', changes='1,1'),
dict(name='data-return-relative', result='SUCCESS', changes='1,1'),
dict(name='child', result='SUCCESS', changes='1,1'),
], ordered=False)
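        # Each job should be reported with a link to its build results.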
self.assertIn('- data-return https://zuul.example.com/',
A.messages[-1])
self.assertIn('- data-return-relative https://zuul.example.com',
A.messages[-1])
def test_data_return_child_jobs(self):
self.wait_timeout = 120
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('data-return-child-jobs')
self.waitUntilSettled()
self.executor_server.release('data-return-child-jobs')
self.waitUntilSettled()
# Make sure skipped jobs are not reported as failing
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
status = tenant.layout.pipelines["check"].formatStatusJSON()
self.assertEqual(
status["change_queues"][0]["heads"][0][0]["failing_reasons"], [])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='data-return-child-jobs', result='SUCCESS',
changes='1,1'),
dict(name='data-return', result='SUCCESS', changes='1,1'),
])
self.assertIn(
'- data-return-child-jobs https://zuul.example.com/',
A.messages[-1])
self.assertIn(
'- data-return https://zuul.example.com/',
A.messages[-1])
        self.assertIn('Skipped 1 job', A.messages[-1])
self.assertIn('Build succeeded', A.messages[-1])
connection = self.scheds.first.sched.sql.connection
builds = connection.getBuilds()
builds.sort(key=lambda x: x.job_name)
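        # Sorted by job name, the builds are: child, data-return,
        # data-return-child-jobs.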
self.assertEqual(builds[0].job_name, 'child')
self.assertEqual(builds[0].error_detail,
'Skipped due to child_jobs return value '
'in job data-return-child-jobs')
self.assertEqual(builds[1].job_name, 'data-return')
self.assertIsNone(builds[1].error_detail)
self.assertEqual(builds[2].job_name, 'data-return-child-jobs')
self.assertIsNone(builds[2].error_detail)
def test_data_return_invalid_child_job(self):
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='data-return-invalid-child-job', result='SUCCESS',
changes='1,1')])
self.assertIn(
'- data-return-invalid-child-job https://zuul.example.com',
A.messages[-1])
        self.assertIn('Skipped 1 job', A.messages[-1])
self.assertIn('Build succeeded', A.messages[-1])
def test_data_return_skip_all_child_jobs(self):
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='data-return-skip-all', result='SUCCESS',
changes='1,1'),
])
self.assertIn(
'- data-return-skip-all https://zuul.example.com/',
A.messages[-1])
        self.assertIn('Skipped 2 jobs', A.messages[-1])
self.assertIn('Build succeeded', A.messages[-1])
def test_data_return_skip_all_child_jobs_with_soft_dependencies(self):
A = self.fake_gerrit.addFakeChange('org/project-soft', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='data-return-cd', result='SUCCESS', changes='1,1'),
dict(name='data-return-c', result='SUCCESS', changes='1,1'),
dict(name='data-return-d', result='SUCCESS', changes='1,1'),
])
self.assertIn('- data-return-cd https://zuul.example.com/',
A.messages[-1])
        self.assertIn('Skipped 2 jobs', A.messages[-1])
self.assertIn('Build succeeded', A.messages[-1])
def test_several_zuul_return(self):
A = self.fake_gerrit.addFakeChange('org/project4', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='several-zuul-return-child', result='SUCCESS',
changes='1,1'),
])
self.assertIn(
'- several-zuul-return-child https://zuul.example.com/',
A.messages[-1])
        self.assertIn('Skipped 1 job', A.messages[-1])
self.assertIn('Build succeeded', A.messages[-1])
def test_data_return_skip_retry(self):
A = self.fake_gerrit.addFakeChange(
'org/project-skip-retry',
'master',
'A'
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='skip-retry-return', result='FAILURE',
changes='1,1'),
])
def test_data_return_child_jobs_failure(self):
A = self.fake_gerrit.addFakeChange('org/project5', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='data-return-child-jobs-failure',
result='FAILURE', changes='1,1'),
])
def test_data_return_child_from_paused_job(self):
A = self.fake_gerrit.addFakeChange('org/project6', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
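        # The paused parent job should resume and succeed once its
        # child job has completed.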
self.assertHistory([
dict(name='data-return', result='SUCCESS', changes='1,1'),
dict(name='paused-data-return-child-jobs',
result='SUCCESS', changes='1,1'),
])
def test_data_return_child_from_retried_paused_job(self):
"""
Tests that the data returned to the child job is overwritten if the
paused job is lost and gets retried (e.g.: executor restart or node
unreachable).
"""
def _get_file(path):
with open(path) as f:
return f.read()
self.wait_timeout = 120
self.executor_server.hold_jobs_in_build = True
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project7', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled("patchset uploaded")
self.executor_server.release('paused-data-return-vars')
self.waitUntilSettled("till job is paused")
paused_job = self.builds[0]
self.assertTrue(paused_job.paused)
        # zuul_return data is set correctly
j = json.loads(_get_file(paused_job.jobdir.result_data_file))
self.assertEqual(j["data"]["build_id"], paused_job.uuid)
# Stop the job worker to simulate an executor restart
for job_worker in self.executor_server.job_workers.values():
if job_worker.build_request.uuid == paused_job.uuid:
job_worker.stop()
self.waitUntilSettled("stop job worker")
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled("all jobs are done")
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
for x in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
# First build of paused job (gets retried)
first_build = self.history[0]
# Second build of the paused job (the retried one)
retried_build = self.history[3]
# The successful child job (second build)
print_build = self.history[2]
        # zuul_return data is set correctly to the new build id
j = json.loads(_get_file(retried_build.jobdir.result_data_file))
self.assertEqual(j["data"]["build_id"], retried_build.uuid)
self.assertNotIn(first_build.uuid,
_get_file(print_build.jobdir.job_output_file))
self.assertIn(retried_build.uuid,
_get_file(print_build.jobdir.job_output_file))
class TestDiskAccounting(AnsibleZuulTestCase):
config_file = 'zuul-disk-accounting.conf'
tenant_config_file = 'config/disk-accountant/main.yaml'
def test_disk_accountant_kills_job(self):
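        # The job presumably dd's a large file into the work dir; the
        # executor's disk accountant should notice the per-build disk
        # limit being exceeded and abort the build.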
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='dd-big-empty-file', result='ABORTED', changes='1,1')])
class TestEarlyFailure(AnsibleZuulTestCase):
tenant_config_file = 'config/early-failure/main.yaml'
def test_early_failure(self):
file_dict = {'early-failure.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.log.debug("Wait for the first change to start its job")
for _ in iterate_timeout(30, 'job A started'):
if len(self.builds) == 1:
break
A_build = self.builds[0]
start = os.path.join(self.jobdir_root, A_build.uuid +
'.failure_start.flag')
for _ in iterate_timeout(30, 'job A running'):
if os.path.exists(start):
break
self.log.debug("Add a second change which will test with the first")
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.log.debug("Wait for the second change to start its job")
for _ in iterate_timeout(30, 'job B started'):
if len(self.builds) == 2:
break
B_build = self.builds[1]
start = os.path.join(self.jobdir_root, B_build.uuid +
'.wait_start.flag')
for _ in iterate_timeout(30, 'job B running'):
if os.path.exists(start):
break
self.log.debug("Continue the first job which will fail early")
flag_path = os.path.join(self.jobdir_root, A_build.uuid,
'failure_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
self.log.debug("Wait for the second job to be aborted "
"and restarted without the first change")
for _ in iterate_timeout(30, 'job B restarted'):
if len(self.builds) == 2:
B_build2 = self.builds[1]
if B_build2 != B_build:
break
self.log.debug("Wait for the first job to be in its post-run playbook")
start = os.path.join(self.jobdir_root, A_build.uuid +
'.wait_start.flag')
for _ in iterate_timeout(30, 'job A post running'):
if os.path.exists(start):
break
self.log.debug("Allow the first job to finish")
flag_path = os.path.join(self.jobdir_root, A_build.uuid,
'wait_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
self.log.debug("Wait for the first job to finish")
for _ in iterate_timeout(30, 'job A complete'):
if A_build not in self.builds:
break
self.log.debug("Allow the restarted second job to finish")
flag_path = os.path.join(self.jobdir_root, B_build2.uuid,
'wait_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
self.waitUntilSettled()
self.assertHistory([
dict(name='wait', result='ABORTED', changes='1,1 2,1'),
dict(name='early-failure', result='FAILURE', changes='1,1'),
dict(name='wait', result='SUCCESS', changes='2,1'),
], ordered=True)
def test_pre_run_failure_retry(self):
# Test that we don't set pre_fail when a pre-run playbook fails
# (so we honor the retry logic and restart the job).
file_dict = {'pre-failure.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.log.debug("Wait for the first change to start its job")
for _ in iterate_timeout(30, 'job A started'):
if len(self.builds) == 1:
break
A_build = self.builds[0]
start = os.path.join(self.jobdir_root, A_build.uuid +
'.failure_start.flag')
for _ in iterate_timeout(30, 'job A running'):
if os.path.exists(start):
break
self.log.debug("Add a second change which will test with the first")
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.log.debug("Wait for the second change to start its job")
for _ in iterate_timeout(30, 'job B started'):
if len(self.builds) == 2:
break
B_build = self.builds[1]
start = os.path.join(self.jobdir_root, B_build.uuid +
'.wait_start.flag')
for _ in iterate_timeout(30, 'job B running'):
if os.path.exists(start):
break
self.log.debug("Continue the first job which will fail early")
flag_path = os.path.join(self.jobdir_root, A_build.uuid,
'failure_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
# From here out, allow any pre-failure job to
# continue until it has run three times
self.log.debug("Wait for all jobs to finish")
for _ in iterate_timeout(30, 'all jobs finished'):
if len(self.builds) == 1 and len(self.history) == 4:
break
for b in self.builds[:]:
if b.name == 'pre-failure':
try:
flag_path = os.path.join(self.jobdir_root, b.uuid,
'failure_continue_flag')
with open(flag_path, "w") as of:
of.write("continue")
except Exception:
self.log.debug("Unable to write flag path %s",
flag_path)
self.log.debug("Done")
self.log.debug("Wait for the second job to be aborted "
"and restarted without the first change")
for _ in iterate_timeout(30, 'job B restarted'):
if len(self.builds) == 1 and self.builds[0].name == 'wait':
B_build2 = self.builds[0]
if B_build2 != B_build:
break
self.log.debug("Wait for the second change to start its job")
start = os.path.join(self.jobdir_root, B_build2.uuid +
'.wait_start.flag')
for _ in iterate_timeout(30, 'job B running'):
if os.path.exists(start):
break
self.log.debug("Allow the restarted second job to finish")
flag_path = os.path.join(self.jobdir_root, B_build2.uuid,
'wait_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
self.waitUntilSettled()
self.assertHistory([
dict(name='pre-failure', result=None, changes='1,1'),
dict(name='pre-failure', result=None, changes='1,1'),
dict(name='pre-failure', result=None, changes='1,1'),
dict(name='wait', result='ABORTED', changes='1,1 2,1'),
dict(name='wait', result='SUCCESS', changes='2,1'),
], ordered=True)
def test_early_failure_fail_fast(self):
file_dict = {'early-failure.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.log.debug("Wait for the first change to start its job")
for _ in iterate_timeout(30, 'job A started'):
if len(self.builds) == 1:
break
A_build = self.builds[0]
start = os.path.join(self.jobdir_root, A_build.uuid +
'.failure_start.flag')
for _ in iterate_timeout(30, 'job A running'):
if os.path.exists(start):
break
self.log.debug("Add a second change which will test with the first")
B = self.fake_gerrit.addFakeChange('org/project4', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.log.debug("Wait for the second change to start its job")
for _ in iterate_timeout(30, 'job B started'):
if len(self.builds) == 3:
break
B_build = self.builds[2]
start = os.path.join(self.jobdir_root, B_build.uuid +
'.wait_start.flag')
for _ in iterate_timeout(30, 'job B running'):
if os.path.exists(start):
break
self.log.debug("Continue the first job which will fail early")
flag_path = os.path.join(self.jobdir_root, A_build.uuid,
'failure_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
self.log.debug("Wait for the second job to be aborted "
"and restarted without the first change")
for _ in iterate_timeout(30, 'job B restarted'):
if len(self.builds) == 3:
B_build2 = self.builds[2]
if B_build2 != B_build:
break
self.log.debug("Wait for the first job to be in its post-run playbook")
start = os.path.join(self.jobdir_root, A_build.uuid +
'.wait_start.flag')
for _ in iterate_timeout(30, 'job A post running'):
if os.path.exists(start):
break
self.log.debug("Allow the first job to finish")
flag_path = os.path.join(self.jobdir_root, A_build.uuid,
'wait_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
self.log.debug("Wait for the first job to finish")
for _ in iterate_timeout(30, 'job A complete'):
if A_build not in self.builds:
break
self.log.debug("Wait for the second change to start its job")
start = os.path.join(self.jobdir_root, B_build2.uuid +
'.wait_start.flag')
for _ in iterate_timeout(30, 'job B running'):
if os.path.exists(start):
break
self.log.debug("Allow the restarted second job to finish")
flag_path = os.path.join(self.jobdir_root, B_build2.uuid,
'wait_continue_flag')
self.log.debug("Writing %s", flag_path)
with open(flag_path, "w") as of:
of.write("continue")
self.waitUntilSettled()
self.assertHistory([
dict(name='wait', result='ABORTED', changes='1,1 2,1'),
dict(name='early-failure', result='FAILURE', changes='1,1'),
dict(name='wait', result='ABORTED', changes='1,1'),
dict(name='wait', result='SUCCESS', changes='2,1'),
], ordered=True)
class TestMaxNodesPerJob(AnsibleZuulTestCase):
tenant_config_file = 'config/multi-tenant/main.yaml'
    def test_max_nodes_reached(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test-job
nodeset:
nodes:
- name: node01
label: fake
- name: node02
label: fake
- name: node03
label: fake
- name: node04
label: fake
- name: node05
label: fake
- name: node06
label: fake
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('The job "test-job" exceeds tenant max-nodes-per-job 5.',
A.messages[0], "A should fail because of nodes limit")
        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertNotIn("exceeds tenant max-nodes", B.messages[0],
"B should not fail because of nodes limit")
class TestMaxTimeout(ZuulTestCase):
tenant_config_file = 'config/multi-tenant/main.yaml'
    def test_max_timeout_exceeded(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test-job
timeout: 3600
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('The job "test-job" exceeds tenant max-job-timeout',
A.messages[0], "A should fail because of timeout limit")
        B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertNotIn("exceeds tenant max-job-timeout", B.messages[0],
"B should not fail because of timeout limit")
class TestAllowedConnection(AnsibleZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/multi-tenant/main.yaml'
def test_allowed_triggers(self):
in_repo_conf = textwrap.dedent(
"""
- pipeline:
name: test
manager: independent
trigger:
github:
- event: pull_request
""")
file_dict = {'zuul.d/test.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange(
'tenant-two-config', 'master', 'A', files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
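        # tenant-two does not include the github connection in its
        # allowed-triggers, so the proposed pipeline must be rejected.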
self.assertIn(
'Unknown connection named "github"', A.messages[0],
"A should fail because of allowed-trigger")
B = self.fake_gerrit.addFakeChange(
            'tenant-one-config', 'master', 'B', files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertNotIn(
'Unknown connection named "github"', B.messages[0],
"B should not fail because of allowed-trigger")
def test_allowed_reporters(self):
in_repo_conf = textwrap.dedent(
"""
- pipeline:
name: test
manager: independent
success:
outgoing_smtp:
to: [email protected]
""")
file_dict = {'zuul.d/test.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange(
'tenant-one-config', 'master', 'A', files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn(
'Unknown connection named "outgoing_smtp"', A.messages[0],
"A should fail because of allowed-reporters")
B = self.fake_gerrit.addFakeChange(
            'tenant-two-config', 'master', 'B', files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertNotIn(
'Unknown connection named "outgoing_smtp"', B.messages[0],
"B should not fail because of allowed-reporters")
class TestAllowedLabels(AnsibleZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/multi-tenant/main.yaml'
def test_allowed_labels(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test
nodeset:
nodes:
- name: controller
label: tenant-two-label
""")
file_dict = {'zuul.d/test.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange(
'tenant-one-config', 'master', 'A', files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
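        # tenant-one restricts which node labels its jobs may use, so
        # referencing tenant-two-label must be rejected.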
self.assertIn(
'Label named "tenant-two-label" is not part of the allowed',
A.messages[0],
"A should fail because of allowed-labels")
def test_disallowed_labels(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: test
nodeset:
nodes:
- name: controller
label: tenant-one-label
""")
file_dict = {'zuul.d/test.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange(
'tenant-two-config', 'master', 'A', files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn(
'Label named "tenant-one-label" is not part of the allowed',
A.messages[0],
"A should fail because of disallowed-labels")
class TestPragma(ZuulTestCase):
tenant_config_file = 'config/pragma/main.yaml'
    # These tests fail depending on which scheduler completed the
    # tenant reconfiguration first. As the assertions are done with
    # the objects on scheduler-0, they will fail if scheduler-1
    # completed the reconfiguration first.
scheduler_count = 1
def test_no_pragma(self):
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.waitUntilSettled()
with open(os.path.join(FIXTURE_DIR,
'config/pragma/git/',
'org_project/nopragma.yaml')) as f:
config = f.read()
file_dict = {'.zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# This is an untrusted repo with 2 branches, so it should have
# an implied branch matcher for the job.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant.layout.getJobs('test-job')
self.assertEqual(len(jobs), 1)
for job in tenant.layout.getJobs('test-job'):
self.assertIsNotNone(job.branch_matcher)
def test_pragma(self):
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.waitUntilSettled()
with open(os.path.join(FIXTURE_DIR,
'config/pragma/git/',
'org_project/pragma.yaml')) as f:
config = f.read()
file_dict = {'.zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# This is an untrusted repo with 2 branches, so it would
# normally have an implied branch matcher, but our pragma
# overrides it.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant.layout.getJobs('test-job')
self.assertEqual(len(jobs), 1)
for job in tenant.layout.getJobs('test-job'):
self.assertIsNone(job.branch_matcher)
class TestPragmaMultibranch(ZuulTestCase):
tenant_config_file = 'config/pragma-multibranch/main.yaml'
def test_no_branch_matchers(self):
self.create_branch('org/project1', 'stable/pike')
self.create_branch('org/project2', 'stable/jewel')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable/pike'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'stable/jewel'))
self.waitUntilSettled()
# We want the jobs defined on the stable/pike branch of
# project1 to apply to the stable/jewel branch of project2.
# First, without the pragma line, the jobs should not run
# because in project1 they have branch matchers for pike, so
# they will not match a jewel change.
B = self.fake_gerrit.addFakeChange('org/project2', 'stable/jewel', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
# Add a pragma line to disable implied branch matchers in
# project1, so that the jobs and templates apply to both
# branches.
with open(os.path.join(FIXTURE_DIR,
'config/pragma-multibranch/git/',
'org_project1/zuul.yaml')) as f:
config = f.read()
extra_conf = textwrap.dedent(
"""
- pragma:
implied-branch-matchers: False
""")
config = extra_conf + config
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project1', 'stable/pike', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# Now verify that when we propose a change to jewel, we get
# the pike/jewel jobs.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job1', result='SUCCESS', changes='1,1'),
dict(name='test-job2', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_supplied_branch_matchers(self):
self.create_branch('org/project1', 'stable/pike')
self.create_branch('org/project2', 'stable/jewel')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable/pike'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'stable/jewel'))
self.waitUntilSettled()
# We want the jobs defined on the stable/pike branch of
# project1 to apply to the stable/jewel branch of project2.
# First, without the pragma line, the jobs should not run
# because in project1 they have branch matchers for pike, so
# they will not match a jewel change.
B = self.fake_gerrit.addFakeChange('org/project2', 'stable/jewel', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
        # Add a pragma line supplying an explicit list of implied
        # branches in project1, so that the jobs and templates apply to
        # both branches.
with open(os.path.join(FIXTURE_DIR,
'config/pragma-multibranch/git/',
'org_project1/zuul.yaml')) as f:
config = f.read()
extra_conf = textwrap.dedent(
"""
- pragma:
implied-branches:
- stable/pike
- stable/jewel
""")
config = extra_conf + config
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project1', 'stable/pike', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# Now verify that when we propose a change to jewel, we get
# the pike/jewel jobs.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job1', result='SUCCESS', changes='1,1'),
dict(name='test-job2', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestTenantImpliedBranchMatchers(ZuulTestCase):
tenant_config_file = 'config/tenant-implied-branch-matchers/main.yaml'
def test_tenant_implied_branch_matchers(self):
# Test that we can force implied branch matchers in the tenant
# config even in the case where a project only has one branch.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant.layout.getJobs('test-job')
self.assertEqual(len(jobs), 1)
        for job in jobs:
self.assertIsNotNone(job.branch_matcher)
def test_pragma_overrides_tenant_implied_branch_matchers(self):
# Test that we can force implied branch matchers off with a pragma
# even if the tenant config has it set on.
config = textwrap.dedent(
"""
- job:
name: test-job
- pragma:
implied-branch-matchers: False
""")
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
self.create_branch('org/project', 'stable/pike')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/pike'))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant.layout.getJobs('test-job')
self.assertEqual(len(jobs), 2)
        for job in jobs:
self.assertIsNone(job.branch_matcher)
class TestBaseJobs(ZuulTestCase):
tenant_config_file = 'config/base-jobs/main.yaml'
def test_multiple_base_jobs(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='my-job', result='SUCCESS', changes='1,1'),
dict(name='other-job', result='SUCCESS', changes='1,1'),
], ordered=False)
self.assertEqual(self.getJobFromHistory('my-job').
parameters['zuul']['jobtags'],
['mybase'])
self.assertEqual(self.getJobFromHistory('other-job').
parameters['zuul']['jobtags'],
['otherbase'])
def test_untrusted_base_job(self):
"""Test that a base job may not be defined in an untrusted repo"""
in_repo_conf = textwrap.dedent(
"""
- job:
name: fail-base
parent: null
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report failure")
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('Base jobs must be defined in config projects',
A.messages[0])
self.assertHistory([])
class TestSecrets(ZuulTestCase):
tenant_config_file = 'config/secrets/main.yaml'
secret = {'password': 'test-password',
'username': 'test-username'}
def _getSecrets(self, job, pbtype):
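        # Collect the secrets rendered into each playbook of the given
        # type for the named job's build; playbooks without secrets
        # contribute an empty dict so the list positions line up.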
secrets = []
build = self.getJobFromHistory(job)
for pb in getattr(build.jobdir, pbtype):
if pb.secrets_content:
secrets.append(
yamlutil.ansible_unsafe_load(pb.secrets_content))
else:
secrets.append({})
return secrets
def test_secret_branch(self):
# Test that we can use a secret defined in another branch of
# the same project.
self.create_branch('org/project2', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'stable'))
self.waitUntilSettled()
with open(os.path.join(FIXTURE_DIR,
'config/secrets/git/',
'org_project2/zuul-secret.yaml')) as f:
config = f.read()
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- job:
parent: base
name: project2-secret
run: playbooks/secret.yaml
secrets: [project2_secret]
- project:
check:
jobs:
- project2-secret
gate:
jobs:
- noop
""")
file_dict = {'zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project2', 'stable', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1, "B should report success")
self.assertHistory([
dict(name='project2-secret', result='SUCCESS', changes='2,1'),
])
self.assertEqual(
self._getSecrets('project2-secret', 'playbooks'),
[{'project2_secret': self.secret}])
def test_secret_branch_duplicate(self):
# Test that we can create a duplicate secret on a different
# branch of the same project -- i.e., that when we branch
# master to stable on a project with a secret, nothing
# changes.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project1', 'stable', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report success")
self.assertHistory([
dict(name='project1-secret', result='SUCCESS', changes='1,1'),
])
self.assertEqual(
[{'secret_name': self.secret}],
self._getSecrets('project1-secret', 'playbooks'))
def test_secret_branch_error_same_branch(self):
# Test that we are unable to define a secret twice on the same
# project-branch.
in_repo_conf = textwrap.dedent(
"""
- secret:
name: project1_secret
data: {}
- secret:
name: project1_secret
data: {}
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('already defined', A.messages[0])
def test_secret_branch_error_same_project(self):
# Test that we are unable to create a secret which differs
# from another with the same name -- i.e., that if we have a
# duplicate secret on multiple branches of the same project,
# they must be identical.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- secret:
name: project1_secret
data: {}
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'stable', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('does not match existing definition in branch master',
A.messages[0])
def test_secret_branch_error_other_project(self):
        # Test that we are unable to create a secret with the same
        # name as one in another project; duplicate secret names are
        # only allowed across branches of a single project.
in_repo_conf = textwrap.dedent(
"""
- secret:
name: project1_secret
data: {}
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('already defined in project org/project1',
A.messages[0])
def test_complex_secret(self):
# Test that we can use a complex secret
with open(os.path.join(FIXTURE_DIR,
'config/secrets/git/',
'org_project2/zuul-complex.yaml')) as f:
config = f.read()
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1, "A should report success")
self.assertHistory([
dict(name='project2-complex', result='SUCCESS', changes='1,1'),
])
secret = {'complex_secret':
{'dict': {'password': 'test-password',
'username': 'test-username'},
'list': ['one', 'test-password', 'three'],
'profile': 'cloudy'}}
self.assertEqual(
self._getSecrets('project2-complex', 'playbooks'),
[secret])
def test_blobstore_secret(self):
# Test the large secret blob store
self.executor_server.hold_jobs_in_build = True
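        # Shrink the blob size threshold to a single byte so that even
        # this small test secret is offloaded to the blob store.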
self.useFixture(fixtures.MonkeyPatch(
'zuul.model.Job.SECRET_BLOB_SIZE',
1))
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
with self.scheds.first.sched.createZKContext(None, self.log)\
as context:
bs = BlobStore(context)
self.assertEqual(len(bs), 1)
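            # While the build is held, cleanup must not remove a blob
            # that is still referenced by a live buildset.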
self.scheds.first.sched._runBlobStoreCleanup()
self.assertEqual(len(bs), 1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 1, "A should report success")
self.assertHistory([
dict(name='project1-secret', result='SUCCESS', changes='1,1'),
])
self.assertEqual(
[{'secret_name': self.secret}],
self._getSecrets('project1-secret', 'playbooks'))
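        # Once the build has completed, nothing references the blob
        # any longer, so cleanup should remove it.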
self.scheds.first.sched._runBlobStoreCleanup()
self.assertEqual(len(bs), 0)
class TestSecretInheritance(ZuulTestCase):
tenant_config_file = 'config/secret-inheritance/main.yaml'
def _getSecrets(self, job, pbtype):
secrets = []
build = self.getJobFromHistory(job)
for pb in getattr(build.jobdir, pbtype):
if pb.secrets_content:
secrets.append(
yamlutil.ansible_unsafe_load(pb.secrets_content))
else:
secrets.append({})
return secrets
def _checkTrustedSecrets(self):
secret = {'longpassword': 'test-passwordtest-password',
'password': 'test-password',
'username': 'test-username'}
base_secret = {'username': 'base-username'}
self.assertEqual(
self._getSecrets('trusted-secrets', 'playbooks'),
[{'trusted_secret': secret}])
self.assertEqual(
self._getSecrets('trusted-secrets', 'pre_playbooks'),
[{'base_secret': base_secret}])
self.assertEqual(
self._getSecrets('trusted-secrets', 'post_playbooks'), [])
self.assertEqual(
self._getSecrets('trusted-secrets-trusted-child',
'playbooks'), [{}])
self.assertEqual(
self._getSecrets('trusted-secrets-trusted-child',
'pre_playbooks'),
[{'base_secret': base_secret}])
self.assertEqual(
self._getSecrets('trusted-secrets-trusted-child',
'post_playbooks'), [])
self.assertEqual(
self._getSecrets('trusted-secrets-untrusted-child',
'playbooks'), [{}])
self.assertEqual(
self._getSecrets('trusted-secrets-untrusted-child',
'pre_playbooks'),
[{'base_secret': base_secret}])
self.assertEqual(
self._getSecrets('trusted-secrets-untrusted-child',
'post_playbooks'), [])
def _checkUntrustedSecrets(self):
secret = {'longpassword': 'test-passwordtest-password',
'password': 'test-password',
'username': 'test-username'}
base_secret = {'username': 'base-username'}
self.assertEqual(
self._getSecrets('untrusted-secrets', 'playbooks'),
[{'untrusted-secret': secret}])
self.assertEqual(
self._getSecrets('untrusted-secrets', 'pre_playbooks'),
[{'base-secret': base_secret}])
self.assertEqual(
self._getSecrets('untrusted-secrets', 'post_playbooks'), [])
self.assertEqual(
self._getSecrets('untrusted-secrets-trusted-child',
'playbooks'), [{}])
self.assertEqual(
self._getSecrets('untrusted-secrets-trusted-child',
'pre_playbooks'),
[{'base-secret': base_secret}])
self.assertEqual(
self._getSecrets('untrusted-secrets-trusted-child',
'post_playbooks'), [])
self.assertEqual(
self._getSecrets('untrusted-secrets-untrusted-child',
'playbooks'), [{}])
self.assertEqual(
self._getSecrets('untrusted-secrets-untrusted-child',
'pre_playbooks'),
[{'base-secret': base_secret}])
self.assertEqual(
self._getSecrets('untrusted-secrets-untrusted-child',
'post_playbooks'), [])
def test_trusted_secret_inheritance_check(self):
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='trusted-secrets', result='SUCCESS', changes='1,1'),
dict(name='trusted-secrets-trusted-child',
result='SUCCESS', changes='1,1'),
dict(name='trusted-secrets-untrusted-child',
result='SUCCESS', changes='1,1'),
], ordered=False)
self._checkTrustedSecrets()
def test_untrusted_secret_inheritance_check(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # This configuration tries to run untrusted secrets in a
        # non-post-review pipeline and should therefore run no jobs.
self.assertHistory([])
class TestSecretPassToParent(ZuulTestCase):
tenant_config_file = 'config/pass-to-parent/main.yaml'
def _getSecrets(self, job, pbtype):
secrets = []
build = self.getJobFromHistory(job)
for pb in getattr(build.jobdir, pbtype):
if pb.secrets_content:
secrets.append(
yamlutil.ansible_unsafe_load(pb.secrets_content))
else:
secrets.append({})
return secrets
def test_secret_no_pass_to_parent(self):
# Test that secrets are not available in the parent if
# pass-to-parent is not set.
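        # An empty marker file; the fixture config presumably selects
        # which job runs based on the files touched by the change.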
file_dict = {'no-pass.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertHistory([
dict(name='no-pass', result='SUCCESS', changes='1,1'),
])
self.assertEqual(
self._getSecrets('no-pass', 'playbooks'),
[{'parent_secret': {'password': 'password3'}}])
self.assertEqual(
self._getSecrets('no-pass', 'pre_playbooks'),
[{'parent_secret': {'password': 'password3'}}])
self.assertEqual(
self._getSecrets('no-pass', 'post_playbooks'),
[{'parent_secret': {'password': 'password3'}}])
def test_secret_pass_to_parent(self):
# Test that secrets are available in the parent if
# pass-to-parent is set.
file_dict = {'pass.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertHistory([
dict(name='pass', result='SUCCESS', changes='1,1'),
])
self.assertEqual(
self._getSecrets('pass', 'playbooks'),
[{'parent_secret': {'password': 'password3'},
'secret1': {'password': 'password1'},
'secret2': {'password': 'password2'}}])
self.assertEqual(
self._getSecrets('pass', 'pre_playbooks'),
[{'parent_secret': {'password': 'password3'},
'secret1': {'password': 'password1'},
'secret2': {'password': 'password2'}}])
self.assertEqual(
self._getSecrets('pass', 'post_playbooks'),
[{'parent_secret': {'password': 'password3'},
'secret1': {'password': 'password1'},
'secret2': {'password': 'password2'}}])
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='pass', result='SUCCESS', changes='1,1'),
])
self.assertIn('does not allow post-review', B.messages[0])
def test_secret_pass_to_parent_missing(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: parent-job-without-secret
pre-run: playbooks/pre.yaml
run: playbooks/run.yaml
post-run: playbooks/post.yaml
- job:
name: test-job
parent: trusted-parent-job-without-secret
secrets:
- name: my_secret
secret: missing-secret
pass-to-parent: true
- project:
check:
jobs:
- test-job
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('Secret missing-secret not found', A.messages[0])
def test_secret_override(self):
# Test that secrets passed to parents don't override existing
# secrets.
file_dict = {'override.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertHistory([
dict(name='override', result='SUCCESS', changes='1,1'),
])
self.assertEqual(
self._getSecrets('override', 'playbooks'),
[{'parent_secret': {'password': 'password3'},
'secret1': {'password': 'password1'},
'secret2': {'password': 'password2'}}])
self.assertEqual(
self._getSecrets('override', 'pre_playbooks'),
[{'parent_secret': {'password': 'password3'},
'secret1': {'password': 'password1'},
'secret2': {'password': 'password2'}}])
self.assertEqual(
self._getSecrets('override', 'post_playbooks'),
[{'parent_secret': {'password': 'password3'},
'secret1': {'password': 'password1'},
'secret2': {'password': 'password2'}}])
def test_secret_ptp_trusted_untrusted(self):
        # Test that if we pass a secret to a parent and one of the
        # parents is untrusted, the job becomes post-review.
file_dict = {'trusted-under-untrusted.txt': ''}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertHistory([
dict(name='trusted-under-untrusted',
result='SUCCESS', changes='1,1'),
])
self.assertEqual(
self._getSecrets('trusted-under-untrusted', 'playbooks'),
[{'secret': {'password': 'trustedpassword1'}}])
self.assertEqual(
self._getSecrets('trusted-under-untrusted', 'pre_playbooks'),
[{'secret': {'password': 'trustedpassword1'}}])
self.assertEqual(
self._getSecrets('trusted-under-untrusted', 'post_playbooks'),
[{'secret': {'password': 'trustedpassword1'}}])
B = self.fake_gerrit.addFakeChange('common-config', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='trusted-under-untrusted',
result='SUCCESS', changes='1,1'),
])
self.assertIn('does not allow post-review', B.messages[0])
def test_secret_ptp_trusted_trusted(self):
        # Test that if we pass a secret to a parent and all of the
        # parents are trusted, the job does not become post-review.
file_dict = {'trusted-under-trusted.txt': ''}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertHistory([
dict(name='trusted-under-trusted',
result='SUCCESS', changes='1,1'),
])
self.assertEqual(
self._getSecrets('trusted-under-trusted', 'playbooks'),
[{'secret': {'password': 'trustedpassword1'}}])
self.assertEqual(
self._getSecrets('trusted-under-trusted', 'pre_playbooks'),
[{'secret': {'password': 'trustedpassword1'}}])
self.assertEqual(
self._getSecrets('trusted-under-trusted', 'post_playbooks'),
[{'secret': {'password': 'trustedpassword1'}}])
B = self.fake_gerrit.addFakeChange('common-config', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='trusted-under-trusted',
result='SUCCESS', changes='1,1'),
dict(name='trusted-under-trusted',
result='SUCCESS', changes='2,1'),
])
class TestSecretLeaks(AnsibleZuulTestCase):
tenant_config_file = 'config/secret-leaks/main.yaml'
def searchForContent(self, path, content):
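        # Walk the tree at path and return the relative paths of all
        # files whose contents contain the given byte string.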
matches = []
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
with open(filepath, 'rb') as f:
if content in f.read():
matches.append(filepath[len(path):])
return matches
def _test_secret_file(self):
# Or rather -- test that they *don't* leak.
# Keep the jobdir around so we can inspect contents.
self.executor_server.keep_jobdir = True
conf = textwrap.dedent(
"""
- project:
name: org/project
check:
jobs:
- secret-file
""")
file_dict = {'.zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='secret-file', result='SUCCESS', changes='1,1'),
], ordered=False)
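        # The decrypted secret should appear only in the file the job
        # deliberately wrote, not in logs, playbooks, or the inventory.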
matches = self.searchForContent(self.history[0].jobdir.root,
b'test-password')
self.assertEqual(set(['/work/secret-file.txt']),
set(matches))
def test_secret_file(self):
self._test_secret_file()
def test_secret_file_verbose(self):
# Output extra ansible info to exercise alternate logging code
# paths.
self.executor_server.verbose = True
self._test_secret_file()
def _test_secret_file_fail(self):
# Or rather -- test that they *don't* leak.
# Keep the jobdir around so we can inspect contents.
self.executor_server.keep_jobdir = True
conf = textwrap.dedent(
"""
- project:
name: org/project
check:
jobs:
- secret-file-fail
""")
file_dict = {'.zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='secret-file-fail', result='FAILURE', changes='1,1'),
], ordered=False)
matches = self.searchForContent(self.history[0].jobdir.root,
b'test-password')
self.assertEqual(set(['/work/failure-file.txt']),
set(matches))
def test_secret_file_fail(self):
self._test_secret_file_fail()
def test_secret_file_fail_verbose(self):
# Output extra ansible info to exercise alternate logging code
# paths.
self.executor_server.verbose = True
self._test_secret_file_fail()
class TestParseErrors(AnsibleZuulTestCase):
tenant_config_file = 'config/parse-errors/main.yaml'
def searchForContent(self, path, content):
matches = []
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
with open(filepath, 'rb') as f:
if content in f.read():
matches.append(filepath[len(path):])
return matches
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_parse_error_leak(self):
# Test that parse errors don't leak inventory information
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1'),
])
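        # The canary string should be found only in the trusted config
        # file that contains it; the parse error must not have copied
        # it anywhere else in the jobdir.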
matches = self.searchForContent(self.history[0].jobdir.root,
b'xyzzy')
self.assertEqual(set([
'/trusted/project_0/review.example.com/common-config/zuul.yaml']),
set(matches))
class TestNodesets(ZuulTestCase):
tenant_config_file = 'config/nodesets/main.yaml'
def test_nodeset_branch(self):
# Test that we can use a nodeset defined in another branch of
# the same project.
self.create_branch('org/project2', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'stable'))
self.waitUntilSettled()
with open(os.path.join(FIXTURE_DIR,
'config/nodesets/git/',
'org_project2/zuul-nodeset.yaml')) as f:
config = f.read()
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- job:
parent: base
name: project2-test
nodeset: project2-nodeset
- project:
check:
jobs:
- project2-test
gate:
jobs:
- noop
""")
file_dict = {'zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project2', 'stable', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1, "B should report success")
self.assertHistory([
dict(name='project2-test', result='SUCCESS', changes='2,1',
node='ubuntu-xenial'),
])
def test_nodeset_branch_duplicate(self):
# Test that we can create a duplicate nodeset on a different
# branch of the same project -- i.e., that when we branch
# master to stable on a project with a nodeset, nothing
# changes.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project1', 'stable', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report success")
self.assertHistory([
dict(name='project1-test', result='SUCCESS', changes='1,1',
node='ubuntu-xenial'),
])
def test_nodeset_branch_error_same_branch(self):
# Test that we are unable to define a nodeset twice on the same
# project-branch.
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: project1-nodeset
nodes: []
- nodeset:
name: project1-nodeset
nodes: []
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('already defined', A.messages[0])
def test_nodeset_branch_error_same_project(self):
# Test that we are unable to create a nodeset which differs
# from another with the same name -- i.e., that if we have a
# duplicate nodeset on multiple branches of the same project,
# they must be identical.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: project1-nodeset
nodes: []
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'stable', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('does not match existing definition in branch master',
A.messages[0])
def test_nodeset_branch_error_other_project(self):
        # Test that we are unable to create a nodeset with the same
        # name as one in another project; duplicate nodeset names are
        # only allowed across branches of a single project.
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: project1-nodeset
nodes: []
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('already defined in project org/project1',
A.messages[0])
class TestSemaphoreBranches(ZuulTestCase):
tenant_config_file = 'config/semaphore-branches/main.yaml'
def test_semaphore_branch(self):
# Test that we can use a semaphore defined in another branch of
# the same project.
self.create_branch('org/project2', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'stable'))
self.waitUntilSettled()
with open(os.path.join(FIXTURE_DIR,
'config/semaphore-branches/git/',
'org_project2/zuul-semaphore.yaml')) as f:
config = f.read()
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- job:
parent: base
name: project2-test
semaphore: project2-semaphore
- project:
check:
jobs:
- project2-test
gate:
jobs:
- noop
""")
file_dict = {'zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project2', 'stable', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1, "B should report success")
self.assertHistory([
dict(name='project2-test', result='SUCCESS', changes='2,1')
])
def test_semaphore_branch_duplicate(self):
# Test that we can create a duplicate semaphore on a different
# branch of the same project -- i.e., that when we branch
# master to stable on a project with a semaphore, nothing
# changes.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project1', 'stable', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1,
"A should report success")
self.assertHistory([
dict(name='project1-test', result='SUCCESS', changes='1,1')
])
def test_semaphore_branch_error_same_branch(self):
# Test that we are unable to define a semaphore twice on the same
# project-branch.
in_repo_conf = textwrap.dedent(
"""
- semaphore:
name: project1-semaphore
max: 2
- semaphore:
name: project1-semaphore
max: 2
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('already defined', A.messages[0])
def test_semaphore_branch_error_same_project(self):
# Test that we are unable to create a semaphore which differs
# from another with the same name -- i.e., that if we have a
# duplicate semaphore on multiple branches of the same project,
# they must be identical.
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- semaphore:
name: project1-semaphore
max: 4
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'stable', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('does not match existing definition in branch master',
A.messages[0])
def test_semaphore_branch_error_other_project(self):
        # Test that we are unable to create a semaphore with the same
        # name as one in another project; duplicate semaphore names are
        # only allowed across branches of a single project.
in_repo_conf = textwrap.dedent(
"""
- semaphore:
name: project1-semaphore
max: 2
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('already defined in project org/project1',
A.messages[0])
class TestJobOutput(AnsibleZuulTestCase):
tenant_config_file = 'config/job-output/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_job_output_split_streams(self):
# Verify that command standard output appears in the job output,
# and that failures in the final playbook get logged.
# This currently only verifies we receive output from
# localhost. Notably, it does not verify we receive output
# via zuul_console streaming.
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project4', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='job-output-split-streams',
result='SUCCESS', changes='1,1'),
], ordered=False)
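        # The test playbook writes these unique tokens to stdout and
        # stderr; search for them to verify each stream was captured.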
token_stdout = "Standard output test {}".format(
self.history[0].jobdir.src_root)
token_stderr = "Standard error test {}".format(
self.history[0].jobdir.src_root)
j = json.loads(self._get_file(self.history[0],
'work/logs/job-output.json'))
result = j[0]['plays'][0]['tasks'][0]['hosts']['test_node']
self.assertEqual(token_stdout, result['stdout'])
self.assertEqual(token_stderr, result['stderr'])
job_output = self._get_file(self.history[0],
'work/logs/job-output.txt')
self.log.info(job_output)
self.assertIn(token_stdout, job_output)
self.assertIn(token_stderr, job_output)
def test_job_output(self):
# Verify that command standard output appears in the job output,
# and that failures in the final playbook get logged.
# This currently only verifies we receive output from
# localhost. Notably, it does not verify we receive output
# via zuul_console streaming.
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='job-output', result='SUCCESS', changes='1,1'),
], ordered=False)
token_stdout = "Standard output test {}".format(
self.history[0].jobdir.src_root)
token_stderr = "Standard error test {}".format(
self.history[0].jobdir.src_root)
j = json.loads(self._get_file(self.history[0],
'work/logs/job-output.json'))
result = j[0]['plays'][0]['tasks'][0]['hosts']['test_node']
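        # With combined streams, stderr output is folded into stdout
        # and the stderr field is left empty.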
self.assertEqual("\n".join((token_stdout, token_stderr)),
result['stdout'])
self.assertEqual("", result['stderr'])
self.assertTrue(j[0]['plays'][0]['tasks'][1]
['hosts']['test_node']['skipped'])
self.assertTrue(j[0]['plays'][0]['tasks'][2]
['hosts']['test_node']['failed'])
self.assertEqual(
"This is a handler",
j[0]['plays'][0]['tasks'][3]
['hosts']['test_node']['stdout'])
job_output = self._get_file(self.history[0],
'work/logs/job-output.txt')
self.log.info(job_output)
self.assertIn(token_stdout, job_output)
self.assertIn(token_stderr, job_output)
def test_job_output_missing_role(self):
# Verify that ansible errors such as missing roles are part of the
# buildlog.
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='job-output-missing-role', result='FAILURE',
changes='1,1'),
dict(name='job-output-missing-role-include', result='FAILURE',
changes='1,1'),
], ordered=False)
for history in self.history:
job_output = self._get_file(history,
'work/logs/job-output.txt')
self.assertIn('the role \'not_existing\' was not found',
job_output)
def test_job_output_failure_log_split_streams(self):
logger = logging.getLogger('zuul.AnsibleJob')
output = io.StringIO()
logger.addHandler(logging.StreamHandler(output))
# Verify that a failure in the last post playbook emits the contents
# of the json output to the log
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project5', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='job-output-failure-split-streams',
result='POST_FAILURE', changes='1,1'),
], ordered=False)
token_stdout = "Standard output test {}".format(
self.history[0].jobdir.src_root)
token_stderr = "Standard error test {}".format(
self.history[0].jobdir.src_root)
json_output = self._get_file(self.history[0],
'work/logs/job-output.json')
self.log.info(json_output)
j = json.loads(json_output)
result = j[0]['plays'][0]['tasks'][0]['hosts']['test_node']
self.assertEqual(token_stdout, result['stdout'])
self.assertEqual(token_stderr, result['stderr'])
job_output = self._get_file(self.history[0],
'work/logs/job-output.txt')
self.log.info(job_output)
self.assertIn(token_stdout, job_output)
self.assertIn(token_stderr, job_output)
log_output = output.getvalue()
self.assertIn('Final playbook failed', log_output)
self.assertIn('Failure stdout test', log_output)
self.assertIn('Failure stderr test', log_output)
def test_job_output_failure_log(self):
logger = logging.getLogger('zuul.AnsibleJob')
output = io.StringIO()
logger.addHandler(logging.StreamHandler(output))
# Verify that a failure in the last post playbook emits the contents
# of the json output to the log
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='job-output-failure',
result='POST_FAILURE', changes='1,1'),
], ordered=False)
token_stdout = "Standard output test {}".format(
self.history[0].jobdir.src_root)
token_stderr = "Standard error test {}".format(
self.history[0].jobdir.src_root)
json_output = self._get_file(self.history[0],
'work/logs/job-output.json')
self.log.info(json_output)
j = json.loads(json_output)
result = j[0]['plays'][0]['tasks'][0]['hosts']['test_node']
self.assertEqual("\n".join((token_stdout, token_stderr)),
result['stdout'])
self.assertEqual("", result['stderr'])
job_output = self._get_file(self.history[0],
'work/logs/job-output.txt')
self.log.info(job_output)
self.assertIn(token_stdout, job_output)
self.assertIn(token_stderr, job_output)
log_output = output.getvalue()
self.assertIn('Final playbook failed', log_output)
self.assertIn('Failure stdout test', log_output)
self.assertIn('Failure stderr test', log_output)
def test_job_POST_FAILURE_reports_statsd(self):
"""Test that POST_FAILURES output job stats."""
self.statsd.clear()
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='job-output-failure',
result='POST_FAILURE', changes='1,1'),
], ordered=False)
post_failure_stat = 'zuul.tenant.tenant-one.pipeline.check.project.' \
'review_example_com.org_project2.master.job.' \
'job-output-failure.POST_FAILURE'
self.assertReportedStat(post_failure_stat, value='1', kind='c')
self.assertReportedStat(post_failure_stat, kind='ms')
class TestNoLog(AnsibleZuulTestCase):
tenant_config_file = 'config/ansible-no-log/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_no_log_unreachable(self):
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
json_log = self._get_file(self.history[0], 'work/logs/job-output.json')
text_log = self._get_file(self.history[0], 'work/logs/job-output.txt')
self.assertNotIn('my-very-secret-password-1', json_log)
self.assertNotIn('my-very-secret-password-2', json_log)
self.assertNotIn('my-very-secret-password-1', text_log)
self.assertNotIn('my-very-secret-password-2', text_log)
class TestJsonStringResults(AnsibleZuulTestCase):
tenant_config_file = 'config/ansible-json-string-results/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_ansible_json_string_results(self):
"""Test modules that return string results are captured
        The yum/dnf modules are seemingly almost unique in setting
"results" in their module return value to a list of strings
(other things might too, but not many other built-in
components). Confusingly, when using loops in ansible the
output also has a "results" which is a list of dicts with
return values from each iteration.
        The zuul_json callback handler needs to deal with both; we've
        broken this before when making changes to its results parsing.
This test fakes some string return values like the yum modules
do, and ensures they are captured.
"""
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
json_log = self._get_file(self.history[0], 'work/logs/job-output.json')
text_log = self._get_file(self.history[0], 'work/logs/job-output.txt')
self.assertIn('if you see this string, it is working', json_log)
        # Note the text log doesn't include the detail of the returned
        # results, just the msg field, hence the following "not in"
self.assertNotIn('if you see this string, it is working', text_log)
self.assertIn('A plugin message', text_log)
# no_log checking
self.assertNotIn('this is a secret string', json_log)
self.assertNotIn('this is a secret string', text_log)
class TestUnreachable(AnsibleZuulTestCase):
tenant_config_file = 'config/ansible-unreachable/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_unreachable(self):
self.wait_timeout = 120
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# The result must be retry limit because jobs with unreachable nodes
# will be retried.
self.assertIn('RETRY_LIMIT', A.messages[0])
self.assertHistory([
dict(name='pre-unreachable', result=None, changes='1,1'),
dict(name='pre-unreachable', result=None, changes='1,1'),
dict(name='run-unreachable', result=None, changes='1,1'),
dict(name='run-unreachable', result=None, changes='1,1'),
dict(name='post-unreachable', result=None, changes='1,1'),
dict(name='post-unreachable', result=None, changes='1,1'),
], ordered=False)
unreachable_log = self._get_file(self.history[0],
'.ansible/nodes.unreachable')
self.assertEqual('fake\n', unreachable_log)
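        # Each job ran twice; the first attempt of each should have
        # been flagged for retry, while the final attempt should not.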
retried_builds = set()
for build in self.history:
will_retry_flag = os.path.join(
self.jobdir_root, f'{build.uuid}.will-retry.flag')
self.assertTrue(os.path.exists(will_retry_flag))
with open(will_retry_flag) as f:
will_retry = f.readline()
expect_retry = build.name not in retried_builds
self.assertEqual(str(expect_retry), will_retry)
retried_builds.add(build.name)
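        # The builds recorded in the database should carry the reason
        # for the retries as their error detail.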
conn = self.scheds.first.sched.sql.connection
for build in conn.getBuilds():
self.assertEqual(build.error_detail, 'Host unreachable')
class TestJobPause(AnsibleZuulTestCase):
tenant_config_file = 'config/job-pause/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_job_pause(self):
"""
compile1
+--> compile2
| +--> test-after-compile2
+--> test1-after-compile1
+--> test2-after-compile1
test-good
test-fail
"""
self.wait_timeout = 120
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
for _ in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
self.assertHistory([
dict(name='test-fail', result='FAILURE', changes='1,1'),
dict(name='test-good', result='SUCCESS', changes='1,1'),
dict(name='test1-after-compile1', result='SUCCESS', changes='1,1'),
dict(name='test2-after-compile1', result='SUCCESS', changes='1,1'),
dict(name='test-after-compile2', result='SUCCESS', changes='1,1'),
dict(name='compile2', result='SUCCESS', changes='1,1'),
dict(name='compile1', result='SUCCESS', changes='1,1'),
], ordered=False)
# The order of some of these tests is not deterministic so check that
# the last two are compile2, compile1 in this order.
history_compile1 = self.history[-1]
history_compile2 = self.history[-2]
self.assertEqual('compile1', history_compile1.name)
self.assertEqual('compile2', history_compile2.name)
def test_job_pause_retry(self):
"""
Tests that a paused job that gets lost due to an executor restart is
retried together with all child jobs.
        This test will wait until compile1 is paused and then abort it
        by stopping its job worker. The expectation is that all child
        jobs are retried even if they were already successful.
compile1 --+
+--> test1-after-compile1
+--> test2-after-compile1
+--> compile2 --+
+--> test-after-compile2
test-good
test-fail
"""
self.wait_timeout = 120
self.executor_server.hold_jobs_in_build = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled("patchset uploaded")
self.executor_server.release('test-.*')
self.executor_server.release('compile1')
self.waitUntilSettled("released compile1")
# test-fail and test-good must be finished by now
self.assertHistory([
dict(name='test-fail', result='FAILURE', changes='1,1'),
dict(name='test-good', result='SUCCESS', changes='1,1'),
], ordered=False)
        # Furthermore, compile1 must be in a paused state with its three
        # children in the queue. waitUntilSettled can return either
        # directly after the job pause or after the child jobs are
        # enqueued, so to make this deterministic we wait for the child
        # jobs here.
for _ in iterate_timeout(60, 'waiting for child jobs'):
if len(self.builds) == 4:
break
self.waitUntilSettled("child jobs are running")
compile1 = self.builds[0]
self.assertTrue(compile1.paused)
        # Now resume the compile2 subtree so we can later check that
        # all of its children restarted.
self.executor_server.release('compile2')
for _ in iterate_timeout(60, 'waiting for child jobs'):
if len(self.builds) == 5:
break
self.waitUntilSettled("release compile2")
self.executor_server.release('test-after-compile2')
self.waitUntilSettled("release test-after-compile2")
self.executor_server.release('compile2')
self.waitUntilSettled("release compile2 again")
self.assertHistory([
dict(name='test-fail', result='FAILURE', changes='1,1'),
dict(name='test-good', result='SUCCESS', changes='1,1'),
dict(name='compile2', result='SUCCESS', changes='1,1'),
dict(name='test-after-compile2', result='SUCCESS', changes='1,1'),
], ordered=False)
# Stop the job worker of compile1 to simulate an executor restart
for job_worker in self.executor_server.job_workers.values():
if job_worker.build_request.uuid == compile1.unique:
job_worker.stop()
self.waitUntilSettled("Stop job")
# Only compile1 must be waiting
for _ in iterate_timeout(60, 'waiting for compile1 job'):
if len(self.builds) == 1:
break
self.waitUntilSettled("only compile1 is running")
self.assertBuilds([dict(name='compile1', changes='1,1')])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled("global release")
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
        for _ in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
self.assertHistory([
dict(name='test-fail', result='FAILURE', changes='1,1'),
dict(name='test-good', result='SUCCESS', changes='1,1'),
dict(name='compile2', result='SUCCESS', changes='1,1'),
dict(name='compile2', result='SUCCESS', changes='1,1'),
dict(name='test-after-compile2', result='SUCCESS', changes='1,1'),
dict(name='test-after-compile2', result='SUCCESS', changes='1,1'),
dict(name='compile1', result='ABORTED', changes='1,1'),
dict(name='compile1', result='SUCCESS', changes='1,1'),
dict(name='test1-after-compile1', result='ABORTED', changes='1,1'),
dict(name='test2-after-compile1', result='ABORTED', changes='1,1'),
dict(name='test1-after-compile1', result='SUCCESS', changes='1,1'),
dict(name='test2-after-compile1', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_job_pause_fail(self):
"""
Test that only succeeding jobs are allowed to pause.
compile-fail
+--> after-compile
"""
A = self.fake_gerrit.addFakeChange('org/project4', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='compile-fail', result='FAILURE', changes='1,1'),
])
def test_job_node_failure_resume(self):
self.wait_timeout = 120
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
# Second node request should fail
fail = {'_oid': '199-0000000001'}
self.fake_nodepool.addFailRequest(fail)
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
        for _ in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
self.assertEqual([], self.builds)
self.assertHistory([
dict(name='just-pause', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_job_reconfigure_resume(self):
"""
Tests that a paused job is resumed after reconfiguration
"""
self.wait_timeout = 120
# Output extra ansible info so we might see errors.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project6', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1, 'compile in progress')
self.executor_server.release('compile')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2, 'compile and test in progress')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.release('test')
self.waitUntilSettled()
self.assertHistory([
dict(name='compile', result='SUCCESS', changes='1,1'),
dict(name='test', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_job_pause_skipped_child(self):
"""
Tests that a paused job is resumed with externally skipped jobs.
Tests that this situation won't lead to stuck buildsets.
Compile pauses before pre-test fails.
1. compile (pauses) --+
|
+--> test (skipped because of pre-test)
|
2. pre-test (fails) --+
"""
self.wait_timeout = 120
self.executor_server.hold_jobs_in_build = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('compile')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='pre-test', result='FAILURE', changes='1,1'),
dict(name='compile', result='SUCCESS', changes='1,1'),
])
        self.assertIn('Skipped due to failed job pre-test', A.messages[0])
def test_job_pause_pre_skipped_child(self):
"""
Tests that a paused job is resumed with pre-existing skipped jobs.
Tests that this situation won't lead to stuck buildsets.
The pre-test fails before compile pauses so test is already skipped
when compile pauses.
1. pre-test (fails) --+
|
+--> test (skipped because of pre-test)
|
2. compile (pauses) --+
"""
self.wait_timeout = 120
self.executor_server.hold_jobs_in_build = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('pre-test')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
        for _ in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
self.assertHistory([
dict(name='pre-test', result='FAILURE', changes='1,1'),
dict(name='compile', result='SUCCESS', changes='1,1'),
])
        self.assertIn('Skipped due to failed job pre-test', A.messages[0])
def test_job_pause_skipped_child_retry(self):
"""
Tests that a paused job is resumed with skipped jobs and retries.
Tests that this situation won't lead to stuck buildsets.
1. cache pauses
2. skip-upload skips upload
        3. test does a retry which resets upload; upload must get
           skipped again during the reset process because skip-upload
           skipped it.
cache (pauses) -+
|
|
+--> test (retries) -----------+
|
+--> upload (skipped)
|
+--> prepare-upload (skipped) -+
|
skip-upload ----+
"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project5', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
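        # Let cache run to its pause point, then let skip-upload finish
        # so that upload (and prepare-upload) end up skipped.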
self.executor_server.release('cache')
self.waitUntilSettled()
self.executor_server.release('skip-upload')
self.waitUntilSettled()
# Stop the job worker of test to simulate an executor restart
job_test = self.builds[1]
for job_worker in self.executor_server.job_workers.values():
if job_worker.build_request.uuid == job_test.uuid:
job_worker.stop()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# All builds must be finished by now
self.assertEqual(len(self.builds), 0, 'All builds must be finished')
# upload must not be run as this should have been skipped
self.assertHistory([
dict(name='skip-upload', result='SUCCESS', changes='1,1'),
dict(name='test', result='ABORTED', changes='1,1'),
dict(name='test', result='SUCCESS', changes='1,1'),
dict(name='cache', result='SUCCESS', changes='1,1'),
])
class TestJobPausePostFail(AnsibleZuulTestCase):
tenant_config_file = 'config/job-pause2/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_job_pause_post_fail(self):
"""Tests that a parent job which has a post failure does not
retroactively set its child job's result to SKIPPED.
compile
+--> test
"""
# Output extra ansible info so we might see errors.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
        for _ in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
self.assertHistory([
dict(name='test', result='SUCCESS', changes='1,1'),
dict(name='compile', result='POST_FAILURE', changes='1,1'),
])
class TestContainerJobs(AnsibleZuulTestCase):
tenant_config_file = "config/container-build-resources/main.yaml"
def test_container_jobs(self):
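        # Point the Kubernetes port-forwarding helper at a fixture
        # script so the test does not need a real kubectl or cluster.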
self.patch(zuul.executor.server.KubeFwd,
'kubectl_command',
os.path.join(FIXTURE_DIR, 'fake_kubectl.sh'))
def noop(*args, **kw):
return 1, 0
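        # Stub out the Ansible variable-freeze run with a canned result
        # so these jobs don't need a reachable node.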
self.patch(zuul.executor.server.AnsibleJob,
'runAnsibleFreeze',
noop)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='container-machine', result='SUCCESS', changes='1,1'),
dict(name='container-native', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestProvidesRequiresPause(AnsibleZuulTestCase):
tenant_config_file = "config/provides-requires-pause/main.yaml"
def test_provides_requires_pause(self):
# Changes share a queue, with both running at the same time.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
# Release image-build, it should cause both instances of
# image-user to run.
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
for _ in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='image-user', result='SUCCESS', changes='1,1'),
dict(name='image-user', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
build = self.getJobFromHistory('image-user', project='org/project2')
self.assertEqual(
build.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image',
'name': 'image',
}])
class TestProvidesRequiresBuildset(ZuulTestCase):
tenant_config_file = "config/provides-requires-buildset/main.yaml"
def test_provides_requires_buildset(self):
# Changes share a queue, with both running at the same time.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
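        # Simulate the image-builder job returning artifact data, as a
        # real build would via zuul_return.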
self.executor_server.returnData(
'image-builder', A,
{'zuul':
{'artifacts': [
{'name': 'image',
'url': 'http://example.com/image',
'metadata': {
'type': 'container_image'
}},
]}}
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='image-user', result='SUCCESS', changes='1,1'),
])
build = self.getJobFromHistory('image-user', project='org/project1')
self.assertEqual(
build.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'branch': 'master',
'job': 'image-builder',
'url': 'http://example.com/image',
'name': 'image',
'metadata': {
'type': 'container_image',
}
}])
def test_provides_with_tag_requires_buildset(self):
self.executor_server.hold_jobs_in_build = True
event = self.fake_gerrit.addFakeTag('org/project1', 'master', 'foo')
self.executor_server.returnData(
'image-builder', event,
{'zuul':
{'artifacts': [
{'name': 'image',
'url': 'http://example.com/image',
'metadata': {
'type': 'container_image'
}},
]}}
)
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', ref='refs/tags/foo'),
dict(name='image-user', result='SUCCESS', ref='refs/tags/foo'),
])
build = self.getJobFromHistory('image-user', project='org/project1')
self.assertEqual(
build.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'ref': 'refs/tags/foo',
'tag': 'foo',
'oldrev': event['refUpdate']['oldRev'],
'newrev': event['refUpdate']['newRev'],
'job': 'image-builder',
'url': 'http://example.com/image',
'name': 'image',
'metadata': {
'type': 'container_image',
}
}])
class TestProvidesRequiresMysql(ZuulTestCase):
config_file = "zuul-sql-driver-mysql.conf"
@simple_layout('layouts/provides-requires.yaml')
def test_provides_requires_shared_queue_fast(self):
# Changes share a queue, but with only one job, the first
# merges before the second starts.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.returnData(
'image-builder', A,
{'zuul':
{'artifacts': [
{'name': 'image',
'url': 'http://example.com/image',
'metadata': {
'type': 'container_image'
}},
]}}
)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='image-user', result='SUCCESS', changes='1,1 2,1'),
])
# Data are not passed in this instance because the builder
# change merges before the user job runs.
self.assertFalse('artifacts' in self.history[-1].parameters['zuul'])
@simple_layout('layouts/provides-requires-two-jobs.yaml')
def test_provides_requires_shared_queue_slow(self):
# Changes share a queue, with both running at the same time.
        # This also seems to be a de facto waiting_status test since it
        # exercises so many of the statuses.
self.hold_merge_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.returnData(
'image-builder', A,
{'zuul':
{'artifacts': [
{'name': 'image', 'url': 'http://example.com/image',
'metadata': {'type': 'container_image'}},
]}}
)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# We should have a merge job for the buildset
jobs = list(self.merger_api.queued())
self.assertEqual(len(jobs), 1)
self.assertEqual(jobs[0].job_type, 'merge')
# Release the merge job.
self.merger_api.release(jobs[0])
self.waitUntilSettled()
        # We should have a global repo state (refstate) job for the
        # buildset
jobs = list(self.merger_api.queued())
self.assertEqual(len(jobs), 1)
self.assertEqual(jobs[0].job_type, 'refstate')
# Verify the waiting status for both jobs is "repo state"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
status = tenant.layout.pipelines["gate"].formatStatusJSON()
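        # Dig the per-job status entries out of the first change at
        # the head of the (only) change queue.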
jobs = status["change_queues"][0]["heads"][0][0]["jobs"]
self.assertEqual(jobs[0]["waiting_status"], 'repo state')
self.assertEqual(jobs[1]["waiting_status"], 'repo state')
# Return the merge queue to normal behavior, but pause nodepool
self.fake_nodepool.pause()
self.hold_merge_jobs_in_queue = False
self.merger_api.release()
self.waitUntilSettled()
# Verify the nodepool waiting status
status = tenant.layout.pipelines["gate"].formatStatusJSON()
jobs = status["change_queues"][0]["heads"][0][0]["jobs"]
self.assertEqual(jobs[0]["waiting_status"],
'node request: 100-0000000000')
self.assertEqual(jobs[1]["waiting_status"],
'dependencies: image-builder')
# Return nodepool operation to normal, but hold executor jobs
# in queue
self.hold_jobs_in_queue = True
self.fake_nodepool.unpause()
self.waitUntilSettled()
# Verify the executor waiting status
status = tenant.layout.pipelines["gate"].formatStatusJSON()
jobs = status["change_queues"][0]["heads"][0][0]["jobs"]
self.assertEqual(jobs[0]["waiting_status"], 'executor')
self.assertEqual(jobs[1]["waiting_status"],
'dependencies: image-builder')
# Return the executor queue to normal, but hold jobs in build
self.hold_jobs_in_queue = False
self.executor_server.hold_jobs_in_build = True
self.executor_api.release()
self.waitUntilSettled()
status = tenant.layout.pipelines["gate"].formatStatusJSON()
jobs = status["change_queues"][0]["heads"][0][0]["jobs"]
self.assertIsNone(jobs[0]["waiting_status"])
self.assertEqual(jobs[1]["waiting_status"],
'dependencies: image-builder')
self.assertEqual(len(self.builds), 1)
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
status = tenant.layout.pipelines["gate"].formatStatusJSON()
# First change
jobs = status["change_queues"][0]["heads"][0][0]["jobs"]
self.assertIsNone(jobs[0]["waiting_status"])
self.assertEqual(jobs[1]["waiting_status"],
'dependencies: image-builder')
# Second change
jobs = status["change_queues"][0]["heads"][0][1]["jobs"]
self.assertEqual(jobs[0]["waiting_status"],
'requirements: images')
        # Release image-builder; it should cause both instances of
        # image-user to run.
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
])
self.orderedRelease()
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='image-user', result='SUCCESS', changes='1,1'),
dict(name='image-user', result='SUCCESS', changes='1,1 2,1'),
])
self.assertEqual(
self.history[-1].parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image',
'name': 'image',
'metadata': {
'type': 'container_image',
}
}])
# Catch time / monotonic errors
val = self.assertReportedStat('zuul.tenant.tenant-one.pipeline.'
'gate.repo_state_time',
kind='ms')
self.assertTrue(0.0 < float(val) < 60000.0)
@simple_layout('layouts/provides-requires-unshared.yaml')
def test_provides_requires_unshared_queue(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.returnData(
'image-builder', A,
{'zuul':
{'artifacts': [
{'name': 'image', 'url': 'http://example.com/image',
'metadata': {'type': 'container_image'}},
]}}
)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
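        # Tie B to A with a Depends-On footer; the projects do not
        # share a change queue, so B must wait for A to merge.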
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
])
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='image-user', result='SUCCESS', changes='2,1'),
])
# Data are not passed in this instance because the builder
# change merges before the user job runs.
self.assertFalse('artifacts' in self.history[-1].parameters['zuul'])
@simple_layout('layouts/provides-requires.yaml')
def test_provides_requires_check_current(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.returnData(
'image-builder', A,
{'zuul':
{'artifacts': [
{'name': 'image', 'url': 'http://example.com/image',
'metadata': {'type': 'container_image'}},
]}}
)
self.executor_server.returnData(
'library-builder', A,
{'zuul':
{'artifacts': [
{'name': 'library', 'url': 'http://example.com/library',
'metadata': {'type': 'library_object'}},
]}}
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
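        # A should be running image-builder, library-builder, and hold.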
self.assertEqual(len(self.builds), 3)
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.executor_server.returnData(
'image-builder', B,
{'zuul':
{'artifacts': [
{'name': 'image2', 'url': 'http://example.com/image2',
'metadata': {'type': 'container_image'}},
]}}
)
self.executor_server.returnData(
'library-builder', B,
{'zuul':
{'artifacts': [
{'name': 'library2', 'url': 'http://example.com/library2',
'metadata': {'type': 'library_object'}},
]}}
)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
C.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
C.subject, B.data['id'])
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
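        # C contributes only one build for now (presumably its hold
        # job); its artifact-requiring jobs wait on the builders still
        # running ahead of it.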
self.assertEqual(len(self.builds), 7)
self.executor_server.release('image-*')
self.executor_server.release('library-*')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
dict(name='image-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1'),
dict(name='image-user', result='SUCCESS', changes='1,1 2,1 3,1'),
dict(name='library-user', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='library-user2', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1 3,1'),
], ordered=False)
image_user = self.getJobFromHistory('image-user')
self.assertEqual(
image_user.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image',
'name': 'image',
'metadata': {
'type': 'container_image',
}
}, {
'project': 'org/project1',
'change': '2',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image2',
'name': 'image2',
'metadata': {
'type': 'container_image',
}
}])
library_user = self.getJobFromHistory('library-user')
self.assertEqual(
library_user.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'library-builder',
'url': 'http://example.com/library',
'name': 'library',
'metadata': {
'type': 'library_object',
}
}, {
'project': 'org/project1',
'change': '2',
'patchset': '1',
'job': 'library-builder',
'url': 'http://example.com/library2',
'name': 'library2',
'metadata': {
'type': 'library_object',
}
}])
@simple_layout('layouts/provides-requires.yaml')
def test_provides_requires_check_old_success(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.returnData(
'image-builder', A,
{'zuul':
{'artifacts': [
{'name': 'image', 'url': 'http://example.com/image',
'metadata': {'type': 'container_image'}},
]}}
)
self.executor_server.returnData(
'library-builder', A,
{'zuul':
{'artifacts': [
{'name': 'library', 'url': 'http://example.com/library',
'metadata': {'type': 'library_object'}},
]}}
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
], ordered=False)
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.executor_server.returnData(
'image-builder', B,
{'zuul':
{'artifacts': [
{'name': 'image2', 'url': 'http://example.com/image2',
'metadata': {'type': 'container_image'}},
]}}
)
self.executor_server.returnData(
'library-builder', B,
{'zuul':
{'artifacts': [
{'name': 'library2', 'url': 'http://example.com/library2',
'metadata': {'type': 'library_object'}},
]}}
)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
dict(name='image-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
C.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
C.subject, B.data['id'])
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
dict(name='image-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1'),
dict(name='image-user', result='SUCCESS', changes='1,1 2,1 3,1'),
dict(name='library-user', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='library-user2', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1 3,1'),
], ordered=False)
D = self.fake_gerrit.addFakeChange('org/project3', 'master', 'D')
D.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
D.subject, B.data['id'])
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='SUCCESS', changes='1,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
dict(name='image-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='library-builder', result='SUCCESS', changes='1,1 2,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1'),
dict(name='image-user', result='SUCCESS', changes='1,1 2,1 3,1'),
dict(name='library-user', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='library-user2', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1 3,1'),
dict(name='both-user', result='SUCCESS', changes='1,1 2,1 4,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1 4,1'),
], ordered=False)
image_user = self.getJobFromHistory('image-user')
self.assertEqual(
image_user.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image',
'name': 'image',
'metadata': {
'type': 'container_image',
}
}, {
'project': 'org/project1',
'change': '2',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image2',
'name': 'image2',
'metadata': {
'type': 'container_image',
}
}])
library_user = self.getJobFromHistory('library-user')
self.assertEqual(
library_user.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'library-builder',
'url': 'http://example.com/library',
'name': 'library',
'metadata': {
'type': 'library_object',
}
}, {
'project': 'org/project1',
'change': '2',
'patchset': '1',
'job': 'library-builder',
'url': 'http://example.com/library2',
'name': 'library2',
'metadata': {
'type': 'library_object',
}
}])
both_user = self.getJobFromHistory('both-user')
self.assertEqual(
both_user.parameters['zuul']['artifacts'],
[{
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image',
'name': 'image',
'metadata': {
'type': 'container_image',
}
}, {
'project': 'org/project1',
'change': '1',
'patchset': '1',
'job': 'library-builder',
'url': 'http://example.com/library',
'name': 'library',
'metadata': {
'type': 'library_object',
}
}, {
'project': 'org/project1',
'change': '2',
'patchset': '1',
'job': 'image-builder',
'url': 'http://example.com/image2',
'name': 'image2',
'metadata': {
'type': 'container_image',
}
}, {
'project': 'org/project1',
'change': '2',
'patchset': '1',
'job': 'library-builder',
'url': 'http://example.com/library2',
'name': 'library2',
'metadata': {
'type': 'library_object',
}
}])
@simple_layout('layouts/provides-requires.yaml')
def test_provides_requires_check_old_failure(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
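        # Fail both builder jobs so the artifact requirements of the
        # dependent user jobs cannot be satisfied.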
self.executor_server.failJob('image-builder', A)
self.executor_server.failJob('library-builder', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='FAILURE', changes='1,1'),
dict(name='library-builder', result='FAILURE', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
], ordered=False)
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='FAILURE', changes='1,1'),
dict(name='library-builder', result='FAILURE', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
self.assertTrue(re.search('image-user .* FAILURE', B.messages[0]))
self.assertEqual(
B.messages[0].count(
'Job image-user requires artifact(s) images'),
1,
B.messages[0])
self.assertEqual(
B.messages[0].count(
'Job library-user requires artifact(s) libraries'),
1,
B.messages[0])
@simple_layout('layouts/provides-requires-single-project.yaml')
def test_provides_requires_check_old_failure_single_project(self):
        # Similar to the above test, but has job dependencies, which
        # may cause the requirements check to run multiple times as
        # the queue processor runs repeatedly.
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.failJob('image-builder', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='FAILURE', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
], ordered=False)
self.assertTrue(
'Skipped due to failed job image-builder' in A.messages[0])
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='image-builder', result='FAILURE', changes='1,1'),
dict(name='hold', result='SUCCESS', changes='1,1'),
dict(name='image-builder', result='FAILURE', changes='1,1 2,1'),
dict(name='hold', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
self.assertTrue(re.search('image-user .* FAILURE', B.messages[0]))
self.assertEqual(
B.messages[0].count(
'Job image-user requires artifact(s) images'),
1, B.messages[0])
class TestProvidesRequiresPostgres(TestProvidesRequiresMysql):
config_file = "zuul-sql-driver-postgres.conf"
class TestForceMergeMissingTemplate(ZuulTestCase):
tenant_config_file = "config/force-merge-template/main.yaml"
def test_force_merge_missing_template(self):
"""
Tests that force merging a change using a non-existent project
template triggering a post job doesn't wedge zuul on reporting.
"""
# Create change that adds uses a non-existent project template
conf = textwrap.dedent(
"""
- project:
templates:
- non-existent
check:
jobs:
- noop
post:
jobs:
- post-job
""")
file_dict = {'zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
# Now force merge the change
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
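        # The ref-updated event exercises the post pipeline, which
        # references the broken in-repo config.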
self.fake_gerrit.addEvent(A.getRefUpdatedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='other-job', result='SUCCESS', changes='2,1'),
])
class TestJobPausePriority(AnsibleZuulTestCase):
tenant_config_file = 'config/job-pause-priority/main.yaml'
def test_paused_job_priority(self):
"Test that nodes for children of paused jobs have a higher priority"
self.fake_nodepool.pause()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
reqs = self.fake_nodepool.getNodeRequests()
self.assertEqual(len(reqs), 1)
self.assertEqual(reqs[0]['_oid'], '100-0000000000')
self.assertEqual(reqs[0]['provider'], None)
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.fake_nodepool.pause()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
for x in iterate_timeout(60, 'paused job'):
reqs = self.fake_nodepool.getNodeRequests()
if reqs:
break
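        # The new request's sequence prefix (099) sorts before the
        # original (100), giving the paused job's child a higher
        # priority.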
self.assertEqual(len(reqs), 1)
self.assertEqual(reqs[0]['_oid'], '099-0000000001')
self.assertEqual(reqs[0]['provider'], 'test-provider')
self.fake_nodepool.unpause()
self.waitUntilSettled()
class TestAnsibleVersion(AnsibleZuulTestCase):
tenant_config_file = 'config/ansible-versions/main.yaml'
def test_ansible_versions(self):
"""
Tests that jobs run with the requested ansible version.
"""
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='ansible-default', result='SUCCESS', changes='1,1'),
dict(name='ansible-6', result='SUCCESS', changes='1,1'),
dict(name='ansible-8', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestDefaultAnsibleVersion(AnsibleZuulTestCase):
config_file = 'zuul-default-ansible-version.conf'
tenant_config_file = 'config/ansible-versions/main.yaml'
def test_ansible_versions(self):
"""
Tests that jobs run with the requested ansible version.
"""
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='ansible-default-zuul-conf', result='SUCCESS',
changes='1,1'),
dict(name='ansible-6', result='SUCCESS', changes='1,1'),
dict(name='ansible-8', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestReturnWarnings(AnsibleZuulTestCase):
tenant_config_file = 'config/return-warnings/main.yaml'
def test_return_warnings(self):
"""
Tests that jobs can emit custom warnings that get reported.
"""
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='emit-warnings', result='SUCCESS', changes='1,1'),
])
self.assertTrue(A.reported)
self.assertIn('This is the first warning', A.messages[0])
self.assertIn('This is the second warning', A.messages[0])
class TestUnsafeVars(AnsibleZuulTestCase):
tenant_config_file = 'config/unsafe-vars/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_unsafe_vars(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
testjob = self.getJobFromHistory('testjob')
job_output = self._get_file(testjob, 'work/logs/job-output.txt')
self.log.debug(job_output)
# base_secret wasn't present when frozen
self.assertIn("BASE JOBSECRET: undefined", job_output)
# secret variables are marked unsafe
self.assertIn("BASE SECRETSUB: {{ subtext }}", job_output)
# latefact wasn't present when frozen
self.assertIn("BASE LATESUB: undefined", job_output)
# check the !unsafe tagged version
self.assertIn("BASE LATESUB UNSAFE: "
"{{ latefact | default('undefined') }}", job_output)
# Both of these are dynamically evaluated
self.assertIn("TESTJOB SUB: text", job_output)
self.assertIn("TESTJOB LATESUB: late", job_output)
# check the !unsafe tagged version
self.assertIn("TESTJOB LATESUB UNSAFE: "
"{{ latefact | default('undefined') }}", job_output)
# The project secret is not defined
self.assertNotIn("TESTJOB SECRET:", job_output)
testjob = self.getJobFromHistory('testjob-secret')
job_output = self._get_file(testjob, 'work/logs/job-output.txt')
self.log.debug(job_output)
# base_secret wasn't present when frozen
self.assertIn("BASE JOBSECRET: undefined", job_output)
# secret variables are marked unsafe
self.assertIn("BASE SECRETSUB: {{ subtext }}", job_output)
# latefact wasn't present when frozen
self.assertIn("BASE LATESUB: undefined", job_output)
# check the !unsafe tagged version
self.assertIn("BASE LATESUB UNSAFE: "
"{{ latefact | default('undefined') }}", job_output)
# These are frozen
self.assertIn("TESTJOB SUB: text", job_output)
self.assertIn("TESTJOB LATESUB: undefined", job_output)
# check the !unsafe tagged version
self.assertIn("TESTJOB LATESUB UNSAFE: "
"{{ latefact | default('undefined') }}", job_output)
# This is marked unsafe
self.assertIn("TESTJOB SECRET: {{ subtext }}", job_output)
class TestConnectionVars(AnsibleZuulTestCase):
tenant_config_file = 'config/connection-vars/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_ansible_connection(self):
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- test-job:
vars:
ansible_shell_executable: /bin/du
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
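        # Zuul rejects ansible_* connection variables supplied as job
        # variables.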
self.assertIn("Variable name 'ansible_shell_executable' "
"is not allowed", A.messages[0])
self.assertHistory([])
def test_return_data(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1'),
], ordered=False)
# Currently, second-job errors; if it ever runs, add these assertions:
# job = self.getJobFromHistory('second-job')
# job_output = self._get_file(job, 'work/logs/job-output.txt')
# self.log.debug(job_output)
# self.assertNotIn("/bin/du", job_output)
class IncludeBranchesTestCase(ZuulTestCase):
def _test_include_branches(self, history1, history2, history3, history4):
self.create_branch('org/project', 'stable')
self.create_branch('org/project', 'feature/foo')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'feature/foo'))
self.waitUntilSettled()
# Test the jobs on the master branch.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory(history1, ordered=False)
# Test the jobs on the excluded feature branch.
B = self.fake_gerrit.addFakeChange('org/project', 'feature/foo', 'A')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory(history1 + history2, ordered=False)
# Test in-repo config proposed on the excluded feature branch.
conf = textwrap.dedent(
"""
- job:
name: project-dynamic
- project:
check:
jobs:
- project-dynamic
""")
file_dict = {'zuul.yaml': conf}
C = self.fake_gerrit.addFakeChange('org/project', 'feature/foo', 'A',
files=file_dict)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory(history1 + history2 + history3, ordered=False)
old = self.scheds.first.sched.tenant_layout_state.get('tenant-one')
# Merge a change to the excluded feature branch.
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'MERGED')
self.assertHistory(history1 + history2 + history3 + history4,
ordered=False)
new = self.scheds.first.sched.tenant_layout_state.get('tenant-one')
# Verify we haven't performed a tenant reconfiguration
self.assertTrue(old == new)
class TestIncludeBranchesProject(IncludeBranchesTestCase):
tenant_config_file = 'config/dynamic-only-project/include.yaml'
def test_include_branches(self):
history1 = [
dict(name='central-test', result='SUCCESS', changes='1,1'),
dict(name='project-test', result='SUCCESS', changes='1,1'),
]
history2 = [
dict(name='central-test', result='SUCCESS', changes='2,1'),
]
history3 = [
dict(name='central-test', result='SUCCESS', changes='3,1'),
]
history4 = [
dict(name='central-test', result='SUCCESS', changes='2,1'),
]
self._test_include_branches(history1, history2, history3, history4)
class TestExcludeBranchesProject(IncludeBranchesTestCase):
tenant_config_file = 'config/dynamic-only-project/exclude.yaml'
def test_exclude_branches(self):
history1 = [
dict(name='central-test', result='SUCCESS', changes='1,1'),
dict(name='project-test', result='SUCCESS', changes='1,1'),
]
history2 = [
dict(name='central-test', result='SUCCESS', changes='2,1'),
]
history3 = [
dict(name='central-test', result='SUCCESS', changes='3,1'),
]
history4 = [
dict(name='central-test', result='SUCCESS', changes='2,1'),
]
self._test_include_branches(history1, history2, history3, history4)
class TestDynamicBranchesProject(IncludeBranchesTestCase):
tenant_config_file = 'config/dynamic-only-project/dynamic.yaml'
def test_dynamic_branches(self):
history1 = [
dict(name='central-test', result='SUCCESS', changes='1,1'),
dict(name='project-test', result='SUCCESS', changes='1,1'),
]
history2 = [
dict(name='central-test', result='SUCCESS', changes='2,1'),
dict(name='project-test', result='SUCCESS', changes='2,1'),
]
history3 = [
dict(name='central-test', result='SUCCESS', changes='3,1'),
dict(name='project-dynamic', result='SUCCESS', changes='3,1'),
]
history4 = [
dict(name='central-test', result='SUCCESS', changes='2,1'),
dict(name='project-test', result='SUCCESS', changes='2,1'),
]
self._test_include_branches(history1, history2, history3, history4)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_v3.py
|
test_v3.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2021-2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import gc
import json
import logging
import os
import re
import shutil
import socket
import textwrap
import threading
import time
from collections import namedtuple
from unittest import mock, skip
from uuid import uuid4
from kazoo.exceptions import NoNodeError
from testtools.matchers import StartsWith
import git
import fixtures
import zuul.change_matcher
from zuul.driver.gerrit import gerritreporter
import zuul.scheduler
import zuul.model
import zuul.merger.merger
from zuul.lib import yamlutil as yaml
from tests.base import (
SSLZuulTestCase,
ZuulTestCase,
repack_repo,
simple_layout,
iterate_timeout,
RecordingExecutorServer,
TestConnectionRegistry,
FIXTURE_DIR,
skipIfMultiScheduler,
)
from zuul.zk.change_cache import ChangeKey
from zuul.zk.event_queues import PIPELINE_NAME_ROOT
from zuul.zk.layout import LayoutState
from zuul.zk.locks import management_queue_lock, pipeline_lock
from zuul.zk import zkobject
EMPTY_LAYOUT_STATE = LayoutState("", "", 0, None, {}, -1)
class TestSchedulerSSL(SSLZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_jobs_executed(self):
"Test that jobs are executed and a change is merged"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(self.getJobFromHistory('project-test1').node,
'label1')
self.assertEqual(self.getJobFromHistory('project-test2').node,
'label1')
class TestSchedulerZone(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def setUp(self):
super(TestSchedulerZone, self).setUp()
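        # Label nodes with an executor-zone attribute matching the
        # zoned executor configured in setup_config below.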
self.fake_nodepool.attributes = {'executor-zone': 'test-provider.vpn'}
# Create an unzoned executor
config = configparser.ConfigParser()
config.read_dict(self.config)
config.remove_option('executor', 'zone')
config.set('executor', 'command_socket',
os.path.join(self.test_root, 'executor2.socket'))
executor_connections = TestConnectionRegistry(
self.changes, self.config, self.additional_event_queues,
self.upstream_root, self.poller_events,
self.git_url_with_auth, self.addCleanup)
executor_connections.configure(self.config,
source_only=True)
self.executor_server_unzoned = RecordingExecutorServer(
config,
connections=executor_connections,
jobdir_root=self.jobdir_root,
_run_ansible=self.run_ansible,
_test_root=self.test_root,
log_console_port=self.log_console_port)
self.executor_server_unzoned.start()
self.addCleanup(self._shutdown_executor)
def _shutdown_executor(self):
self.executor_server_unzoned.hold_jobs_in_build = False
self.executor_server_unzoned.release()
self.executor_server_unzoned.stop()
self.executor_server_unzoned.join()
def setup_config(self, config_file: str):
config = super(TestSchedulerZone, self).setup_config(config_file)
config.set('executor', 'zone', 'test-provider.vpn')
return config
def test_jobs_executed(self):
"Test that jobs are executed and a change is merged per zone"
# Validate that the reported executor stats are correct. There must
# be two executors online (one unzoned and one zoned)
# TODO(corvus): remove deprecated top-level stats in 5.0
self.assertReportedStat(
'zuul.executors.online', value='2', kind='g')
self.assertReportedStat(
'zuul.executors.unzoned.online', value='1', kind='g')
self.assertReportedStat(
'zuul.executors.zone.test-provider_vpn.online',
value='1', kind='g')
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
queue = list(self.executor_api.queued())
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(queue), 1)
self.assertEqual('test-provider.vpn', queue[0].zone)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(self.getJobFromHistory('project-test1').node,
'label1')
self.assertEqual(self.getJobFromHistory('project-test2').node,
'label1')
# Validate that both (zoned and unzoned) executors are accepting work
self.assertReportedStat(
'zuul.executors.accepting', value='2', kind='g')
self.assertReportedStat(
'zuul.executors.unzoned.accepting', value='1', kind='g')
self.assertReportedStat(
'zuul.executors.zone.test-provider_vpn.accepting',
value='1', kind='g')
def test_executor_disconnect(self):
"Test that jobs are completed after an executor disconnect"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# Forcibly disconnect the executor from ZK
self.executor_server.zk_client.client.stop()
self.executor_server.zk_client.client.start()
# Find the build in the scheduler so we can check its status
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
builds = items[0].current_build_set.getBuilds()
build = builds[0]
# Clean up the build
self.scheds.first.sched.executor.cleanupLostBuildRequests()
# Wait for the build to be reported as lost
for x in iterate_timeout(30, 'retry build'):
if build.result == 'RETRY':
break
        # If we didn't time out, then it worked; we're done
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# There is a test-only race in the recording executor class
# where we may record a successful first build, even though
# the executor didn't actually send a build complete event.
        # This could probably be improved, but for now, it's
# sufficient to verify that the job was retried. So we omit a
# result classifier on the first build.
self.assertHistory([
dict(name='project-merge', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestSchedulerZoneFallback(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def setup_config(self, config_file: str):
config = super().setup_config(config_file)
config.set('executor', 'zone', 'test-provider.vpn')
config.set('executor', 'allow_unzoned', 'true')
return config
def test_jobs_executed(self):
"Test that jobs are executed and a change is merged per zone"
self.hold_jobs_in_queue = True
        # Validate that the reported executor stats are correct. Since
        # the executor accepts zoned and unzoned jobs it should be
        # counted in both metrics.
self.assertReportedStat(
'zuul.executors.online', value='1', kind='g')
self.assertReportedStat(
'zuul.executors.unzoned.online', value='1', kind='g')
self.assertReportedStat(
'zuul.executors.zone.test-provider_vpn.online',
value='1', kind='g')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
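        # The nodes carry no executor-zone attribute, so the request
        # is unzoned; the zoned executor still accepts it because
        # allow_unzoned is set.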
queue = list(self.executor_api.queued())
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(queue), 1)
self.assertEqual(None, queue[0].zone)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(self.getJobFromHistory('project-test1').node,
'label1')
self.assertEqual(self.getJobFromHistory('project-test2').node,
'label1')
class TestSchedulerAutoholdHoldExpiration(ZuulTestCase):
'''
    This class of tests validates that autohold node expiration values
    are set correctly, either from the zuul config or from a custom
    value.
'''
config_file = 'zuul-hold-expiration.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
@simple_layout('layouts/autohold.yaml')
def test_autohold_max_hold_default(self):
'''
Test that the hold request node expiration will default to the
value specified in the configuration file.
'''
        # Add an autohold with no hold expiration.
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
# There should be a record in ZooKeeper
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual(1, len(request_list))
request = self.sched_zk_nodepool.getHoldRequest(
request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
self.assertEqual('project-test2', request.job)
self.assertEqual('reason text', request.reason)
self.assertEqual(1, request.max_count)
self.assertEqual(0, request.current_count)
self.assertEqual([], request.nodes)
# This should be the default value from the zuul config file.
self.assertEqual(1800, request.node_expiration)
@simple_layout('layouts/autohold.yaml')
def test_autohold_max_hold_custom(self):
'''
Test that the hold request node expiration will be set to the custom
value specified in the request.
'''
        # Add an autohold with a custom hold expiration.
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, 500)
# There should be a record in ZooKeeper
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual(1, len(request_list))
request = self.sched_zk_nodepool.getHoldRequest(
request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
self.assertEqual('project-test2', request.job)
self.assertEqual('reason text', request.reason)
self.assertEqual(1, request.max_count)
self.assertEqual(0, request.current_count)
self.assertEqual([], request.nodes)
# This should be the value from the user request.
self.assertEqual(500, request.node_expiration)
@simple_layout('layouts/autohold.yaml')
def test_autohold_max_hold_custom_invalid(self):
'''
Test that if the custom hold request node expiration is higher than our
configured max, it will be lowered to the max.
'''
        # Add an autohold with a custom hold expiration that is higher
        # than our configured max.
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, 10000)
# There should be a record in ZooKeeper
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual(1, len(request_list))
request = self.sched_zk_nodepool.getHoldRequest(
request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
self.assertEqual('project-test2', request.job)
self.assertEqual('reason text', request.reason)
self.assertEqual(1, request.max_count)
self.assertEqual(0, request.current_count)
self.assertEqual([], request.nodes)
# This should be the max value from the zuul config file.
self.assertEqual(3600, request.node_expiration)
class TestScheduler(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_jobs_executed(self):
"Test that jobs are executed and a change is merged"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(self.getJobFromHistory('project-test1').node,
'label1')
self.assertEqual(self.getJobFromHistory('project-test2').node,
'label1')
self.assertThat(A.messages[1],
StartsWith(
'Build succeeded (gate).\n'
'https://zuul.example.com/t/tenant-one/buildset'))
# TODOv3(jeblair): we may want to report stats by tenant (also?).
# Per-driver
self.assertReportedStat('zuul.event.gerrit.comment-added', value='1',
kind='c')
# Per-driver per-connection
self.assertReportedStat('zuul.event.gerrit.gerrit.comment-added',
value='1', kind='c')
self.assertReportedStat(
'zuul.tenant.tenant-one.trigger_events', value='0', kind='g')
self.assertReportedStat(
'zuul.tenant.tenant-one.management_events', value='0', kind='g')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.current_changes',
value='1', kind='g')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.project.review_example_com.'
'org_project.master.job.project-merge.SUCCESS', kind='ms')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.project.review_example_com.'
'org_project.master.job.project-merge.SUCCESS', value='1',
kind='c')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.resident_time', kind='ms')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.total_changes', value='1',
kind='c')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.trigger_events',
value='0', kind='g')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.result_events',
value='0', kind='g')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.management_events',
value='0', kind='g')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.project.review_example_com.'
'org_project.master.resident_time', kind='ms')
self.assertReportedStat(
'zuul.tenant.tenant-one.pipeline.gate.project.review_example_com.'
'org_project.master.total_changes', value='1', kind='c')
exec_key = 'zuul.executor.%s' % self.executor_server.hostname.replace(
'.', '_')
self.assertReportedStat(exec_key + '.builds', value='1', kind='c')
self.assertReportedStat(exec_key + '.starting_builds', kind='g')
self.assertReportedStat(exec_key + '.starting_builds', kind='ms')
self.assertReportedStat(
'zuul.nodepool.requests.requested.total', value='1', kind='c')
self.assertReportedStat(
'zuul.nodepool.requests.requested.label.label1',
value='1', kind='c')
self.assertReportedStat(
'zuul.nodepool.requests.fulfilled.label.label1',
value='1', kind='c')
self.assertReportedStat(
'zuul.nodepool.requests.requested.size.1', value='1', kind='c')
self.assertReportedStat(
'zuul.nodepool.requests.fulfilled.size.1', value='1', kind='c')
        # Just check for existence, since we cannot know whether a
        # request is in flight during the sched._stats_interval
self.assertReportedStat(
'zuul.nodepool.current_requests', kind='g')
self.assertReportedStat(
'zuul.nodepool.tenant.tenant-one.current_requests', kind='g')
self.assertReportedStat(
'zuul.executors.online', value='1', kind='g')
self.assertReportedStat(
'zuul.executors.accepting', value='1', kind='g')
self.assertReportedStat(
'zuul.mergers.online', value='1', kind='g')
self.assertReportedStat('zuul.scheduler.eventqueues.connection.gerrit',
value='0', kind='g')
self.assertReportedStat('zuul.scheduler.run_handler', kind='ms')
# Catch time / monotonic errors
for key in [
'zuul.tenant.tenant-one.event_enqueue_processing_time',
'zuul.tenant.tenant-one.event_enqueue_time',
'zuul.tenant.tenant-one.reconfiguration_time',
'zuul.tenant.tenant-one.pipeline.gate.event_enqueue_time',
'zuul.tenant.tenant-one.pipeline.gate.merge_request_time',
'zuul.tenant.tenant-one.pipeline.gate.merger_merge_op_time',
'zuul.tenant.tenant-one.pipeline.gate.job_freeze_time',
'zuul.tenant.tenant-one.pipeline.gate.node_request_time',
'zuul.tenant.tenant-one.pipeline.gate.job_wait_time',
'zuul.tenant.tenant-one.pipeline.gate.event_job_time',
'zuul.tenant.tenant-one.pipeline.gate.resident_time',
'zuul.tenant.tenant-one.pipeline.gate.read_time',
'zuul.tenant.tenant-one.pipeline.gate.write_time',
'zuul.tenant.tenant-one.pipeline.gate.process',
'zuul.tenant.tenant-one.pipeline.gate.event_process',
'zuul.tenant.tenant-one.pipeline.gate.handling',
'zuul.tenant.tenant-one.pipeline.gate.refresh',
]:
val = self.assertReportedStat(key, kind='ms')
self.assertTrue(0.0 < float(val) < 60000.0)
for key in [
'zuul.tenant.tenant-one.pipeline.gate.read_objects',
'zuul.tenant.tenant-one.pipeline.gate.write_objects',
'zuul.tenant.tenant-one.pipeline.gate.read_znodes',
'zuul.tenant.tenant-one.pipeline.gate.write_znodes',
'zuul.tenant.tenant-one.pipeline.gate.write_bytes',
]:
# 'zuul.tenant.tenant-one.pipeline.gate.read_bytes' is
# expected to be zero since it's initialized after reading
val = self.assertReportedStat(key, kind='g')
self.assertTrue(0.0 < float(val) < 60000.0)
self.assertReportedStat('zuul.tenant.tenant-one.pipeline.gate.'
'data_size_compressed',
kind='g')
self.assertReportedStat('zuul.tenant.tenant-one.pipeline.gate.'
'data_size_uncompressed',
kind='g')
self.assertReportedStat('zuul.connection.gerrit.cache.'
'data_size_compressed',
kind='g')
self.assertReportedStat('zuul.connection.gerrit.cache.'
'data_size_uncompressed',
kind='g')
for build in self.history:
self.assertTrue(build.parameters['zuul']['voting'])
def test_zk_profile(self):
command_socket = self.scheds.first.sched.config.get(
'scheduler', 'command_socket')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
zplog = logging.getLogger('zuul.profile')
with self.assertLogs('zuul.profile', level='DEBUG') as logs:
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# assertNoLogs doesn't appear until py3.10, so we need to
# emit a single log line in order to assert that there
# aren't any others.
zplog.debug('test')
self.assertEqual(1, len(logs.output))
args = json.dumps(['tenant-one', 'check'])
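        # Toggle ZK profiling for the tenant/pipeline via the
        # scheduler's command socket.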
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.connect(command_socket)
s.sendall(f'zkprofile {args}\n'.encode('utf8'))
with self.assertLogs('zuul.profile', level='DEBUG'):
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.connect(command_socket)
s.sendall(f'zkprofile {args}\n'.encode('utf8'))
with self.assertLogs('zuul.profile', level='DEBUG') as logs:
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
zplog.debug('test')
self.assertEqual(1, len(logs.output))
def test_initial_pipeline_gauges(self):
"Test that each pipeline reported its length on start"
self.assertReportedStat('zuul.tenant.tenant-one.pipeline.gate.'
'current_changes',
value='0', kind='g')
self.assertReportedStat('zuul.tenant.tenant-one.pipeline.check.'
'current_changes',
value='0', kind='g')
def test_job_branch(self):
"Test the correct variant of a job runs on a branch"
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'stable', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertIn('gate', A.messages[1],
"A should transit gate")
self.assertEqual(self.getJobFromHistory('project-test1').node,
'label2')
@simple_layout('layouts/branch-deletion.yaml')
def test_branch_deletion(self):
"Test the correct variant of a job runs on a branch"
# Start a secondary merger so this test exercises branch
# deletion on both a merger and a separate executor.
self.executor_server._merger_running = False
self.executor_server.merger_loop_wake_event.set()
self.executor_server.merger_thread.join()
self._startMerger()
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'stable', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.delete_branch('org/project', 'stable')
path = os.path.join(self.executor_src_root, 'review.example.com')
shutil.rmtree(path)
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
build = self.builds[0]
# Make sure there is no stable branch in the checked out git repo.
pname = 'review.example.com/org/project'
work = build.getWorkspaceRepos([pname])
work = work[pname]
heads = set([str(x) for x in work.heads])
self.assertEqual(heads, set(['master']))
self.executor_server.hold_jobs_in_build = False
build.release()
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
def test_parallel_changes(self):
"Test that changes are tested in parallel and merged in series"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.assertTrue(self.builds[0].hasChanges(A))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
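        # A's merge job has finished, so its test jobs start while
        # B's merge job runs behind it.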
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertTrue(self.builds[1].hasChanges(A))
self.assertEqual(self.builds[2].name, 'project-merge')
self.assertTrue(self.builds[2].hasChanges(A, B))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 5)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertTrue(self.builds[1].hasChanges(A))
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertTrue(self.builds[2].hasChanges(A, B))
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertTrue(self.builds[3].hasChanges(A, B))
self.assertEqual(self.builds[4].name, 'project-merge')
self.assertTrue(self.builds[4].hasChanges(A, B, C))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertTrue(self.builds[1].hasChanges(A))
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertTrue(self.builds[2].hasChanges(A, B))
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertTrue(self.builds[3].hasChanges(A, B))
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertTrue(self.builds[4].hasChanges(A, B, C))
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertTrue(self.builds[5].hasChanges(A, B, C))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 9)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
def test_failed_changes(self):
"Test that a change behind a failed change is retested"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertBuilds([dict(name='project-merge', changes='1,1')])
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# A/project-merge is complete
self.assertBuilds([
dict(name='project-test1', changes='1,1'),
dict(name='project-test2', changes='1,1'),
dict(name='project-merge', changes='1,1 2,1'),
])
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# A/project-merge is complete
# B/project-merge is complete
self.assertBuilds([
dict(name='project-test1', changes='1,1'),
dict(name='project-test2', changes='1,1'),
dict(name='project-test1', changes='1,1 2,1'),
dict(name='project-test2', changes='1,1 2,1'),
])
        # Release project-test1 for A, which will fail. This will
        # abort both running B jobs and re-execute project-merge for B.
self.builds[0].release()
self.waitUntilSettled()
self.orderedRelease()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='FAILURE', changes='1,1'),
dict(name='project-test1', result='ABORTED', changes='1,1 2,1'),
dict(name='project-test2', result='ABORTED', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='2,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1'),
], ordered=False)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
        # Make sure that after a reset of the buildset the files state is
        # updated correctly and the scheduler does not resolve the list
        # of changed files via the merger.
self.assertIsNone(
self.scheds.first.sched.merger.merger_api.history.get(
"fileschanges"))

    def test_independent_queues(self):
"Test that changes end up in the right queues"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
        # There should be one merge job running at the head of each queue
self.assertBuilds([
dict(name='project-merge', changes='1,1'),
dict(name='project-merge', changes='2,1'),
])
# Release the current merge builds
self.builds[0].release()
self.waitUntilSettled()
self.builds[0].release()
self.waitUntilSettled()
# Release the merge job for project2 which is behind project1
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# All the test builds should be running:
self.assertBuilds([
dict(name='project-test1', changes='1,1'),
dict(name='project-test2', changes='1,1'),
dict(name='project-test1', changes='2,1'),
dict(name='project-test2', changes='2,1'),
dict(name='project1-project2-integration', changes='2,1'),
dict(name='project-test1', changes='2,1 3,1'),
dict(name='project-test2', changes='2,1 3,1'),
dict(name='project1-project2-integration', changes='2,1 3,1'),
])
self.orderedRelease()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='2,1'),
dict(name='project-merge', result='SUCCESS', changes='2,1 3,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1'),
dict(
name='project1-project2-integration',
result='SUCCESS',
changes='2,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1 3,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1 3,1'),
dict(name='project1-project2-integration',
result='SUCCESS',
changes='2,1 3,1'),
])
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)

    def test_failed_change_at_head(self):
"Test that if a change at the head fails, jobs behind it are canceled"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertBuilds([
dict(name='project-merge', changes='1,1'),
])
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertBuilds([
dict(name='project-test1', changes='1,1'),
dict(name='project-test2', changes='1,1'),
dict(name='project-test1', changes='1,1 2,1'),
dict(name='project-test2', changes='1,1 2,1'),
dict(name='project-test1', changes='1,1 2,1 3,1'),
dict(name='project-test2', changes='1,1 2,1 3,1'),
])
self.release(self.builds[0])
self.waitUntilSettled()
# project-test2, project-merge for B
self.assertBuilds([
dict(name='project-test2', changes='1,1'),
dict(name='project-merge', changes='2,1'),
])
# Unordered history comparison because the aborts can finish
# in any order.
self.assertHistory([
dict(name='project-merge', result='SUCCESS',
changes='1,1'),
dict(name='project-merge', result='SUCCESS',
changes='1,1 2,1'),
dict(name='project-merge', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='project-test1', result='FAILURE',
changes='1,1'),
dict(name='project-test1', result='ABORTED',
changes='1,1 2,1'),
dict(name='project-test2', result='ABORTED',
changes='1,1 2,1'),
dict(name='project-test1', result='ABORTED',
changes='1,1 2,1 3,1'),
dict(name='project-test2', result='ABORTED',
changes='1,1 2,1 3,1'),
], ordered=False)
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.orderedRelease()
self.assertBuilds([])
self.assertHistory([
dict(name='project-merge', result='SUCCESS',
changes='1,1'),
dict(name='project-merge', result='SUCCESS',
changes='1,1 2,1'),
dict(name='project-merge', result='SUCCESS',
changes='1,1 2,1 3,1'),
dict(name='project-test1', result='FAILURE',
changes='1,1'),
dict(name='project-test1', result='ABORTED',
changes='1,1 2,1'),
dict(name='project-test2', result='ABORTED',
changes='1,1 2,1'),
dict(name='project-test1', result='ABORTED',
changes='1,1 2,1 3,1'),
dict(name='project-test2', result='ABORTED',
changes='1,1 2,1 3,1'),
dict(name='project-merge', result='SUCCESS',
changes='2,1'),
dict(name='project-merge', result='SUCCESS',
changes='2,1 3,1'),
dict(name='project-test2', result='SUCCESS',
changes='1,1'),
dict(name='project-test1', result='SUCCESS',
changes='2,1'),
dict(name='project-test2', result='SUCCESS',
changes='2,1'),
dict(name='project-test1', result='SUCCESS',
changes='2,1 3,1'),
dict(name='project-test2', result='SUCCESS',
changes='2,1 3,1'),
], ordered=False)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)

    def test_failed_change_in_middle(self):
"Test a failed change in the middle of the queue"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', B)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.release(self.builds[2])
self.waitUntilSettled()
# project-test1 and project-test2 for A
# project-test2 for B
# project-merge for C (without B)
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 2)
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# project-test1 and project-test2 for A
# project-test2 for B
# project-test1 and project-test2 for C
self.assertEqual(len(self.builds), 5)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
builds = items[0].current_build_set.getBuilds()
self.assertEqual(self.countJobResults(builds, 'SUCCESS'), 1)
self.assertEqual(self.countJobResults(builds, None), 2)
builds = items[1].current_build_set.getBuilds()
self.assertEqual(self.countJobResults(builds, 'SUCCESS'), 1)
self.assertEqual(self.countJobResults(builds, 'FAILURE'), 1)
self.assertEqual(self.countJobResults(builds, None), 1)
builds = items[2].current_build_set.getBuilds()
self.assertEqual(self.countJobResults(builds, 'SUCCESS'), 1)
self.assertEqual(self.countJobResults(builds, None), 2)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 12)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)

    def test_failed_change_at_head_with_queue(self):
"Test that if a change at the head fails, queued jobs are canceled"

        def get_name(params):
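            # The job name is the last path component of the job_ref
            # parameter.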
return params.get('job_ref', '').split('/')[-1]

        self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
queue = list(self.executor_api.queued())
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(queue), 1)
self.assertEqual(queue[0].zone, None)
params = self.executor_server.executor_api.getParams(queue[0])
self.assertEqual(get_name(params), 'project-merge')
self.assertEqual(params['items'][0]['number'], '%d' % A.number)
self.executor_api.release('.*-merge')
self.waitUntilSettled()
self.executor_api.release('.*-merge')
self.waitUntilSettled()
self.executor_api.release('.*-merge')
self.waitUntilSettled()
queue = list(self.executor_api.queued())
params = [self.executor_server.executor_api.getParams(item)
for item in queue]
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(queue), 6)
self.assertEqual(get_name(params[0]), 'project-test1')
self.assertEqual(get_name(params[1]), 'project-test2')
self.assertEqual(get_name(params[2]), 'project-test1')
self.assertEqual(get_name(params[3]), 'project-test2')
self.assertEqual(get_name(params[4]), 'project-test1')
self.assertEqual(get_name(params[5]), 'project-test2')
self.executor_api.release(queue[0])
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
queue = list(self.executor_api.queued())
self.assertEqual(len(queue), 2) # project-test2, project-merge for B
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 0)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 11)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)

    def _test_time_database(self, iteration):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
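
        # Let the build run for a couple of seconds so that the time
        # database records a runtime of at least two seconds for the
        # second iteration below.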
time.sleep(2)
found_job = None
pipeline = self.scheds.first.sched.abide.tenants[
'tenant-one'].layout.pipelines['gate']
pipeline_status = pipeline.formatStatusJSON(
self.scheds.first.sched.globals.websocket_url)
for queue in pipeline_status['change_queues']:
for head in queue['heads']:
for item in head:
for job in item['jobs']:
if job['name'] == 'project-merge':
found_job = job
break
self.assertIsNotNone(found_job)
if iteration == 1:
self.assertIsNotNone(found_job['estimated_time'])
self.assertIsNone(found_job['remaining_time'])
else:
self.assertIsNotNone(found_job['estimated_time'])
            self.assertGreaterEqual(found_job['estimated_time'], 2)
self.assertIsNotNone(found_job['remaining_time'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()

    def test_time_database(self):
"Test the time database"
self._test_time_database(1)
self._test_time_database(2)

    def test_two_failed_changes_at_head(self):
"Test that changes are reparented correctly if 2 fail at head"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', A)
self.executor_server.failJob('project-test1', B)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertTrue(self.builds[2].hasChanges(A))
self.assertTrue(self.builds[2].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(A))
self.assertTrue(self.builds[4].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(C))
# Fail change B first
self.release(self.builds[2])
self.waitUntilSettled()
# restart of C after B failure
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 5)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test2')
self.assertEqual(self.builds[3].name, 'project-test1')
self.assertEqual(self.builds[4].name, 'project-test2')
self.assertTrue(self.builds[1].hasChanges(A))
self.assertTrue(self.builds[2].hasChanges(A))
self.assertTrue(self.builds[2].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(A))
self.assertFalse(self.builds[4].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(C))
# Finish running all passing jobs for change A
self.release(self.builds[1])
self.waitUntilSettled()
# Fail and report change A
self.release(self.builds[0])
self.waitUntilSettled()
# restart of B,C after A failure
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.builds[0].name, 'project-test1') # B
self.assertEqual(self.builds[1].name, 'project-test2') # B
self.assertEqual(self.builds[2].name, 'project-test1') # C
self.assertEqual(self.builds[3].name, 'project-test2') # C
self.assertFalse(self.builds[1].hasChanges(A))
self.assertTrue(self.builds[1].hasChanges(B))
self.assertFalse(self.builds[1].hasChanges(C))
self.assertFalse(self.builds[2].hasChanges(A))
# After A failed and B and C restarted, B should be back in
# C's tests because it has not failed yet.
self.assertTrue(self.builds[2].hasChanges(B))
self.assertTrue(self.builds[2].hasChanges(C))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 21)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)

    def test_patch_order(self):
"Test that dependent patches are tested in the right order"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
M2 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M2')
M1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M1')
M2.setMerged()
M1.setMerged()
# C -> B -> A -> M1 -> M2
# M2 is here to make sure it is never queried. If it is, it
# means zuul is walking down the entire history of merged
# changes.
C.setDependsOn(B, 1)
B.setDependsOn(A, 1)
A.setDependsOn(M1, 1)
M1.setDependsOn(M2, 1)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
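
        # C's dependencies are not yet approved, so nothing could be
        # enqueued; approving A and B below lets the whole series merge.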
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(M2.queried, 0)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)

    def test_needed_changes_enqueue(self):
"Test that a needed change is enqueued ahead"
        #  A          Given a git tree like this, if we enqueue
        # / \         change C, we should walk up and down the tree
        # B   G       and enqueue changes in the order ABCDEFG.
        # /|\         This is also the order that you would get if
        # *C E F      you enqueued changes in the order ABCDEFG, so
        # /           the ordering is stable across re-enqueue events.
        # D
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
E = self.fake_gerrit.addFakeChange('org/project', 'master', 'E')
F = self.fake_gerrit.addFakeChange('org/project', 'master', 'F')
G = self.fake_gerrit.addFakeChange('org/project', 'master', 'G')
B.setDependsOn(A, 1)
C.setDependsOn(B, 1)
D.setDependsOn(C, 1)
E.setDependsOn(B, 1)
F.setDependsOn(B, 1)
G.setDependsOn(A, 1)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
E.addApproval('Code-Review', 2)
F.addApproval('Code-Review', 2)
G.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(D.data['status'], 'NEW')
self.assertEqual(E.data['status'], 'NEW')
self.assertEqual(F.data['status'], 'NEW')
self.assertEqual(G.data['status'], 'NEW')
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
if hasattr(connection, '_change_cache'):
connection.maintainCache([], max_age=0)
self.executor_server.hold_jobs_in_build = True
A.addApproval('Approved', 1)
B.addApproval('Approved', 1)
D.addApproval('Approved', 1)
E.addApproval('Approved', 1)
F.addApproval('Approved', 1)
G.addApproval('Approved', 1)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
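
        # Release the merge jobs one at a time as each change reaches
        # the head of the queue.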
for x in range(8):
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(D.data['status'], 'MERGED')
self.assertEqual(E.data['status'], 'MERGED')
self.assertEqual(F.data['status'], 'MERGED')
self.assertEqual(G.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(D.reported, 2)
self.assertEqual(E.reported, 2)
self.assertEqual(F.reported, 2)
self.assertEqual(G.reported, 2)
self.assertEqual(self.history[6].changes,
'1,1 2,1 3,1 4,1 5,1 6,1 7,1')

    def test_source_cache(self):
"Test that the source cache operates correctly"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
X = self.fake_gerrit.addFakeChange('org/project', 'master', 'X')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
M1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M1')
M1.setMerged()
B.setDependsOn(A, 1)
A.setDependsOn(M1, 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(X.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
for build in self.builds:
if build.pipeline == 'check':
build.release()
self.waitUntilSettled()
for build in self.builds:
if build.pipeline == 'check':
build.release()
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
cached_changes = list(self.fake_gerrit._change_cache)
self.log.debug("len %s", [c.cache_key for c in cached_changes])
# there should still be changes in the cache
self.assertNotEqual(len(cached_changes), 0)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.queried, 2) # Initial and isMerged
self.assertEqual(B.queried, 3) # Initial A, refresh from B, isMerged

    def test_connection_cache_cleanup(self):
"Test that cached changes are correctly cleaned up"
sched = self.scheds.first.sched

        def _getCachedChanges():
cached = set()
for source in sched.connections.getSources():
cached.update(source.getCachedChanges())
return cached

        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
self.hold_jobs_in_queue = True
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
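
        # Merge an unrelated change and emit the ref-updated event so
        # that a branch object is cached alongside the two changes.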
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.setMerged()
self.fake_gerrit.addEvent(C.getRefUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(len(_getCachedChanges()), 3)
sched.maintainConnectionCache()
self.assertEqual(len(_getCachedChanges()), 3)
# Test this method separately to make sure we are getting
# cache keys of the correct type, since we don't do run-time
# validation.
relevant = sched._gatherConnectionCacheKeys()
self.assertEqual(len(relevant), 2)
for k in relevant:
if not isinstance(k, ChangeKey):
raise RuntimeError("Cache key %s is not a ChangeKey" % repr(k))
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(len(_getCachedChanges()), 3)
sched.maintainConnectionCache()
self.assertEqual(len(_getCachedChanges()), 3)
# Test that outdated but still relevant changes are not cleaned up
for connection in sched.connections.connections.values():
connection.maintainCache(
set([c.cache_stat.key for c in _getCachedChanges()]),
max_age=0)
self.assertEqual(len(_getCachedChanges()), 3)
change1 = None
change2 = None
for c in _getCachedChanges():
if c.cache_stat.key.stable_id == '1':
change1 = c
if c.cache_stat.key.stable_id == '2':
change2 = c
# Make change1 eligible for cleanup, but not change2
change1.cache_stat = zuul.model.CacheStat(change1.cache_stat.key,
change1.cache_stat.uuid,
change1.cache_stat.version,
change1.cache_stat.mzxid,
0.0, 0, 0)
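        # (zeroing the cache stat's timestamp makes the entry look old
        # enough for any max_age used below)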
# We should not delete change1 since it's needed by change2
# which we want to keep.
for connection in sched.connections.connections.values():
connection.maintainCache([], max_age=7200)
self.assertEqual(len(_getCachedChanges()), 3)
# Make both changes eligible for deletion
change2.cache_stat = zuul.model.CacheStat(change2.cache_stat.key,
change2.cache_stat.uuid,
change2.cache_stat.version,
                                                  change2.cache_stat.mzxid,
0.0, 0, 0)
for connection in sched.connections.connections.values():
connection.maintainCache([], max_age=7200)
# The master branch change remains
self.assertEqual(len(_getCachedChanges()), 1)
# Test that we can remove changes once max_age has expired
for connection in sched.connections.connections.values():
connection.maintainCache([], max_age=0)
self.assertEqual(len(_getCachedChanges()), 0)

    def test_can_merge(self):
"Test whether a change is ready to merge"
# TODO: move to test_gerrit (this is a unit test!)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project) = tenant.getProject('org/project')
source = project.source
# TODO(pabelanger): As we add more source / trigger APIs we should make
# it easier for users to create events for testing.
event = zuul.model.TriggerEvent()
event.trigger_name = 'gerrit'
event.change_number = '1'
event.patch_number = '2'
a = source.getChange(source.getChangeKey(event), event=event)
mgr = tenant.layout.pipelines['gate'].manager
self.assertFalse(source.canMerge(a, mgr.getSubmitAllowNeeds()))
A.addApproval('Code-Review', 2)
a = source.getChange(source.getChangeKey(event),
refresh=True, event=event)
self.assertFalse(source.canMerge(a, mgr.getSubmitAllowNeeds()))
A.addApproval('Approved', 1)
a = source.getChange(source.getChangeKey(event),
refresh=True, event=event)
self.assertTrue(source.canMerge(a, mgr.getSubmitAllowNeeds()))
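
        # A work-in-progress change must not be considered mergeable.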
A.setWorkInProgress(True)
a = source.getChange(source.getChangeKey(event),
refresh=True, event=event)
self.assertFalse(source.canMerge(a, mgr.getSubmitAllowNeeds()))

    def test_project_merge_conflict(self):
"Test that gate merge conflicts are handled properly"
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project',
'master', 'A',
files={'conflict': 'foo'})
B = self.fake_gerrit.addFakeChange('org/project',
'master', 'B',
files={'conflict': 'bar'})
C = self.fake_gerrit.addFakeChange('org/project',
'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(C.reported, 1)
self.executor_api.release('project-merge')
self.waitUntilSettled()
self.executor_api.release('project-merge')
self.waitUntilSettled()
self.executor_api.release('project-merge')
self.waitUntilSettled()
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertIn('Merge Failed', B.messages[-1])
self.assertEqual(C.reported, 2)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1 3,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 3,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 3,1'),
], ordered=False)

    def test_delayed_merge_conflict(self):
"Test that delayed check merge conflicts are handled properly"
# Hold jobs in the ZooKeeper queue so that we can test whether
        # the executor successfully merges a change based on an old
# repo state (frozen by the scheduler) which would otherwise
# conflict.
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project',
'master', 'A',
files={'conflict': 'foo'})
B = self.fake_gerrit.addFakeChange('org/project',
'master', 'B',
files={'conflict': 'bar'})
C = self.fake_gerrit.addFakeChange('org/project',
'master', 'C')
C.setDependsOn(B, 1)
# A enters the gate queue; B and C enter the check queue
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0) # Check does not report start
self.assertEqual(C.reported, 0) # Check does not report start
# A merges while B and C are queued in check
# Release A project-merge
queue = list(self.executor_api.queued())
self.executor_api.release(queue[0])
self.waitUntilSettled()
# Release A project-test*
# gate has higher precedence, so A's test jobs are added in
# front of the merge jobs for B and C
queue = list(self.executor_api.queued())
self.executor_api.release(queue[0])
self.executor_api.release(queue[1])
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 0)
self.assertEqual(C.reported, 0)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
], ordered=False)
# B and C report merge conflicts
# Release B project-merge
queue = list(self.executor_api.queued())
self.executor_api.release(queue[0])
self.waitUntilSettled()
# Release C
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 1)
self.assertEqual(C.reported, 1)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='2,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1'),
dict(name='project-merge', result='SUCCESS', changes='2,1 3,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1 3,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1 3,1'),
], ordered=False)

    def test_post(self):
"Test that post jobs run"
p = "review.example.com/org/project"
upstream = self.getUpstreamRepos([p])
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setMerged()
A_commit = str(upstream[p].commit('master'))
self.log.debug("A commit: %s" % A_commit)
e = {
"type": "ref-updated",
"submitter": {
"name": "User Name",
},
"refUpdate": {
"oldRev": "90f173846e3af9154517b88543ffbd1691f31366",
"newRev": A_commit,
"refName": "master",
"project": "org/project",
}
}
self.fake_gerrit.addEvent(e)
self.waitUntilSettled()
job_names = [x.name for x in self.history]
self.assertEqual(len(self.history), 1)
self.assertIn('project-post', job_names)

    def test_post_ignore_deletes(self):
"Test that deleting refs does not trigger post jobs"
e = {
"type": "ref-updated",
"submitter": {
"name": "User Name",
},
"refUpdate": {
"oldRev": "90f173846e3af9154517b88543ffbd1691f31366",
"newRev": "0000000000000000000000000000000000000000",
"refName": "master",
"project": "org/project",
}
}
self.fake_gerrit.addEvent(e)
self.waitUntilSettled()
job_names = [x.name for x in self.history]
self.assertEqual(len(self.history), 0)
self.assertNotIn('project-post', job_names)

    @simple_layout('layouts/dont-ignore-ref-deletes.yaml')
def test_post_ignore_deletes_negative(self):
"Test that deleting refs does trigger post jobs"
e = {
"type": "ref-updated",
"submitter": {
"name": "User Name",
},
"refUpdate": {
"oldRev": "90f173846e3af9154517b88543ffbd1691f31366",
"newRev": "0000000000000000000000000000000000000000",
"refName": "testbranch",
"project": "org/project",
}
}
self.fake_gerrit.addEvent(e)
self.waitUntilSettled()
job_names = [x.name for x in self.history]
self.assertEqual(len(self.history), 1)
self.assertIn('project-post', job_names)
@skip("Disabled for early v3 development")
def test_build_configuration_branch_interaction(self):
"Test that switching between branches works"
self.test_build_configuration()
self.test_build_configuration_branch()
# C has been merged, undo that
path = os.path.join(self.upstream_root, "org/project")
repo = git.Repo(path)
repo.heads.master.commit = repo.commit('init')
self.test_build_configuration()

    def test_dependent_changes_rebase(self):
# Test that no errors occur when we walk a dependency tree
# with an unused leaf node due to a rebase.
# Start by constructing: C -> B -> A
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.setDependsOn(B, 1)
# Then rebase to form: D -> C -> A
C.addPatchset() # C,2
C.setDependsOn(A, 1)
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
D.setDependsOn(C, 2)
# Walk the entire tree
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 3)
# Verify that walking just part of the tree still works
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 6)

    def test_dependent_changes_dequeue(self):
"Test that dependent patches are not needlessly tested"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
M1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'M1')
M1.setMerged()
# C -> B -> A -> M1
C.setDependsOn(B, 1)
B.setDependsOn(A, 1)
A.setDependsOn(M1, 1)
self.executor_server.failJob('project-merge', A)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
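
        # A's merge failure cascades: B and C depend on A, so they are
        # reported as unable to merge and only A's merge job ever ran.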
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'NEW')
self.assertIn('This change depends on a change that failed to merge.',
C.messages[-1])
self.assertEqual(len(self.history), 1)

    def test_failing_dependent_changes(self):
"Test that failing dependent patches are taken out of stream"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
E = self.fake_gerrit.addFakeChange('org/project', 'master', 'E')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
E.addApproval('Code-Review', 2)
# E, D -> C -> B, A
D.setDependsOn(C, 1)
C.setDependsOn(B, 1)
self.executor_server.failJob('project-test1', B)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(E.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
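        # Release every build except A's so the rest of the queue
        # finishes first.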
for build in self.builds:
if build.parameters['zuul']['change'] != '1':
build.release()
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertIn('Build succeeded', A.messages[1])
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 2)
self.assertIn('Build failed', B.messages[1])
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(C.reported, 2)
self.assertIn('depends on a change', C.messages[1])
self.assertEqual(D.data['status'], 'NEW')
self.assertEqual(D.reported, 2)
self.assertIn('depends on a change', D.messages[1])
self.assertEqual(E.data['status'], 'MERGED')
self.assertEqual(E.reported, 2)
self.assertIn('Build succeeded', E.messages[1])
self.assertEqual(len(self.history), 18)

    def test_head_is_dequeued_once(self):
"Test that if a change at the head fails it is dequeued only once"
# If it's dequeued more than once, we should see extra
# aborted jobs.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', A)
self.executor_server.failJob('project-test2', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.assertTrue(self.builds[0].hasChanges(A))
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.release(self.builds[0])
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2) # test2, merge for B
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 4)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 15)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)

    def test_approval_removal(self):
        # Test that we dequeue a change when it cannot merge
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(1, len(self.builds))
self.assertEqual(0, len(self.history))
# Remove the approval
self.fake_gerrit.addEvent(A.addApproval('Approved', 0))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The change should be dequeued.
self.assertHistory([
dict(name='project-merge', result='ABORTED'),
], ordered=False)
self.assertEqual(2, len(A.messages))
self.assertEqual(A.data['status'], 'NEW')
self.assertIn('This change is unable to merge '
'due to a missing merge requirement.',
A.messages[1])

    @simple_layout('layouts/nonvoting-job-approval.yaml')
def test_nonvoting_job_approval(self):
"Test that non-voting jobs don't vote but leave approval"
A = self.fake_gerrit.addFakeChange('org/nonvoting-project',
'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.executor_server.failJob('nonvoting-project-test2', A)
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(
self.getJobFromHistory('nonvoting-project-test1').result,
'SUCCESS')
self.assertEqual(
self.getJobFromHistory('nonvoting-project-test2').result,
'FAILURE')
self.assertFalse(self.getJobFromHistory('nonvoting-project-test1').
parameters['zuul']['voting'])
self.assertFalse(self.getJobFromHistory('nonvoting-project-test2').
parameters['zuul']['voting'])
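        # The pipeline still leaves its approval on the change even
        # though the non-voting job failed.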
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "1")

    @simple_layout('layouts/nonvoting-job.yaml')
def test_nonvoting_job(self):
"Test that non-voting jobs don't vote."
A = self.fake_gerrit.addFakeChange('org/nonvoting-project',
'master', 'A')
A.addApproval('Code-Review', 2)
self.executor_server.failJob('nonvoting-project-test2', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(
self.getJobFromHistory('nonvoting-project-merge').result,
'SUCCESS')
self.assertEqual(
self.getJobFromHistory('nonvoting-project-test1').result,
'SUCCESS')
self.assertEqual(
self.getJobFromHistory('nonvoting-project-test2').result,
'FAILURE')
self.assertTrue(self.getJobFromHistory('nonvoting-project-merge').
parameters['zuul']['voting'])
self.assertTrue(self.getJobFromHistory('nonvoting-project-test1').
parameters['zuul']['voting'])
self.assertFalse(self.getJobFromHistory('nonvoting-project-test2').
parameters['zuul']['voting'])

    def test_check_queue_success(self):
"Test successful check queue jobs."
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')

    def test_check_queue_failure(self):
"Test failed check queue jobs."
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.executor_server.failJob('project-test2', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'FAILURE')

    @simple_layout('layouts/autohold.yaml')
def test_autohold(self):
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
# There should be a record in ZooKeeper
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual(1, len(request_list))
request = self.sched_zk_nodepool.getHoldRequest(
request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
self.assertEqual('project-test2', request.job)
self.assertEqual('reason text', request.reason)
self.assertEqual(1, request.max_count)
self.assertEqual(0, request.current_count)
self.assertEqual([], request.nodes)
# Some convenience variables for checking the stats.
        tenant_ram_stat = (
            'zuul.nodepool.resources.in_use.tenant.tenant-one.ram')
project_ram_stat = ('zuul.nodepool.resources.in_use.project.'
'review_example_com/org/project.ram')
# Test that we zeroed the gauges
self.scheds.first.sched._runStats()
self.assertUnReportedStat(tenant_ram_stat, value='1024', kind='g')
self.assertUnReportedStat(project_ram_stat, value='1024', kind='g')
self.assertReportedStat(tenant_ram_stat, value='0', kind='g')
self.assertReportedStat(project_ram_stat, value='0', kind='g')
# First check that successful jobs do not autohold
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
# project-test2
self.assertEqual(self.history[0].result, 'SUCCESS')
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNone(held_node)
# Hold in build to check the stats
self.executor_server.hold_jobs_in_build = True
# Now test that failed jobs are autoheld
# Set resources only for this node so we can examine the code
# path for updating the stats on autohold.
self.fake_nodepool.resources = {
'cores': 2,
'ram': 1024,
'instances': 1,
}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test2', B)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Get the build request object
build = list(self.getCurrentBuilds())[0]
# We should report using the held node's resources
self.waitUntilNodeCacheSync(
self.scheds.first.sched.nodepool.zk_nodepool)
self.statsd.clear()
self.scheds.first.sched._runStats()
self.assertReportedStat(tenant_ram_stat, value='1024', kind='g')
self.assertReportedStat(project_ram_stat, value='1024', kind='g')
self.assertUnReportedStat(tenant_ram_stat, value='0', kind='g')
self.assertUnReportedStat(project_ram_stat, value='0', kind='g')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 1)
# project-test2
self.assertEqual(self.history[1].result, 'FAILURE')
self.assertTrue(build.held)
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNotNone(held_node)
# Validate node has recorded the failed job
self.assertEqual(
held_node['hold_job'],
" ".join(['tenant-one',
'review.example.com/org/project',
'project-test2', '.*'])
)
self.assertEqual(held_node['comment'], "reason text")
# The hold request current_count should have incremented
# and we should have recorded the held node ID.
request2 = self.sched_zk_nodepool.getHoldRequest(
request.id)
self.assertEqual(request.current_count + 1, request2.current_count)
self.assertEqual(1, len(request2.nodes))
self.assertEqual(1, len(request2.nodes[0]["nodes"]))
# We should still report we use the resources
self.waitUntilNodeCacheSync(
self.scheds.first.sched.nodepool.zk_nodepool)
self.statsd.clear()
self.scheds.first.sched._runStats()
self.assertReportedStat(tenant_ram_stat, value='1024', kind='g')
self.assertReportedStat(project_ram_stat, value='1024', kind='g')
self.assertUnReportedStat(tenant_ram_stat, value='0', kind='g')
self.assertUnReportedStat(project_ram_stat, value='0', kind='g')
# Another failed change should not hold any more nodes
self.fake_nodepool.resources = {}
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
self.executor_server.failJob('project-test2', C)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(C.reported, 1)
# project-test2
self.assertEqual(self.history[2].result, 'FAILURE')
held_nodes = 0
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_nodes += 1
self.assertEqual(held_nodes, 1)
# request current_count should not have changed
request3 = self.sched_zk_nodepool.getHoldRequest(
request2.id)
self.assertEqual(request2.current_count, request3.current_count)
# Deleting hold request should set held nodes to used
self.sched_zk_nodepool.deleteHoldRequest(request3)
node_states = [n['state'] for n in self.fake_nodepool.getNodes()]
self.assertEqual(3, len(node_states))
self.assertEqual([zuul.model.STATE_USED] * 3, node_states)
# Nodepool deletes the nodes
for n in self.fake_nodepool.getNodes():
self.fake_nodepool.removeNode(n)
# We should now report that we no longer use the nodes resources
self.waitUntilNodeCacheSync(
self.scheds.first.sched.nodepool.zk_nodepool)
self.statsd.clear()
self.scheds.first.sched._runStats()
self.assertUnReportedStat(tenant_ram_stat, value='1024', kind='g')
self.assertUnReportedStat(project_ram_stat, value='1024', kind='g')
self.assertReportedStat(tenant_ram_stat, value='0', kind='g')
self.assertReportedStat(project_ram_stat, value='0', kind='g')

    @simple_layout('layouts/autohold.yaml')
def test_autohold_info(self):
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
# There should be a record in ZooKeeper
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual(1, len(request_list))
request = self.sched_zk_nodepool.getHoldRequest(
request_list[0])
self.assertIsNotNone(request)
request = self.scheds.first.sched.autohold_info(request.id)
self.assertNotEqual({}, request)
self.assertEqual('tenant-one', request['tenant'])
self.assertEqual('review.example.com/org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual('reason text', request['reason'])
self.assertEqual(1, request['max_count'])
self.assertEqual(0, request['current_count'])

    @simple_layout('layouts/autohold.yaml')
def test_autohold_delete(self):
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
# There should be a record in ZooKeeper
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual(1, len(request_list))
request = self.sched_zk_nodepool.getHoldRequest(
request_list[0])
self.assertIsNotNone(request)
# Delete and verify no more requests
self.scheds.first.sched.autohold_delete(request.id)
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual([], request_list)

    def test_autohold_padding(self):
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
# There should be a record in ZooKeeper
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual(1, len(request_list))
request = self.sched_zk_nodepool.getHoldRequest(
request_list[0])
self.assertIsNotNone(request)
        # Assert the ID leads with a bunch of zeros, then strip them
        # off to test that autohold_delete can handle a user passing in
        # an ID without leading zeros.
self.assertEqual(request.id[0:5], '00000')
trimmed_request = request.id[5:]
# Delete and verify no more requests
self.scheds.first.sched.autohold_delete(trimmed_request)
request_list = self.sched_zk_nodepool.getHoldRequests()
self.assertEqual([], request_list)

    def _test_autohold_scoped(self, change_obj, change, ref):
        # Create two changes on the same project, and an autohold
        # request for one of them.
other = self.fake_gerrit.addFakeChange(
'org/project', 'master', 'other'
)
if change != "":
ref = "refs/changes/%s/%s/.*" % (
str(change_obj.number).zfill(2)[-2:], str(change_obj.number)
)
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
ref, "reason text", 1, None)
# First, check that an unrelated job does not trigger autohold, even
# when it failed
self.executor_server.failJob('project-test2', other)
self.fake_gerrit.addEvent(other.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(other.data['status'], 'NEW')
self.assertEqual(other.reported, 1)
# project-test2
self.assertEqual(self.history[0].result, 'FAILURE')
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNone(held_node)
# And then verify that failed job for the defined change
# triggers the autohold
self.executor_server.failJob('project-test2', change_obj)
self.fake_gerrit.addEvent(change_obj.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(change_obj.data['status'], 'NEW')
self.assertEqual(change_obj.reported, 1)
# project-test2
self.assertEqual(self.history[1].result, 'FAILURE')
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNotNone(held_node)
# Validate node has recorded the failed job
self.assertEqual(
held_node['hold_job'],
" ".join(['tenant-one',
'review.example.com/org/project',
'project-test2', ref])
)
self.assertEqual(held_node['comment'], "reason text")

    @simple_layout('layouts/autohold.yaml')
def test_autohold_change(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self._test_autohold_scoped(A, change=A.number, ref="")

    @simple_layout('layouts/autohold.yaml')
def test_autohold_ref(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
ref = A.data['currentPatchSet']['ref']
self._test_autohold_scoped(A, change="", ref=ref)

    @simple_layout('layouts/autohold.yaml')
def test_autohold_scoping(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
# create three autohold requests, scoped to job, change and
# a specific ref
change = str(A.number)
change_ref = "refs/changes/%s/%s/.*" % (
str(change).zfill(2)[-2:], str(change)
)
ref = A.data['currentPatchSet']['ref']
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
change_ref, "reason text", 1, None)
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
ref, "reason text", 1, None)

        # Fail 3 jobs for the same change, and verify that the autohold
        # requests are fulfilled in the expected order: from the most
        # specific towards the most generic one.
def _fail_job_and_verify_autohold_request(change_obj, ref_filter):
self.executor_server.failJob('project-test2', change_obj)
self.fake_gerrit.addEvent(change_obj.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNotNone(held_node)
self.assertEqual(
held_node['hold_job'],
" ".join(['tenant-one',
'review.example.com/org/project',
'project-test2', ref_filter])
)
            self.assertFalse(held_node['_lock'], "Node %s is locked" %
                             (held_node['_oid'],))
self.fake_nodepool.removeNode(held_node)
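
        # Requests are satisfied from the most specific filter to the
        # most generic: the exact ref, then the change's ref pattern,
        # then the catch-all.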
_fail_job_and_verify_autohold_request(A, ref)
ref = "refs/changes/%s/%s/.*" % (str(change).zfill(2)[-2:],
str(change))
_fail_job_and_verify_autohold_request(A, ref)
_fail_job_and_verify_autohold_request(A, ".*")
@simple_layout('layouts/autohold.yaml')
def test_autohold_ignores_aborted_jobs(self):
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
self.executor_server.hold_jobs_in_build = True
# Create a change that will have its job aborted
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # Creating a new patchset on change A will abort A,1's job
        # because the new patchset replaces A,1 with A,2.
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
# Note only the successful job for A,2 will report as we don't
# report aborted builds for old patchsets.
self.assertEqual(A.reported, 1)
# A,1 project-test2
self.assertEqual(self.history[0].result, 'ABORTED')
# A,2 project-test2
self.assertEqual(self.history[1].result, 'SUCCESS')
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNone(held_node)
@simple_layout('layouts/autohold.yaml')
def test_autohold_hold_expiration(self):
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, 30)
# Hold a failed job
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test2', B)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 1)
# project-test2
self.assertEqual(self.history[0].result, 'FAILURE')
# Check nodepool for a held node
held_node = None
for node in self.fake_nodepool.getNodes():
if node['state'] == zuul.model.STATE_HOLD:
held_node = node
break
self.assertIsNotNone(held_node)
# Validate node has hold_expiration property
self.assertEqual(int(held_node['hold_expiration']), 30)
@simple_layout('layouts/autohold.yaml')
def test_autohold_list(self):
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, None)
autohold_requests = self.scheds.first.sched.autohold_list()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
request = autohold_requests[0]
self.assertEqual('tenant-one', request['tenant'])
self.assertIn('org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual(".*", request['ref_filter'])
self.assertEqual("reason text", request['reason'])
@simple_layout('layouts/autohold.yaml')
def test_autohold_request_expiration(self):
orig_exp = RecordingExecutorServer.EXPIRED_HOLD_REQUEST_TTL
def reset_exp():
self.executor_server.EXPIRED_HOLD_REQUEST_TTL = orig_exp
self.addCleanup(reset_exp)
# Temporarily shorten the hold request expiration time
self.scheds.first.sched.autohold(
'tenant-one', 'review.example.com/org/project', 'project-test2',
".*", "reason text", 1, 1)
autohold_requests = self.scheds.first.sched.autohold_list()
self.assertEqual(1, len(autohold_requests))
req = autohold_requests[0]
self.assertIsNone(req['expired'])
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.executor_server.failJob('project-test2', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
autohold_requests = self.scheds.first.sched.autohold_list()
self.assertEqual(1, len(autohold_requests))
req = autohold_requests[0]
self.assertIsNotNone(req['expired'])
        # Temporarily shorten the hold time so that the hold request can
        # be auto-deleted (which happens on the next job failure), and
        # wait long enough for the nodes to expire and the request to be
        # deleted.
self.executor_server.EXPIRED_HOLD_REQUEST_TTL = 1
time.sleep(3)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test2', B)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
for _ in iterate_timeout(10, 'hold request expiration'):
if len(self.scheds.first.sched.autohold_list()) == 0:
break
@simple_layout('layouts/three-projects.yaml')
def test_dependent_behind_dequeue(self):
        "Test that dependent changes behind dequeued changes work"
        # This particular test does a large number of merges and needs a
        # little more time to complete.
        self.wait_timeout = 120
        # This complicated test is a reproduction of a real-life bug.
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
E = self.fake_gerrit.addFakeChange('org/project2', 'master', 'E')
F = self.fake_gerrit.addFakeChange('org/project3', 'master', 'F')
D.setDependsOn(C, 1)
E.setDependsOn(D, 1)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
E.addApproval('Code-Review', 2)
F.addApproval('Code-Review', 2)
A.fail_merge = True
# Change object re-use in the gerrit trigger is hidden if
# changes are added in quick succession; waiting makes it more
# like real life.
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(E.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(F.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# all jobs running
# Grab pointers to the jobs we want to release before
# releasing any, because list indexes may change as
# the jobs complete.
a, b, c = self.builds[:3]
a.release()
b.release()
c.release()
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(D.data['status'], 'MERGED')
self.assertEqual(E.data['status'], 'MERGED')
self.assertEqual(F.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(D.reported, 2)
self.assertEqual(E.reported, 2)
self.assertEqual(F.reported, 2)
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 15)
self.assertEqual(len(self.history), 44)
def test_merger_repack(self):
"Test that the merger works after a repack"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEmptyQueues()
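        # Clear the recorded build history so the assertions below only
        # see the builds that run after the repack.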
self.build_history = []
path = os.path.join(self.merger_src_root, "review.example.com",
"org/project")
if os.path.exists(path):
repack_repo(path)
path = os.path.join(self.executor_src_root, "review.example.com",
"org/project")
if os.path.exists(path):
repack_repo(path)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
def test_merger_repack_large_change(self):
"Test that the merger works with large changes after a repack"
        # https://bugs.launchpad.net/zuul/+bug/1078946
# This test assumes the repo is already cloned; make sure it is
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
trusted, project = tenant.getProject('org/project')
url = self.fake_gerrit.getGitUrl(project)
self.executor_server.merger._addProject(
'review.example.com', 'gerrit', 'org/project', url, None, None,
None)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addPatchset(large=True)
# TODOv3(jeblair): add hostname to upstream root
path = os.path.join(self.upstream_root, 'org/project')
repack_repo(path)
path = os.path.join(self.merger_src_root, 'review.example.com',
'org/project')
if os.path.exists(path):
repack_repo(path)
path = os.path.join(self.executor_src_root, 'review.example.com',
'org/project')
if os.path.exists(path):
repack_repo(path)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
def test_new_patchset_dequeues_old(self):
"Test that a new patchset causes the old to be dequeued"
# D -> C (depends on B) -> B (depends on A) -> A -> M
self.executor_server.hold_jobs_in_build = True
M = self.fake_gerrit.addFakeChange('org/project', 'master', 'M')
M.setMerged()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
C.setDependsOn(B, 1)
B.setDependsOn(A, 1)
A.setDependsOn(M, 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
B.addPatchset()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(C.reported, 2)
self.assertEqual(D.data['status'], 'MERGED')
self.assertEqual(D.reported, 2)
self.assertEqual(len(self.history), 9) # 3 each for A, B, D.
@simple_layout('layouts/no-dequeue-on-new-patchset.yaml')
def test_no_dequeue_on_new_patchset(self):
"Test the dequeue-on-new-patchset false value"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.hold_jobs_in_build = True
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
A.addPatchset()
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project1-test', result='SUCCESS', changes='1,1'),
dict(name='project1-test', result='SUCCESS', changes='1,2'),
], ordered=False)
@simple_layout('layouts/no-dequeue-on-new-patchset.yaml')
def test_no_dequeue_on_new_patchset_deps(self):
"Test dependencies are updated if dequeue-on-new-patchset is false"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['url'])
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.executor_server.hold_jobs_in_build = True
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The item should be dequeued because of the dependency change
self.assertHistory([
dict(name='project1-test', result='ABORTED', changes='1,1 2,1'),
], ordered=False)
def test_new_patchset_check(self):
"Test a new patchset in check"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# A live item, and a non-live/live pair
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 3)
self.assertEqual(items[0].change.number, '1')
self.assertEqual(items[0].change.patchset, '1')
self.assertFalse(items[0].live)
self.assertEqual(items[1].change.number, '2')
self.assertEqual(items[1].change.patchset, '1')
self.assertTrue(items[1].live)
self.assertEqual(items[2].change.number, '1')
self.assertEqual(items[2].change.patchset, '1')
self.assertTrue(items[2].live)
# Add a new patchset to A
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
# The live copy of A,1 should be gone, but the non-live and B
# should continue, and we should have a new A,2
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 3)
self.assertEqual(items[0].change.number, '1')
self.assertEqual(items[0].change.patchset, '1')
self.assertFalse(items[0].live)
self.assertEqual(items[1].change.number, '2')
self.assertEqual(items[1].change.patchset, '1')
self.assertTrue(items[1].live)
self.assertEqual(items[2].change.number, '1')
self.assertEqual(items[2].change.patchset, '2')
self.assertTrue(items[2].live)
# Add a new patchset to B
B.addPatchset()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
        # The live copy of B,1 and its non-live copy of A,1 should be
        # gone, but we should have a new B,2 (still based on A,1)
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 3)
self.assertEqual(items[0].change.number, '1')
self.assertEqual(items[0].change.patchset, '2')
self.assertTrue(items[0].live)
self.assertEqual(items[1].change.number, '1')
self.assertEqual(items[1].change.patchset, '1')
self.assertFalse(items[1].live)
self.assertEqual(items[2].change.number, '2')
self.assertEqual(items[2].change.patchset, '2')
self.assertTrue(items[2].live)
self.builds[0].release()
self.waitUntilSettled()
self.builds[0].release()
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(self.history[0].result, 'ABORTED')
self.assertEqual(self.history[0].changes, '1,1')
self.assertEqual(self.history[1].result, 'ABORTED')
self.assertEqual(self.history[1].changes, '1,1 2,1')
self.assertEqual(self.history[2].result, 'SUCCESS')
self.assertEqual(self.history[2].changes, '1,2')
self.assertEqual(self.history[3].result, 'SUCCESS')
self.assertEqual(self.history[3].changes, '1,1 2,2')
def test_abandoned_gate(self):
"Test that an abandoned change is dequeued from gate"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1, "One job being built (on hold)")
self.assertEqual(self.builds[0].name, 'project-merge')
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertBuilds([])
self.assertHistory([
dict(name='project-merge', result='ABORTED', changes='1,1')],
ordered=False)
self.assertEqual(A.reported, 1,
"Abandoned gate change should report only start")
def test_abandoned_check(self):
"Test that an abandoned change is dequeued from check"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# A live item, and a non-live/live pair
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 3)
self.assertEqual(items[0].change.number, '1')
self.assertFalse(items[0].live)
self.assertEqual(items[1].change.number, '2')
self.assertTrue(items[1].live)
self.assertEqual(items[2].change.number, '1')
self.assertTrue(items[2].live)
# Abandon A
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
# The live copy of A should be gone, but the non-live and B
# should continue
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 2)
self.assertEqual(items[0].change.number, '1')
self.assertFalse(items[0].live)
self.assertEqual(items[1].change.number, '2')
self.assertTrue(items[1].live)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.history), 4)
self.assertEqual(self.history[0].result, 'ABORTED',
'Build should have been aborted')
self.assertEqual(A.reported, 0, "Abandoned change should not report")
self.assertEqual(B.reported, 1, "Change should report")
def test_cancel_starting_build(self):
"Test that a canceled build that is not processed yet is removed"
self.executor_server.hold_jobs_in_start = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(30, 'Wait for build to be in starting phase'):
if self.executor_server.job_workers:
break
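        # This event is set by the watch below once the build's cancel
        # znode appears.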
tevent = threading.Event()
def data_watch(event):
# Set the threading event as soon as the cancel node is present
tevent.set()
builds = list(self.executor_api.all())
# When using DataWatch it is possible for the cancel znode to be
# created, then deleted almost immediately before the watch handling
        # happens. When this happens, kazoo sees no stat info and treats
        # the event as a noop because no new version is available. Use
        # exists() to avoid this problem.
self.zk_client.client.exists(f"{builds[0].path}/cancel", data_watch)
# Abandon change to cancel build
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.assertTrue(tevent.wait(timeout=30))
self.executor_server.hold_jobs_in_start = False
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='ABORTED')
])
def test_abandoned_not_timer(self):
"Test that an abandoned change does not cancel timer jobs"
# This test can not use simple_layout because it must start
# with a configuration which does not include a
# timer-triggered job so that we have an opportunity to set
# the hold flag before the first job.
self.executor_server.hold_jobs_in_build = True
# Start timer trigger - also org/project
self.commitConfigUpdate('common-config', 'layouts/idle.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
time.sleep(5)
self.waitUntilSettled()
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
        # Must be in the same repo, so overwrite the config with another one.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1, "One timer job")
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2, "One change plus one timer job")
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1, "One timer job remains")
self.executor_server.release()
self.waitUntilSettled()
def test_new_patchset_dequeues_old_on_head(self):
"Test that a new patchset causes the old to be dequeued (at head)"
# D -> C (depends on B) -> B (depends on A) -> A -> M
self.executor_server.hold_jobs_in_build = True
M = self.fake_gerrit.addFakeChange('org/project', 'master', 'M')
M.setMerged()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
C.setDependsOn(B, 1)
B.setDependsOn(A, 1)
A.setDependsOn(M, 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(C.reported, 2)
self.assertEqual(D.data['status'], 'MERGED')
self.assertEqual(D.reported, 2)
self.assertEqual(len(self.history), 7)
def test_new_patchset_dequeues_old_without_dependents(self):
"Test that a new patchset causes only the old to be dequeued"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B.addPatchset()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
self.assertEqual(len(self.history), 9)
def test_new_patchset_dequeues_old_independent_queue(self):
"Test that a new patchset causes the old to be dequeued (independent)"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B.addPatchset()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 1)
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(C.reported, 1)
self.assertEqual(len(self.history), 10)
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
@simple_layout('layouts/noop-job.yaml')
def test_noop_job(self):
"Test that the internal noop job works"
A = self.fake_gerrit.addFakeChange('org/noop-project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
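        # The noop job is handled entirely within the scheduler, so no
        # build request should ever reach the executor queue.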
queue = list(self.executor_api.queued())
self.assertEqual(len(queue), 0)
self.assertTrue(self.scheds.first.sched._areAllBuildsComplete())
self.assertEqual(len(self.history), 0)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
@simple_layout('layouts/no-jobs-project.yaml')
def test_no_job_project(self):
"Test that reports with no jobs don't get sent"
A = self.fake_gerrit.addFakeChange('org/no-jobs-project',
'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # Change wasn't reported since it had no jobs to run
self.assertEqual(A.reported, False)
# Check queue is empty afterwards
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
self.assertEqual(len(self.history), 0)
def test_zuul_refs(self):
"Test that zuul refs exist and have the right changes"
self.executor_server.hold_jobs_in_build = True
M1 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'M1')
M1.setMerged()
M2 = self.fake_gerrit.addFakeChange('org/project2', 'master', 'M2')
M2.setMerged()
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
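        # M1 and M2 were created first, so they are changes 1 and 2;
        # A, B, C and D are changes 3 through 6 respectively.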
a_build = b_build = c_build = d_build = None
for x in self.builds:
if x.parameters['zuul']['change'] == '3':
a_build = x
elif x.parameters['zuul']['change'] == '4':
b_build = x
elif x.parameters['zuul']['change'] == '5':
c_build = x
elif x.parameters['zuul']['change'] == '6':
d_build = x
if a_build and b_build and c_build and d_build:
break
# should have a, not b, and should not be in project2
self.assertTrue(a_build.hasChanges(A))
self.assertFalse(a_build.hasChanges(B, M2))
# should have a and b, and should not be in project2
self.assertTrue(b_build.hasChanges(A, B))
self.assertFalse(b_build.hasChanges(M2))
# should have a and b in 1, c in 2
self.assertTrue(c_build.hasChanges(A, B, C))
self.assertFalse(c_build.hasChanges(D))
# should have a and b in 1, c and d in 2
self.assertTrue(d_build.hasChanges(A, B, C, D))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
self.assertEqual(D.data['status'], 'MERGED')
self.assertEqual(D.reported, 2)
def test_rerun_on_error(self):
"Test that if a worker fails to run a job, it is run again"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
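        # Flag the build to be requeued, simulating a worker that failed
        # to run the job.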
self.builds[0].requeue = True
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(self.countJobResults(self.history, None), 1)
self.assertEqual(self.countJobResults(self.history, 'SUCCESS'), 3)
def test_statsd(self):
"Test each of the statsd methods used in the scheduler"
statsd = self.scheds.first.sched.statsd
statsd.incr('test-incr')
statsd.timing('test-timing', 3)
statsd.gauge('test-gauge', 12)
self.assertReportedStat('test-incr', '1', 'c')
self.assertReportedStat('test-timing', '3', 'ms')
self.assertReportedStat('test-gauge', '12', 'g')
# test key normalization
statsd.extra_keys = {'hostname': '1_2_3_4'}
statsd.incr('hostname-incr.{hostname}.{fake}', fake='1:2')
statsd.timing('hostname-timing.{hostname}.{fake}', 3, fake='1:2')
statsd.gauge('hostname-gauge.{hostname}.{fake}', 12, fake='1:2')
self.assertReportedStat('hostname-incr.1_2_3_4.1_2', '1', 'c')
self.assertReportedStat('hostname-timing.1_2_3_4.1_2', '3', 'ms')
self.assertReportedStat('hostname-gauge.1_2_3_4.1_2', '12', 'g')
def test_statsd_conflict(self):
statsd = self.scheds.first.sched.statsd
statsd.gauge('test-gauge', 12)
        # Since test-gauge is already a value, we can't make subvalues.
        # Test that the assertion catches this.
statsd.gauge('test-gauge.1_2_3_4', 12)
self.assertReportedStat('test-gauge', '12', 'g')
self.assertRaises(Exception, self.assertReportedStat,
'test-gauge.1_2_3_4', '12', 'g')
def test_file_head(self):
# This is a regression test for an observed bug. A change
# with a file named "HEAD" in the root directory of the repo
# was processed by a merger. It then was unable to reset the
# repo because of:
# GitCommandError: 'git reset --hard HEAD' returned
# with exit code 128
# stderr: 'fatal: ambiguous argument 'HEAD': both revision
# and filename
# Use '--' to separate filenames from revisions'
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addPatchset({'HEAD': ''})
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('Build succeeded', A.messages[0])
self.assertIn('Build succeeded', B.messages[0])
def test_file_jobs(self):
"Test that file jobs run only when appropriate"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addPatchset({'pip-requires': 'foo'})
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
testfile_jobs = [x for x in self.history
if x.name == 'project-testfile']
self.assertEqual(len(testfile_jobs), 1)
self.assertEqual(testfile_jobs[0].changes, '1,2')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
def _test_irrelevant_files_jobs(self, should_skip):
"Test that jobs with irrelevant-files filter run only when appropriate"
if should_skip:
files = {'ignoreme': 'ignored\n'}
else:
files = {'respectme': 'please!\n'}
change = self.fake_gerrit.addFakeChange('org/project',
'master',
'test irrelevant-files',
files=files)
self.fake_gerrit.addEvent(change.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
tested_change_ids = [x.changes[0] for x in self.history
if x.name == 'project-test-irrelevant-files']
if should_skip:
self.assertEqual([], tested_change_ids)
else:
self.assertIn(change.data['number'], tested_change_ids)
@simple_layout('layouts/irrelevant-files.yaml')
def test_irrelevant_files_match_skips_job(self):
self._test_irrelevant_files_jobs(should_skip=True)
@simple_layout('layouts/irrelevant-files.yaml')
def test_irrelevant_files_no_match_runs_job(self):
self._test_irrelevant_files_jobs(should_skip=False)
@simple_layout('layouts/inheritance.yaml')
def test_inherited_jobs_keep_matchers(self):
files = {'ignoreme': 'ignored\n'}
change = self.fake_gerrit.addFakeChange('org/project',
'master',
'test irrelevant-files',
files=files)
self.fake_gerrit.addEvent(change.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
run_jobs = set([build.name for build in self.history])
self.assertEqual(set(['project-test-nomatch-starts-empty',
'project-test-nomatch-starts-full']), run_jobs)
@simple_layout('layouts/job-vars.yaml')
def test_inherited_job_variables(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='parentjob', result='SUCCESS'),
dict(name='child1', result='SUCCESS'),
dict(name='child2', result='SUCCESS'),
dict(name='child3', result='SUCCESS'),
dict(name='override_project_var', result='SUCCESS'),
dict(name='job_from_template1', result='SUCCESS'),
dict(name='job_from_template2', result='SUCCESS'),
], ordered=False)
j = self.getJobFromHistory('parentjob')
rp = set([p['name'] for p in j.parameters['projects']])
job_vars = j.job.combined_variables
self.assertEqual(job_vars['project_var'], 'set_in_project')
self.assertEqual(job_vars['template_var1'], 'set_in_template1')
self.assertEqual(job_vars['template_var2'], 'set_in_template2')
self.assertEqual(job_vars['override'], 0)
self.assertEqual(job_vars['child1override'], 0)
self.assertEqual(job_vars['parent'], 0)
self.assertEqual(job_vars['deep']['override'], 0)
self.assertFalse('child1' in job_vars)
self.assertFalse('child2' in job_vars)
self.assertFalse('child3' in job_vars)
        self.assertEqual(rp, set(['org/project', 'org/project0']))
j = self.getJobFromHistory('child1')
rp = set([p['name'] for p in j.parameters['projects']])
job_vars = j.job.combined_variables
self.assertEqual(job_vars['project_var'], 'set_in_project')
self.assertEqual(job_vars['override'], 1)
self.assertEqual(job_vars['child1override'], 1)
self.assertEqual(job_vars['parent'], 0)
self.assertEqual(job_vars['child1'], 1)
self.assertEqual(job_vars['deep']['override'], 1)
self.assertFalse('child2' in job_vars)
self.assertFalse('child3' in job_vars)
self.assertEqual(rp, set(['org/project', 'org/project0',
'org/project1']))
j = self.getJobFromHistory('child2')
job_vars = j.job.combined_variables
self.assertEqual(job_vars['project_var'], 'set_in_project')
rp = set([p['name'] for p in j.parameters['projects']])
self.assertEqual(job_vars['override'], 2)
self.assertEqual(job_vars['child1override'], 0)
self.assertEqual(job_vars['parent'], 0)
self.assertEqual(job_vars['deep']['override'], 2)
self.assertFalse('child1' in job_vars)
self.assertEqual(job_vars['child2'], 2)
self.assertFalse('child3' in job_vars)
self.assertEqual(rp, set(['org/project', 'org/project0',
'org/project2']))
j = self.getJobFromHistory('child3')
job_vars = j.job.combined_variables
self.assertEqual(job_vars['project_var'], 'set_in_project')
rp = set([p['name'] for p in j.parameters['projects']])
self.assertEqual(job_vars['override'], 3)
self.assertEqual(job_vars['child1override'], 0)
self.assertEqual(job_vars['parent'], 0)
self.assertEqual(job_vars['deep']['override'], 3)
self.assertFalse('child1' in job_vars)
self.assertFalse('child2' in job_vars)
self.assertEqual(job_vars['child3'], 3)
self.assertEqual(rp, set(['org/project', 'org/project0',
'org/project3']))
j = self.getJobFromHistory('override_project_var')
job_vars = j.job.combined_variables
self.assertEqual(job_vars['project_var'], 'override_in_job')
@simple_layout('layouts/job-variants.yaml')
def test_job_branch_variants(self):
self.create_branch('org/project', 'stable/diablo')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/diablo'))
self.create_branch('org/project', 'stable/essex')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/essex'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'stable/diablo', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
C = self.fake_gerrit.addFakeChange('org/project', 'stable/essex', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='python27', result='SUCCESS'),
dict(name='python27', result='SUCCESS'),
dict(name='python27', result='SUCCESS'),
])
j = self.history[0].job
self.assertEqual(j.timeout, 40)
self.assertEqual(len(j.nodeset.nodes), 1)
self.assertEqual(next(iter(j.nodeset.nodes.values())).label, 'new')
self.assertEqual([x['path'] for x in j.pre_run],
['base-pre', 'py27-pre'])
self.assertEqual([x['path'] for x in j.post_run],
['py27-post-a', 'py27-post-b', 'base-post'])
self.assertEqual([x['path'] for x in j.run],
['playbooks/python27.yaml'])
j = self.history[1].job
self.assertEqual(j.timeout, 50)
self.assertEqual(len(j.nodeset.nodes), 1)
self.assertEqual(next(iter(j.nodeset.nodes.values())).label, 'old')
self.assertEqual([x['path'] for x in j.pre_run],
['base-pre', 'py27-pre', 'py27-diablo-pre'])
self.assertEqual([x['path'] for x in j.post_run],
['py27-diablo-post', 'py27-post-a', 'py27-post-b',
'base-post'])
self.assertEqual([x['path'] for x in j.run],
['py27-diablo'])
j = self.history[2].job
self.assertEqual(j.timeout, 40)
self.assertEqual(len(j.nodeset.nodes), 1)
self.assertEqual(next(iter(j.nodeset.nodes.values())).label, 'new')
self.assertEqual([x['path'] for x in j.pre_run],
['base-pre', 'py27-pre', 'py27-essex-pre'])
self.assertEqual([x['path'] for x in j.post_run],
['py27-essex-post', 'py27-post-a', 'py27-post-b',
'base-post'])
self.assertEqual([x['path'] for x in j.run],
['playbooks/python27.yaml'])
@simple_layout("layouts/no-run.yaml")
def test_job_without_run(self):
"Test that a job without a run playbook errors"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('Job base does not specify a run playbook',
A.messages[-1])
def test_queue_names(self):
"Test shared change queue names"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
        # Change queues are created lazily by the dependent pipeline
        # manager, so retrieve the queues without having to actually
        # enqueue a change.
gate = tenant.layout.pipelines['gate']
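        # A minimal change stand-in; getChangeQueue only needs the
        # project and branch attributes here.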
FakeChange = namedtuple('FakeChange', ['project', 'branch'])
fake_a = FakeChange(project1, 'master')
fake_b = FakeChange(project2, 'master')
with (pipeline_lock(self.zk_client, tenant.name, gate.name) as lock,
self.createZKContext(lock) as ctx,
gate.manager.currentContext(ctx)):
gate.manager.getChangeQueue(fake_a, None)
gate.manager.getChangeQueue(fake_b, None)
q1 = gate.getQueue(project1.canonical_name, None)
q2 = gate.getQueue(project2.canonical_name, None)
self.assertEqual(q1.name, 'integrated')
self.assertEqual(q2.name, 'integrated')
@simple_layout("layouts/template-queue.yaml")
def test_template_queue(self):
"Test a shared queue can be constructed from a project-template"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
        # Change queues are created lazily by the dependent pipeline
        # manager, so retrieve the queues without having to actually
        # enqueue a change.
gate = tenant.layout.pipelines['gate']
FakeChange = namedtuple('FakeChange', ['project', 'branch'])
fake_a = FakeChange(project1, 'master')
fake_b = FakeChange(project2, 'master')
with (pipeline_lock(self.zk_client, tenant.name, gate.name) as lock,
self.createZKContext(lock) as ctx,
gate.manager.currentContext(ctx)):
gate.manager.getChangeQueue(fake_a, None)
gate.manager.getChangeQueue(fake_b, None)
q1 = gate.getQueue(project1.canonical_name, None)
q2 = gate.getQueue(project2.canonical_name, None)
self.assertEqual(q1.name, 'integrated')
self.assertEqual(q2.name, 'integrated')
@simple_layout("layouts/template-project-queue.yaml")
def test_template_project_queue(self):
"Test a shared queue can be constructed from a project-template"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
        # Change queues are created lazily by the dependent pipeline
        # manager, so retrieve the queues without having to actually
        # enqueue a change.
gate = tenant.layout.pipelines['gate']
FakeChange = namedtuple('FakeChange', ['project', 'branch'])
fake_a = FakeChange(project1, 'master')
fake_b = FakeChange(project2, 'master')
with (pipeline_lock(self.zk_client, tenant.name, gate.name) as lock,
self.createZKContext(lock) as ctx,
gate.manager.currentContext(ctx)):
gate.manager.getChangeQueue(fake_a, None)
gate.manager.getChangeQueue(fake_b, None)
q1 = gate.getQueue(project1.canonical_name, None)
q2 = gate.getQueue(project2.canonical_name, None)
self.assertEqual(q1.name, 'integrated')
self.assertEqual(q2.name, 'integrated')
@simple_layout("layouts/regex-template-queue.yaml")
def test_regex_template_queue(self):
"Test a shared queue can be constructed from a regex project-template"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
        # Change queues are created lazily by the dependent pipeline
        # manager, so retrieve the queues without having to actually
        # enqueue a change.
gate = tenant.layout.pipelines['gate']
FakeChange = namedtuple('FakeChange', ['project', 'branch'])
fake_a = FakeChange(project1, 'master')
fake_b = FakeChange(project2, 'master')
with (pipeline_lock(self.zk_client, tenant.name, gate.name) as lock,
self.createZKContext(lock) as ctx,
gate.manager.currentContext(ctx)):
gate.manager.getChangeQueue(fake_a, None)
gate.manager.getChangeQueue(fake_b, None)
q1 = gate.getQueue(project1.canonical_name, None)
q2 = gate.getQueue(project2.canonical_name, None)
self.assertEqual(q1.name, 'integrated')
self.assertEqual(q2.name, 'integrated')
@simple_layout("layouts/regex-queue.yaml")
@skipIfMultiScheduler()
def test_regex_queue(self):
"Test a shared queue can be constructed from a regex project"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
        # Change queues are created lazily by the dependent pipeline
        # manager, so retrieve the queues without having to actually
        # enqueue a change.
gate = tenant.layout.pipelines['gate']
FakeChange = namedtuple('FakeChange', ['project', 'branch'])
fake_a = FakeChange(project1, 'master')
fake_b = FakeChange(project2, 'master')
with (pipeline_lock(self.zk_client, tenant.name, gate.name) as lock,
self.createZKContext(lock) as ctx,
gate.manager.currentContext(ctx)):
gate.manager.getChangeQueue(fake_a, None)
gate.manager.getChangeQueue(fake_b, None)
q1 = gate.getQueue(project1.canonical_name, None)
q2 = gate.getQueue(project2.canonical_name, None)
self.assertEqual(q1.name, 'integrated')
self.assertEqual(q2.name, 'integrated')
def test_queue_precedence(self):
"Test that queue precedence works"
self.hold_jobs_in_queue = True
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
        # Run one build at a time to ensure a deterministic order:
self.orderedRelease()
self.executor_server.hold_jobs_in_build = False
self.waitUntilSettled()
self.log.debug(self.history)
self.assertEqual(self.history[0].pipeline, 'gate')
self.assertEqual(self.history[1].pipeline, 'check')
self.assertEqual(self.history[2].pipeline, 'gate')
self.assertEqual(self.history[3].pipeline, 'gate')
self.assertEqual(self.history[4].pipeline, 'check')
self.assertEqual(self.history[5].pipeline, 'check')
@simple_layout('layouts/two-check.yaml')
def test_query_dependency_count(self):
# Test that we efficiently query dependent changes
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['url'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# 1. The query to find the change id
# from the Depends-On string "change:1" (simpleQuery)
# 2. The query to populate the change once we know the id
# (queryChange)
self.assertEqual(A.queried, 2)
self.assertEqual(B.queried, 1)
def test_reconfigure_merge(self):
"""Test that two reconfigure events are merged"""
        # Wrap the reconfiguration handler so we can count how many
        # times it runs.
with mock.patch.object(
zuul.scheduler.Scheduler, '_doTenantReconfigureEvent',
wraps=self.scheds.first.sched._doTenantReconfigureEvent
) as mymock:
with self.scheds.first.sched.run_handler_lock:
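                # Holding the run handler lock keeps the scheduler from
                # processing events, so both branch creation events queue
                # up and should be merged into a single reconfiguration.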
self.create_branch('org/project', 'stable/diablo')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/diablo'))
self.create_branch('org/project', 'stable/essex')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable/essex'))
for _ in iterate_timeout(60, 'jobs started'):
if len(self.scheds.first.sched.trigger_events[
'tenant-one']) == 2:
break
self.waitUntilSettled()
mymock.assert_called_once()
def test_live_reconfiguration(self):
"Test that live reconfiguration works"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
def test_live_reconfiguration_layout_cache_fallback(self):
# Test that re-calculating a dynamic fallback layout works after it
# was removed during a reconfiguration.
self.executor_server.hold_jobs_in_build = True
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test3
parent: project-test1
# add a job by the canonical project name
- project:
gate:
jobs:
- project-test3:
dependencies:
- project-merge
""")
file_dict = {'zuul.d/a.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['gate']
items = pipeline.getAllItems()
self.assertEqual(len(items), 1)
self.assertIsNone(items[0].layout_uuid)
# Assert that the layout cache is empty after a reconfiguration.
self.assertEqual(pipeline.manager._layout_cache, {})
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
parent='refs/changes/01/1/1')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
items = pipeline.getAllItems()
self.assertEqual(len(items), 2)
for item in items:
# Layout UUID should be set again for all live items. It had to
# be re-calculated for the first item in the queue as it was reset
# during re-enqueue.
self.assertIsNotNone(item.layout_uuid)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test3', result='SUCCESS', changes='1,1'),
dict(name='project-test3', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
def test_live_reconfiguration_layout_cache_non_live(self):
# Test that the layout UUID is only reset for live items.
self.executor_server.hold_jobs_in_build = True
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test3
parent: project-test1
# add a job by the canonical project name
- project:
check:
jobs:
- project-test3:
dependencies:
- project-merge
""")
file_dict = {'zuul.d/a.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
parent='refs/changes/01/1/1')
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['check']
items = pipeline.getAllItems()
self.assertEqual(len(items), 2)
# Assert that the layout UUID of the live item is reset during a
# reconfiguration, but non-live items keep their UUID.
self.assertIsNotNone(items[0].layout_uuid)
self.assertIsNone(items[1].layout_uuid)
# Cache should be empty after a reconfiguration
self.assertEqual(pipeline.manager._layout_cache, {})
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 0)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test3', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
def test_live_reconfiguration_command_socket(self):
"Test that live reconfiguration via command socket works"
# record previous tenant reconfiguration state, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
command_socket = self.scheds.first.config.get(
'scheduler', 'command_socket')
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.connect(command_socket)
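            # Commands on the socket are newline-terminated.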
s.sendall('full-reconfigure\n'.encode('utf8'))
# Wait for full reconfiguration. Note that waitUntilSettled is not
# reliable here because the reconfigure event may arrive in the
# event queue after waitUntilSettled.
start = time.time()
while True:
if time.time() - start > 15:
raise Exception("Timeout waiting for full reconfiguration")
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
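            # LayoutState comparisons order by recency, so old < new
            # means the new layout has taken effect.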
if old < new:
break
else:
time.sleep(0)
self.assertGreater(new.last_reconfigured, old.last_reconfigured)
self.assertGreater(new.last_reconfigure_event_ltime,
old.last_reconfigure_event_ltime)
def test_tenant_reconfiguration_command_socket(self):
"Test that single-tenant reconfiguration via command socket works"
# record previous tenant reconfiguration state, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
command_socket = self.scheds.first.config.get(
'scheduler', 'command_socket')
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.connect(command_socket)
s.sendall('tenant-reconfigure ["tenant-one"]\n'.encode('utf8'))
        # Wait for the tenant reconfiguration. Note that
        # waitUntilSettled is not reliable here because the reconfigure
        # event may arrive in the event queue after waitUntilSettled.
start = time.time()
while True:
if time.time() - start > 15:
raise Exception("Timeout waiting for full reconfiguration")
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
if old < new:
break
else:
time.sleep(0)
def test_double_live_reconfiguration_shared_queue(self):
# This was a real-world regression. A change is added to
# gate; a reconfigure happens, a second change which depends
# on the first is added, and a second reconfiguration happens.
# Ensure that both changes merge.
# A failure may indicate incorrect caching or cleaning up of
# references during a reconfiguration.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.setDependsOn(A, 1)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
# Add the parent change.
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# Reconfigure (with only one change in the pipeline).
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Add the child change.
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# Reconfigure (with both in the pipeline).
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.history), 8)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
def test_live_reconfiguration_del_project(self):
# Test project deletion from tenant while changes are enqueued
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 8)
self.newTenantConfig('config/single-tenant/main-one-project.yaml')
# This layout defines only org/project, not org/project1
self.commitConfigUpdate(
'common-config',
'layouts/live-reconfiguration-del-project.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(C.reported, 0)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='2,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='3,1'),
dict(name='project-test1', result='ABORTED', changes='2,1'),
dict(name='project-test2', result='ABORTED', changes='2,1'),
dict(name='project1-project2-integration',
result='ABORTED', changes='2,1'),
dict(name='project-test1', result='ABORTED', changes='3,1'),
dict(name='project-test2', result='ABORTED', changes='3,1'),
dict(name='project1-project2-integration',
result='ABORTED', changes='3,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
], ordered=False)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
def test_live_reconfiguration_del_pipeline(self):
# Test pipeline deletion while changes are enqueued
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
        # This layout does not define any pipelines
self.commitConfigUpdate(
'common-config',
'layouts/live-reconfiguration-del-pipeline.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 0)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='ABORTED', changes='1,1'),
dict(name='project-test2', result='ABORTED', changes='1,1'),
], ordered=False)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines), 0)
def test_live_reconfiguration_del_tenant(self):
# Test tenant deletion while changes are enqueued
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 8)
self.newTenantConfig('config/single-tenant/main-no-tenants.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(A.reported, 0)
self.assertEqual(B.reported, 0)
self.assertEqual(C.reported, 0)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='2,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='3,1'),
dict(name='project-test1', result='ABORTED', changes='2,1'),
dict(name='project-test2', result='ABORTED', changes='2,1'),
dict(name='project1-project2-integration',
result='ABORTED', changes='2,1'),
dict(name='project-test1', result='ABORTED', changes='3,1'),
dict(name='project-test2', result='ABORTED', changes='3,1'),
dict(name='project1-project2-integration',
result='ABORTED', changes='3,1'),
dict(name='project-test1', result='ABORTED', changes='1,1'),
dict(name='project-test2', result='ABORTED', changes='1,1'),
], ordered=False)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertIsNone(tenant)
@simple_layout("layouts/reconfigure-failed-head.yaml")
def test_live_reconfiguration_failed_change_at_head(self):
# Test that if we reconfigure with a failed change at head,
# that the change behind it isn't reparented onto it.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.executor_server.failJob('job1', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertBuilds([
dict(name='job1', changes='1,1'),
dict(name='job2', changes='1,1'),
dict(name='job1', changes='1,1 2,1'),
dict(name='job2', changes='1,1 2,1'),
])
self.release(self.builds[0])
self.waitUntilSettled()
self.assertBuilds([
dict(name='job2', changes='1,1'),
dict(name='job1', changes='2,1'),
dict(name='job2', changes='2,1'),
])
# Unordered history comparison because the aborts can finish
# in any order.
self.assertHistory([
dict(name='job1', result='FAILURE', changes='1,1'),
dict(name='job1', result='ABORTED', changes='1,1 2,1'),
dict(name='job2', result='ABORTED', changes='1,1 2,1'),
], ordered=False)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertBuilds([])
self.assertHistory([
dict(name='job1', result='FAILURE', changes='1,1'),
dict(name='job1', result='ABORTED', changes='1,1 2,1'),
dict(name='job2', result='ABORTED', changes='1,1 2,1'),
dict(name='job2', result='SUCCESS', changes='1,1'),
dict(name='job1', result='SUCCESS', changes='2,1'),
dict(name='job2', result='SUCCESS', changes='2,1'),
], ordered=False)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
def test_delayed_repo_init(self):
self.init_repo("org/new-project")
files = {'README': ''}
self.addCommitToRepo("org/new-project", 'Initial commit',
files=files, tag='init')
self.newTenantConfig('tenants/delayed-repo-init.yaml')
self.commitConfigUpdate(
'common-config',
'layouts/delayed-repo-init.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/new-project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
@simple_layout('layouts/single-job-with-nodeset.yaml')
def test_live_reconfiguration_queued_node_requests(self):
# Test that a job with a queued node request still has the
# correct state after reconfiguration.
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
def get_job():
tenant = self.scheds.first.sched.abide.tenants['tenant-one']
for pipeline in tenant.layout.pipelines.values():
pipeline_status = pipeline.formatStatusJSON(
self.scheds.first.sched.globals.websocket_url)
for queue in pipeline_status['change_queues']:
for head in queue['heads']:
for item in head:
for job in item['jobs']:
if job['name'] == 'check-job':
return job
job = get_job()
self.assertTrue(job['queued'])
self.scheds.execute(lambda app: app.sched.reconfigure(self.config))
self.waitUntilSettled()
job = get_job()
self.assertTrue(job['queued'])
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
])
@simple_layout('layouts/trigger-sequence.yaml')
def test_live_reconfiguration_trigger_sequence(self):
# Test that events arriving after an event that triggers a
# reconfiguration are handled after the reconfiguration
# completes.
in_repo_conf = "[{project: {tag: {jobs: [post-job]}}}]"
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
sched = self.scheds.first.sched
# Hold the management queue so that we don't process any
# reconfiguration events yet.
with management_queue_lock(
self.zk_client, 'tenant-one', blocking=False
):
with sched.run_handler_lock:
A.setMerged()
# Submit two events while no processing is happening:
# A change merged event that will trigger a reconfiguration
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
# And a tag event which should only run a job after
# the config change above is in effect.
event = self.fake_gerrit.addFakeTag(
'org/project', 'master', 'foo')
self.fake_gerrit.addEvent(event)
# Wait for the tenant trigger queue to empty out, and for
# us to have a tenant management as well as a pipeline
# trigger event. At this point, we should be deferring
# the trigger event until the management event is handled.
for _ in iterate_timeout(60, 'queues'):
with sched.run_handler_lock:
if sched.trigger_events['tenant-one'].hasEvents():
continue
if not sched.pipeline_trigger_events[
'tenant-one']['tag'].hasEvents():
continue
if not sched.management_events['tenant-one'].hasEvents():
continue
break
# Now we can resume and process the reconfiguration event
sched.wake_event.set()
self.waitUntilSettled()
self.assertHistory([
dict(name='post-job', result='SUCCESS'),
])
@simple_layout('layouts/repo-deleted.yaml')
def test_repo_deleted(self):
self.init_repo("org/delete-project")
A = self.fake_gerrit.addFakeChange('org/delete-project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
        # Delete the org/delete-project zuul repo. Should be recloned.
p = 'org/delete-project'
if os.path.exists(os.path.join(self.merger_src_root, p)):
shutil.rmtree(os.path.join(self.merger_src_root, p))
if os.path.exists(os.path.join(self.executor_src_root, p)):
shutil.rmtree(os.path.join(self.executor_src_root, p))
B = self.fake_gerrit.addFakeChange('org/delete-project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
@simple_layout('layouts/untrusted-secrets.yaml')
def test_untrusted_secrets(self):
"Test untrusted secrets"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('does not allow post-review job',
A.messages[0])
@simple_layout('layouts/tags.yaml')
def test_tags(self):
"Test job tags"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
results = {self.getJobFromHistory('merge',
project='org/project1').uuid: ['extratag', 'merge'],
self.getJobFromHistory('merge',
project='org/project2').uuid: ['merge']}
for build in self.history:
self.assertEqual(results.get(build.uuid, ''),
build.parameters['zuul'].get('jobtags'))
def test_timer_template(self):
"Test that a periodic job is triggered"
# This test can not use simple_layout because it must start
# with a configuration which does not include a
# timer-triggered job so that we have an opportunity to set
# the hold flag before the first job.
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer-template.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
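        # (The timer trigger presumably uses a six-field cron spec
        # with a seconds field, e.g. '* * * * * *', to fire every
        # second.)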
time.sleep(5)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
merge_count_project1 = 0
for job in self.merge_job_history.get(
zuul.model.MergeRequest.REF_STATE
):
if job.payload["items"][0]["project"] == "org/project1":
merge_count_project1 += 1
        self.assertEqual(merge_count_project1, 0,
                         "project1 shouldn't have any refstate call")
self.executor_server.hold_jobs_in_build = False
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-bitrot', result='SUCCESS',
ref='refs/heads/master'),
dict(name='project-bitrot', result='SUCCESS',
ref='refs/heads/stable'),
], ordered=False)
def _test_timer(self, config_file):
# This test can not use simple_layout because it must start
# with a configuration which does not include a
# timer-triggered job so that we have an opportunity to set
# the hold flag before the first job.
timer = self.scheds.first.sched.connections.drivers['timer']
start_jobs = timer.apsched.get_jobs()
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', config_file)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
first_jobs = timer.apsched.get_jobs()
# Collect the currently cached branches in order to later check,
# that the timer driver refreshes the cache.
cached_versions = {}
tenant = self.scheds.first.sched.abide.tenants['tenant-one']
for project_name in tenant.layout.project_configs:
_, project = tenant.getProject('org/project')
for branch in project.source.getProjectBranches(project, tenant):
event = self._create_dummy_event(project, branch)
change_key = project.source.getChangeKey(event)
change = project.source.getChange(change_key, event=event)
cached_versions[branch] = change.cache_version
# The pipeline triggers every second, so we should have seen
# several by now.
for _ in iterate_timeout(60, 'jobs started'):
if len(self.builds) > 1:
break
# Ensure that the status json has the ref so we can render it in the
# web ui.
pipeline = self.scheds.first.sched.abide.tenants[
'tenant-one'].layout.pipelines['periodic']
pipeline_status = pipeline.formatStatusJSON(
self.scheds.first.sched.globals.websocket_url)
first = pipeline_status['change_queues'][0]['heads'][0][0]
second = pipeline_status['change_queues'][1]['heads'][0][0]
self.assertIn(first['ref'], ['refs/heads/master', 'refs/heads/stable'])
self.assertIn(second['ref'],
['refs/heads/master', 'refs/heads/stable'])
self.executor_server.hold_jobs_in_build = False
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
second_jobs = timer.apsched.get_jobs()
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(3)
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
self.assertTrue(len(self.history) > 1)
        for job in self.history:
self.assertEqual(job.result, 'SUCCESS')
self.assertEqual(job.name, 'project-bitrot')
self.assertIn(job.ref, ('refs/heads/stable', 'refs/heads/master'))
for project_name in tenant.layout.project_configs:
_, project = tenant.getProject('org/project')
for branch in project.source.getProjectBranches(project, tenant):
event = self._create_dummy_event(project, branch)
change_key = project.source.getChangeKey(event)
change = project.source.getChange(change_key, event=event)
# Make sure the timer driver refreshed the cache
self.assertGreater(change.cache_version,
cached_versions[branch])
# We start with no jobs, and our first reconfigure should add jobs
self.assertTrue(len(first_jobs) > len(start_jobs))
# Our second reconfigure should return us to no jobs
self.assertEqual(start_jobs, second_jobs)
def _create_dummy_event(self, project, branch):
event = zuul.model.TriggerEvent()
event.type = 'test'
event.project_hostname = project.canonical_hostname
event.project_name = project.name
event.ref = f'refs/heads/{branch}'
event.branch = branch
        event.zuul_event_id = uuid4().hex
event.timestamp = time.time()
return event
def test_timer(self):
"Test that a periodic job is triggered"
self._test_timer('layouts/timer.yaml')
def test_timer_with_jitter(self):
"Test that a periodic job with a jitter is triggered"
self._test_timer('layouts/timer-jitter.yaml')
@simple_layout('layouts/timer-jitter.yaml')
def test_timer_preserve_jobs(self):
# This tests that we keep the same apsched jobs if possible
# when reconfiguring. If a reconfiguration happens during the
# "jitter" period, we might end up not running jobs unless we
# preserve the exact job object across reconfiguration.
timer = self.scheds.first.sched.connections.drivers['timer']
old_jobs = timer.apsched.get_jobs()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
new_jobs = timer.apsched.get_jobs()
self.assertEqual(old_jobs, new_jobs)
def test_idle(self):
"Test that frequent periodic jobs work"
# This test can not use simple_layout because it must start
# with a configuration which does not include a
# timer-triggered job so that we have an opportunity to set
# the hold flag before the first job.
self.executor_server.hold_jobs_in_build = True
for x in range(1, 3):
# Test that timer triggers periodic jobs even across
# layout config reloads.
# Start timer trigger
self.commitConfigUpdate('common-config',
'layouts/idle.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# The pipeline triggers every second, so we should have seen
# several by now.
time.sleep(5)
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config',
'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job,
# we can end up with one more event firing, so give it an
# extra second to settle.
time.sleep(1)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1,
'Timer builds iteration #%d' % x)
self.executor_server.release('.*')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), x)
@simple_layout('layouts/smtp.yaml')
def test_check_smtp_pool(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.smtp_messages), 2)
        # A.messages only holds what FakeGerrit places in it. Thus we
        # rely on knowing that the first message must be the start
        # report, as it is configured to go only to SMTP.
self.assertEqual('[email protected]',
self.smtp_messages[0]['from_email'])
self.assertEqual(['[email protected]'],
self.smtp_messages[0]['to_email'])
self.assertEqual('Starting check jobs.',
self.smtp_messages[0]['body'])
self.assertEqual('[email protected]',
self.smtp_messages[1]['from_email'])
self.assertEqual(['[email protected]'],
self.smtp_messages[1]['to_email'])
self.assertEqual(A.messages[0],
self.smtp_messages[1]['body'])
@simple_layout('layouts/smtp.yaml')
@mock.patch('zuul.driver.gerrit.gerritreporter.GerritReporter.report')
def test_failed_reporter(self, report_mock):
'''Test that one failed reporter doesn't break other reporters'''
        # Warning: hacks below. We sort the reporters here so that the
        # test is deterministic. Gerrit reporting will fail, but smtp
        # reporting should succeed.
report_mock.side_effect = Exception('Gerrit failed to report')
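        # With the side effect in place, every call to
        # GerritReporter.report raises, exercising the scheduler's
        # error handling in the report path.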
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check = tenant.layout.pipelines['check']
check.success_actions = sorted(check.success_actions,
key=lambda x: x.name)
self.assertEqual(check.success_actions[0].name, 'gerrit')
self.assertEqual(check.success_actions[1].name, 'smtp')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# We know that if gerrit ran first and failed and smtp ran second
# and sends mail then we handle failures in reporters gracefully.
self.assertEqual(len(self.smtp_messages), 2)
        # A.messages only holds what FakeGerrit places in it. Thus we
        # rely on knowing that the first message must be the start
        # report, as it is configured to go only to SMTP.
self.assertEqual('[email protected]',
self.smtp_messages[0]['from_email'])
self.assertEqual(['[email protected]'],
self.smtp_messages[0]['to_email'])
self.assertEqual('Starting check jobs.',
self.smtp_messages[0]['body'])
self.assertEqual('[email protected]',
self.smtp_messages[1]['from_email'])
self.assertEqual(['[email protected]'],
self.smtp_messages[1]['to_email'])
# This double checks that Gerrit side failed
self.assertEqual(A.messages, [])
def test_timer_smtp(self):
"Test that a periodic job is triggered"
# This test can not use simple_layout because it must start
# with a configuration which does not include a
# timer-triggered job so that we have an opportunity to set
# the hold flag before the first job.
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer-smtp.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
time.sleep(5)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.executor_server.release('.*')
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.getJobFromHistory(
'project-bitrot-stable-old').result, 'SUCCESS')
self.assertEqual(self.getJobFromHistory(
'project-bitrot-stable-older').result, 'SUCCESS')
self.assertEqual(len(self.smtp_messages), 1)
        # The periodic pipeline is only configured to report via SMTP,
        # so inspect the message details directly.
self.assertEqual('[email protected]',
self.smtp_messages[0]['from_email'])
self.assertEqual(['[email protected]'],
self.smtp_messages[0]['to_email'])
self.assertIn('Subject: Periodic check for org/project succeeded',
self.smtp_messages[0]['headers'])
        # Stop queuing timer triggered jobs and let any that may have
        # been queued run through so that the end-of-test assertions pass.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.executor_server.release('.*')
self.waitUntilSettled()
@skip("Disabled for early v3 development")
def test_timer_sshkey(self):
"Test that a periodic job can setup SSH key authentication"
self.worker.hold_jobs_in_build = True
self.config.set('zuul', 'layout_config',
'tests/fixtures/layout-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.registerJobs()
# The pipeline triggers every second, so we should have seen
# several by now.
time.sleep(5)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
ssh_wrapper = os.path.join(self.git_root, ".ssh_wrapper_gerrit")
self.assertTrue(os.path.isfile(ssh_wrapper))
with open(ssh_wrapper) as f:
ssh_wrapper_content = f.read()
self.assertIn("fake_id_rsa", ssh_wrapper_content)
        # In the unit tests the Merger runs in the same process,
        # so we see its environment variables
self.assertEqual(os.environ['GIT_SSH'], ssh_wrapper)
self.worker.release('.*')
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.getJobFromHistory(
'project-bitrot-stable-old').result, 'SUCCESS')
self.assertEqual(self.getJobFromHistory(
'project-bitrot-stable-older').result, 'SUCCESS')
        # Stop queuing timer triggered jobs and let any that may have
        # been queued run through so that the end-of-test assertions pass.
self.config.set('zuul', 'layout_config',
'tests/fixtures/layout-no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.registerJobs()
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.worker.release('.*')
self.waitUntilSettled()
@simple_layout('layouts/rate-limit.yaml')
def test_queue_rate_limiting(self):
"Test that DependentPipelines are rate limited with dep across window"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.setDependsOn(B, 1)
self.executor_server.failJob('project-test1', A)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
# Only A and B will have their merge jobs queued because
# window is 2.
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-merge')
self.assertEqual(self.builds[1].name, 'project-merge')
# Release the merge jobs one at a time.
self.builds[0].release()
self.waitUntilSettled()
self.builds[0].release()
self.waitUntilSettled()
# Only A and B will have their test jobs queued because
# window is 2.
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.executor_server.release('project-.*')
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# A failed so window is reduced by 1 to 1.
self.assertEqual(queue.window, 1)
self.assertEqual(queue.window_floor, 1)
self.assertEqual(A.data['status'], 'NEW')
# Gate is reset and only B's merge job is queued because
# window shrunk to 1.
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# Only B's test jobs are queued because window is still 1.
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.executor_server.release('project-.*')
self.waitUntilSettled()
# B was successfully merged so window is increased to 2.
self.assertEqual(queue.window, 2)
self.assertEqual(queue.window_floor, 1)
self.assertEqual(B.data['status'], 'MERGED')
# Only C is left and its merge job is queued.
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# After successful merge job the test jobs for C are queued.
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.executor_server.release('project-.*')
self.waitUntilSettled()
# C successfully merged so window is bumped to 3.
self.assertEqual(queue.window, 3)
self.assertEqual(queue.window_floor, 1)
self.assertEqual(C.data['status'], 'MERGED')
@simple_layout('layouts/rate-limit.yaml')
def test_queue_rate_limiting_dependent(self):
"Test that DependentPipelines are rate limited with dep in window"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
B.setDependsOn(A, 1)
self.executor_server.failJob('project-test1', A)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
# Only A and B will have their merge jobs queued because
# window is 2.
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-merge')
self.assertEqual(self.builds[1].name, 'project-merge')
self.orderedRelease(2)
# Only A and B will have their test jobs queued because
# window is 2.
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.executor_server.release('project-.*')
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# A failed so window is reduced by 1 to 1.
self.assertEqual(queue.window, 1)
self.assertEqual(queue.window_floor, 1)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
# Gate is reset and only C's merge job is queued because
# window shrunk to 1 and A and B were dequeued.
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.orderedRelease(1)
# Only C's test jobs are queued because window is still 1.
self.assertEqual(len(self.builds), 2)
builds = self.getSortedBuilds()
self.assertEqual(builds[0].name, 'project-test1')
self.assertEqual(builds[1].name, 'project-test2')
self.executor_server.release('project-.*')
self.waitUntilSettled()
# C was successfully merged so window is increased to 2.
self.assertEqual(queue.window, 2)
self.assertEqual(queue.window_floor, 1)
self.assertEqual(C.data['status'], 'MERGED')
@simple_layout('layouts/rate-limit-reconfigure.yaml')
def test_queue_rate_limiting_reconfigure(self):
"""Test that changes survive a reconfigure when no longer in window.
        This is a regression test for a case that led to an exception during
        re-enqueue. The exception happened when formerly active items already
        had build results but then dropped out of the active window. During
        re-enqueue the job graph was not re-initialized because the items were
        no longer active.
        """
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.builds[0].name, 'project-merge')
self.assertEqual(self.builds[1].name, 'project-merge')
self.assertEqual(self.builds[2].name, 'project-merge')
self.assertEqual(self.builds[3].name, 'project-merge')
self.orderedRelease(4)
self.assertEqual(len(self.builds), 8)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertEqual(self.builds[6].name, 'project-test1')
self.assertEqual(self.builds[7].name, 'project-test2')
self.executor_server.failJob('project-test1', B)
self.builds[2].release()
self.builds[3].release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
# A's jobs
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
# C's and D's merge jobs
self.assertEqual(self.builds[2].name, 'project-merge')
self.assertEqual(self.builds[3].name, 'project-merge')
# Release merge jobs of C, D after speculative gate reset
self.executor_server.release('project-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
# A's jobs
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
# C's + D's jobs
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
        # Fail D's job so we have a build result for an item that
        # is not in the active window after B is reported
        # (a condition that previously led to an exception)
self.executor_server.failJob('project-test1', D)
self.builds[4].release()
self.waitUntilSettled()
# Release A's jobs
self.builds[0].release()
self.builds[1].release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
# C's jobs
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
# D's remaining job
self.assertEqual(self.builds[2].name, 'project-test2')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 1)
# D dropped out of the window
self.assertFalse(queue.queue[-1].active)
self.commitConfigUpdate('org/common-config',
'layouts/rate-limit-reconfigure2.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# D's remaining job should still be queued
self.assertEqual(len(self.builds), 3)
self.executor_server.release('project-.*')
self.waitUntilSettled()
@simple_layout('layouts/reconfigure-window.yaml')
def test_reconfigure_window_shrink(self):
# Test the active window shrinking during reconfiguration
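        # With a dynamically sized window, reconfiguration is expected
        # to preserve the queue's current runtime window rather than
        # resetting it to the configured value; contrast this with
        # test_reconfigure_window_fixed below.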
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 20)
        self.assertEqual(len(self.builds), 4)
self.executor_server.release('job1')
self.waitUntilSettled()
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-window2.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# Even though we have configured a smaller window, the value
# on the existing shared queue should be used.
self.assertEqual(queue.window, 20)
        self.assertEqual(len(self.builds), 2)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 20)
        self.assertEqual(len(self.builds), 2)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='job1', result='SUCCESS', changes='1,1'),
dict(name='job1', result='SUCCESS', changes='1,1 2,1'),
dict(name='job2', result='SUCCESS', changes='1,1'),
dict(name='job2', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
@simple_layout('layouts/reconfigure-window-fixed.yaml')
def test_reconfigure_window_fixed(self):
        # Test a fixed-size active window shrinking during reconfiguration
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 2)
self.assertEqual(len(self.builds), 4)
self.waitUntilSettled()
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-window-fixed2.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# Because we have configured a static window, it should
# be allowed to shrink on reconfiguration.
self.assertEqual(queue.window, 1)
# B is outside the window, but still marked active until the
# next pass through the queue processor.
self.assertEqual(len(self.builds), 4)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 1)
self.waitUntilSettled()
# B's builds should not be canceled
self.assertEqual(len(self.builds), 4)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='job1', result='SUCCESS', changes='1,1'),
dict(name='job2', result='SUCCESS', changes='1,1'),
dict(name='job1', result='SUCCESS', changes='1,1 2,1'),
dict(name='job2', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
@simple_layout('layouts/reconfigure-window-fixed.yaml')
def test_reconfigure_window_fixed_requests(self):
# Test the active window shrinking during reconfiguration with
# outstanding node requests
self.executor_server.hold_jobs_in_build = True
# Start the jobs for A
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.log.debug("A complete")
# Hold node requests for B
self.fake_nodepool.pause()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.log.debug("B complete")
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 2)
self.assertEqual(len(self.builds), 2)
self.waitUntilSettled()
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-window-fixed2.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.log.debug("Reconfiguration complete")
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# Because we have configured a static window, it should
# be allowed to shrink on reconfiguration.
self.assertEqual(queue.window, 1)
self.assertEqual(len(self.builds), 2)
        # After the previous reconfig, the queue processor will have
        # run and marked B inactive; run another reconfiguration so
        # that we're testing what happens when we reconfigure after
        # the active window has shrunk.
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Unpause the node requests now
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.log.debug("Nodepool unpause complete")
# Allow A to merge and B to enter the active window and complete
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.log.debug("Executor unpause complete")
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 1)
self.waitUntilSettled()
self.assertHistory([
dict(name='job1', result='SUCCESS', changes='1,1'),
dict(name='job2', result='SUCCESS', changes='1,1'),
dict(name='job1', result='SUCCESS', changes='1,1 2,1'),
dict(name='job2', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
@simple_layout('layouts/footer-message.yaml')
def test_footer_message(self):
"Test a pipeline's footer message is correctly added to the report."
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(2, len(self.smtp_messages))
failure_msg = """\
Build failed. For information on how to proceed, see \
http://wiki.example.org/Test_Failures"""
footer_msg = """\
For CI problems and help debugging, contact [email protected]"""
self.assertTrue(self.smtp_messages[0]['body'].startswith(failure_msg))
self.assertTrue(self.smtp_messages[0]['body'].endswith(footer_msg))
self.assertFalse(self.smtp_messages[1]['body'].startswith(failure_msg))
self.assertTrue(self.smtp_messages[1]['body'].endswith(footer_msg))
@simple_layout('layouts/start-message.yaml')
def test_start_message(self):
"Test a pipeline's start message is correctly added to the report."
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(1, len(self.smtp_messages))
start_msg = "Jobs started in gate for 1,1."
self.assertTrue(self.smtp_messages[0]['body'].startswith(start_msg))
@simple_layout('layouts/unmanaged-project.yaml')
def test_unmanaged_project_start_message(self):
"Test start reporting is not done for unmanaged projects."
self.init_repo("org/project", tag='init')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(0, len(A.messages))
@simple_layout('layouts/merge-conflict.yaml')
def test_merge_conflict_reporters(self):
"""Check that the config is set up correctly"""
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(
"Merge Failed.\n\nThis change or one of its cross-repo "
"dependencies was unable to be automatically merged with the "
"current state of its repository. Please rebase the change and "
"upload a new patchset.",
tenant.layout.pipelines['check'].merge_conflict_message)
self.assertEqual(
"The merge failed! For more information...",
tenant.layout.pipelines['gate'].merge_conflict_message)
self.assertEqual(
len(tenant.layout.pipelines['check'].merge_conflict_actions), 1)
self.assertEqual(
len(tenant.layout.pipelines['gate'].merge_conflict_actions), 2)
self.assertTrue(isinstance(
tenant.layout.pipelines['check'].merge_conflict_actions[0],
gerritreporter.GerritReporter))
self.assertTrue(
(
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[0],
zuul.driver.smtp.smtpreporter.SMTPReporter) and
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[1],
gerritreporter.GerritReporter)
) or (
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[0],
gerritreporter.GerritReporter) and
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[1],
zuul.driver.smtp.smtpreporter.SMTPReporter)
)
)
@simple_layout('layouts/merge-failure.yaml')
def test_merge_failure_reporters(self):
"""Check that the config is set up correctly"""
# TODO: Remove this backwards compat test in v6.0
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(
"Merge Failed.\n\nThis change or one of its cross-repo "
"dependencies was unable to be automatically merged with the "
"current state of its repository. Please rebase the change and "
"upload a new patchset.",
tenant.layout.pipelines['check'].merge_conflict_message)
self.assertEqual(
"The merge failed! For more information...",
tenant.layout.pipelines['gate'].merge_conflict_message)
self.assertEqual(
len(tenant.layout.pipelines['check'].merge_conflict_actions), 1)
self.assertEqual(
len(tenant.layout.pipelines['gate'].merge_conflict_actions), 2)
self.assertTrue(isinstance(
tenant.layout.pipelines['check'].merge_conflict_actions[0],
gerritreporter.GerritReporter))
self.assertTrue(
(
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[0],
zuul.driver.smtp.smtpreporter.SMTPReporter) and
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[1],
gerritreporter.GerritReporter)
) or (
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[0],
gerritreporter.GerritReporter) and
isinstance(tenant.layout.pipelines['gate'].
merge_conflict_actions[1],
zuul.driver.smtp.smtpreporter.SMTPReporter)
)
)
def test_merge_failure_reports(self):
"""Check that when a change fails to merge the correct message is sent
to the correct reporter"""
self.commitConfigUpdate('common-config',
'layouts/merge-conflict.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Check a test failure isn't reported to SMTP
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test1', A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(3, len(self.history)) # 3 jobs
self.assertEqual(0, len(self.smtp_messages))
# Check a merge failure is reported to SMTP
# B should be merged, but C will conflict with B
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addPatchset({'conflict': 'foo'})
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addPatchset({'conflict': 'bar'})
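        # B and C modify the same 'conflict' file with different
        # content; B merges first, so C's speculative merge fails and
        # triggers the merge-conflict reporters.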
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(6, len(self.history)) # A and B jobs
self.assertEqual(1, len(self.smtp_messages))
self.assertIn('The merge failed! For more information...',
self.smtp_messages[0]['body'])
self.assertIn('Error merging gerrit/org/project',
self.smtp_messages[0]['body'])
def test_default_merge_failure_reports(self):
"""Check that the default merge failure reports are correct."""
# A should report success, B should report merge failure.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addPatchset({'conflict': 'foo'})
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addPatchset({'conflict': 'bar'})
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(3, len(self.history)) # A jobs
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 1)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'NEW')
self.assertIn('Build succeeded', A.messages[1])
self.assertIn('Merge Failed', B.messages[0])
self.assertIn('automatically merged', B.messages[0])
self.assertIn('Error merging gerrit/org/project', B.messages[0])
self.assertNotIn('logs.example.com', B.messages[0])
self.assertNotIn('SKIPPED', B.messages[0])
buildsets = list(
self.scheds.first.connections.connections[
'database'].getBuildsets())
self.assertEqual(buildsets[0].result, 'MERGE_CONFLICT')
self.assertIn('This change or one of', buildsets[0].message)
def test_submit_failure(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.fail_merge = True
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
buildsets = list(
self.scheds.first.connections.connections[
'database'].getBuildsets())
self.assertEqual(buildsets[0].result, 'MERGE_FAILURE')
@simple_layout('layouts/timer-freeze-job-failure.yaml')
def test_periodic_freeze_job_failure(self):
self.waitUntilSettled()
for x in iterate_timeout(30, 'buildset complete'):
buildsets = list(
self.scheds.first.connections.connections[
'database'].getBuildsets())
if buildsets:
break
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('org/common-config', 'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(3)
self.waitUntilSettled()
self.assertEqual(buildsets[0].result, 'CONFIG_ERROR')
self.assertIn('Job project-test2 depends on project-test1 '
'which was not run', buildsets[0].message)
@simple_layout('layouts/freeze-job-failure.yaml')
def test_freeze_job_failure(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
buildsets = list(
self.scheds.first.connections.connections[
'database'].getBuildsets())
self.assertEqual(buildsets[0].result, 'CONFIG_ERROR')
self.assertIn('Job project-test2 depends on project-test1 '
'which was not run', buildsets[0].message)
@simple_layout('layouts/nonvoting-pipeline.yaml')
def test_nonvoting_pipeline(self):
"Test that a nonvoting pipeline (experimental) can still report"
A = self.fake_gerrit.addFakeChange('org/experimental-project',
'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(
self.getJobFromHistory('experimental-project-test').result,
'SUCCESS')
self.assertEqual(A.reported, 1)
@simple_layout('layouts/disable_at.yaml')
def test_disable_at(self):
"Test a pipeline will only report to the disabled trigger when failing"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(3, tenant.layout.pipelines['check'].disable_at)
self.assertEqual(
0, tenant.layout.pipelines['check'].state.consecutive_failures)
self.assertFalse(tenant.layout.pipelines['check'].state.disabled)
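        # With 'disable-after-consecutive-failures: 3' in the layout,
        # three consecutive failing buildsets should flip the pipeline
        # to disabled, after which it reports only via its 'disabled'
        # reporter actions (SMTP here) until a reconfiguration resets
        # the state.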
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
E = self.fake_gerrit.addFakeChange('org/project', 'master', 'E')
F = self.fake_gerrit.addFakeChange('org/project', 'master', 'F')
G = self.fake_gerrit.addFakeChange('org/project', 'master', 'G')
H = self.fake_gerrit.addFakeChange('org/project', 'master', 'H')
I = self.fake_gerrit.addFakeChange('org/project', 'master', 'I')
J = self.fake_gerrit.addFakeChange('org/project', 'master', 'J')
K = self.fake_gerrit.addFakeChange('org/project', 'master', 'K')
self.executor_server.failJob('project-test1', A)
self.executor_server.failJob('project-test1', B)
# Let C pass, resetting the counter
self.executor_server.failJob('project-test1', D)
self.executor_server.failJob('project-test1', E)
self.executor_server.failJob('project-test1', F)
self.executor_server.failJob('project-test1', G)
self.executor_server.failJob('project-test1', H)
        # Change I also passes but should only report to the disabled reporters
self.executor_server.failJob('project-test1', J)
self.executor_server.failJob('project-test1', K)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
2, tenant.layout.pipelines['check'].state.consecutive_failures)
self.assertFalse(tenant.layout.pipelines['check'].state.disabled)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
0, tenant.layout.pipelines['check'].state.consecutive_failures)
self.assertFalse(tenant.layout.pipelines['check'].state.disabled)
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(E.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(F.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# We should be disabled now
self.assertEqual(
3, tenant.layout.pipelines['check'].state.consecutive_failures)
self.assertTrue(tenant.layout.pipelines['check'].state.disabled)
# We need to wait between each of these patches to make sure the
# smtp messages come back in an expected order
self.fake_gerrit.addEvent(G.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(H.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(I.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # The first 6 changes (A-F) should have reported back to gerrit,
        # thus leaving a message on each change
self.assertEqual(1, len(A.messages))
self.assertIn('Build failed.', A.messages[0])
self.assertEqual(1, len(B.messages))
self.assertIn('Build failed.', B.messages[0])
self.assertEqual(1, len(C.messages))
self.assertIn('Build succeeded.', C.messages[0])
self.assertEqual(1, len(D.messages))
self.assertIn('Build failed.', D.messages[0])
self.assertEqual(1, len(E.messages))
self.assertIn('Build failed.', E.messages[0])
self.assertEqual(1, len(F.messages))
self.assertIn('Build failed.', F.messages[0])
        # The last 3 (G, H, I) should have reported only via smtp.
self.assertEqual(3, len(self.smtp_messages))
self.assertEqual(0, len(G.messages))
self.assertIn('Build failed.', self.smtp_messages[0]['body'])
self.assertIn(
'project-test1 https://', self.smtp_messages[0]['body'])
self.assertEqual(0, len(H.messages))
self.assertIn('Build failed.', self.smtp_messages[1]['body'])
self.assertIn(
'project-test1 https://', self.smtp_messages[1]['body'])
self.assertEqual(0, len(I.messages))
self.assertIn('Build succeeded.', self.smtp_messages[2]['body'])
self.assertIn(
'project-test1 https://', self.smtp_messages[2]['body'])
# Now reload the configuration (simulate a HUP) to check the pipeline
# comes out of disabled
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(3, tenant.layout.pipelines['check'].disable_at)
self.assertEqual(
0, tenant.layout.pipelines['check'].state.consecutive_failures)
self.assertFalse(tenant.layout.pipelines['check'].state.disabled)
self.fake_gerrit.addEvent(J.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(K.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
2, tenant.layout.pipelines['check'].state.consecutive_failures)
self.assertFalse(tenant.layout.pipelines['check'].state.disabled)
# J and K went back to gerrit
self.assertEqual(1, len(J.messages))
self.assertIn('Build failed.', J.messages[0])
self.assertEqual(1, len(K.messages))
self.assertIn('Build failed.', K.messages[0])
# No more messages reported via smtp
self.assertEqual(3, len(self.smtp_messages))
@simple_layout('layouts/one-job-project.yaml')
def test_one_job_project(self):
"Test that queueing works with one job"
A = self.fake_gerrit.addFakeChange('org/one-job-project',
'master', 'A')
B = self.fake_gerrit.addFakeChange('org/one-job-project',
'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
def test_job_aborted(self):
"Test that if a execute server aborts a job, it is run again"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
# first abort
self.builds[0].aborted = True
self.executor_server.release('.*-test*')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
# second abort
self.builds[0].aborted = True
self.executor_server.release('.*-test*')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
# third abort
self.builds[0].aborted = True
self.executor_server.release('.*-test*')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
# fourth abort
self.builds[0].aborted = True
self.executor_server.release('.*-test*')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
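        # Aborted builds do not count against the retry limit, so the
        # job is still retried after four aborts and eventually
        # succeeds.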
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.history), 7)
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 4)
self.assertEqual(self.countJobResults(self.history, 'SUCCESS'), 3)
def test_rerun_on_abort(self):
"Test that if a execute server fails to run a job, it is run again"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.builds[0].requeue = True
self.executor_server.release('.*-test*')
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['check'].getAllItems()
build_set = items[0].current_build_set
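        # Keep requeueing the build until the retry limit is
        # exhausted; each pass verifies the retry builds accumulated
        # on the build set.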
for x in range(3):
# We should have x+1 retried builds for project-test1
retry_builds = build_set.getRetryBuildsForJob('project-test1')
self.assertEqual(len(retry_builds), x + 1)
for build in retry_builds:
self.assertEqual(build.retry, True)
self.assertEqual(build.result, 'RETRY')
self.assertEqual(len(self.builds), 1,
'len of builds at x=%d is wrong' % x)
self.builds[0].requeue = True
self.executor_server.release('.*-test1')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.history), 6)
self.assertEqual(self.countJobResults(self.history, 'SUCCESS'), 2)
self.assertEqual(A.reported, 1)
self.assertIn('RETRY_LIMIT', A.messages[0])
def test_executor_disconnect(self):
"Test that jobs are completed after an executor disconnect"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# Forcibly disconnect the executor from ZK
self.executor_server.zk_client.client.stop()
self.executor_server.zk_client.client.start()
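        # Restarting the client drops the ZK session, which releases
        # the ephemeral lock the executor holds on its build request.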
# Find the build in the scheduler so we can check its status
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
builds = items[0].current_build_set.getBuilds()
build = builds[0]
# Clean up the build
self.scheds.first.sched.executor.cleanupLostBuildRequests()
# Wait for the build to be reported as lost
for x in iterate_timeout(30, 'retry build'):
if build.result == 'RETRY':
break
# If we didn't timeout, then it worked; we're done
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# There is a test-only race in the recording executor class
# where we may record a successful first build, even though
# the executor didn't actually send a build complete event.
        # This could probably be improved, but for now, it's
# sufficient to verify that the job was retried. So we omit a
# result classifier on the first build.
self.assertHistory([
dict(name='project-merge', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
], ordered=False)
# TODO: There seems to be a race condition in the kazoo election
# recipe that can cause the stats election thread to hang after
# reconnecting.
@skip("This is unstable in the gate")
def test_scheduler_disconnect(self):
"Test that jobs are completed after a scheduler disconnect"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# Forcibly disconnect the scheduler from ZK
self.scheds.execute(lambda app: app.sched.zk_client.client.stop())
self.scheds.execute(lambda app: app.sched.zk_client.client.start())
# Clean up lost builds
self.scheds.first.sched.executor.cleanupLostBuildRequests()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
], ordered=False)
# TODO: See comment for test_scheduler_disconnect.
@skip("This is unstable in the gate")
def test_zookeeper_disconnect(self):
"Test that jobs are executed after a zookeeper disconnect"
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.zk_client.client.stop())
self.scheds.execute(lambda app: app.sched.zk_client.client.start())
self.fake_nodepool.unpause()
# Wait until we win the nodepool election in order to avoid a
# race in waitUntilSettled with the request being fulfilled
# without submitting an event.
for x in iterate_timeout(60, 'nodepool election won'):
found = [app for app in self.scheds
if (app.sched.nodepool.election_won and
app.sched.nodepool.election.is_still_valid())]
if found:
break
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
def test_nodepool_cleanup(self):
"Test that we cleanup leaked node requests"
self.fake_nodepool.pause()
system_id = self.scheds.first.sched.system.system_id
zk_nodepool = self.scheds.first.sched.nodepool.zk_nodepool
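        # req1 is submitted with this scheduler's system id but never
        # tracked by a queue item, so cleanup should treat it as
        # leaked; req2 belongs to a different system and must be kept.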
req1 = zuul.model.NodeRequest(system_id, "uuid1", "tenant",
"pipeline", "job", ['label'], None,
0, None)
zk_nodepool.submitNodeRequest(req1, 100)
req2 = zuul.model.NodeRequest("someone else", "uuid1", "tenant",
"pipeline", "job", ['label'], None,
0, None)
zk_nodepool.submitNodeRequest(req2, 100)
self.assertEqual(zk_nodepool.getNodeRequests(),
['100-0000000000', '100-0000000001'])
self.scheds.first.sched._runNodeRequestCleanup()
self.assertEqual(zk_nodepool.getNodeRequests(),
['100-0000000001'])
zk_nodepool.deleteNodeRequest(req2.id)
def test_nodepool_failure(self):
"Test that jobs are reported after a nodepool failure"
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
req = self.fake_nodepool.getNodeRequests()[0]
self.fake_nodepool.addFailRequest(req)
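        # Failing the node request causes project-merge to end in
        # NODE_FAILURE, and its dependent jobs are skipped.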
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 2)
self.assertTrue(re.search('project-merge .* NODE_FAILURE',
A.messages[1]))
self.assertTrue(
'Skipped due to failed job project-merge' in A.messages[1])
def test_nodepool_resources(self):
"Test that resources are reported"
self.executor_server.hold_jobs_in_build = True
self.fake_nodepool.resources = {
'cores': 2,
'ram': 1024,
'instances': 1,
}
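        # These resources are reported per node; the three nodes used
        # by the change make the tenant totals below three times these
        # values.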
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('project-merge')
self.waitUntilSettled()
self.waitUntilNodeCacheSync(
self.scheds.first.sched.nodepool.zk_nodepool)
self.scheds.first.sched._runStats()
# Check that resource usage gauges are reported
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
])
self.assertReportedStat(
'zuul.nodepool.resources.total.tenant.tenant-one.cores',
value='6', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.total.tenant.tenant-one.ram',
value='3072', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.total.tenant.tenant-one.instances',
value='3', kind='g')
# All 3 nodes are in use
self.assertReportedStat(
'zuul.nodepool.resources.in_use.tenant.tenant-one.cores',
value='6', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.tenant.tenant-one.ram',
value='3072', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.tenant.tenant-one.instances',
value='3', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.project.review_example_com/org/'
'project.cores', value='6', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.project.review_example_com/org/'
'project.ram', value='3072', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.project.review_example_com/org/'
'project.instances', value='3', kind='g')
# Check that resource usage counters are reported
self.assertReportedStat(
'zuul.nodepool.resources.in_use.tenant.tenant-one.cores',
kind='c')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.tenant.tenant-one.ram',
kind='c')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.tenant.tenant-one.instances',
kind='c')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.project.review_example_com/org/'
'project.cores', kind='c')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.project.review_example_com/org/'
'project.ram', kind='c')
self.assertReportedStat(
'zuul.nodepool.resources.in_use.project.review_example_com/org/'
'project.instances', kind='c')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
def test_nodepool_pipeline_priority(self):
"Test that nodes are requested at the correct pipeline priority"
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getRefUpdatedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
reqs = self.fake_nodepool.getNodeRequests()
        # The requests come back sorted by priority. We have three
        # requests for the three changes, each with a different
        # priority. Each request also gets a serial number based on
        # the order in which it was received, so the number at the end
        # of the oid should map to the order submitted.
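        # Node request oids have the form '<precedence>-<sequence>';
        # a lower precedence value sorts first.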
# * gate first - high priority - change C
self.assertEqual(reqs[0]['_oid'], '100-0000000002')
self.assertEqual(reqs[0]['node_types'], ['label1'])
# * check second - normal priority - change B
self.assertEqual(reqs[1]['_oid'], '200-0000000001')
self.assertEqual(reqs[1]['node_types'], ['label1'])
# * post third - low priority - change A
# additionally, the post job defined uses an ubuntu-xenial node,
# so we include that check just as an extra verification
self.assertEqual(reqs[2]['_oid'], '300-0000000000')
self.assertEqual(reqs[2]['node_types'], ['ubuntu-xenial'])
self.fake_nodepool.unpause()
self.waitUntilSettled()
@simple_layout('layouts/two-projects-integrated.yaml')
def test_nodepool_relative_priority_check(self):
"Test that nodes are requested at the relative priority"
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
reqs = self.fake_nodepool.getNodeRequests()
# The requests come back sorted by priority.
# Change A, first change for project, high relative priority.
self.assertEqual(reqs[0]['_oid'], '200-0000000000')
self.assertEqual(reqs[0]['relative_priority'], 0)
# Change C, first change for project1, high relative priority.
self.assertEqual(reqs[1]['_oid'], '200-0000000002')
self.assertEqual(reqs[1]['relative_priority'], 0)
# Change B, second change for project, lower relative priority.
self.assertEqual(reqs[2]['_oid'], '200-0000000001')
self.assertEqual(reqs[2]['relative_priority'], 1)
# Change D, first change for project2 shared with project1,
# lower relative priority than project1.
self.assertEqual(reqs[3]['_oid'], '200-0000000003')
self.assertEqual(reqs[3]['relative_priority'], 1)
# Fulfill only the first request
self.fake_nodepool.fulfillRequest(reqs[0])
for x in iterate_timeout(30, 'fulfill request'):
reqs = list(self.scheds.first.sched.nodepool.getNodeRequests())
if len(reqs) < 4:
break
self.waitUntilSettled()
reqs = self.fake_nodepool.getNodeRequests()
# Change B, now first change for project, equal priority.
self.assertEqual(reqs[0]['_oid'], '200-0000000001')
self.assertEqual(reqs[0]['relative_priority'], 0)
# Change C, now first change for project1, equal priority.
self.assertEqual(reqs[1]['_oid'], '200-0000000002')
self.assertEqual(reqs[1]['relative_priority'], 0)
# Change D, first change for project2 shared with project1,
# still lower relative priority than project1.
self.assertEqual(reqs[2]['_oid'], '200-0000000003')
self.assertEqual(reqs[2]['relative_priority'], 1)
self.fake_nodepool.unpause()
self.waitUntilSettled()
@simple_layout('layouts/two-projects-integrated.yaml')
def test_nodepool_relative_priority_long(self):
"Test that nodes are requested at the relative priority"
self.fake_nodepool.pause()
count = 13
changes = []
for x in range(count):
change = self.fake_gerrit.addFakeChange(
'org/project', 'master', 'A')
self.fake_gerrit.addEvent(change.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
changes.append(change)
reqs = self.fake_nodepool.getNodeRequests()
self.assertEqual(len(reqs), 13)
# The requests come back sorted by priority.
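        # The relative priority is capped at 10, so the requests past
        # the tenth all share that value.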
for x in range(10):
self.assertEqual(reqs[x]['relative_priority'], x)
self.assertEqual(reqs[10]['relative_priority'], 10)
self.assertEqual(reqs[11]['relative_priority'], 10)
self.assertEqual(reqs[12]['relative_priority'], 10)
# Fulfill only the first request
self.fake_nodepool.fulfillRequest(reqs[0])
for x in iterate_timeout(30, 'fulfill request'):
reqs = list(self.scheds.first.sched.nodepool.getNodeRequests())
if len(reqs) < count:
break
self.waitUntilSettled()
reqs = self.fake_nodepool.getNodeRequests()
self.assertEqual(len(reqs), 12)
for x in range(10):
self.assertEqual(reqs[x]['relative_priority'], x)
self.assertEqual(reqs[10]['relative_priority'], 10)
self.assertEqual(reqs[11]['relative_priority'], 10)
self.fake_nodepool.unpause()
self.waitUntilSettled()
@simple_layout('layouts/two-projects-integrated.yaml')
def test_nodepool_relative_priority_gate(self):
"Test that nodes are requested at the relative priority"
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
# project does not share a queue with project1 and project2.
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
reqs = self.fake_nodepool.getNodeRequests()
# The requests come back sorted by priority.
# Change A, first change for shared queue, high relative
# priority.
self.assertEqual(reqs[0]['_oid'], '100-0000000000')
self.assertEqual(reqs[0]['relative_priority'], 0)
# Change C, first change for independent project, high
# relative priority.
self.assertEqual(reqs[1]['_oid'], '100-0000000002')
self.assertEqual(reqs[1]['relative_priority'], 0)
# Change B, second change for shared queue, lower relative
# priority.
self.assertEqual(reqs[2]['_oid'], '100-0000000001')
self.assertEqual(reqs[2]['relative_priority'], 1)
self.fake_nodepool.unpause()
self.waitUntilSettled()
def test_nodepool_project_removal(self):
"Test that nodes are returned unused after project removal"
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.newTenantConfig('config/single-tenant/main-one-project.yaml')
# This layout defines only org/project, not org/project1
self.commitConfigUpdate(
'common-config',
'layouts/live-reconfiguration-del-project.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 0)
for node in self.fake_nodepool.getNodes():
self.assertFalse(node['_lock'])
self.assertEqual(node['state'], 'ready')
@simple_layout('layouts/nodeset-fallback.yaml')
def test_nodeset_fallback(self):
# Test that nodeset fallback works
self.executor_server.hold_jobs_in_build = True
# Verify that we get the correct number and order of
# alternates from our nested config.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
job = tenant.layout.getJob('check-job')
alts = job.flattenNodesetAlternatives(tenant.layout)
self.assertEqual(4, len(alts))
self.assertEqual('fast-nodeset', alts[0].name)
self.assertEqual('', alts[1].name)
self.assertEqual('red-nodeset', alts[2].name)
self.assertEqual('blue-nodeset', alts[3].name)
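        # (The alternate with the empty name is presumably an
        # anonymous nodeset defined inline in the job.)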
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
req = self.fake_nodepool.getNodeRequests()[0]
self.fake_nodepool.addFailRequest(req)
self.fake_nodepool.unpause()
self.waitUntilSettled()
build = self.getBuildByName('check-job')
inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')
with open(inv_path, 'r') as f:
inventory = yaml.safe_load(f)
label = inventory['all']['hosts']['controller']['nodepool']['label']
self.assertEqual('slow-label', label)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertNotIn('NODE_FAILURE', A.messages[0])
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
], ordered=False)
@simple_layout('layouts/multiple-templates.yaml')
def test_multiple_project_templates(self):
# Test that applying multiple project templates to a project
# doesn't alter them when used for a second project.
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
build = self.getJobFromHistory('py27')
self.assertEqual(build.parameters['zuul']['jobtags'], [])
def test_pending_merge_in_reconfig(self):
# Test that if we are waiting for an outstanding merge on
# reconfiguration that we continue to do so.
self.hold_merge_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.setMerged()
self.fake_gerrit.addEvent(A.getRefUpdatedEvent())
self.waitUntilSettled()
jobs = list(self.merger_api.all())
self.assertEqual(len(jobs), 1)
self.assertEqual(jobs[0].state, zuul.model.MergeRequest.HOLD)
# Reconfigure while we still have an outstanding merge job
self.hold_merge_jobs_in_queue = False
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
event = zuul.model.TriggerEvent()
self.scheds.first.sched.reconfigureTenant(
self.scheds.first.sched.abide.tenants['tenant-one'],
project1, event)
self.waitUntilSettled()
# Verify the merge job is still running and that the item is
# in the pipeline
jobs = list(self.merger_api.all())
self.assertEqual(jobs[0].state, zuul.model.MergeRequest.HOLD)
self.assertEqual(len(jobs), 1)
pipeline = tenant.layout.pipelines['post']
self.assertEqual(len(pipeline.getAllItems()), 1)
self.merger_api.release()
self.waitUntilSettled()
jobs = list(self.merger_api.all())
self.assertEqual(len(jobs), 0)
@simple_layout('layouts/parent-matchers.yaml')
def test_parent_matchers(self):
"Test that if a job's parent does not match, the job does not run"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
files = {'foo.txt': ''}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=files)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
files = {'bar.txt': ''}
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C',
files=files)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
files = {'foo.txt': '', 'bar.txt': ''}
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D',
files=files)
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='child-job', result='SUCCESS', changes='3,1'),
dict(name='child-job', result='SUCCESS', changes='4,1'),
], ordered=False)
@simple_layout('layouts/file-matchers.yaml')
def test_file_matchers(self):
"Test several file matchers"
files = {'parent1.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=files)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
files = {'parent2.txt': ''}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=files)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
files = {'child.txt': ''}
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C',
files=files)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
files = {'project.txt': ''}
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D',
files=files)
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
files = {'tests/foo': ''}
E = self.fake_gerrit.addFakeChange('org/project', 'master', 'E',
files=files)
self.fake_gerrit.addEvent(E.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
files = {'tests/docs/foo': ''}
F = self.fake_gerrit.addFakeChange('org/project', 'master', 'F',
files=files)
self.fake_gerrit.addEvent(F.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='child-job', result='SUCCESS', changes='2,1'),
dict(name='child-override-job', result='SUCCESS', changes='3,1'),
dict(name='project-override-job', result='SUCCESS', changes='4,1'),
dict(name='irr-job', result='SUCCESS', changes='5,1'),
dict(name='irr-override-job', result='SUCCESS', changes='5,1'),
dict(name='irr-job', result='SUCCESS', changes='6,1'),
], ordered=False)
def test_trusted_project_dep_on_non_live_untrusted_project(self):
# Test we get a layout for trusted projects when they depend on
# non live untrusted projects. This checks against a bug where
        # trusted project config changes can end up in an infinite loop
# trying to find the right layout.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
files = {'zuul.yaml': ''}
B = self.fake_gerrit.addFakeChange('common-config', 'master', 'B',
files=files)
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
dict(name='project1-project2-integration',
result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
@simple_layout('layouts/success-message.yaml')
def test_success_message(self):
# Test the success_message (and failure_message) job attrs
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.executor_server.failJob('badjob', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.messages), 1)
self.assertTrue('YAY' in A.messages[0])
self.assertTrue('BOO' in A.messages[0])
def test_merge_error(self):
# Test we don't get stuck on a merger error
self.waitUntilSettled()
self.patch(zuul.merger.merger.Repo, 'retry_attempts', 1)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.patch(git.Git, 'GIT_PYTHON_GIT_EXECUTABLE',
os.path.join(FIXTURE_DIR, 'git_fail.sh'))
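        # git_fail.sh is a fixture wrapper that makes git invocations
        # fail, so the merger cannot update the repo for this change.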
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn('Unable to update gerrit/org/project', A.messages[0])
@simple_layout('layouts/vars.yaml')
def test_jobdata(self):
# Test the use of JobData objects for job variables
self.executor_server.hold_jobs_in_build = True
self.useFixture(fixtures.MonkeyPatch(
'zuul.model.FrozenJob.MAX_DATA_LEN',
1))
self.useFixture(fixtures.MonkeyPatch(
'zuul.model.Build.MAX_DATA_LEN',
1))
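        # With MAX_DATA_LEN patched to 1, even tiny variables and
        # result data exceed the inline limit and are offloaded to
        # JobData objects, which is the path under test.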
# Return some data and pause the job. We use a paused job
# here because there are only two times we refresh JobData:
        # 1) A job which has not yet started its build:
        #    because the waiting status may change, we refresh the FrozenJob
        # 2) A job which is paused:
        #    because the result/data may change, we refresh the Build
# This allows us to test that we re-use JobData instances when
# we are able.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.executor_server.returnData(
'check-job', A,
{'somedata': 'foobar',
'zuul': {'pause': True}},
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
item = tenant.layout.pipelines['check'].queues[0].queue[0]
hold_job = item.getJobs()[1]
# Refresh the pipeline so that we can verify the JobData
# objects are immutable.
old_hold_job_variables = hold_job.variables
ctx = self.refreshPipelines(self.scheds.first.sched)
new_hold_job_variables = hold_job.variables
self.executor_server.release('check-job')
self.waitUntilSettled()
# Waiting on hold-job now
# Check the assertions on the hold job here so that the test
# can fail normally if they fail (it times out otherwise due
# to the held job).
self.assertEqual('hold-job', hold_job.name)
# Make sure we're really using JobData objects
self.assertTrue(isinstance(hold_job._variables, zuul.model.JobData))
# Make sure the same object instance is used
self.assertIs(old_hold_job_variables, new_hold_job_variables)
        # Hopefully these asserts won't change much over time. If
        # they don't, they may be a good way for us to catch
        # unintended extra read operations. If they change too much,
        # they may not be worth keeping and we can just remove them.
self.assertEqual(5, ctx.cumulative_read_objects)
self.assertEqual(5, ctx.cumulative_read_znodes)
self.assertEqual(0, ctx.cumulative_write_objects)
self.assertEqual(0, ctx.cumulative_write_znodes)
check_job = item.getJobs()[0]
self.assertEqual('check-job', check_job.name)
self.assertTrue(isinstance(check_job._variables,
zuul.model.JobData))
check_build = item.current_build_set.getBuild('check-job')
self.assertTrue(isinstance(check_build._result_data,
zuul.model.JobData))
# Refresh the pipeline so that we can verify the JobData
# objects are immutable.
old_check_build_results = check_build.result_data
ctx = self.refreshPipelines(self.scheds.first.sched)
new_check_build_results = check_build.result_data
# Verify that we did not reload results
self.assertIs(old_check_build_results, new_check_build_results)
# Again check the object read counts
self.assertEqual(4, ctx.cumulative_read_objects)
self.assertEqual(4, ctx.cumulative_read_znodes)
self.assertEqual(0, ctx.cumulative_write_objects)
self.assertEqual(0, ctx.cumulative_write_znodes)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
dict(name='hold-job', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_zkobject_parallel_refresh(self):
# Test that we don't deadlock when refreshing objects
zkobject.BaseZKContext._max_workers = 1
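        # With a single worker thread, any refresh that recursively
        # waits on the same executor would deadlock, making this the
        # worst case for the parallel refresh code.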
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
def test_leaked_pipeline_cleanup(self):
self.waitUntilSettled()
sched = self.scheds.first.sched
pipeline_state_path = "/zuul/tenant/tenant-one/pipeline/invalid"
self.zk_client.client.ensure_path(pipeline_state_path)
# Create the ZK path as a side-effect of getting the event queue.
sched.pipeline_management_events["tenant-one"]["invalid"]
pipeline_event_queue_path = PIPELINE_NAME_ROOT.format(
tenant="tenant-one", pipeline="invalid")
self.assertIsNotNone(self.zk_client.client.exists(pipeline_state_path))
# Wait for the event watcher to create the event queues
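        # (The inner loop breaks as soon as a queue is missing; the
        # for/else breaks out of the timeout loop only once all three
        # queues exist.)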
for _ in iterate_timeout(30, "create event queues"):
for event_queue in ("management", "trigger", "result"):
if self.zk_client.client.exists(
f"{pipeline_event_queue_path}/{event_queue}") is None:
break
else:
break
sched._runLeakedPipelineCleanup()
self.assertIsNone(
self.zk_client.client.exists(pipeline_event_queue_path))
self.assertIsNone(self.zk_client.client.exists(pipeline_state_path))
class TestChangeQueues(ZuulTestCase):
tenant_config_file = 'config/change-queues/main.yaml'
def _test_dependent_queues_per_branch(self, project,
queue_name='integrated',
queue_repo='common-config'):
self.create_branch(project, 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(project, 'stable'))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
B = self.fake_gerrit.addFakeChange(project, 'stable', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test', A)
        # Let A go into the gate first, then B
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
# There should be one project-test job at the head of each queue
self.assertBuilds([
dict(name='project-test', changes='1,1'),
dict(name='project-test', changes='2,1'),
])
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, p = tenant.getProject(project)
q1 = tenant.layout.pipelines['gate'].getQueue(
p.canonical_name, 'master')
q2 = tenant.layout.pipelines['gate'].getQueue(
p.canonical_name, 'stable')
self.assertEqual(q1.name, queue_name)
self.assertEqual(q2.name, queue_name)
# Both queues must contain one item
self.assertEqual(len(q1.queue), 1)
self.assertEqual(len(q2.queue), 1)
        # Release the builds; the job on the change on master fails
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertNotEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
# Now reconfigure the queue to be non-branched and run the same test
# again.
conf = textwrap.dedent(
"""
- queue:
name: {}
per-branch: false
""").format(queue_name)
file_dict = {'zuul.d/queue.yaml': conf}
        C = self.fake_gerrit.addFakeChange(queue_repo, 'master', 'C',
files=file_dict)
C.setMerged()
self.fake_gerrit.addEvent(C.getChangeMergedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
D = self.fake_gerrit.addFakeChange(project, 'master', 'D')
E = self.fake_gerrit.addFakeChange(project, 'stable', 'E')
D.addApproval('Code-Review', 2)
E.addApproval('Code-Review', 2)
self.executor_server.failJob('project-test', D)
        # Let D go into the gate first, then E
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(E.addApproval('Approved', 1))
self.waitUntilSettled()
# There should be two project-test jobs in a shared queue
self.assertBuilds([
dict(name='project-test', changes='4,1'),
dict(name='project-test', changes='4,1 5,1'),
])
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, p = tenant.getProject(project)
q1 = tenant.layout.pipelines['gate'].getQueue(
p.canonical_name, 'master')
q2 = tenant.layout.pipelines['gate'].getQueue(
p.canonical_name, 'stable')
q3 = tenant.layout.pipelines['gate'].getQueue(
p.canonical_name, None)
# There should be no branch specific queues anymore
self.assertEqual(q1, None)
self.assertEqual(q2, None)
self.assertEqual(q3.name, queue_name)
        # The shared queue must now contain both items
self.assertEqual(len(q3.queue), 2)
        # Release D's project-test first to keep the recorded history
        # deterministic
self.executor_server.release('project-test', change='4 1')
self.waitUntilSettled()
        # Release the remaining builds; the job on the master change fails
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertNotEqual(D.data['status'], 'MERGED')
self.assertEqual(E.data['status'], 'MERGED')
self.assertEqual(D.reported, 2)
self.assertEqual(E.reported, 2)
self.assertHistory([
# Independent runs because of per branch queues
dict(name='project-test', result='FAILURE', changes='1,1'),
dict(name='project-test', result='SUCCESS', changes='2,1'),
# Same queue with gate reset because of 4,1
dict(name='project-test', result='FAILURE', changes='4,1'),
# Result can be anything depending on timing of the gate reset.
dict(name='project-test', changes='4,1 5,1'),
dict(name='project-test', result='SUCCESS', changes='5,1'),
], ordered=False)
def test_dependent_queues_per_branch(self):
"""
Test that change queues can be different for different branches.
In this case the project contains zuul config so the branches are
known upfront and the queues are pre-seeded.
"""
self._test_dependent_queues_per_branch('org/project')
def test_dependent_queues_per_branch_no_config(self):
"""
Test that change queues can be different for different branches.
In this case we create changes for two branches in a repo that
doesn't contain zuul config so the queues are not pre-seeded
in the gate pipeline.
"""
self._test_dependent_queues_per_branch('org/project2')
def test_dependent_queues_per_branch_untrusted(self):
"""
Test that change queues can be different for different branches.
In this case we create changes for two branches in an untrusted repo
that defines its own queue.
"""
self._test_dependent_queues_per_branch(
'org/project3', queue_name='integrated-untrusted',
queue_repo='org/project3')
def test_dependent_queues_per_branch_project_queue(self):
"""
Test that change queues can be different for different branches.
In this case we create changes for two branches in a repo that
references the queue on project level instead of pipeline level.
"""
self._test_dependent_queues_per_branch('org/project4')
def test_duplicate_definition_on_branches(self):
project = 'org/project3'
self.create_branch(project, 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(project, 'stable'))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
        self.assertEqual(
            len(tenant.layout.loading_errors), 1,
            "One loading error should have been accumulated")
# This error is expected and unrelated to this test (the
# ignored configuration is used by other tests in this class):
self.assertIn('Queue integrated already defined',
tenant.layout.loading_errors[0].error)
# At this point we've verified that we can have identical
# queue definitions on multiple branches without conflict.
# Next, let's try to change the queue def on one branch so it
# doesn't match (flip the per-branch boolean):
conf = textwrap.dedent(
"""
- queue:
name: integrated-untrusted
per-branch: false
""")
file_dict = {'zuul.d/queue.yaml': conf}
A = self.fake_gerrit.addFakeChange(project, 'stable', 'A',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(A.messages), 1)
self.assertTrue(
'Queue integrated-untrusted does not match '
'existing definition in branch master' in A.messages[0])
self.assertEqual(A.data['status'], 'NEW')
class TestJobUpdateBrokenConfig(ZuulTestCase):
tenant_config_file = 'config/job-update-broken/main.yaml'
def test_fix_check_without_running(self):
"Test that we can fix a broken check pipeline (don't run the job)"
in_repo_conf = textwrap.dedent(
"""
- job:
name: existing-files
files:
- README.txt
- project-template:
name: files-template
check:
jobs:
- existing-files
- noop
""")
# When the config is broken, we don't override any files
# matchers since we don't have a valid basis. Since this
# doesn't update README.txt, nothing should run.
file_dict = {'zuul.d/existing.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(A.reported, 1)
def test_fix_check_with_running(self):
"Test that we can fix a broken check pipeline (do run the job)"
in_repo_conf = textwrap.dedent(
"""
- job:
name: existing-files
files:
- README.txt
- project-template:
name: files-template
check:
jobs:
- existing-files
""")
# When the config is broken, we don't override any files
# matchers since we don't have a valid basis. Since this
# does update README.txt, the job should run.
file_dict = {'zuul.d/existing.yaml': in_repo_conf,
'README.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='existing-files', result='SUCCESS', changes='1,1'),
])
self.assertEqual(A.reported, 1)
class TestJobUpdateFileMatcher(ZuulTestCase):
tenant_config_file = 'config/job-update/main.yaml'
def test_matchers(self):
"Test matchers work as expected with no change"
file_dict = {'README.txt': ''}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
file_dict = {'something_else': ''}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='existing-files', result='SUCCESS', changes='1,1'),
dict(name='existing-irr', result='SUCCESS', changes='2,1'),
])
def test_job_update(self):
"Test matchers are overridden with a config update"
in_repo_conf = textwrap.dedent(
"""
- job:
name: existing-files
tags: foo
- job:
name: existing-irr
tags: foo
""")
file_dict = {'zuul.d/new.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='existing-files', result='SUCCESS', changes='1,1'),
dict(name='existing-irr', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_job_update_files(self):
"Test that changes to file matchers themselves don't run jobs"
# Normally we want to ignore file matchers and run jobs if the
# job config changes, but if the only thing about the job
# config that changes *is* the file matchers, then we don't
# want to run it.
in_repo_conf = textwrap.dedent(
"""
- job:
name: existing-files
files: 'doesnotexist'
- job:
name: existing-irr
irrelevant-files:
- README
- ^zuul.d/.*$
- newthing
""")
file_dict = {'zuul.d/new.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
def test_new_job(self):
"Test matchers are overridden when creating a new job"
in_repo_conf = textwrap.dedent(
"""
- job:
name: new-files
parent: existing-files
- project:
check:
jobs:
- new-files
""")
file_dict = {'zuul.d/new.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='new-files', result='SUCCESS', changes='1,1'),
])
def test_patch_series(self):
"Test that we diff to the nearest layout in a patch series"
in_repo_conf = textwrap.dedent(
"""
- job:
name: new-files1
parent: existing-files
- project:
check:
jobs:
- new-files1
""")
file_dict = {'zuul.d/new1.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
in_repo_conf = textwrap.dedent(
"""
- job:
name: new-files2
parent: existing-files
- project:
check:
jobs:
- new-files2
""")
file_dict = {'zuul.d/new2.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='new-files2', result='SUCCESS', changes='1,1 2,1'),
])
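        # Only new-files2 runs: B's config is diffed against A's
        # layout (the nearest layout in the series) rather than the
        # branch tip, so new-files1 is not treated as updated here.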
def test_disable_match(self):
"Test matchers are not overridden if we say so"
in_repo_conf = textwrap.dedent(
"""
- job:
name: new-files
parent: existing-files
match-on-config-updates: false
- project:
check:
jobs:
- new-files
""")
file_dict = {'zuul.d/new.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
class TestExecutor(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def assertFinalState(self):
# In this test, we expect to shut down in a non-final state,
# so skip these checks.
pass
def assertCleanShutdown(self):
self.log.debug("Assert clean shutdown")
# After shutdown, make sure no jobs are running
self.assertEqual({}, self.executor_server.job_workers)
# Make sure that git.Repo objects have been garbage collected.
gc.disable()
try:
gc.collect()
for obj in gc.get_objects():
if isinstance(obj, git.Repo):
self.log.debug("Leaked git repo object: %s" % repr(obj))
        finally:
            gc.enable()
def test_executor_shutdown(self):
"Test that the executor can shut down with jobs running"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
class TestDependencyGraph(ZuulTestCase):
tenant_config_file = 'config/dependency-graph/main.yaml'
    def test_dependency_graph_dispatch_jobs_once(self):
"Test a job in a dependency graph is queued only once"
# Job dependencies, starting with A
        #     A
        #    / \
        #   B   C
        #  / \ / \
        # D   F   E
        #     |
        #     G
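        # (B and C depend on A; D depends on B; F depends on both B
        # and C; E depends on C; G depends on F.)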
self.executor_server.hold_jobs_in_build = True
change = self.fake_gerrit.addFakeChange(
'org/project', 'master', 'change')
change.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(change.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual([b.name for b in self.builds], ['A'])
self.executor_server.release('A')
self.waitUntilSettled()
self.assertEqual(sorted(b.name for b in self.builds), ['B', 'C'])
self.executor_server.release('B')
self.waitUntilSettled()
self.assertEqual(sorted(b.name for b in self.builds), ['C', 'D'])
self.executor_server.release('D')
self.waitUntilSettled()
self.assertEqual([b.name for b in self.builds], ['C'])
self.executor_server.release('C')
self.waitUntilSettled()
self.assertEqual(sorted(b.name for b in self.builds), ['E', 'F'])
self.executor_server.release('F')
self.waitUntilSettled()
self.assertEqual(sorted(b.name for b in self.builds), ['E', 'G'])
self.executor_server.release('G')
self.waitUntilSettled()
self.assertEqual([b.name for b in self.builds], ['E'])
self.executor_server.release('E')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 7)
self.assertEqual(change.data['status'], 'MERGED')
self.assertEqual(change.reported, 2)
def test_jobs_launched_only_if_all_dependencies_are_successful(self):
"Test that a job waits till all dependencies are successful"
# Job dependencies, starting with A
        #     A
        #    / \
        #   B   C*
        #  / \ / \
        # D   F   E
        #     |
        #     G
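        # (C, marked with *, is made to fail; F, E and G depend on it
        # and are therefore skipped.)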
self.executor_server.hold_jobs_in_build = True
change = self.fake_gerrit.addFakeChange(
'org/project', 'master', 'change')
change.addApproval('Code-Review', 2)
self.executor_server.failJob('C', change)
self.fake_gerrit.addEvent(change.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual([b.name for b in self.builds], ['A'])
self.executor_server.release('A')
self.waitUntilSettled()
self.assertEqual(sorted(b.name for b in self.builds), ['B', 'C'])
self.executor_server.release('B')
self.waitUntilSettled()
self.assertEqual(sorted(b.name for b in self.builds), ['C', 'D'])
self.executor_server.release('D')
self.waitUntilSettled()
self.assertEqual([b.name for b in self.builds], ['C'])
self.executor_server.release('C')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(len(self.history), 4)
self.assertEqual(change.data['status'], 'NEW')
self.assertEqual(change.reported, 2)
@simple_layout('layouts/soft-dependencies-error.yaml')
def test_soft_dependencies_error(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(len(A.messages), 1)
self.assertTrue('Job project-merge not defined' in A.messages[0])
self.log.info(A.messages)
@simple_layout('layouts/soft-dependencies.yaml')
def test_soft_dependencies(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='deploy', result='SUCCESS', changes='1,1'),
], ordered=False)
@simple_layout('layouts/not-skip-when-reenqueue.yaml')
def test_child_with_soft_dependency_should_not_skip(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.executor_server.hold_jobs_in_build = True
self.executor_server.returnData(
'grand-parent', A,
{'zuul':
{'child_jobs': ['parent2']}
}
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('grand-parent')
self.waitUntilSettled()
# grand-parent success, parent1 skipped, parent2 running
self.assertHistory([
dict(name='grand-parent', result='SUCCESS', changes='1,1'),
], ordered=False)
self.assertBuilds([dict(name='parent2')])
# Reconfigure to trigger a re-enqueue, this should not cause job
# 'child' to be skipped because parent1 was skipped
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.executor_server.release('parent1')
self.executor_server.release('parent2')
self.waitUntilSettled()
# grand-parent success, parent1 skipped, parent2 success, child running
self.assertHistory([
dict(name='grand-parent', result='SUCCESS', changes='1,1'),
dict(name='parent2', result='SUCCESS', changes='1,1'),
], ordered=False)
self.assertBuilds([dict(name='child')])
self.executor_server.release('child')
self.waitUntilSettled()
# grand-parent success, parent1 skipped, parent2 success, child success
self.assertHistory([
dict(name='grand-parent', result='SUCCESS', changes='1,1'),
dict(name='parent2', result='SUCCESS', changes='1,1'),
dict(name='child', result='SUCCESS', changes='1,1'),
], ordered=False)
self.assertBuilds([])
@simple_layout('layouts/soft-dependencies.yaml')
def test_soft_dependencies_failure(self):
file_dict = {'main.c': 'test'}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.executor_server.failJob('build', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='build', result='FAILURE', changes='1,1'),
], ordered=False)
self.assertIn('Skipped due to failed job build', A.messages[0])
class TestDuplicatePipeline(ZuulTestCase):
tenant_config_file = 'config/duplicate-pipeline/main.yaml'
def test_duplicate_pipelines(self):
"Test that a change matching multiple pipelines works"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getChangeRestoredEvent())
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1',
pipeline='dup1'),
dict(name='project-test1', result='SUCCESS', changes='1,1',
pipeline='dup2'),
], ordered=False)
self.assertEqual(len(A.messages), 2)
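        # The two pipelines report independently, so the order of the
        # two messages is nondeterministic.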
if 'dup1' in A.messages[0]:
self.assertIn('dup1', A.messages[0])
self.assertNotIn('dup2', A.messages[0])
self.assertIn('project-test1', A.messages[0])
self.assertIn('dup2', A.messages[1])
self.assertNotIn('dup1', A.messages[1])
self.assertIn('project-test1', A.messages[1])
else:
self.assertIn('dup1', A.messages[1])
self.assertNotIn('dup2', A.messages[1])
self.assertIn('project-test1', A.messages[1])
self.assertIn('dup2', A.messages[0])
self.assertNotIn('dup1', A.messages[0])
self.assertIn('project-test1', A.messages[0])
class TestSchedulerRegexProject(ZuulTestCase):
tenant_config_file = 'config/regex-project/main.yaml'
def test_regex_project(self):
"Test that changes are tested in parallel and merged in series"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# We expect the following builds:
# - 1 for org/project
# - 3 for org/project1
# - 3 for org/project2
self.assertEqual(len(self.history), 7)
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(C.reported, 1)
self.assertHistory([
dict(name='project-test', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1'),
dict(name='project-common-test', result='SUCCESS', changes='2,1'),
dict(name='project-common-test-canonical', result='SUCCESS',
changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='3,1'),
dict(name='project-common-test', result='SUCCESS', changes='3,1'),
dict(name='project-common-test-canonical', result='SUCCESS',
changes='3,1'),
], ordered=False)
class TestSchedulerTemplatedProject(ZuulTestCase):
tenant_config_file = 'config/templated-project/main.yaml'
def test_job_from_templates_executed(self):
"Test whether a job generated via a template can be executed"
A = self.fake_gerrit.addFakeChange(
'org/templated-project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
def test_layered_templates(self):
"Test whether a job generated via a template can be executed"
A = self.fake_gerrit.addFakeChange(
'org/layered-project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('layered-project-test3'
).result, 'SUCCESS')
self.assertEqual(self.getJobFromHistory('layered-project-test4'
).result, 'SUCCESS')
self.assertEqual(self.getJobFromHistory('layered-project-foo-test5'
).result, 'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test6').result,
'SUCCESS')
def test_unimplied_branch_matchers(self):
# This tests that there are no implied branch matchers added
# to project templates in unbranched projects.
self.create_branch('org/layered-project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/layered-project', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange(
'org/layered-project', 'stable', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.log.info(
self.getJobFromHistory('project-test1').
parameters['zuul']['_inheritance_path'])
def test_implied_branch_matchers(self):
# This tests that there is an implied branch matcher when a
# template is used on an in-repo project pipeline definition.
self.create_branch('untrusted-config', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'untrusted-config', 'stable'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange(
'untrusted-config', 'stable', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.log.info(
self.getJobFromHistory('project-test1').
parameters['zuul']['_inheritance_path'])
# Now create a new branch named stable-foo and change the project
# pipeline
self.create_branch('untrusted-config', 'stable-foo')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'untrusted-config', 'stable-foo'))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
"""
- project:
name: untrusted-config
templates:
- test-three-and-four
check:
jobs:
- project-test7
""")
file_dict = {'zuul.d/project.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('untrusted-config', 'stable-foo',
'B', files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test7', result='SUCCESS', changes='2,1'),
dict(name='layered-project-test3', result='SUCCESS',
changes='2,1'),
dict(name='layered-project-test4', result='SUCCESS',
changes='2,1'),
], ordered=False)
# Inheritance path should not contain items from branch stable
# This tests that not only is it the case that the stable
# branch project-template did not apply, but also that the
# stable branch definitions of the project-test7 did not apply
# (since the job definitions also have implied branch
# matchers).
job = self.getJobFromHistory('project-test7', branch='stable-foo')
inheritance_path = job.parameters['zuul']['_inheritance_path']
self.assertEqual(len(inheritance_path), 4)
stable_items = [x for x in inheritance_path
if 'untrusted-config/zuul.d/jobs.yaml@stable#' in x]
self.assertEqual(len(stable_items), 0)
class TestSchedulerMerges(ZuulTestCase):
tenant_config_file = 'config/merges/main.yaml'
def _test_project_merge_mode(self, mode):
self.executor_server.keep_jobdir = False
project = 'org/project-%s' % mode
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange(project, 'master', 'A')
B = self.fake_gerrit.addFakeChange(project, 'master', 'B')
C = self.fake_gerrit.addFakeChange(project, 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
build = self.builds[-1]
path = os.path.join(build.jobdir.src_root, 'review.example.com',
project)
repo = git.Repo(path)
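        # iter_commits() walks the history newest-first; the list is
        # reversed below to compare messages in chronological order.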
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
return repo_messages
def _test_merge(self, mode):
us_path = 'file://' + os.path.join(
self.upstream_root, 'org/project-%s' % mode)
expected_messages = [
'initial commit',
'add content from fixture',
# the intermediate commits order is nondeterministic
"Merge commit 'refs/changes/02/2/1' of %s into HEAD" % us_path,
"Merge commit 'refs/changes/03/3/1' of %s into HEAD" % us_path,
]
result = self._test_project_merge_mode(mode)
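        # Only the first two and the last two messages are
        # deterministic; the order of the intermediate merge commits
        # may vary, so they are not compared.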
self.assertEqual(result[:2], expected_messages[:2])
self.assertEqual(result[-2:], expected_messages[-2:])
def test_project_merge_mode_merge(self):
self._test_merge('merge')
def test_project_merge_mode_merge_resolve(self):
self._test_merge('merge-resolve')
def test_project_merge_mode_cherrypick(self):
expected_messages = [
'initial commit',
'add content from fixture',
'A-1',
'B-1',
'C-1']
result = self._test_project_merge_mode('cherry-pick')
self.assertEqual(result, expected_messages)
def test_project_merge_mode_cherrypick_redundant(self):
# A redundant commit (that is, one that has already been applied to the
# working tree) should be skipped
self.executor_server.keep_jobdir = False
project = 'org/project-cherry-pick'
files = {
"foo.txt": "ABC",
}
A = self.fake_gerrit.addFakeChange(project, 'master', 'A', files=files)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
B = self.fake_gerrit.addFakeChange(project, 'master', 'B', files=files)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
build = self.builds[-1]
path = os.path.join(build.jobdir.src_root, 'review.example.com',
project)
repo = git.Repo(path)
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
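        # B's commit should not appear below: it was redundant with
        # A-1, which is already part of the tree.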
expected_messages = [
'initial commit',
'add content from fixture',
'A-1',
]
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='2,1'),
])
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(repo_messages, expected_messages)
def test_project_merge_mode_cherrypick_empty(self):
# An empty commit (that is, one that doesn't modify any files) should
# be preserved
self.executor_server.keep_jobdir = False
project = 'org/project-cherry-pick'
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange(project, 'master', 'A', empty=True)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
build = self.builds[-1]
path = os.path.join(build.jobdir.src_root, 'review.example.com',
project)
repo = git.Repo(path)
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
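        # The empty change should still produce its own commit, but
        # with no file modifications relative to its parent.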
changed_files = list(repo.commit("HEAD").diff(repo.commit("HEAD~1")))
self.assertEqual(changed_files, [])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
expected_messages = [
'initial commit',
'add content from fixture',
'A-1',
]
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1'),
])
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(repo_messages, expected_messages)
def test_project_merge_mode_cherrypick_branch_merge(self):
"Test that branches can be merged together in cherry-pick mode"
self.create_branch('org/project-merge-branches', 'mp')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project-merge-branches', 'mp'))
self.waitUntilSettled()
path = os.path.join(self.upstream_root, 'org/project-merge-branches')
repo = git.Repo(path)
master_sha = repo.heads.master.commit.hexsha
mp_sha = repo.heads.mp.commit.hexsha
self.executor_server.hold_jobs_in_build = True
M = self.fake_gerrit.addFakeChange(
'org/project-merge-branches', 'master', 'M',
merge_parents=[
master_sha,
mp_sha,
])
M.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(M.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
build = self.builds[-1]
self.assertEqual(build.parameters['zuul']['branch'], 'master')
path = os.path.join(build.jobdir.src_root, 'review.example.com',
"org/project-merge-branches")
repo = git.Repo(path)
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
correct_messages = [
'initial commit',
'add content from fixture',
'mp commit',
'M-1']
self.assertEqual(repo_messages, correct_messages)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_merge_branch(self):
"Test that the right commits are on alternate branches"
self.create_branch('org/project-merge-branches', 'mp')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project-merge-branches', 'mp'))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange(
'org/project-merge-branches', 'mp', 'A')
B = self.fake_gerrit.addFakeChange(
'org/project-merge-branches', 'mp', 'B')
C = self.fake_gerrit.addFakeChange(
'org/project-merge-branches', 'mp', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
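        # Release the merge jobs one at a time so that A, B and C are
        # merged onto the mp branch sequentially.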
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
build = self.builds[-1]
self.assertEqual(build.parameters['zuul']['branch'], 'mp')
path = os.path.join(build.jobdir.src_root, 'review.example.com',
'org/project-merge-branches')
repo = git.Repo(path)
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
correct_messages = [
'initial commit',
'add content from fixture',
'mp commit',
'A-1', 'B-1', 'C-1']
self.assertEqual(repo_messages, correct_messages)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_merge_multi_branch(self):
"Test that dependent changes on multiple branches are merged"
self.create_branch('org/project-merge-branches', 'mp')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project-merge-branches', 'mp'))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange(
'org/project-merge-branches', 'master', 'A')
B = self.fake_gerrit.addFakeChange(
'org/project-merge-branches', 'mp', 'B')
C = self.fake_gerrit.addFakeChange(
'org/project-merge-branches', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
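        # Find the running project-merge build for change A; its
        # workspace should contain only A's commit at this point.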
job_A = None
for job in self.builds:
if 'project-merge' in job.name:
job_A = job
path = os.path.join(job_A.jobdir.src_root, 'review.example.com',
'org/project-merge-branches')
repo = git.Repo(path)
repo_messages = [c.message.strip()
for c in repo.iter_commits()]
repo_messages.reverse()
correct_messages = [
'initial commit', 'add content from fixture', 'A-1']
self.assertEqual(repo_messages, correct_messages)
self.executor_server.release('.*-merge')
self.waitUntilSettled()
job_B = None
for job in self.builds:
if 'project-merge' in job.name:
job_B = job
path = os.path.join(job_B.jobdir.src_root, 'review.example.com',
'org/project-merge-branches')
repo = git.Repo(path)
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
correct_messages = [
'initial commit', 'add content from fixture', 'mp commit', 'B-1']
self.assertEqual(repo_messages, correct_messages)
self.executor_server.release('.*-merge')
self.waitUntilSettled()
job_C = None
for job in self.builds:
if 'project-merge' in job.name:
job_C = job
path = os.path.join(job_C.jobdir.src_root, 'review.example.com',
'org/project-merge-branches')
repo = git.Repo(path)
repo_messages = [c.message.strip() for c in repo.iter_commits()]
repo_messages.reverse()
correct_messages = [
'initial commit', 'add content from fixture',
'A-1', 'C-1']
# Ensure the right commits are in the history for this ref
self.assertEqual(repo_messages, correct_messages)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()


class TestSemaphore(ZuulTestCase):
tenant_config_file = 'config/semaphore/main.yaml'
def test_semaphore_one(self):
"Test semaphores with max=1 (mutex)"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.hold_jobs_in_build = True
# Pause nodepool so we can check the ordering of getting the nodes
        # and acquiring the semaphore.
self.fake_nodepool.paused = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
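        # Inspect the pipeline status JSON; the first queue item's job
        # list reports why each job is still waiting.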
status = tenant.layout.pipelines["check"].formatStatusJSON()
jobs = status["change_queues"][0]["heads"][0][0]["jobs"]
self.assertEqual(jobs[0]["waiting_status"],
'node request: 200-0000000000')
self.assertEqual(jobs[1]["waiting_status"],
'node request: 200-0000000001')
self.assertEqual(jobs[2]["waiting_status"],
'semaphores: test-semaphore')
# By default we first lock the semaphore and then get the nodes
        # so at this point the semaphore must already be acquired.
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.fake_nodepool.paused = False
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'semaphore-one-test1')
self.assertEqual(self.builds[2].name, 'project-test1')
self.executor_server.release('semaphore-one-test1')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(self.builds[2].name, 'semaphore-one-test2')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.executor_server.release('semaphore-one-test2')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(self.builds[2].name, 'semaphore-one-test1')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.executor_server.release('semaphore-one-test1')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(self.builds[2].name, 'semaphore-one-test2')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.executor_server.release('semaphore-one-test2')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
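        # The semaphore holder gauge should have reported both a held
        # (1) and a released (0) value over the course of the test.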
self.assertReportedStat(
'zuul.tenant.tenant-one.semaphore.test-semaphore.holders',
value='1', kind='g')
self.assertReportedStat(
'zuul.tenant.tenant-one.semaphore.test-semaphore.holders',
value='0', kind='g')
def test_semaphore_two(self):
"Test semaphores with max>1"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore-two")), 0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'semaphore-two-test1')
self.assertEqual(self.builds[2].name, 'semaphore-two-test2')
self.assertEqual(self.builds[3].name, 'project-test1')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore-two")), 2)
self.executor_server.release('semaphore-two-test1')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'semaphore-two-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'semaphore-two-test1')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore-two")), 2)
self.executor_server.release('semaphore-two-test2')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(self.builds[2].name, 'semaphore-two-test1')
self.assertEqual(self.builds[3].name, 'semaphore-two-test2')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore-two")), 2)
self.executor_server.release('semaphore-two-test1')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(self.builds[2].name, 'semaphore-two-test2')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore-two")), 1)
self.executor_server.release('semaphore-two-test2')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore-two")), 0)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
def test_semaphore_node_failure(self):
"Test semaphore and node failure"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# Pause nodepool so we can fail the node request later
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# By default we first lock the semaphore and then get the nodes
        # so at this point the semaphore must already be acquired.
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
# Fail the node request and unpause
req = self.fake_nodepool.getNodeRequests()[0]
self.fake_nodepool.addFailRequest(req)
self.fake_nodepool.unpause()
self.waitUntilSettled()
# At this point the job that holds the semaphore failed with
# node_failure and the semaphore must be released.
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
        self.assertEqual(1, A.reported)
self.assertTrue(re.search('semaphore-one-test3 .* NODE_FAILURE',
A.messages[0]))
def test_semaphore_resources_first(self):
"Test semaphores with max=1 (mutex) and get resources first"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.hold_jobs_in_build = True
# Pause nodepool so we can check the ordering of getting the nodes
        # and acquiring the semaphore.
self.fake_nodepool.paused = True
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project3', 'master', 'B')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Here we first get the resources and then lock the semaphore
        # so at this point the semaphore should not yet be acquired.
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_nodepool.paused = False
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name,
'semaphore-one-test1-resources-first')
self.assertEqual(self.builds[2].name, 'project-test1')
self.executor_server.release('semaphore-one-test1')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test1')
self.assertEqual(self.builds[2].name,
'semaphore-one-test2-resources-first')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_semaphore_resources_first_node_failure(self):
"Test semaphore and node failure"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# Pause nodepool so we can fail the node request later
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project4', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# With resources first we first get the nodes so at this point the
        # semaphore must not yet be acquired.
self.assertEqual(len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore")), 0)
# Fail the node request and unpause
req = self.fake_nodepool.getNodeRequests()[0]
self.fake_nodepool.addFailRequest(req)
self.fake_nodepool.unpause()
self.waitUntilSettled()
        # At this point the job should never have acquired the
        # semaphore, so check that it still has not locked one.
self.assertEqual(len(tenant.semaphore_handler.semaphoreHolders(
"test-semaphore")), 0)
        self.assertEqual(1, A.reported)
self.assertTrue(
re.search('semaphore-one-test1-resources-first .* NODE_FAILURE',
A.messages[0]))
def test_semaphore_zk_error(self):
"Test semaphore release with zk error"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
# Simulate a single zk error in useNodeSet
orig_useNodeSet = self.scheds.first.sched.nodepool.useNodeSet
def broken_use_nodeset(nodeset, tenant_name, project_name):
# restore original useNodeSet
self.scheds.first.sched.nodepool.useNodeSet = orig_useNodeSet
raise NoNodeError()
self.scheds.first.sched.nodepool.useNodeSet = broken_use_nodeset
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# The semaphore should be released
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
# cleanup the queue
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
def test_semaphore_abandon(self):
"Test abandon with job semaphores"
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
# The check pipeline should be empty
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
# The semaphore should be released
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_semaphore_abandon_pending_node_request(self):
"Test abandon with job semaphores and pending node request"
self.executor_server.hold_jobs_in_build = True
# Pause nodepool so we can check the ordering of getting the nodes
        # and acquiring the semaphore.
self.fake_nodepool.paused = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
# The check pipeline should be empty
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
# The semaphore should be released
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.executor_server.hold_jobs_in_build = False
self.fake_nodepool.paused = False
self.executor_server.release()
self.waitUntilSettled()
def test_semaphore_abandon_pending_execution(self):
"Test abandon with job semaphores and pending job execution"
# Pause the executor so it doesn't take any jobs.
self.executor_server.pause()
# Start merger as the paused executor won't take merge jobs.
self._startMerger()
# Pause nodepool so we can wait on the node requests and fulfill them
# in a controlled manner.
self.fake_nodepool.paused = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
reqs = list(self.scheds.first.sched.nodepool.getNodeRequests())
self.assertEqual(len(reqs), 2)
# Now unpause nodepool to fulfill the node requests. We cannot use
# waitUntilSettled here because the executor is paused.
self.fake_nodepool.paused = False
for _ in iterate_timeout(30, 'fulfill node requests'):
reqs = [
r for r in self.scheds.first.sched.nodepool.getNodeRequests()
if r.state != zuul.model.STATE_FULFILLED
]
if len(reqs) == 0:
break
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.fake_gerrit.addEvent(A.getChangeAbandonedEvent())
self.waitUntilSettled()
# The check pipeline should be empty
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
# The semaphore should be released
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.executor_server.release()
self.waitUntilSettled()
def test_semaphore_new_patchset(self):
"Test new patchset with job semaphores"
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
items = check_pipeline.getAllItems()
self.assertEqual(items[0].change.number, '1')
self.assertEqual(items[0].change.patchset, '2')
self.assertTrue(items[0].live)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The semaphore should be released
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
def test_semaphore_reconfigure(self):
"Test reconfigure with job semaphores"
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
# reconfigure without layout change
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# semaphore still must be held
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
# remove the pipeline
self.commitConfigUpdate(
'common-config',
'config/semaphore/zuul-reconfiguration.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.release('project-test1')
self.waitUntilSettled()
# There should be no builds anymore
self.assertEqual(len(self.builds), 0)
# The semaphore should be released
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
def test_semaphore_handler_cleanup(self):
"Test the semaphore handler leak cleanup"
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
# Save some variables for later use while the job is running
check_pipeline = tenant.layout.pipelines['check']
item = check_pipeline.getAllItems()[0]
job = item.getJob('semaphore-one-test1')
tenant.semaphore_handler.cleanupLeaks()
# Nothing has leaked; our handle should be present.
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# Make sure the semaphore is released normally
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
# Use our previously saved data to simulate a leaked semaphore
tenant.semaphore_handler.acquire(item, job, False)
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
tenant.semaphore_handler.cleanupLeaks()
# Make sure the leaked semaphore is cleaned up
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
@simple_layout('layouts/multiple-semaphores.yaml')
def test_multiple_semaphores(self):
# Test a job with multiple semaphores
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# One job should be running, and hold sem1
self.assertBuilds([dict(name='job1')])
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
        # Job2 requires sem1 and sem2; it hasn't started because job1
        # is still holding sem1.
self.assertBuilds([dict(name='job1')])
self.executor_server.release('job1')
self.waitUntilSettled()
# Job1 is finished, so job2 can acquire both semaphores.
self.assertBuilds([dict(name='job2')])
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='job1', result='SUCCESS', changes='1,1'),
dict(name='job2', result='SUCCESS', changes='2,1'),
])
# TODO(corvus): Consider a version of this test which launches
# 2 jobs with the same multiple-semaphore requirements
# simultaneously to test the behavior with contention (at
# least one should be able to start on each pass through the
# loop).
@simple_layout('layouts/semaphore-multi-pipeline.yaml')
def test_semaphore_multi_pipeline(self):
"Test semaphores in multiple pipelines"
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
# Start a second change in a different pipeline
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
# Still just the first change holds the lock
self.assertEqual(len(self.builds), 1)
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
# Now the second should run
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
dict(name='gate-job', result='SUCCESS', changes='2,1'),
])


class TestSemaphoreMultiTenant(ZuulTestCase):
tenant_config_file = 'config/multi-tenant-semaphore/main.yaml'
def test_semaphore_tenant_isolation(self):
"Test semaphores in multiple tenants"
self.waitUntilSettled()
tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
E = self.fake_gerrit.addFakeChange('org/project2', 'master', 'E')
self.assertEqual(
len(tenant_one.semaphore_handler.semaphoreHolders(
"test-semaphore")), 0)
self.assertEqual(
len(tenant_two.semaphore_handler.semaphoreHolders(
"test-semaphore")), 0)
# add patches to project1 of tenant-one
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# one build of project1-test1 must run
# semaphore of tenant-one must be acquired once
# semaphore of tenant-two must not be acquired
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project1-test1')
self.assertEqual(
len(tenant_one.semaphore_handler.semaphoreHolders(
"test-semaphore")), 1)
self.assertEqual(
len(tenant_two.semaphore_handler.semaphoreHolders(
"test-semaphore")), 0)
# add patches to project2 of tenant-two
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(E.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# one build of project1-test1 must run
# two builds of project2-test1 must run
# semaphore of tenant-one must be acquired once
# semaphore of tenant-two must be acquired twice
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project1-test1')
self.assertEqual(self.builds[1].name, 'project2-test1')
self.assertEqual(self.builds[2].name, 'project2-test1')
self.assertEqual(
len(tenant_one.semaphore_handler.semaphoreHolders(
"test-semaphore")), 1)
self.assertEqual(
len(tenant_two.semaphore_handler.semaphoreHolders(
"test-semaphore")), 2)
self.executor_server.release('project1-test1')
self.waitUntilSettled()
# one build of project1-test1 must run
# two builds of project2-test1 must run
# semaphore of tenant-one must be acquired once
# semaphore of tenant-two must be acquired twice
self.assertEqual(len(self.builds), 3)
self.assertEqual(self.builds[0].name, 'project2-test1')
self.assertEqual(self.builds[1].name, 'project2-test1')
self.assertEqual(self.builds[2].name, 'project1-test1')
self.assertEqual(
len(tenant_one.semaphore_handler.semaphoreHolders(
"test-semaphore")), 1)
self.assertEqual(
len(tenant_two.semaphore_handler.semaphoreHolders(
"test-semaphore")), 2)
self.executor_server.release('project2-test1')
self.waitUntilSettled()
# one build of project1-test1 must run
# one build of project2-test1 must run
# semaphore of tenant-one must be acquired once
# semaphore of tenant-two must be acquired once
self.assertEqual(len(self.builds), 2)
self.assertEqual(
len(tenant_one.semaphore_handler.semaphoreHolders(
"test-semaphore")), 1)
self.assertEqual(
len(tenant_two.semaphore_handler.semaphoreHolders(
"test-semaphore")), 1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# no build must run
# semaphore of tenant-one must not be acquired
# semaphore of tenant-two must not be acquired
self.assertEqual(len(self.builds), 0)
self.assertEqual(
len(tenant_one.semaphore_handler.semaphoreHolders(
"test-semaphore")), 0)
self.assertEqual(
len(tenant_two.semaphore_handler.semaphoreHolders(
"test-semaphore")), 0)
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)


class TestImplicitProject(ZuulTestCase):
tenant_config_file = 'config/implicit-project/main.yaml'
def test_implicit_project(self):
# config project should work with implicit project name
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
# untrusted project should work with implicit project name
        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='test-common', result='SUCCESS', changes='1,1'),
dict(name='test-common', result='SUCCESS', changes='2,1'),
dict(name='test-project', result='SUCCESS', changes='2,1'),
], ordered=False)
# now test adding a further project in repo
in_repo_conf = textwrap.dedent(
"""
- job:
name: test-project
run: playbooks/test-project.yaml
- job:
name: test2-project
run: playbooks/test-project.yaml
- project:
check:
jobs:
- test-project
gate:
jobs:
- test-project
- project:
check:
jobs:
- test2-project
gate:
jobs:
- test2-project
""")
file_dict = {'.zuul.yaml': in_repo_conf}
        C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C',
files=file_dict)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
# change C must be merged
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
self.assertHistory([
dict(name='test-common', result='SUCCESS', changes='1,1'),
dict(name='test-common', result='SUCCESS', changes='2,1'),
dict(name='test-project', result='SUCCESS', changes='2,1'),
dict(name='test-common', result='SUCCESS', changes='3,1'),
dict(name='test-project', result='SUCCESS', changes='3,1'),
dict(name='test2-project', result='SUCCESS', changes='3,1'),
], ordered=False)


class TestSemaphoreInRepo(ZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/in-repo/main.yaml'
def test_semaphore_in_repo(self):
"Test semaphores in repo config"
# This tests dynamic semaphore handling in project repos. The semaphore
# max value should not be evaluated dynamically but must be updated
# after the change lands.
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
in_repo_conf = textwrap.dedent(
"""
- job:
name: project-test1
- job:
name: project-test2
run: playbooks/project-test2.yaml
semaphore: test-semaphore
- project:
name: org/project
tenant-one-gate:
jobs:
- project-test2
# the max value in dynamic layout must be ignored
- semaphore:
name: test-semaphore
max: 2
""")
in_repo_playbook = textwrap.dedent(
"""
- hosts: all
tasks: []
""")
file_dict = {'.zuul.yaml': in_repo_conf,
'playbooks/project-test2.yaml': in_repo_playbook}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
B.setDependsOn(A, 1)
C.setDependsOn(A, 1)
self.executor_server.hold_jobs_in_build = True
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
# check that the layout in a queue item still has max value of 1
# for test-semaphore
pipeline = tenant.layout.pipelines.get('tenant-one-gate')
queue = None
for queue_candidate in pipeline.queues:
if queue_candidate.name == 'org/project':
queue = queue_candidate
break
queue_item = queue.queue[0]
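        # The item's frozen layout is cached on the pipeline manager,
        # keyed by the layout UUID recorded on the queue item.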
item_dynamic_layout = pipeline.manager._layout_cache.get(
queue_item.layout_uuid)
self.assertIsNotNone(item_dynamic_layout)
dynamic_test_semaphore = item_dynamic_layout.getSemaphore(
self.scheds.first.sched.abide, 'test-semaphore')
self.assertEqual(dynamic_test_semaphore.max, 1)
        # one build must be in the queue, one semaphore acquired
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-test2')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.executor_server.release('project-test2')
self.waitUntilSettled()
# change A must be merged
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
# send change-merged event as the gerrit mock doesn't send it
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# now that change A was merged, the new semaphore max must be effective
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(tenant.layout.getSemaphore(
self.scheds.first.sched.abide, 'test-semaphore').max, 2)
        # two builds must be in the queue, two semaphores acquired
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-test2')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
2)
self.executor_server.release('project-test2')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0
)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)


class TestSchedulerBranchMatcher(ZuulTestCase):
@simple_layout('layouts/matcher-test.yaml')
def test_job_branch_ignored(self):
'''
Test that branch matching logic works.
The 'ignore-branch' job has a branch matcher that is supposed to
match every branch except for the 'featureA' branch, so it should
not be run on a change to that branch.
'''
self.create_branch('org/project', 'featureA')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'featureA'))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'featureA', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.printHistory()
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertJobNotInHistory('ignore-branch')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"A should report start and success")
self.assertIn('gate', A.messages[1],
"A should transit gate")


class TestSchedulerFailFast(ZuulTestCase):
tenant_config_file = 'config/fail-fast/main.yaml'
def test_fail_fast(self):
"""
Tests that a pipeline that is flagged with fail-fast
aborts jobs early.
"""
self.executor_server.hold_jobs_in_build = True
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.executor_server.failJob('project-test1', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'project-merge')
self.executor_server.release('project-merge')
self.waitUntilSettled()
# Now project-test1, project-test2 and project-test6
# should be running
self.assertEqual(len(self.builds), 3)
# Release project-test1 which will fail
self.executor_server.release('project-test1')
self.waitUntilSettled()
self.fake_nodepool.unpause()
self.waitUntilSettled()
# Now project-test2 must be aborted
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 1)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='FAILURE', changes='1,1'),
dict(name='project-test2', result='ABORTED', changes='1,1'),
dict(name='project-test6', result='ABORTED', changes='1,1'),
], ordered=False)
def test_fail_fast_gate(self):
"""
        Tests that a dependent (gate) pipeline that is flagged with
        fail-fast aborts jobs early.
"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test1', B)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
# Release project-test1 which will fail
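        # builds[2] is change B's project-test1 build.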
self.builds[2].release()
self.waitUntilSettled()
# We should only have the builds from change A now
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
# But both changes should still be in the pipeline
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
self.assertEqual(len(items), 2)
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
# Release change A
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='FAILURE', changes='1,1 2,1'),
dict(name='project-test2', result='ABORTED', changes='1,1 2,1'),
], ordered=False)
def test_fail_fast_nonvoting(self):
"""
Tests that a pipeline that is flagged with fail-fast
doesn't abort jobs due to a non-voting job.
"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.executor_server.failJob('project-test6', A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.assertEqual(self.builds[0].name, 'project-merge')
self.executor_server.release('project-merge')
self.waitUntilSettled()
# Now project-test1, project-test2, project-test5 and project-test6
# should be running
self.assertEqual(len(self.builds), 4)
# Release project-test6 which will fail
self.executor_server.release('project-test6')
self.waitUntilSettled()
# Now project-test1, project-test2 and project-test5 should be running
self.assertEqual(len(self.builds), 3)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 1)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test3', result='SUCCESS', changes='1,1'),
dict(name='project-test4', result='SUCCESS', changes='1,1'),
dict(name='project-test5', result='SUCCESS', changes='1,1'),
dict(name='project-test6', result='FAILURE', changes='1,1'),
], ordered=False)
def test_fail_fast_retry(self):
"""
Tests that a retried build doesn't trigger fail-fast.
"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# project-merge and project-test5
self.assertEqual(len(self.builds), 2)
# Force a retry of first build
self.builds[0].requeue = True
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 1)
self.assertHistory([
dict(name='project-merge', result=None, changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-test3', result='SUCCESS', changes='1,1'),
dict(name='project-test4', result='SUCCESS', changes='1,1'),
dict(name='project-test5', result='SUCCESS', changes='1,1'),
dict(name='project-test6', result='SUCCESS', changes='1,1'),
], ordered=False)


class TestPipelineSupersedes(ZuulTestCase):
@simple_layout('layouts/pipeline-supercedes.yaml')
def test_supercedes(self):
"""
        Tests that enqueuing a change in a superseding pipeline
        dequeues it from the superseded pipeline and aborts its jobs.
"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'test-job')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'test-job')
self.assertEqual(self.builds[0].pipeline, 'gate')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 2)
self.assertEqual(A.data['status'], 'MERGED')
self.assertHistory([
dict(name='test-job', result='ABORTED', changes='1,1'),
dict(name='test-job', result='SUCCESS', changes='1,1'),
], ordered=False)


class TestSchedulerExcludeAll(ZuulTestCase):
tenant_config_file = 'config/two-tenant/exclude-all.yaml'
def test_skip_reconfig_exclude_all(self):
"""Test that we don't trigger a reconfiguration for a tenant
when the changed project excludes all config."""
config = textwrap.dedent(
"""
- job:
name: project2-test
parent: test
- project:
check:
jobs:
- project2-test
""")
file_dict = {'zuul.yaml': config}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project2-test', result='SUCCESS', changes='1,1'),
])
sched = self.scheds.first.sched
tenant_one_layout_state = sched.local_layout_state["tenant-one"]
tenant_two_layout_state = sched.local_layout_state["tenant-two"]
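        # Layout states are ordered by version: equality below means no
        # reconfiguration ran, while a greater value means one did.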
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# We don't expect a reconfiguration for tenant-one as it excludes
# all config of org/project2.
self.assertEqual(sched.local_layout_state["tenant-one"],
tenant_one_layout_state)
# As tenant-two includes the config from org/project2, the merge of
# change A should have triggered a reconfig.
self.assertGreater(sched.local_layout_state["tenant-two"],
tenant_two_layout_state)
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project2-test', result='SUCCESS', changes='1,1'),
dict(name='project2-test', result='SUCCESS', changes='2,1'),
])


class TestReportBuildPage(ZuulTestCase):
tenant_config_file = 'config/build-page/main.yaml'
def test_tenant_url(self):
"""
Test that the tenant url is used in reporting the build page.
"""
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='python27', result='SUCCESS', changes='1,1'),
])
self.assertIn('python27 https://one.example.com/build/',
A.messages[0])
def test_base_url(self):
"""
Test that the web base url is used in reporting the build page.
"""
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='python27', result='SUCCESS', changes='1,1'),
])
self.assertIn('python27 https://zuul.example.com/t/tenant-two/build/',
A.messages[0])
def test_no_build_page(self):
"""
Test that we fall back to the old behavior if the tenant is
not configured to report the build page
"""
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='python27', result='SUCCESS', changes='1,1'),
])
self.assertIn('python27 https://', A.messages[0])


class TestSchedulerSmartReconfiguration(ZuulTestCase):
tenant_config_file = 'config/multi-tenant/main.yaml'
def _test_smart_reconfiguration(self, command_socket=False):
"""
Tests that smart reconfiguration works
In this scenario we have the tenants tenant-one, tenant-two and
tenant-three. We make the following changes and then trigger a smart
reconfiguration:
- tenant-one remains unchanged
- tenant-two gets another repo
- tenant-three gets removed completely
- tenant-four is a new tenant
"""
self.executor_server.hold_jobs_in_build = True
# Create changes for all tenants
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
# record previous tenant reconfiguration time, which may not be set
old_one = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
old_two = self.scheds.first.sched.tenant_layout_state.get(
'tenant-two', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
self.newTenantConfig('config/multi-tenant/main-reconfig.yaml')
del self.merge_job_history
self.scheds.execute(
lambda app: app.smartReconfigure(command_socket=command_socket))
# Wait for smart reconfiguration. Only tenant-two should be
# reconfigured. Note that waitUntilSettled is not
# reliable here because the reconfigure event may arrive in the
# event queue after waitUntilSettled.
start = time.time()
while True:
if time.time() - start > 30:
raise Exception("Timeout waiting for smart reconfiguration")
new_two = self.scheds.first.sched.tenant_layout_state.get(
'tenant-two', EMPTY_LAYOUT_STATE)
if old_two < new_two:
break
else:
time.sleep(0.1)
self.waitUntilSettled()
# We're only adding two new repos, so we should only need to
# issue 2 cat jobs.
cat_jobs = self.merge_job_history.get(zuul.model.MergeRequest.CAT)
self.assertEqual(len(cat_jobs), 2)
# Ensure that tenant-one has not been reconfigured
new_one = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.assertEqual(old_one, new_one)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# Changes in tenant-one and tenant-two have to be reported
self.assertEqual(1, A.reported)
self.assertEqual(1, B.reported)
        # tenant-three has been removed, so nothing should be reported
self.assertEqual(0, C.reported)
self.assertNotIn('tenant-three', self.scheds.first.sched.abide.tenants)
# Verify known tenants
expected_tenants = {'tenant-one', 'tenant-two', 'tenant-four'}
self.assertEqual(expected_tenants,
self.scheds.first.sched.abide.tenants.keys())
self.assertIsNotNone(
self.scheds.first.sched.tenant_layout_state.get('tenant-four'),
'Tenant tenant-four should exist now.'
)
# Test that the new tenant-four actually works
D = self.fake_gerrit.addFakeChange('org/project4', 'master', 'D')
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(1, D.reported)
# Test that the new project in tenant-two works
B2 = self.fake_gerrit.addFakeChange('org/project2b', 'master', 'B2')
self.fake_gerrit.addEvent(B2.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(1, B2.reported)
def test_smart_reconfiguration(self):
"Test that live reconfiguration works"
self._test_smart_reconfiguration()
def test_smart_reconfiguration_command_socket(self):
"Test that live reconfiguration works using command socket"
self._test_smart_reconfiguration(command_socket=True)


class TestReconfigureBranch(ZuulTestCase):
def _setupTenantReconfigureTime(self):
self.old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
def _createBranch(self):
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
def _deleteBranch(self):
self.delete_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchDeletedEvent(
'org/project1', 'stable'))
self.waitUntilSettled()
def _expectReconfigure(self, doReconfigure):
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
if doReconfigure:
self.assertLess(self.old, new)
else:
self.assertEqual(self.old, new)
self.old = new


class TestReconfigureBranchCreateDeleteSshHttp(TestReconfigureBranch):
tenant_config_file = 'config/single-tenant/main.yaml'
config_file = 'zuul-gerrit-web.conf'
def test_reconfigure_cache_branch_create_delete(self):
"Test that cache is updated clear on branch creation/deletion"
self._setupTenantReconfigureTime()
self._createBranch()
self._expectReconfigure(True)
self._deleteBranch()
self._expectReconfigure(True)


class TestReconfigureBranchCreateDeleteSsh(TestReconfigureBranch):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_reconfigure_cache_branch_create_delete(self):
"Test that cache is updated clear on branch creation/deletion"
self._setupTenantReconfigureTime()
self._createBranch()
self._expectReconfigure(True)
self._deleteBranch()
self._expectReconfigure(True)


class TestReconfigureBranchCreateDeleteHttp(TestReconfigureBranch):
tenant_config_file = 'config/single-tenant/main.yaml'
config_file = 'zuul-gerrit-no-stream.conf'
def test_reconfigure_cache_branch_create_delete(self):
"Test that cache is updated clear on branch creation/deletion"
self._setupTenantReconfigureTime()
self._createBranch()
self._expectReconfigure(True)
self._deleteBranch()
self._expectReconfigure(True)


class TestEventProcessing(ZuulTestCase):
tenant_config_file = 'config/event-processing/main.yaml'
# Some regression tests for ZK-distributed event processing
def test_independent_tenants(self):
# Test that an exception in one tenant doesn't break others
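# Monkeypatch the scheduler so that forwarding a trigger event to
# tenant-one always raises; events for the other tenants should
# still be dispatched normally.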
orig = zuul.scheduler.Scheduler._forward_trigger_event
def patched_forward(obj, *args, **kw):
if args[1].name == 'tenant-one':
raise Exception("test")
return orig(obj, *args, **kw)
self.useFixture(fixtures.MonkeyPatch(
'zuul.scheduler.Scheduler._forward_trigger_event',
patched_forward))
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='checkjob', result='SUCCESS', changes='1,1'),
], ordered=False)
@skip("Failure can only be detected in logs; see test_ref_equality")
def test_change_types(self):
# Test that when we decide whether to forward events, we can
# compare items with different change types (branch vs
# change).
# We can't detect a failure here except by observing the logs;
# this test is left in case that's useful in the future, but
# automated detection of this failure case is handled by
# test_ref_equality.
# Enqueue a tag
self.executor_server.hold_jobs_in_build = True
event = self.fake_gerrit.addFakeTag('org/project1', 'master', 'foo')
self.fake_gerrit.addEvent(event)
# Enqueue a change and make sure the scheduler is able to
# compare the two when forwarding the event
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='tagjob', result='SUCCESS'),
dict(name='checkjob', result='SUCCESS', changes='1,1'),
], ordered=False)
class TestWaitForInit(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
wait_for_init = True
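# With wait_for_init enabled the scheduler waits for tenant
# initialization during startup; setUp below asserts the
# corresponding log message.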
def setUp(self):
with self.assertLogs('zuul.Scheduler-0', level='DEBUG') as full_logs:
super().setUp()
self.assertRegexInList('Waiting for tenant initialization',
full_logs.output)
def test_wait_for_init(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_scheduler.py
|
test_scheduler.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import ZuulTestCase, skipIfMultiScheduler
class TestGerritToGithubCRD(ZuulTestCase):
config_file = 'zuul-gerrit-github.conf'
tenant_config_file = 'config/cross-source/main.yaml'
def test_crd_gate(self):
"Test cross-repo dependencies"
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest('github/project2', 'master',
'B')
A.addApproval('Code-Review', 2)
AM2 = self.fake_gerrit.addFakeChange('gerrit/project1', 'master',
'AM2')
AM1 = self.fake_gerrit.addFakeChange('gerrit/project1', 'master',
'AM1')
AM2.setMerged()
AM1.setMerged()
# A -> AM1 -> AM2
# A Depends-On: B
# AM2 is here to make sure it is never queried. If it is, it
# means zuul is walking down the entire history of merged
# changes.
A.setDependsOn(AM1, 1)
AM1.setDependsOn(AM2, 1)
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
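# Flush the connection change caches so the following approval
# event forces a fresh query of the changes.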
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
self.executor_server.hold_jobs_in_build = True
B.addLabel('approved')
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(AM2.queried, 0)
self.assertEqual(A.data['status'], 'MERGED')
self.assertTrue(B.is_merged)
self.assertEqual(A.reported, 2)
self.assertEqual(len(B.comments), 2)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 1,1' % B.head_sha)
def test_crd_branch(self):
"Test cross-repo dependencies in multiple branches"
self.create_branch('github/project2', 'mp')
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest('github/project2', 'master',
'B')
C1 = self.fake_github.openFakePullRequest('github/project2', 'mp',
'C1')
A.addApproval('Code-Review', 2)
# A Depends-On: B+C1
A.data['commitMessage'] = '%s\n\nDepends-On: %s\nDepends-On: %s\n' % (
A.subject, B.url, C1.url)
self.executor_server.hold_jobs_in_build = True
B.addLabel('approved')
C1.addLabel('approved')
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertTrue(B.is_merged)
self.assertTrue(C1.is_merged)
self.assertEqual(A.reported, 2)
self.assertEqual(len(B.comments), 2)
self.assertEqual(len(C1.comments), 2)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 2,%s 1,1' %
(B.head_sha, C1.head_sha))
def test_crd_gate_reverse(self):
"Test reverse cross-repo dependencies"
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest('github/project2', 'master',
'B')
A.addApproval('Code-Review', 2)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
self.executor_server.hold_jobs_in_build = True
A.addApproval('Approved', 1)
self.fake_github.emitEvent(B.addLabel('approved'))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertTrue(B.is_merged)
self.assertEqual(A.reported, 2)
self.assertEqual(len(B.comments), 2)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 1,1' %
(B.head_sha,))
def test_crd_cycle(self):
"Test cross-repo dependency cycles"
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
msg = "Depends-On: %s" % (A.data['url'],)
B = self.fake_github.openFakePullRequest('github/project2', 'master',
'B', body=msg)
A.addApproval('Code-Review', 2)
B.addLabel('approved')
# A -> B -> A (via commit-depends)
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(
A.messages[0],
"Build failed.\n\n\nWarning:\n Dependency cycle detected\n")
self.assertEqual(len(B.comments), 0)
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
def test_crd_gate_unknown(self):
"Test unknown projects in dependent pipeline"
self.init_repo("github/unknown", tag='init')
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest('github/unknown', 'master',
'B')
A.addApproval('Code-Review', 2)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
event = B.addLabel('approved')
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# Unknown projects cannot share a queue with any other project
# since they have no jobs in common (indeed, they have no jobs at
# all). Changes which depend on unknown-project changes should
# not be processed in a dependent pipeline.
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
self.assertEqual(A.reported, 0)
self.assertEqual(len(B.comments), 0)
self.assertEqual(len(self.history), 0)
# Simulate change B being gated outside this layout. Set the
# change merged before submitting the event so that when the
# event triggers a gerrit query to update the change, we get
# the information that it was merged.
B.setMerged('merged')
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Now that B is merged, A should be able to be enqueued and
# merged.
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertTrue(B.is_merged)
self.assertEqual(len(B.comments), 0)
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
self.executor_server.hold_jobs_in_build = True
self.hold_jobs_in_queue = True
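# Also hold jobs in the executor queue so they can be released in
# a controlled order below.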
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest(
'github/project2', 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
self.assertEqual(A.reported, 1)
self.assertEqual(len(B.comments), 0)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 1,1' %
(B.head_sha,))
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
def test_crd_check_duplicate(self):
"Test duplicate check in independent pipelines"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest(
'github/project2', 'master', 'B')
self.waitUntilSettled()
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two dependent changes...
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...make sure the live one is not duplicated...
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...but the non-live one is able to be.
self.fake_github.emitEvent(B.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 3)
# Release jobs in order to avoid races with change A jobs
# finishing before change B jobs.
self.orderedRelease()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
self.assertEqual(A.reported, 1)
self.assertEqual(len(B.comments), 1)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 1,1' %
(B.head_sha,))
changes = self.getJobFromHistory(
'project-merge', 'github/project2').changes
self.assertEqual(changes, '1,%s' %
(B.head_sha,))
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
def _test_crd_check_reconfiguration(self, project1, project2):
"Test cross-repo dependencies re-enqueued in independent pipelines"
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest(
'github/project2', 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
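# Force a reconfiguration; the enqueued items should be
# re-enqueued into the new layout.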
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Make sure the items still share a change queue, and the
# first one is not live.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
for item in queue.queue:
self.assertEqual(item.queue, first_item.queue)
self.assertFalse(first_item.live)
self.assertTrue(queue.queue[1].live)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
self.assertEqual(A.reported, 1)
self.assertEqual(len(B.comments), 0)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 1,1' %
(B.head_sha,))
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@skipIfMultiScheduler()
def test_crd_check_reconfiguration(self):
self._test_crd_check_reconfiguration('org/project1', 'org/project2')
@skipIfMultiScheduler()
def test_crd_undefined_project(self):
"""Test that undefined projects in dependencies are handled for
independent pipelines"""
# This is a hack for the fake github, as it implies repo
# creation upon the creation of any change.
self.init_repo("github/unknown", tag='init')
self._test_crd_check_reconfiguration('gerrit/project1',
'github/unknown')
def test_crd_check_transitive(self):
"Test transitive cross-repo dependencies"
# Specifically, if A -> B -> C, and C gets a new patchset and
# A gets a new patchset, ensure the test of A,2 includes B,1
# and C,2 (not C,1 which would indicate stale data in the
# cache for B).
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
C = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'C')
# B Depends-On: C
msg = "Depends-On: %s" % (C.data['url'],)
B = self.fake_github.openFakePullRequest(
'github/project2', 'master', 'B', body=msg)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
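# The changes string lists the dependency chain in order: C (2,1),
# the GitHub PR B (identified by number and head sha), then A (1,1).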
self.assertEqual(self.history[-1].changes, '2,1 1,%s 1,1' %
(B.head_sha,))
self.fake_github.emitEvent(B.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,1 1,%s' %
(B.head_sha,))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,1')
C.addPatchset()
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,2')
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,2 1,%s 1,2' %
(B.head_sha,))
def test_crd_check_unknown(self):
"Test unknown projects in independent pipeline"
self.init_repo("github/unknown", tag='init')
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_github.openFakePullRequest(
'github/unknown', 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
# Make sure zuul has seen an event on B. This is necessary
# in order to populate our fake github project db.
self.fake_github.emitEvent(B.getPullRequestEditedEvent())
# Note we wait until settled here as the event processing for
# the next event may not have the updated db yet otherwise.
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertFalse(B.is_merged)
self.assertEqual(len(B.comments), 0)
def test_crd_cycle_join(self):
"Test an updated change creates a cycle"
A = self.fake_github.openFakePullRequest(
'github/project2', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(len(A.comments), 1)
# Create B->A
B = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.url)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Dep is there so zuul should have reported on B
self.assertEqual(B.reported, 1)
# Update A to add A->B (a cycle).
self.fake_github.emitEvent(
A.editBody('Depends-On: %s\n' % (B.data['url'])))
self.waitUntilSettled()
# Dependency cycle injected so zuul should have reported again on A
self.assertEqual(len(A.comments), 2)
# Now if we update B to remove the depends-on, everything
# should be okay. B; A->B
B.addPatchset()
B.data['commitMessage'] = '%s\n' % (B.subject,)
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
# Cycle was removed so now zuul should have reported again on A
self.assertEqual(len(A.comments), 3)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(B.reported, 2)
class TestGithubToGerritCRD(ZuulTestCase):
config_file = 'zuul-gerrit-github.conf'
tenant_config_file = 'config/cross-source/main.yaml'
def test_crd_gate(self):
"Test cross-repo dependencies"
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'B')
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.editBody('Depends-On: %s\n' % (B.data['url']))
event = A.addLabel('approved')
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(len(A.comments), 2)
self.assertEqual(B.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'github/project2').changes
self.assertEqual(changes, '1,1 1,%s' % A.head_sha)
def test_crd_branch(self):
"Test cross-repo dependencies in multiple branches"
self.create_branch('gerrit/project1', 'mp')
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'B')
C1 = self.fake_gerrit.addFakeChange('gerrit/project1', 'mp', 'C1')
B.addApproval('Code-Review', 2)
C1.addApproval('Code-Review', 2)
# A Depends-On: B+C1
A.editBody('Depends-On: %s\nDepends-On: %s\n' % (
B.data['url'], C1.data['url']))
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
C1.addApproval('Approved', 1)
self.fake_github.emitEvent(A.addLabel('approved'))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C1.data['status'], 'MERGED')
self.assertEqual(len(A.comments), 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C1.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'github/project2').changes
self.assertEqual(changes, '1,1 2,1 1,%s' %
(A.head_sha,))
def test_crd_gate_reverse(self):
"Test reverse cross-repo dependencies"
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'B')
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.editBody('Depends-On: %s\n' % (B.data['url'],))
self.fake_github.emitEvent(A.addLabel('approved'))
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
self.executor_server.hold_jobs_in_build = True
A.addLabel('approved')
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(len(A.comments), 2)
self.assertEqual(B.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'github/project2').changes
self.assertEqual(changes, '1,1 1,%s' %
(A.head_sha,))
def test_crd_cycle(self):
"Test cross-repo dependency cycles"
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.url)
B.addApproval('Code-Review', 2)
B.addApproval('Approved', 1)
# A -> B -> A (via commit-depends)
A.editBody('Depends-On: %s\n' % (B.data['url'],))
self.fake_github.emitEvent(A.addLabel('approved'))
self.waitUntilSettled()
self.assertEqual(len(A.comments), 1)
self.assertEqual(B.reported, 0)
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
def test_crd_gate_unknown(self):
"Test unknown projects in dependent pipeline"
self.init_repo("gerrit/unknown", tag='init')
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange('gerrit/unknown', 'master', 'B')
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.editBody('Depends-On: %s\n' % (B.data['url'],))
B.addApproval('Approved', 1)
event = A.addLabel('approved')
self.fake_github.emitEvent(event)
self.waitUntilSettled()
# Unknown projects cannot share a queue with any other project
# since they have no jobs in common (indeed, they have no jobs at
# all). Changes which depend on unknown-project changes should
# not be processed in a dependent pipeline.
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(len(A.comments), 1)
self.assertEqual(B.reported, 0)
self.assertEqual(len(self.history), 0)
# Simulate change B being gated outside this layout. Set the
# change merged before submitting the event so that when the
# event triggers a gerrit query to update the change, we get
# the information that it was merged.
B.setMerged()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Now that B is merged, A should be able to be enqueued and
# merged.
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertEqual(len(A.comments), 3)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 0)
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
self.executor_server.hold_jobs_in_build = True
self.hold_jobs_in_queue = True
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange(
'gerrit/project1', 'master', 'B')
# A Depends-On: B
self.fake_github.emitEvent(
A.editBody('Depends-On: %s\n' % (B.data['url'],)))
self.waitUntilSettled()
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(len(A.comments), 1)
self.assertEqual(B.reported, 0)
changes = self.getJobFromHistory(
'project-merge', 'github/project2').changes
self.assertEqual(changes, '1,1 1,%s' %
(A.head_sha,))
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
def test_crd_check_duplicate(self):
"Test duplicate check in independent pipelines"
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange(
'gerrit/project1', 'master', 'B')
# A Depends-On: B
event = A.editBody('Depends-On: %s\n' % (B.data['url'],))
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two dependent changes...
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...make sure the live one is not duplicated...
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...but the non-live one is able to be.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 3)
# Release jobs in order to avoid races with change A jobs
# finishing before change B jobs.
self.orderedRelease()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(len(A.comments), 1)
self.assertEqual(B.reported, 1)
changes = self.getJobFromHistory(
'project-merge', 'github/project2').changes
self.assertEqual(changes, '1,1 1,%s' %
(A.head_sha,))
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,1')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.comments[0])
def _test_crd_check_reconfiguration(self, project1, project2):
"Test cross-repo dependencies re-enqueued in independent pipelines"
self.hold_jobs_in_queue = True
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange(
'gerrit/project1', 'master', 'B')
# A Depends-On: B
self.fake_github.emitEvent(
A.editBody('Depends-On: %s\n' % (B.data['url'],)))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Make sure the items still share a change queue, and the
# first one is not live.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
for item in queue.queue:
self.assertEqual(item.queue, first_item.queue)
self.assertFalse(first_item.live)
self.assertTrue(queue.queue[1].live)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(len(A.comments), 1)
self.assertEqual(B.reported, 0)
changes = self.getJobFromHistory(
'project-merge', 'github/project2').changes
self.assertEqual(changes, '1,1 1,%s' %
(A.head_sha,))
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@skipIfMultiScheduler()
def test_crd_check_reconfiguration(self):
self._test_crd_check_reconfiguration('org/project1', 'org/project2')
@skipIfMultiScheduler()
def test_crd_undefined_project(self):
"""Test that undefined projects in dependencies are handled for
independent pipelines"""
# This is a hack for the fake gerrit, as it implies repo
# creation upon the creation of any change.
self.init_repo("gerrit/unknown", tag='init')
self._test_crd_check_reconfiguration('github/project2',
'gerrit/unknown')
def test_crd_check_transitive(self):
"Test transitive cross-repo dependencies"
# Specifically, if A -> B -> C, and C gets a new patchset and
# A gets a new patchset, ensure the test of A,2 includes B,1
# and C,2 (not C,1 which would indicate stale data in the
# cache for B).
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'B')
C = self.fake_github.openFakePullRequest('github/project2', 'master',
'C')
# B Depends-On: C
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, C.url)
# A Depends-On: B
self.fake_github.emitEvent(
A.editBody('Depends-On: %s\n' % (B.data['url'],)))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,%s 1,1 1,%s' %
(C.head_sha, A.head_sha))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,%s 1,1' %
(C.head_sha,))
self.fake_github.emitEvent(C.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,%s' %
(C.head_sha,))
old_c_head = C.head_sha
C.addCommit()
new_c_head = C.head_sha
self.assertNotEqual(old_c_head, new_c_head)
self.fake_github.emitEvent(C.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,%s' %
(C.head_sha,))
old_a_head = A.head_sha
A.addCommit()
new_a_head = A.head_sha
self.assertNotEqual(old_a_head, new_a_head)
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '2,%s 1,1 1,%s' %
(C.head_sha, A.head_sha,))
def test_crd_check_unknown(self):
"Test unknown projects in independent pipeline"
self.init_repo("gerrit/unknown", tag='init')
A = self.fake_github.openFakePullRequest('github/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange(
'gerrit/unknown', 'master', 'B')
# A Depends-On: B
event = A.editBody('Depends-On: %s\n' % (B.data['url'],))
# Make sure zuul has seen an event on B.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
# Note we wait until settled here as the event processing for
# the next event may not have the updated db yet otherwise.
self.waitUntilSettled()
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertEqual(len(A.comments), 1)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 0)
def test_crd_cycle_join(self):
"Test an updated change creates a cycle"
A = self.fake_gerrit.addFakeChange(
'gerrit/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
# Create B->A
B = self.fake_github.openFakePullRequest('github/project2', 'master',
'B')
self.fake_github.emitEvent(
B.editBody('Depends-On: %s\n' % (A.data['url'],)))
self.waitUntilSettled()
# Dep is there so zuul should have reported on B
self.assertEqual(len(B.comments), 1)
# Update A to add A->B (a cycle).
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Dependency cycle injected so zuul should have reported again on A
self.assertEqual(A.reported, 2)
# Now if we update B to remove the depends-on, everything
# should be okay. B; A->B
B.addCommit()
B.editBody('')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Cycle was removed so now zuul should have reported again on A
self.assertEqual(A.reported, 3)
self.fake_github.emitEvent(B.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertEqual(len(B.comments), 2)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_cross_crd.py
|
test_cross_crd.py
|
# Copyright (c) 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import ZuulTestCase, simple_layout
class TestGithubCrossRepoDeps(ZuulTestCase):
"""Test Github cross-repo dependencies"""
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
@simple_layout('layouts/crd-github.yaml', driver='github')
def test_crd_independent(self):
"Test cross-repo dependences on an independent pipeline"
# Create a change in project1 that a project2 change will depend on
A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
# Create a commit in B that sets the dependency on A
msg = "Depends-On: https://github.com/org/project1/pull/%s" % A.number
B = self.fake_github.openFakePullRequest('org/project2', 'master', 'B',
body=msg)
# Make an event to re-use
event = B.getPullRequestEditedEvent()
self.fake_github.emitEvent(event)
self.waitUntilSettled()
# The changes for the job from project2 should include the project1
# PR content
changes = self.getJobFromHistory(
'project2-test', 'org/project2').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.head_sha,
B.number,
B.head_sha))
# There should be no more changes in the queue
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@simple_layout('layouts/crd-github.yaml', driver='github')
def test_crd_dependent(self):
"Test cross-repo dependences on a dependent pipeline"
# Create a change in project3 that a project4 change will depend on
A = self.fake_github.openFakePullRequest('org/project3', 'master', 'A')
# Create a commit in B that sets the dependency on A
msg = "Depends-On: https://github.com/org/project3/pull/%s" % A.number
B = self.fake_github.openFakePullRequest('org/project4', 'master', 'B',
body=msg)
# Make an event to re-use
event = B.getPullRequestEditedEvent()
self.fake_github.emitEvent(event)
self.waitUntilSettled()
# The changes for the job from project4 should include the project3
# PR content
changes = self.getJobFromHistory(
'project4-test', 'org/project4').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.head_sha,
B.number,
B.head_sha))
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
@simple_layout('layouts/crd-github.yaml', driver='github')
def test_crd_unshared_dependent(self):
"Test cross-repo dependences on unshared dependent pipeline"
# Create a change in project1 that a project2 change will depend on
A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
# Create a commit in B that sets the dependency on A
msg = "Depends-On: https://github.com/org/project5/pull/%s" % A.number
B = self.fake_github.openFakePullRequest('org/project6', 'master', 'B',
body=msg)
# Make an event for B
event = B.getPullRequestEditedEvent()
# Emit for B, which should not enqueue A because they do not share
# a queue. Since B depends on A, and A isn't enqueued, B will not run
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
# Enqueue A alone, let it finish
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertFalse(B.is_merged)
self.assertEqual(1, len(self.history))
# With A merged, B should go through
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertTrue(B.is_merged)
self.assertEqual(2, len(self.history))
@simple_layout('layouts/crd-github.yaml', driver='github')
def test_crd_cycle(self):
"Test cross-repo dependency cycles"
# A -> B -> A
msg = "Depends-On: https://github.com/org/project6/pull/2"
A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A',
body=msg)
msg = "Depends-On: https://github.com/org/project5/pull/1"
B = self.fake_github.openFakePullRequest('org/project6', 'master', 'B',
body=msg)
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
self.assertEqual(0, len(self.history))
@simple_layout('layouts/crd-github.yaml', driver='github')
def test_crd_needed_changes(self):
"Test cross-repo needed changes discovery"
# Given change A and B, where B depends on A, when A
# completes B should be enqueued (using a shared queue)
# Create a change in project3 that a project4 change will depend on
A = self.fake_github.openFakePullRequest('org/project3', 'master', 'A')
# Set B to depend on A
msg = "Depends-On: https://github.com/org/project3/pull/%s" % A.number
B = self.fake_github.openFakePullRequest('org/project4', 'master', 'B',
body=msg)
# Enqueue A, which when finished should enqueue B
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
# The changes for the job from project4 should include the project3
# PR content
changes = self.getJobFromHistory(
'project4-test', 'org/project4').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.head_sha,
B.number,
B.head_sha))
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
@simple_layout('layouts/github-message-update.yaml', driver='github')
def test_crd_message_update(self):
"Test a change is dequeued when the PR in its depends-on is updated"
# Create a change in project1 that a project2 change will depend on
A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
# Create a commit in B that sets the dependency on A
msg = "Depends-On: https://github.com/org/project1/pull/%s" % A.number
B = self.fake_github.openFakePullRequest('org/project2', 'master', 'B',
body=msg)
# Create a commit in C that sets the dependency on B
msg = "Depends-On: https://github.com/org/project2/pull/%s" % B.number
C = self.fake_github.openFakePullRequest('org/project3', 'master', 'C',
body=msg)
# A change we'll use later to replace A
A1 = self.fake_github.openFakePullRequest(
'org/project1', 'master', 'A1')
self.executor_server.hold_jobs_in_build = True
# Enqueue A,B,C
self.fake_github.emitEvent(C.getReviewAddedEvent('approve'))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['check'].getAllItems()
self.assertEqual(len(items), 3)
# Update B to point at A1 instead of A
msg = "Depends-On: https://github.com/org/project1/pull/%s" % A1.number
self.fake_github.emitEvent(B.editBody(msg))
self.waitUntilSettled()
# Release
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The job should be aborted since B was updated while enqueued.
self.assertHistory([dict(name='project3-test', result='ABORTED')])
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_github_crd.py
|
test_github_crd.py
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import tempfile
import testtools
import zuul.cmd
FAKE_CONFIG = b'''
[DEFAULT]
foo=%(ZUUL_ENV_TEST)s
'''
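# readConfig() should interpolate %(ZUUL_ENV_TEST)s from the
# process environment.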
class TestCmd(testtools.TestCase):
def test_read_config_with_environment(self):
"Test that readConfig interpolates environment vars"
self.useFixture(fixtures.EnvironmentVariable(
'HISTTIMEFORMAT', '%Y-%m-%dT%T%z '))
self.useFixture(fixtures.EnvironmentVariable(
'ZUUL_ENV_TEST', 'baz'))
with tempfile.NamedTemporaryFile() as test_config:
test_config.write(FAKE_CONFIG)
test_config.flush()
app = zuul.cmd.ZuulApp()
app.parseArguments(['-c', test_config.name])
app.readConfig()
self.assertEqual('baz', app.config.get('DEFAULT', 'foo'))
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_cmd.py
|
test_cmd.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import (
ZuulTestCase,
simple_layout,
)
class TestSupercedent(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
@simple_layout('layouts/supercedent.yaml')
def test_supercedent(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
arev = A.patchsets[-1]['revision']
A.setMerged()
self.fake_gerrit.addEvent(A.getRefUpdatedEvent())
self.waitUntilSettled()
# We should never run jobs for more than one change at a time
self.assertEqual(len(self.builds), 1)
# This change should be superseded by the next
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setMerged()
self.fake_gerrit.addEvent(B.getRefUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
crev = C.patchsets[-1]['revision']
C.setMerged()
self.fake_gerrit.addEvent(C.getRefUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = True
self.orderedRelease()
self.assertHistory([
dict(name='post-job', result='SUCCESS', newrev=arev),
dict(name='post-job', result='SUCCESS', newrev=crev),
], ordered=False)
@simple_layout('layouts/supercedent.yaml')
def test_supercedent_branches(self):
self.executor_server.hold_jobs_in_build = True
self.create_branch('org/project', 'stable')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
arev = A.patchsets[-1]['revision']
A.setMerged()
self.fake_gerrit.addEvent(A.getRefUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
# This change should not be superseded
B = self.fake_gerrit.addFakeChange('org/project', 'stable', 'B')
brev = B.patchsets[-1]['revision']
B.setMerged()
self.fake_gerrit.addEvent(B.getRefUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.executor_server.hold_jobs_in_build = True
self.orderedRelease()
self.assertHistory([
dict(name='post-job', result='SUCCESS', newrev=arev),
dict(name='post-job', result='SUCCESS', newrev=brev),
], ordered=False)
@simple_layout('layouts/supercedent-promote.yaml')
def test_supercedent_promote(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# We should never run jobs for more than one change at a time
self.assertEqual(len(self.builds), 1)
# This change should be superseded by the next
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setMerged()
self.fake_gerrit.addEvent(B.getChangeMergedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.setMerged()
self.fake_gerrit.addEvent(C.getChangeMergedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = True
self.orderedRelease()
self.assertHistory([
dict(name='promote-job', result='SUCCESS', changes='1,1'),
dict(name='promote-job', result='SUCCESS', changes='3,1'),
], ordered=False)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_supercedent.py
|
test_supercedent.py
|
# Copyright 2021 BMW Group
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
from zuul.lib.fingergw import FingerGateway
from zuul.zk.components import BaseComponent, ComponentRegistry
from tests.base import iterate_timeout, ZuulTestCase, ZuulWebFixture
class TestComponentRegistry(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def setUp(self):
super().setUp()
self.component_registry = ComponentRegistry(self.zk_client)
def assertComponentAttr(self, component_name, attr_name,
attr_value, timeout=10):
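# Poll the component registry until the named component reports
# the expected attribute value; iterate_timeout raises if the
# timeout expires first.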
for _ in iterate_timeout(
timeout,
f"{component_name} in cache has {attr_name} set to {attr_value}",
):
components = list(self.component_registry.all(component_name))
if (
len(components) > 0 and
getattr(components[0], attr_name) == attr_value
):
break
def assertComponentState(self, component_name, state, timeout=10):
return self.assertComponentAttr(
component_name, "state", state, timeout
)
def assertComponentStopped(self, component_name, timeout=10):
for _ in iterate_timeout(
timeout, f"{component_name} in cache is stopped"
):
components = list(self.component_registry.all(component_name))
if len(components) == 0:
break
def test_scheduler_component(self):
self.assertComponentState("scheduler", BaseComponent.RUNNING)
def test_executor_component(self):
self.assertComponentState("executor", BaseComponent.RUNNING)
self.executor_server.pause()
self.assertComponentState("executor", BaseComponent.PAUSED)
self.executor_server.unpause()
self.assertComponentState("executor", BaseComponent.RUNNING)
self.executor_server.unregister_work()
self.assertComponentAttr("executor", "accepting_work", False)
self.executor_server.register_work()
self.assertComponentAttr("executor", "accepting_work", True)
self.executor_server.zk_client.client.stop()
self.assertComponentStopped("executor")
self.executor_server.zk_client.client.start()
self.assertComponentAttr("executor", "accepting_work", True)
def test_merger_component(self):
self._startMerger()
self.assertComponentState("merger", BaseComponent.RUNNING)
self.merge_server.pause()
self.assertComponentState("merger", BaseComponent.PAUSED)
self.merge_server.unpause()
self.assertComponentState("merger", BaseComponent.RUNNING)
self.merge_server.stop()
self.merge_server.join()
# Set the merger to None so the test doesn't try to stop it again
self.merge_server = None
try:
self.assertComponentStopped("merger")
except Exception:
for kind, components in self.component_registry.all():
self.log.error("Component %s has %s online", kind, components)
raise
def test_fingergw_component(self):
config = configparser.ConfigParser()
config.read_dict(self.config)
config.read_dict({
'fingergw': {
'listen_address': '::',
'port': '0',
'hostname': 'janine',
}
})
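# Port 0 lets the OS pick a free listening port for the gateway.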
gateway = FingerGateway(
config,
command_socket=None,
pid_file=None
)
gateway.start()
try:
self.assertComponentState("fingergw", BaseComponent.RUNNING)
self.assertComponentAttr("fingergw", "hostname", "janine")
finally:
gateway.stop()
self.assertComponentStopped("fingergw")
def test_web_component(self):
self.useFixture(
ZuulWebFixture(
self.changes, self.config, self.additional_event_queues,
self.upstream_root, self.poller_events,
self.git_url_with_auth, self.addCleanup, self.test_root
)
)
self.assertComponentState("web", BaseComponent.RUNNING)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_component_registry.py
|
test_component_registry.py
|
# Copyright 2015 BMW Car IT GmbH
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import threading
import textwrap
from unittest import mock
import tests.base
from tests.base import (
AnsibleZuulTestCase,
BaseTestCase,
simple_layout,
skipIfMultiScheduler,
ZuulTestCase,
)
from zuul.lib import strings
from zuul.driver.gerrit import GerritDriver
from zuul.driver.gerrit.gerritconnection import GerritConnection
FIXTURE_DIR = os.path.join(tests.base.FIXTURE_DIR, 'gerrit')
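# Each fixture file contains the expected Gerrit ssh command on its
# first line, followed by the canned output used as the mocked _ssh
# response.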
def read_fixture(file):
with open('%s/%s' % (FIXTURE_DIR, file), 'r') as fixturefile:
lines = fixturefile.readlines()
command = lines[0].replace('\n', '')
value = ''.join(lines[1:])
return command, value
def read_fixtures(files):
calls = []
values = []
for fixture_file in files:
command, value = read_fixture(fixture_file)
calls.append(mock.call(command))
values.append([value, ''])
return calls, values
class TestGerrit(BaseTestCase):
@mock.patch('zuul.driver.gerrit.gerritconnection.GerritConnection._ssh')
def run_query(self, files, expected_patches, _ssh_mock):
gerrit_config = {
'user': 'gerrit',
'server': 'localhost',
}
driver = GerritDriver()
gerrit = GerritConnection(driver, 'review_gerrit', gerrit_config)
calls, values = read_fixtures(files)
_ssh_mock.side_effect = values
result = gerrit.simpleQuery('project:zuul/zuul')
_ssh_mock.assert_has_calls(calls)
self.assertEqual(len(calls), _ssh_mock.call_count,
'_ssh should be called %d times' % len(calls))
self.assertIsNotNone(result, 'Result is not none')
self.assertEqual(len(result), expected_patches,
'There must be %d patches.' % expected_patches)
def test_simple_query_pagination_new(self):
files = ['simple_query_pagination_new_1',
'simple_query_pagination_new_2']
expected_patches = 5
self.run_query(files, expected_patches)
def test_simple_query_pagination_old(self):
files = ['simple_query_pagination_old_1',
'simple_query_pagination_old_2',
'simple_query_pagination_old_3']
expected_patches = 5
self.run_query(files, expected_patches)
def test_ref_name_check_rules(self):
# See man git-check-ref-format for the rules referenced here
test_strings = [
('refs/heads/normal', True),
('refs/heads/.bad', False), # rule 1
('refs/heads/bad.lock', False), # rule 1
('refs/heads/good.locked', True),
('refs/heads/go.od', True),
('refs/heads//bad', False), # rule 6
('refs/heads/b?d', False), # rule 5
('refs/heads/b[d', False), # rule 5
('refs/heads/b..ad', False), # rule 3
('bad', False), # rule 2
('refs/heads/\nbad', False), # rule 4
('/refs/heads/bad', False), # rule 6
('refs/heads/bad/', False), # rule 6
('refs/heads/bad.', False), # rule 7
('.refs/heads/bad', False), # rule 1
('refs/he@{ads/bad', False), # rule 8
('@', False), # rule 9
('refs\\heads/bad', False) # rule 10
]
for ref, accepted in test_strings:
self.assertEqual(
accepted,
GerritConnection._checkRefFormat(ref),
ref + ' shall be ' + ('accepted' if accepted else 'rejected'))
def test_getGitURL(self):
gerrit_config = {
'user': 'gerrit',
'server': 'localhost',
'password': '1/badpassword',
}
# The 1/ in the password ensures we test the url encoding
# path; this is the format of password we get from
# googlesource.com.
driver = GerritDriver()
gerrit = GerritConnection(driver, 'review_gerrit', gerrit_config)
project = gerrit.source.getProject('org/project')
url = gerrit.source.getGitUrl(project)
self.assertEqual(
'https://gerrit:1%2Fbadpassword@localhost/a/org/project',
url)
def test_git_over_ssh_getGitURL(self):
gerrit_config = {
'user': 'gerrit',
'server': 'localhost',
'password': '1/badpassword',
'git_over_ssh': 'true',
}
# The 1/ in the password ensures we test the url encoding
# path; this is the format of password we get from
# googlesource.com.
driver = GerritDriver()
gerrit = GerritConnection(driver, 'review_gerrit', gerrit_config)
project = gerrit.source.getProject('org/project')
url = gerrit.source.getGitUrl(project)
self.assertEqual(
'ssh://gerrit@localhost:29418/org/project',
url)
def test_ssh_server_getGitURL(self):
gerrit_config = {
'user': 'gerrit',
'server': 'otherserver',
'password': '1/badpassword',
'ssh_server': 'localhost',
'git_over_ssh': 'true',
}
# The 1/ in the password ensures we test the url encoding
# path; this is the format of password we get from
# googlesource.com.
driver = GerritDriver()
gerrit = GerritConnection(driver, 'review_gerrit', gerrit_config)
project = gerrit.source.getProject('org/project')
url = gerrit.source.getGitUrl(project)
self.assertEqual(
'ssh://gerrit@localhost:29418/org/project',
url)
class TestGerritWeb(ZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
def test_jobs_executed(self):
"Test that jobs are executed and a change is merged"
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('project-merge').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test1').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('project-test2').result,
'SUCCESS')
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(self.getJobFromHistory('project-test1').node,
'label1')
self.assertEqual(self.getJobFromHistory('project-test2').node,
'label1')
def test_dynamic_line_comment(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: garbage-job
garbage: True
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertEqual(A.patchsets[0]['approvals'][0]['__tag'],
"autogenerated:zuul:check")
self.assertIn('Zuul encountered a syntax error',
A.messages[0])
comments = sorted(A.comments, key=lambda x: x['line'])
self.assertEqual(comments[0],
{'file': '.zuul.yaml',
'line': 4,
'message': "extra keys not allowed @ "
"data['garbage']",
'range': {'end_character': 0,
'end_line': 4,
'start_character': 2,
'start_line': 2},
'reviewer': {'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'}}
)
def test_message_too_long(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: garbage-job
%s
garbage: True
"""
) % ('\n' * 16384)
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
self.assertEqual(A.patchsets[0]['approvals'][0]['__tag'],
"autogenerated:zuul:check")
self.assertIn('... (truncated)',
A.messages[0])
def test_dependent_dynamic_line_comment(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: garbage-job
garbage: True
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.patchsets[0]['approvals'][0]['value'], "-1")
self.assertIn('This change depends on a change '
'with an invalid configuration',
B.messages[0])
self.assertEqual(B.comments, [])
@simple_layout('layouts/single-file-matcher.yaml')
def test_single_file(self):
# HTTP requests don't return a commit_msg entry in the files
# list, but the rest of zuul always expects one. This test
# returns a single file to exercise the single-file code path
# in the files matcher.
files = {'README': 'please!\n'}
change = self.fake_gerrit.addFakeChange('org/project',
'master',
'test irrelevant-files',
files=files)
self.fake_gerrit.addEvent(change.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
tested_change_ids = [x.changes[0] for x in self.history
if x.name == 'project-test-irrelevant-files']
self.assertEqual([], tested_change_ids)
def test_recheck(self):
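        # Only comments matching the 'recheck' trigger should
        # re-enqueue the change; unrelated comments are ignored.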
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(3, len(self.history))
self.fake_gerrit.addEvent(A.getChangeCommentEvent(1))
self.waitUntilSettled()
self.assertEqual(3, len(self.history))
self.fake_gerrit.addEvent(A.getChangeCommentEvent(1,
'recheck'))
self.waitUntilSettled()
self.assertEqual(6, len(self.history))
self.fake_gerrit.addEvent(A.getChangeCommentEvent(1,
patchsetcomment='recheck'))
self.waitUntilSettled()
self.assertEqual(9, len(self.history))
self.fake_gerrit.addEvent(A.getChangeCommentEvent(1,
patchsetcomment='do not recheck'))
self.waitUntilSettled()
self.assertEqual(9, len(self.history))
def test_submitted_together_git(self):
# This tests that the circular dependency handling for submit
# whole topic doesn't activate for changes which are only in a
# git dependency.
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A")
B = self.fake_gerrit.addFakeChange('org/project1', "master", "B")
C = self.fake_gerrit.addFakeChange('org/project1', "master", "C")
D = self.fake_gerrit.addFakeChange('org/project1', "master", "D")
E = self.fake_gerrit.addFakeChange('org/project1', "master", "E")
F = self.fake_gerrit.addFakeChange('org/project1', "master", "F")
G = self.fake_gerrit.addFakeChange('org/project1', "master", "G")
G.setDependsOn(F, 1)
F.setDependsOn(E, 1)
E.setDependsOn(D, 1)
D.setDependsOn(C, 1)
C.setDependsOn(B, 1)
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(A.queried, 1)
self.assertEqual(B.queried, 1)
self.assertEqual(C.queried, 1)
self.assertEqual(D.queried, 1)
self.assertEqual(E.queried, 1)
self.assertEqual(F.queried, 1)
self.assertEqual(G.queried, 1)
self.assertHistory([
dict(name="project-merge", result="SUCCESS",
changes="1,1 2,1 3,1"),
dict(name="project-test1", result="SUCCESS",
changes="1,1 2,1 3,1"),
dict(name="project-test2", result="SUCCESS",
changes="1,1 2,1 3,1"),
dict(name="project1-project2-integration", result="SUCCESS",
changes="1,1 2,1 3,1"),
], ordered=False)
def test_submit_failure(self):
# Test that we log the reason for a submit failure (403 error)
self.fake_gerrit._fake_submit_permission = False
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A")
A.addApproval('Code-Review', 2)
with self.assertLogs('zuul.test.FakeGerritConnection', level='INFO'
) as full_logs:
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.log.debug("Full logs:")
for x in full_logs.output:
self.log.debug(x)
self.assertRegexInList(
r'Error submitting data to gerrit on attempt 3: '
'Received response 403: submit not permitted',
full_logs.output)
self.assertEqual(A.data['status'], 'NEW')
class TestFileComments(AnsibleZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
tenant_config_file = 'config/gerrit-file-comments/main.yaml'
def test_file_comments(self):
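        # File comments returned by jobs should be reported to Gerrit;
        # comments from multiple returns for the same file are merged.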
A = self.fake_gerrit.addFakeChange(
'org/project', 'master', 'A',
files={'path/to/file.py': 'test1',
'otherfile.txt': 'test2',
})
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('file-comments').result,
'SUCCESS')
self.assertEqual(self.getJobFromHistory('file-comments-error').result,
'SUCCESS')
self.assertEqual(len(A.comments), 7)
comments = sorted(A.comments, key=lambda x: (x['file'], x['line']))
self.assertEqual(
comments[0],
{
'file': '/COMMIT_MSG',
'line': 1,
'message': 'commit message comment',
'reviewer': {
'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'
},
},
)
self.assertEqual(
comments[1],
{
'file': 'otherfile.txt',
'line': 21,
'message': 'This is a much longer message.\n\n'
'With multiple paragraphs.\n',
'reviewer': {
'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'
},
},
)
self.assertEqual(
comments[2],
{
"file": "path/to/file.py",
"line": 2,
"message": "levels are ignored by gerrit",
"reviewer": {
"email": "[email protected]",
"name": "Zuul",
"username": "jenkins",
},
},
)
self.assertEqual(
comments[3],
{
"file": "path/to/file.py",
"line": 21,
"message": (
"A second zuul return value using the same file should not"
"\noverride the first result, but both should be merged.\n"
),
"reviewer": {
"email": "[email protected]",
"name": "Zuul",
"username": "jenkins",
},
},
)
self.assertEqual(
comments[4],
{
'file': 'path/to/file.py',
'line': 42,
'message': 'line too long',
'reviewer': {
'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'
},
},
)
self.assertEqual(
comments[5],
{
"file": "path/to/file.py",
"line": 42,
"message": (
"A second comment applied to the same line in the same "
"file\nshould also be added to the result.\n"
),
"reviewer": {
"email": "[email protected]",
"name": "Zuul",
"username": "jenkins",
}
}
)
self.assertEqual(comments[6],
{'file': 'path/to/file.py',
'line': 82,
'message': 'line too short',
'reviewer': {'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'}}
)
self.assertIn('expected a dictionary', A.messages[0],
"A should have a validation error reported")
self.assertIn('invalid file missingfile.txt', A.messages[0],
"A should have file error reported")
class TestChecksApi(ZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
@simple_layout('layouts/gerrit-checks.yaml')
def test_check_pipeline(self):
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setDependsOn(B, 1)
A.setCheck('zuul:check', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.assertEqual(A.checks_history[0]['zuul:check']['state'],
'NOT_STARTED')
self.assertEqual(A.checks_history[1]['zuul:check']['state'],
'SCHEDULED')
self.assertEqual(
A.checks_history[1]['zuul:check']['url'],
'http://zuul.example.com/t/tenant-one/status/change/2,1')
self.assertEqual(A.checks_history[2]['zuul:check']['state'],
'RUNNING')
self.assertEqual(
A.checks_history[2]['zuul:check']['url'],
'http://zuul.example.com/t/tenant-one/status/change/2,1')
self.assertEqual(A.checks_history[3]['zuul:check']['state'],
'SUCCESSFUL')
self.assertTrue(
A.checks_history[3]['zuul:check']['url'].startswith(
'http://zuul.example.com/t/tenant-one/buildset/'))
self.assertEqual(len(A.checks_history), 4)
self.assertTrue(isinstance(
A.checks_history[3]['zuul:check']['started'], str))
self.assertTrue(isinstance(
A.checks_history[3]['zuul:check']['finished'], str))
self.assertTrue(
A.checks_history[3]['zuul:check']['finished'] >
A.checks_history[3]['zuul:check']['started'])
self.assertEqual(A.checks_history[3]['zuul:check']['message'],
'Change passed all voting jobs')
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1 2,1')])
self.assertEqual(A.reported, 0, "no messages should be reported")
self.assertEqual(A.messages, [], "no messages should be reported")
# Make sure B was never updated
self.assertEqual(len(B.checks_history), 0)
@simple_layout('layouts/gerrit-checks.yaml')
def test_gate_pipeline(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
A.setCheck('zuul:gate', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.assertEqual(A.checks_history[0]['zuul:gate']['state'],
'NOT_STARTED')
self.assertEqual(A.checks_history[1]['zuul:gate']['state'],
'SCHEDULED')
self.assertEqual(A.checks_history[2]['zuul:gate']['state'],
'RUNNING')
self.assertEqual(A.checks_history[3]['zuul:gate']['state'],
'SUCCESSFUL')
self.assertEqual(len(A.checks_history), 4)
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1')])
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2,
"start and success messages should be reported")
@simple_layout('layouts/gerrit-checks-scheme.yaml')
@skipIfMultiScheduler()
    # This is the only gerrit checks API test that fails with multiple
    # schedulers because it uses a check scheme rather than a UUID.
    # The scheme must first be evaluated and mapped to a UUID.
    # This shouldn't be a problem in production as the evaluation takes
    # place on the gerrit webserver. However, in the tests we get a
    # dedicated (fake) gerrit webserver for each fake gerrit
    # connection. Since each scheduler gets a new connection, only one
    # of those webservers will be aware of the check. If any other
    # webserver tries to evaluate the check, it will fail with
    # "Unable to find matching checker".
def test_check_pipeline_scheme(self):
self.fake_gerrit.addFakeChecker(uuid='zuul_check:abcd',
repository='org/project',
status='ENABLED')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setCheck('zuul_check:abcd', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.assertEqual(A.checks_history[0]['zuul_check:abcd']['state'],
'NOT_STARTED')
self.assertEqual(A.checks_history[1]['zuul_check:abcd']['state'],
'SCHEDULED')
self.assertEqual(A.checks_history[2]['zuul_check:abcd']['state'],
'RUNNING')
self.assertEqual(A.checks_history[3]['zuul_check:abcd']['state'],
'SUCCESSFUL')
self.assertEqual(len(A.checks_history), 4)
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1')])
@simple_layout('layouts/gerrit-checks-nojobs.yaml')
def test_no_jobs(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setCheck('zuul:check', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.assertEqual(A.checks_history[0]['zuul:check']['state'],
'NOT_STARTED')
self.assertEqual(A.checks_history[1]['zuul:check']['state'],
'SCHEDULED')
self.assertEqual(A.checks_history[2]['zuul:check']['state'],
'NOT_RELEVANT')
self.assertEqual(len(A.checks_history), 3)
self.assertEqual(A.data['status'], 'NEW')
@simple_layout('layouts/gerrit-checks.yaml')
def test_config_error(self):
# Test that line comments are reported on config errors
in_repo_conf = textwrap.dedent(
"""
- project:
check:
jobs:
- bad-job
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.setCheck('zuul:check', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.assertEqual(A.checks_history[0]['zuul:check']['state'],
'NOT_STARTED')
self.assertEqual(A.checks_history[1]['zuul:check']['state'],
'SCHEDULED')
self.assertEqual(A.checks_history[2]['zuul:check']['state'],
'FAILED')
self.assertEqual(len(A.checks_history), 3)
comments = sorted(A.comments, key=lambda x: x['line'])
self.assertEqual(comments[0],
{'file': '.zuul.yaml',
'line': 5,
'message': 'Job bad-job not defined',
'range': {'end_character': 0,
'end_line': 5,
'start_character': 2,
'start_line': 2},
'reviewer': {'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'}}
)
self.assertEqual(A.reported, 0, "no messages should be reported")
self.assertEqual(A.messages, [], "no messages should be reported")
@simple_layout('layouts/gerrit-checks.yaml')
def test_new_patchset(self):
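        # A new patchset should abort the check run in progress and
        # start a fresh one against the new revision.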
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setCheck('zuul:check', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.assertEqual(A.checks_history[0]['zuul:check']['state'],
'NOT_STARTED')
self.assertEqual(A.checks_history[1]['zuul:check']['state'],
'SCHEDULED')
self.assertEqual(A.checks_history[2]['zuul:check']['state'],
'RUNNING')
self.assertEqual(len(A.checks_history), 3)
A.addPatchset()
A.setCheck('zuul:check', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.log.info(A.checks_history)
self.assertEqual(A.checks_history[3]['zuul:check']['state'],
'NOT_STARTED')
self.assertEqual(A.checks_history[4]['zuul:check']['state'],
'SCHEDULED')
self.assertEqual(A.checks_history[5]['zuul:check']['state'],
'RUNNING')
self.assertEqual(A.checks_history[6]['zuul:check']['state'],
'SUCCESSFUL')
self.assertEqual(len(A.checks_history), 7)
self.assertHistory([
dict(name='test-job', result='ABORTED', changes='1,1'),
dict(name='test-job', result='SUCCESS', changes='1,2'),
], ordered=False)
class TestPolling(ZuulTestCase):
config_file = 'zuul-gerrit-no-stream.conf'
@simple_layout('layouts/gerrit-checks.yaml')
def test_config_update(self):
# Test that the config is updated via polling when a change
# merges without stream-events enabled.
in_repo_conf = textwrap.dedent(
"""
- job:
name: test-job2
parent: test-job
- project:
check:
jobs:
- test-job2
""")
file_dict = {'.zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
A.setMerged()
self.waitForPoll('gerrit')
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setCheck('zuul:check', reset=True)
self.waitForPoll('gerrit')
self.waitUntilSettled()
self.assertEqual(B.checks_history[0]['zuul:check']['state'],
'NOT_STARTED')
self.assertEqual(B.checks_history[1]['zuul:check']['state'],
'SCHEDULED')
self.assertEqual(B.checks_history[2]['zuul:check']['state'],
'RUNNING')
self.assertEqual(B.checks_history[3]['zuul:check']['state'],
'SUCCESSFUL')
self.assertEqual(len(B.checks_history), 4)
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='2,1'),
dict(name='test-job2', result='SUCCESS', changes='2,1'),
], ordered=False)
@simple_layout('layouts/gerrit-poll-post.yaml')
def test_post(self):
# Test that ref-updated events trigger post jobs.
self.waitUntilSettled()
# Wait for an initial poll to get the original sha.
self.waitForPoll('gerrit-ref')
# Merge a change.
self.create_commit('org/project')
# Wait for the job to run.
self.waitForPoll('gerrit-ref')
self.waitUntilSettled()
self.assertHistory([
dict(name='post-job', result='SUCCESS'),
])
@simple_layout('layouts/gerrit-poll-post.yaml')
def test_tag(self):
        # Test that newly created tags trigger tag jobs.
self.waitUntilSettled()
# Wait for an initial poll to get the original sha.
self.waitForPoll('gerrit-ref')
        # Create a tag.
self.fake_gerrit.addFakeTag('org/project', 'master', 'foo')
# Wait for the job to run.
self.waitForPoll('gerrit-ref')
self.waitUntilSettled()
self.assertHistory([
dict(name='tag-job', result='SUCCESS'),
])
class TestWrongConnection(ZuulTestCase):
config_file = 'zuul-connections-multiple-gerrits.conf'
tenant_config_file = 'config/wrong-connection-in-pipeline/main.yaml'
def test_wrong_connection(self):
# Test if the wrong connection is configured in a gate pipeline
# Our system has two gerrits, and we have configured a gate
# pipeline to trigger on the "review_gerrit" connection, but
# report (and merge) via "another_gerrit".
A = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_review_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
B = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'B')
# Let's try this as if the change was merged (say, via another tenant).
B.setMerged()
B.addApproval('Code-Review', 2)
self.fake_review_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 0)
self.assertEqual(B.reported, 0)
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1'),
dict(name='test-job', result='SUCCESS', changes='2,1'),
], ordered=False)
class TestGerritFake(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
tenant_config_file = "config/circular-dependencies/main.yaml"
def _make_tuple(self, data):
ret = []
for c in data:
dep_change = c['number']
dep_ps = c['currentPatchSet']['number']
ret.append((int(dep_change), int(dep_ps)))
return sorted(ret)
def _get_tuple(self, change_number):
ret = []
data = self.fake_gerrit.get(
f'changes/{change_number}/submitted_together')
for c in data:
dep_change = c['_number']
dep_ps = c['revisions'][c['current_revision']]['_number']
ret.append((dep_change, dep_ps))
return sorted(ret)
def test_submitted_together_normal(self):
# Test that the fake submitted together endpoint returns
# expected data
# This test verifies behavior with submitWholeTopic=False
# A single change
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
data = self._get_tuple(1)
self.assertEqual(data, [])
ret = self.fake_gerrit._getSubmittedTogether(A, None)
self.assertEqual(ret, [])
# A dependent series (B->A)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
data = self._get_tuple(2)
self.assertEqual(data, [(1, 1), (2, 1)])
# The Gerrit connection method filters out the queried change
ret = self.fake_gerrit._getSubmittedTogether(B, None)
self.assertEqual(ret, [(1, 1)])
# A topic cycle
C1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'C1',
topic='test-topic')
self.fake_gerrit.addFakeChange('org/project', 'master', 'C2',
topic='test-topic')
data = self._get_tuple(3)
self.assertEqual(data, [])
ret = self.fake_gerrit._getSubmittedTogether(C1, None)
self.assertEqual(ret, [])
def test_submitted_together_whole_topic(self):
# Test that the fake submitted together endpoint returns
# expected data
# This test verifies behavior with submitWholeTopic=True
self.fake_gerrit._fake_submit_whole_topic = True
# A single change
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
data = self._get_tuple(1)
self.assertEqual(data, [])
ret = self.fake_gerrit._getSubmittedTogether(A, None)
self.assertEqual(ret, [])
# A dependent series (B->A)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
data = self._get_tuple(2)
self.assertEqual(data, [(1, 1), (2, 1)])
# The Gerrit connection method filters out the queried change
ret = self.fake_gerrit._getSubmittedTogether(B, None)
self.assertEqual(ret, [(1, 1)])
# A topic cycle
C1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'C1',
topic='test-topic')
self.fake_gerrit.addFakeChange('org/project', 'master', 'C2',
topic='test-topic')
data = self._get_tuple(3)
self.assertEqual(data, [(3, 1), (4, 1)])
# The Gerrit connection method filters out the queried change
ret = self.fake_gerrit._getSubmittedTogether(C1, None)
self.assertEqual(ret, [(4, 1)])
        # Also test the query used by the GerritConnection:
ret = self.fake_gerrit._simpleQuery('status:open topic:test-topic')
ret = self._make_tuple(ret)
self.assertEqual(ret, [(3, 1), (4, 1)])
class TestGerritConnection(ZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
def test_zuul_query_ltime(self):
# Add a lock around the event queue iterator so that we can
# ensure that multiple events arrive before the first is
# processed.
lock = threading.Lock()
orig_iterEvents = self.fake_gerrit.gerrit_event_connector.\
event_queue._iterEvents
def _iterEvents(*args, **kw):
with lock:
return orig_iterEvents(*args, **kw)
self.patch(self.fake_gerrit.gerrit_event_connector.event_queue,
'_iterEvents', _iterEvents)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
# Hold the connection queue processing so these events get
# processed together
with lock:
self.fake_gerrit.addEvent(A.addApproval('Code-Review', 2))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Code-Review', 2))
self.waitUntilSettled()
self.assertHistory([])
# One query for each change in the above cluster of events.
self.assertEqual(A.queried, 1)
self.assertEqual(B.queried, 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertHistory([
dict(name="project-merge", result="SUCCESS", changes="1,1"),
dict(name="project-test1", result="SUCCESS", changes="1,1"),
dict(name="project-test2", result="SUCCESS", changes="1,1"),
dict(name="project-merge", result="SUCCESS", changes="1,1 2,1"),
dict(name="project-test1", result="SUCCESS", changes="1,1 2,1"),
dict(name="project-test2", result="SUCCESS", changes="1,1 2,1"),
], ordered=False)
# One query due to the event on change A, followed by a query
# to verify the merge.
self.assertEqual(A.queried, 3)
        # No query for change B is necessary since our cache is up to
        # date with respect to the triggering event. One query to
        # verify the merge.
self.assertEqual(B.queried, 2)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
def test_submit_requirements(self):
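        # A change with an unsatisfied submit requirement should not
        # be enqueued into gate until the requirement is satisfied.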
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
# Set an unsatisfied submit requirement
A.setSubmitRequirements([
{
"name": "Code-Review",
"description": "Disallow self-review",
"status": "UNSATISFIED",
"is_legacy": False,
"submittability_expression_result": {
"expression": "label:Code-Review=MAX,user=non_uploader "
"AND -label:Code-Review=MIN",
"fulfilled": False,
"passing_atoms": [],
"failing_atoms": [
"label:Code-Review=MAX,user=non_uploader",
"label:Code-Review=MIN"
]
}
},
{
"name": "Verified",
"status": "UNSATISFIED",
"is_legacy": True,
"submittability_expression_result": {
"expression": "label:Verified=MAX -label:Verified=MIN",
"fulfilled": False,
"passing_atoms": [],
"failing_atoms": [
"label:Verified=MAX",
"-label:Verified=MIN"
]
}
},
])
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertHistory([])
self.assertEqual(A.queried, 1)
self.assertEqual(A.data['status'], 'NEW')
# Mark the requirement satisfied
A.setSubmitRequirements([
{
"name": "Code-Review",
"description": "Disallow self-review",
"status": "SATISFIED",
"is_legacy": False,
"submittability_expression_result": {
"expression": "label:Code-Review=MAX,user=non_uploader "
"AND -label:Code-Review=MIN",
"fulfilled": False,
"passing_atoms": [
"label:Code-Review=MAX,user=non_uploader",
],
"failing_atoms": [
"label:Code-Review=MIN"
]
}
},
{
"name": "Verified",
"status": "UNSATISFIED",
"is_legacy": True,
"submittability_expression_result": {
"expression": "label:Verified=MAX -label:Verified=MIN",
"fulfilled": False,
"passing_atoms": [],
"failing_atoms": [
"label:Verified=MAX",
"-label:Verified=MIN"
]
}
},
])
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertHistory([
dict(name="project-merge", result="SUCCESS", changes="1,1"),
dict(name="project-test1", result="SUCCESS", changes="1,1"),
dict(name="project-test2", result="SUCCESS", changes="1,1"),
], ordered=False)
self.assertEqual(A.queried, 3)
self.assertEqual(A.data['status'], 'MERGED')
class TestGerritUnicodeRefs(ZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
upload_pack_data = (b'014452944ee370db5c87691e62e0f9079b6281319b4e HEAD'
b'\x00multi_ack thin-pack side-band side-band-64k '
b'ofs-delta shallow deepen-since deepen-not '
b'deepen-relative no-progress include-tag '
b'multi_ack_detailed allow-tip-sha1-in-want '
b'allow-reachable-sha1-in-want '
b'symref=HEAD:refs/heads/faster filter '
b'object-format=sha1 agent=git/2.37.1.gl1\n'
b'003d5f42665d737b3fd4ec22ca0209e6191859f09fd6 '
b'refs/for/faster\n'
b'004952944ee370db5c87691e62e0f9079b6281319b4e '
b'refs/heads/foo/\xf0\x9f\x94\xa5\xf0\x9f\x94\xa5'
b'\xf0\x9f\x94\xa5\n'
b'003f52944ee370db5c87691e62e0f9079b6281319b4e '
b'refs/heads/faster\n0000').decode("utf-8")
def test_mb_unicode_refs(self):
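        # Multi-byte UTF-8 ref names returned by git upload-pack
        # should be decoded into the expected unicode ref names.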
gerrit_config = {
'user': 'gerrit',
'server': 'localhost',
}
driver = GerritDriver()
gerrit = GerritConnection(driver, 'review_gerrit', gerrit_config)
def _uploadPack(project):
return self.upload_pack_data
self.patch(gerrit, '_uploadPack', _uploadPack)
project = gerrit.source.getProject('org/project')
refs = gerrit.getInfoRefs(project)
self.assertEqual(refs,
{'refs/for/faster':
'5f42665d737b3fd4ec22ca0209e6191859f09fd6',
'refs/heads/foo/🔥🔥🔥':
'52944ee370db5c87691e62e0f9079b6281319b4e',
'refs/heads/faster':
'52944ee370db5c87691e62e0f9079b6281319b4e'})
class TestGerritDriver(ZuulTestCase):
# Most of the Zuul test suite tests the Gerrit driver, to some
# extent. The other classes in this file test specific methods of
# Zuul interacting with Gerrit. But the other drivers also test
# some basic driver functionality that, if tested for Gerrit at
# all, is spread out in random tests. This class adds some
# (potentially duplicative) testing to validate parity with the
# other drivers.
@simple_layout('layouts/simple.yaml')
def test_change_event(self):
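        # A patchset-created event should trigger the check pipeline
        # and expose the expected zuul variables to the job.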
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
])
job = self.getJobFromHistory('check-job')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual('1', zuulvars['patchset'])
self.assertEqual(str(A.patchsets[-1]['revision']),
zuulvars['commit_id'])
self.assertEqual('master', zuulvars['branch'])
        self.assertEqual('https://review.example.com/1',
                         zuulvars['items'][0]['change_url'])
self.assertEqual(zuulvars["message"], strings.b64encode('A'))
self.assertEqual(1, len(self.history))
self.assertEqual(1, len(A.messages))
@simple_layout('layouts/simple.yaml')
def test_tag_event(self):
event = self.fake_gerrit.addFakeTag('org/project', 'master', 'foo')
tagsha = event['refUpdate']['newRev']
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
self.assertHistory([
dict(name='tag-job', result='SUCCESS', ref='refs/tags/foo'),
])
job = self.getJobFromHistory('tag-job')
        zuulvars = job.parameters['zuul']
self.assertEqual('refs/tags/foo', zuulvars['ref'])
self.assertEqual('tag', zuulvars['pipeline'])
self.assertEqual('tag-job', zuulvars['job'])
self.assertEqual(tagsha, zuulvars['newrev'])
self.assertEqual(tagsha, zuulvars['commit_id'])
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_gerrit.py
|
test_gerrit.py
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import logging
import signal
import testtools
import zuul.cmd
class TestStackDump(testtools.TestCase):
def setUp(self):
super(TestStackDump, self).setUp()
self.log_fixture = self.useFixture(
fixtures.FakeLogger(level=logging.DEBUG))
def test_stack_dump_logs(self):
"Test that stack dumps end up in logs."
zuul.cmd.stack_dump_handler(signal.SIGUSR2, None)
self.assertIn("Thread", self.log_fixture.output)
self.assertIn("MainThread", self.log_fixture.output)
self.assertIn("test_stack_dump_logs", self.log_fixture.output)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_stack_dump.py
|
test_stack_dump.py
|
# Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import git
import yaml
import socket
from testtools.matchers import MatchesRegex
from zuul.lib import strings
from zuul.zk.layout import LayoutState
from tests.base import ZuulTestCase, simple_layout
from tests.base import ZuulWebFixture
EMPTY_LAYOUT_STATE = LayoutState("", "", 0, None, {}, -1)
class TestPagureDriver(ZuulTestCase):
config_file = 'zuul-pagure-driver.conf'
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_pull_request_opened(self):
initial_comment = "This is the\nPR initial_comment."
A = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'A', initial_comment=initial_comment)
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
job = self.getJobFromHistory('project-test2')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.commit_stop), zuulvars['patchset'])
self.assertEqual(str(A.commit_stop), zuulvars['commit_id'])
self.assertEqual('master', zuulvars['branch'])
        self.assertEqual('https://pagure/org/project/pull-request/1',
                         zuulvars['items'][0]['change_url'])
self.assertEqual(zuulvars["message"],
strings.b64encode(initial_comment))
self.assertEqual(2, len(self.history))
self.assertEqual(2, len(A.comments))
self.assertEqual(
A.comments[0]['comment'], "Starting check jobs.")
self.assertThat(
A.comments[1]['comment'],
MatchesRegex(r'.*\[project-test1 \]\(.*\).*', re.DOTALL))
self.assertThat(
A.comments[1]['comment'],
MatchesRegex(r'.*\[project-test2 \]\(.*\).*', re.DOTALL))
self.assertEqual(1, len(A.flags))
self.assertEqual('success', A.flags[0]['status'])
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_pull_request_updated(self):
A = self.fake_pagure.openFakePullRequest('org/project', 'master', 'A')
pr_tip1 = A.commit_stop
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.assertHistory(
[
{'name': 'project-test1', 'changes': '1,%s' % pr_tip1},
{'name': 'project-test2', 'changes': '1,%s' % pr_tip1},
], ordered=False
)
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
pr_tip2 = A.commit_stop
self.waitUntilSettled()
self.assertEqual(4, len(self.history))
self.assertHistory(
[
{'name': 'project-test1', 'changes': '1,%s' % pr_tip1},
{'name': 'project-test2', 'changes': '1,%s' % pr_tip1},
{'name': 'project-test1', 'changes': '1,%s' % pr_tip2},
{'name': 'project-test2', 'changes': '1,%s' % pr_tip2}
], ordered=False
)
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_pull_request_updated_builds_aborted(self):
A = self.fake_pagure.openFakePullRequest('org/project', 'master', 'A')
pr_tip1 = A.commit_stop
self.executor_server.hold_jobs_in_build = True
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
pr_tip2 = A.commit_stop
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory(
[
{'name': 'project-test1', 'result': 'ABORTED',
'changes': '1,%s' % pr_tip1},
{'name': 'project-test2', 'result': 'ABORTED',
'changes': '1,%s' % pr_tip1},
{'name': 'project-test1', 'changes': '1,%s' % pr_tip2},
{'name': 'project-test2', 'changes': '1,%s' % pr_tip2}
], ordered=False
)
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_pull_request_commented(self):
A = self.fake_pagure.openFakePullRequest('org/project', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent('I like that change'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent('recheck'))
self.waitUntilSettled()
self.assertEqual(4, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestInitialCommentEvent('Initial comment edited'))
self.waitUntilSettled()
self.assertEqual(6, len(self.history))
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_pull_request_with_dyn_reconf(self):
zuul_yaml = [
{'job': {
'name': 'project-test3',
'run': 'job.yaml'
}},
{'project': {
'check': {
'jobs': [
'project-test3'
]
}
}}
]
playbook = "- hosts: all\n tasks: []"
A = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'A')
A.addCommit(
{'.zuul.yaml': yaml.dump(zuul_yaml),
'job.yaml': playbook}
)
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test3').result)
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_ref_updated(self):
event = self.fake_pagure.getGitReceiveEvent('org/project')
expected_newrev = event[1]['msg']['end_commit']
expected_oldrev = event[1]['msg']['old_commit']
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-post-job').result)
job = self.getJobFromHistory('project-post-job')
zuulvars = job.parameters['zuul']
self.assertEqual('refs/heads/master', zuulvars['ref'])
self.assertEqual('post', zuulvars['pipeline'])
self.assertEqual('project-post-job', zuulvars['job'])
self.assertEqual('master', zuulvars['branch'])
self.assertEqual(
'https://pagure/org/project/c/%s' % zuulvars['newrev'],
zuulvars['change_url'])
self.assertEqual(expected_newrev, zuulvars['newrev'])
self.assertEqual(expected_oldrev, zuulvars['oldrev'])
self.assertEqual(expected_newrev, zuulvars['commit_id'])
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_ref_created(self):
self.create_branch('org/project', 'stable-1.0')
path = os.path.join(self.upstream_root, 'org/project')
repo = git.Repo(path)
newrev = repo.commit('refs/heads/stable-1.0').hexsha
event = self.fake_pagure.getGitBranchEvent(
'org/project', 'stable-1.0', 'creation', newrev)
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
self.assertEqual(1, len(self.history))
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-post-job').result)
job = self.getJobFromHistory('project-post-job')
zuulvars = job.parameters['zuul']
self.assertEqual('refs/heads/stable-1.0', zuulvars['ref'])
self.assertEqual('post', zuulvars['pipeline'])
self.assertEqual('project-post-job', zuulvars['job'])
self.assertEqual('stable-1.0', zuulvars['branch'])
self.assertEqual(newrev, zuulvars['newrev'])
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_ref_deleted(self):
event = self.fake_pagure.getGitBranchEvent(
'org/project', 'stable-1.0', 'deletion', '0' * 40)
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_ref_updated_and_tenant_reconfigure(self):
self.waitUntilSettled()
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
zuul_yaml = [
{'job': {
'name': 'project-post-job2',
'run': 'job.yaml'
}},
{'project': {
'post': {
'jobs': [
'project-post-job2'
]
}
}}
]
playbook = "- hosts: all\n tasks: []"
self.create_commit(
'org/project',
{'.zuul.yaml': yaml.dump(zuul_yaml),
'job.yaml': playbook},
message='Add InRepo configuration'
)
event = self.fake_pagure.getGitReceiveEvent('org/project')
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
self.assertHistory(
[{'name': 'project-post-job'},
{'name': 'project-post-job2'},
], ordered=False
)
@simple_layout('layouts/files-pagure.yaml', driver='pagure')
def test_pull_matched_file_event(self):
A = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'A',
files={'random.txt': 'test', 'build-requires': 'test'})
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
B = self.fake_pagure.openFakePullRequest('org/project', 'master', 'B',
files={'random.txt': 'test2'})
self.fake_pagure.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
C = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'C',
files={'build-requires': 'test'})
self.fake_pagure.emitEvent(C.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_tag_created(self):
path = os.path.join(self.upstream_root, 'org/project')
repo = git.Repo(path)
repo.create_tag('1.0')
tagsha = repo.tags['1.0'].commit.hexsha
event = self.fake_pagure.getGitTagCreatedEvent(
'org/project', '1.0', tagsha)
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-tag-job').result)
job = self.getJobFromHistory('project-tag-job')
zuulvars = job.parameters['zuul']
self.assertEqual('refs/tags/1.0', zuulvars['ref'])
self.assertEqual('tag', zuulvars['pipeline'])
self.assertEqual('project-tag-job', zuulvars['job'])
self.assertEqual(tagsha, zuulvars['newrev'])
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_pr_score_require_1_vote(self):
A = self.fake_pagure.openFakePullRequest(
'org/project1', 'master', 'A')
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent("I like that change"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-test').result)
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_pr_score_require_2_votes(self):
A = self.fake_pagure.openFakePullRequest(
'org/project2', 'master', 'A')
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent("I like that change"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsdown:"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_status_trigger(self):
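        # A 'success' CI status set on the PR should trigger the
        # pipeline, while a 'failure' status should not.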
A = self.fake_pagure.openFakePullRequest(
'org/project3', 'master', 'A')
self.fake_pagure.emitEvent(
A.getPullRequestStatusSetEvent("failure"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestStatusSetEvent("success"))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_tag_trigger(self):
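        # Adding one of the configured trigger tags should start jobs;
        # other tags are ignored.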
A = self.fake_pagure.openFakePullRequest(
'org/project4', 'master', 'A')
self.fake_pagure.emitEvent(
A.getPullRequestTagAddedEvent(["lambda"]))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestTagAddedEvent(["gateit", "lambda"]))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.fake_pagure.emitEvent(
A.getPullRequestTagAddedEvent(["mergeit"]))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_tag_require(self):
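        # Events on the PR are ignored until it carries the required
        # 'gateit' tag; removing the tag afterwards runs no new jobs.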
A = self.fake_pagure.openFakePullRequest(
'org/project5', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
A.tags = ["lambda"]
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
A.tags = ["lambda", "gateit"]
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
A.tags = []
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_flag_require(self):
A = self.fake_pagure.openFakePullRequest(
'org/project7', 'master', 'A')
# CI status from other CIs must not be handled
self.fake_pagure.emitEvent(
A.getPullRequestStatusSetEvent("success", username="notzuul"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.assertEqual(1, len(A.flags))
self.fake_pagure.emitEvent(
A.getPullRequestStatusSetEvent("failure"))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.assertEqual(2, len(A.flags))
self.fake_pagure.emitEvent(
A.getPullRequestStatusSetEvent("success"))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(2, len(A.flags))
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_pull_request_closed(self):
A = self.fake_pagure.openFakePullRequest(
'org/project6', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
        # Validate that a closed but not merged PR does not trigger
        # the pipeline
self.fake_pagure.emitEvent(A.getPullRequestClosedEvent(merged=False))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
        # Reset the status to Open
        # Validate that a closed and merged PR triggers the pipeline
A.status = 'Open'
A.is_merged = False
self.fake_pagure.emitEvent(A.getPullRequestClosedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/merging-pagure.yaml', driver='pagure')
def test_merge_action_in_independent(self):
A = self.fake_pagure.openFakePullRequest(
'org/project1', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test').result)
self.assertEqual('Merged', A.status)
@simple_layout('layouts/merging-pagure.yaml', driver='pagure')
def test_merge_action_in_dependent(self):
A = self.fake_pagure.openFakePullRequest(
'org/project2', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# connection.canMerge is not validated
self.assertEqual(0, len(self.history))
        # Set the mergeable PR flag to an expected value
A.cached_merge_status = 'MERGE'
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# connection.canMerge is not validated
self.assertEqual(0, len(self.history))
        # Set the score threshold as reached.
        # Here we use None, which means no specific score is required.
A.threshold_reached = None
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# connection.canMerge is not validated
self.assertEqual(0, len(self.history))
# Set CI flag as passed CI
A.addFlag('success', 'https://url', 'Build passed')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# connection.canMerge is validated
self.assertEqual(1, len(self.history))
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test').result)
self.assertEqual('Merged', A.status)
@simple_layout('layouts/crd-pagure.yaml', driver='pagure')
def test_crd_independent(self):
# Create a change in project1 that a project2 change will depend on
A = self.fake_pagure.openFakePullRequest('org/project1', 'master', 'A')
# Create a commit in B that sets the dependency on A
msg = "Depends-On: %s" % A.url
B = self.fake_pagure.openFakePullRequest(
'org/project2', 'master', 'B', initial_comment=msg)
# Make an event to re-use
event = B.getPullRequestCommentedEvent('A comment')
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
# The changes for the job from project2 should include the project1
# PR content
changes = self.getJobFromHistory(
'project2-test', 'org/project2').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.commit_stop,
B.number,
B.commit_stop))
# There should be no more changes in the queue
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@simple_layout('layouts/crd-pagure.yaml', driver='pagure')
def test_crd_dependent(self):
# Create a change in project3 that a project4 change will depend on
A = self.fake_pagure.openFakePullRequest('org/project3', 'master', 'A')
# Create a commit in B that sets the dependency on A
msg = "Depends-On: %s" % A.url
B = self.fake_pagure.openFakePullRequest(
'org/project4', 'master', 'B', initial_comment=msg)
# Make an event to re-use
event = B.getPullRequestCommentedEvent('A comment')
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
        # Neither A nor B can merge (no flag, no score threshold)
self.assertEqual(0, len(self.history))
B.threshold_reached = True
B.addFlag('success', 'https://url', 'Build passed')
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
        # B can't merge as A has no flag and no score threshold
self.assertEqual(0, len(self.history))
A.threshold_reached = True
A.addFlag('success', 'https://url', 'Build passed')
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
# The changes for the job from project4 should include the project3
# PR content
changes = self.getJobFromHistory(
'project4-test', 'org/project4').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.commit_stop,
B.number,
B.commit_stop))
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
@simple_layout('layouts/crd-pagure.yaml', driver='pagure')
def test_crd_needed_changes(self):
        # Given changes A and B, where B depends on A: when A
        # completes, B should be enqueued (using a shared queue)
# Create a change in project3 that a project4 change will depend on
A = self.fake_pagure.openFakePullRequest('org/project3', 'master', 'A')
A.threshold_reached = True
A.addFlag('success', 'https://url', 'Build passed')
# Set B to depend on A
msg = "Depends-On: %s" % A.url
B = self.fake_pagure.openFakePullRequest(
'org/project4', 'master', 'B', initial_comment=msg)
# Make the driver aware of change B by sending an event
# At that moment B can't merge
self.fake_pagure.emitEvent(B.getPullRequestCommentedEvent('A comment'))
# Now set B mergeable
B.threshold_reached = True
B.addFlag('success', 'https://url', 'Build passed')
        # Enqueue A, which will make the scheduler detect that B
        # depends on it, so B will be enqueued as well.
self.fake_pagure.emitEvent(A.getPullRequestCommentedEvent('A comment'))
self.waitUntilSettled()
# The changes for the job from project4 should include the project3
# PR content
changes = self.getJobFromHistory(
'project4-test', 'org/project4').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.commit_stop,
B.number,
B.commit_stop))
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
@simple_layout('layouts/files-pagure.yaml', driver='pagure')
def test_changed_file_match_filter(self):
files = {'{:03d}.txt'.format(n): 'test' for n in range(300)}
files["foobar-requires"] = "test"
files["to-be-removed"] = "test"
A = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'A', files=files)
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# project-test1 and project-test2 should be run
self.assertEqual(2, len(self.history))
@simple_layout('layouts/files-pagure.yaml', driver='pagure')
def test_changed_and_reverted_file_not_match_filter(self):
files = {'{:03d}.txt'.format(n): 'test' for n in range(3)}
files["foobar-requires"] = "test"
files["to-be-removed"] = "test"
A = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'A', files=files)
A.addCommit(delete_files=['to-be-removed'])
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Only project-test1 should be run, because the file to-be-removed
# is reverted and not in changed files to trigger project-test2
self.assertEqual(1, len(self.history))
class TestPagureToGerritCRD(ZuulTestCase):
config_file = 'zuul-crd-pagure.conf'
tenant_config_file = 'config/cross-source-pagure/gerrit.yaml'
def test_crd_gate(self):
"Test cross-repo dependencies"
A = self.fake_pagure.openFakePullRequest('pagure/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'B')
# A Depends-On: B
A.editInitialComment('Depends-On: %s\n' % (B.data['url']))
A.addFlag('success', 'https://url', 'Build passed')
A.threshold_reached = True
B.addApproval('Code-Review', 2)
# Make A enter the pipeline
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
        # Expect A not to be merged as B is not approved yet
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
B.addApproval('Approved', 1)
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(len(A.comments), 4)
self.assertEqual(B.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'pagure/project2').changes
self.assertEqual(changes, '1,1 1,%s' % A.commit_stop)
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
A = self.fake_pagure.openFakePullRequest('pagure/project2', 'master',
'A')
B = self.fake_gerrit.addFakeChange(
'gerrit/project1', 'master', 'B')
# A Depends-On: B
A.editInitialComment('Depends-On: %s\n' % (B.data['url'],))
self.executor_server.hold_jobs_in_build = True
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(len(A.comments), 2)
self.assertEqual(B.reported, 0)
changes = self.getJobFromHistory(
'project-merge', 'pagure/project2').changes
self.assertEqual(changes, '1,1 1,%s' % A.commit_stop)
class TestGerritToPagureCRD(ZuulTestCase):
config_file = 'zuul-crd-pagure.conf'
tenant_config_file = 'config/cross-source-pagure/gerrit.yaml'
def test_crd_gate(self):
"Test cross-repo dependencies"
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_pagure.openFakePullRequest('pagure/project2', 'master',
'B')
A.addApproval('Code-Review', 2)
AM2 = self.fake_gerrit.addFakeChange('gerrit/project1', 'master',
'AM2')
AM1 = self.fake_gerrit.addFakeChange('gerrit/project1', 'master',
'AM1')
AM2.setMerged()
AM1.setMerged()
# A -> AM1 -> AM2
# A Depends-On: B
        # AM2 is here to make sure it is never queried. If it is, it
        # means zuul is walking down the entire history of merged
        # changes.
A.setDependsOn(AM1, 1)
AM1.setDependsOn(AM2, 1)
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
B.addFlag('success', 'https://url', 'Build passed')
B.threshold_reached = True
self.fake_pagure.emitEvent(
B.getPullRequestCommentedEvent(":thumbsup:"))
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(AM2.queried, 0)
self.assertEqual(A.data['status'], 'MERGED')
self.assertTrue(B.is_merged)
self.assertEqual(A.reported, 2)
self.assertEqual(len(B.comments), 3)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 1,1' % B.commit_stop)
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
A = self.fake_gerrit.addFakeChange('gerrit/project1', 'master', 'A')
B = self.fake_pagure.openFakePullRequest(
'pagure/project2', 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
self.executor_server.hold_jobs_in_build = True
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertFalse(B.is_merged)
self.assertEqual(A.reported, 1)
self.assertEqual(len(B.comments), 0)
changes = self.getJobFromHistory(
'project-merge', 'gerrit/project1').changes
self.assertEqual(changes, '1,%s 1,1' % B.commit_stop)
class TestPagureToGithubCRD(ZuulTestCase):
config_file = 'zuul-crd-pagure.conf'
tenant_config_file = 'config/cross-source-pagure/github.yaml'
def test_crd_gate(self):
"Test cross-repo dependencies"
A = self.fake_pagure.openFakePullRequest('pagure/project2', 'master',
'A')
B = self.fake_github.openFakePullRequest('github/project1', 'master',
'B')
# A Depends-On: B
A.editInitialComment('Depends-On: %s\n' % (B.url))
A.addFlag('success', 'https://url', 'Build passed')
A.threshold_reached = True
# Make A enter the pipeline
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
        # Expect neither A nor B to be merged as B is not approved yet
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
B.addLabel('approved')
self.fake_pagure.emitEvent(
A.getPullRequestCommentedEvent(":thumbsup:"))
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
self.assertEqual(len(A.comments), 4)
self.assertEqual(len(B.comments), 2)
changes = self.getJobFromHistory(
'project-merge', 'pagure/project2').changes
self.assertEqual(changes, '1,%s 1,%s' % (B.head_sha, A.commit_stop))
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
A = self.fake_pagure.openFakePullRequest('pagure/project2', 'master',
'A')
B = self.fake_github.openFakePullRequest('github/project1', 'master',
'A')
# A Depends-On: B
A.editInitialComment('Depends-On: %s\n' % B.url)
self.executor_server.hold_jobs_in_build = True
self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
self.assertEqual(len(A.comments), 2)
changes = self.getJobFromHistory(
'project-merge', 'pagure/project2').changes
self.assertEqual(changes, '1,%s 1,%s' % (B.head_sha, A.commit_stop))
class TestGithubToPagureCRD(ZuulTestCase):
config_file = 'zuul-crd-pagure.conf'
tenant_config_file = 'config/cross-source-pagure/github.yaml'
    # These tests also use the fake github implementation, which
    # means that every scheduler gets a different fake github
    # instance. Thus, assertions might fail depending on which
    # scheduler did the interaction with Github.
scheduler_count = 1
def test_crd_gate(self):
"Test cross-repo dependencies"
A = self.fake_github.openFakePullRequest('github/project1', 'master',
'A')
B = self.fake_pagure.openFakePullRequest('pagure/project2', 'master',
'B')
# A Depends-On: B
A.editBody('Depends-On: %s\n' % B.url)
event = A.addLabel('approved')
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
B.addFlag('success', 'https://url', 'Build passed')
B.threshold_reached = True
self.fake_pagure.emitEvent(
B.getPullRequestCommentedEvent(":thumbsup:"))
self.fake_github.emitEvent(event)
self.waitUntilSettled()
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
self.assertEqual(len(A.comments), 2)
self.assertEqual(len(B.comments), 3)
changes = self.getJobFromHistory(
'project-merge', 'github/project1').changes
self.assertEqual(changes, '1,%s 1,%s' % (B.commit_stop, A.head_sha))
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
A = self.fake_github.openFakePullRequest(
'github/project1', 'master', 'A')
B = self.fake_pagure.openFakePullRequest(
'pagure/project2', 'master', 'B')
# A Depends-On: B
self.executor_server.hold_jobs_in_build = True
self.fake_github.emitEvent(A.editBody('Depends-On: %s\n' % B.url))
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
self.assertEqual(len(A.comments), 1)
self.assertEqual(len(B.comments), 0)
changes = self.getJobFromHistory(
'project-merge', 'github/project1').changes
self.assertEqual(changes, '1,%s 1,%s' % (B.commit_stop, A.head_sha))
class TestPagureWebhook(ZuulTestCase):
config_file = 'zuul-pagure-driver.conf'
def setUp(self):
super(TestPagureWebhook, self).setUp()
# Start the web server
self.web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
host = '127.0.0.1'
# Wait until web server is started
while True:
port = self.web.port
try:
with socket.create_connection((host, port)):
break
except ConnectionRefusedError:
pass
self.fake_pagure.setZuulWebPort(port)
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_webhook(self):
A = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent(),
use_zuulweb=True,
project='org/project',
wrong_token=True)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent(),
use_zuulweb=True,
project='org/project')
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
class TestPagureWebhookWhitelist(ZuulTestCase):
config_file = 'zuul-pagure-driver-whitelist.conf'
def setUp(self):
super(TestPagureWebhookWhitelist, self).setUp()
# Start the web server
self.web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
host = '127.0.0.1'
# Wait until web server is started
while True:
port = self.web.port
try:
with socket.create_connection((host, port)):
break
except ConnectionRefusedError:
pass
self.fake_pagure.setZuulWebPort(port)
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_webhook_whitelist(self):
A = self.fake_pagure.openFakePullRequest(
'org/project', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent(),
use_zuulweb=True,
project='org/project',
wrong_token=True)
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_pagure_driver.py
|
test_pagure_driver.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import (
ZuulTestCase,
simple_layout,
)
class TestSerial(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
@simple_layout('layouts/serial.yaml')
def test_deploy_window(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setMerged()
self.fake_gerrit.addEvent(B.getChangeMergedEvent())
self.waitUntilSettled()
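# Both of A's jobs should be running; B is held back by the
# serial window until A's builds complete.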
self.assertEqual(len(self.builds), 2)
self.assertTrue(self.builds[0].hasChanges(A))
self.assertTrue(self.builds[1].hasChanges(A))
self.assertFalse(self.builds[0].hasChanges(B))
self.assertFalse(self.builds[1].hasChanges(B))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 2)
self.assertTrue(self.builds[0].hasChanges(A))
self.assertTrue(self.builds[1].hasChanges(A))
self.assertTrue(self.builds[0].hasChanges(B))
self.assertTrue(self.builds[1].hasChanges(B))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='job1', result='SUCCESS', changes='1,1'),
dict(name='job2', result='SUCCESS', changes='1,1'),
dict(name='job1', result='SUCCESS', changes='2,1'),
dict(name='job2', result='SUCCESS', changes='2,1'),
], ordered=False)
@simple_layout('layouts/serial.yaml')
def test_deploy_shared(self):
# Same as test_deploy_window but with two separate projects
# sharing a queue.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.setMerged()
self.fake_gerrit.addEvent(B.getChangeMergedEvent())
self.waitUntilSettled()
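# The two projects share a queue, so only one change deploys at a
# time; only A's job should be running.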
self.assertEqual(len(self.builds), 1)
self.assertTrue(self.builds[0].hasChanges(A))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertTrue(self.builds[0].hasChanges(B))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='job1', result='SUCCESS', changes='1,1'),
dict(name='job1', result='SUCCESS', changes='2,1'),
], ordered=False)
@simple_layout('layouts/serial.yaml')
def test_deploy_unshared(self):
# Test two projects which don't share a queue.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.setMerged()
self.fake_gerrit.addEvent(B.getChangeMergedEvent())
self.waitUntilSettled()
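# With separate queues both changes can deploy in parallel: A's
# two jobs plus B's single job.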
self.assertEqual(len(self.builds), 3)
self.assertTrue(self.builds[0].hasChanges(A))
self.assertTrue(self.builds[1].hasChanges(A))
self.assertTrue(self.builds[2].hasChanges(B))
self.assertFalse(self.builds[2].hasChanges(A))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertHistory([
dict(name='job1', result='SUCCESS', changes='1,1'),
dict(name='job2', result='SUCCESS', changes='1,1'),
dict(name='job1', result='SUCCESS', changes='2,1'),
], ordered=False)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_serial.py
|
test_serial.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import logging
import subprocess
import sys
import tempfile
import testtools
import time
import os
from zuul.driver import bubblewrap
from zuul.executor.server import SshAgent
from tests.base import iterate_timeout
from unittest import skipIf
class TestBubblewrap(testtools.TestCase):
def setUp(self):
super(TestBubblewrap, self).setUp()
self.log_fixture = self.useFixture(
fixtures.FakeLogger(level=logging.DEBUG))
self.useFixture(fixtures.NestedTempfile())
def test_bubblewrap_wraps(self):
bwrap = bubblewrap.BubblewrapDriver(check_bwrap=True)
context = bwrap.getExecutionContext()
work_dir = tempfile.mkdtemp()
ssh_agent = SshAgent()
self.addCleanup(ssh_agent.stop)
ssh_agent.start()
po = context.getPopen(work_dir=work_dir,
ssh_auth_sock=ssh_agent.env['SSH_AUTH_SOCK'])
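# The extra fds (presumably carrying the generated passwd/group
# data into the sandbox) should land above stdin/stdout/stderr (0-2).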
self.assertTrue(po.fds[0] > 2)
self.assertTrue(po.fds[1] > 2)
self.assertTrue(work_dir in po.command)
# Now run /usr/bin/id to verify passwd/group entries made it in
true_proc = po(['/usr/bin/id'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, errs) = true_proc.communicate()
# Make sure it printed things on stdout
self.assertTrue(len(output.strip()))
# And that it did not print things on stderr
self.assertEqual(0, len(errs.strip()))
# Make sure the _r's are closed
self.assertEqual([], po.fds)
@skipIf(sys.platform == 'darwin', 'Not supported on MacOS')
def test_bubblewrap_leak(self):
bwrap = bubblewrap.BubblewrapDriver(check_bwrap=True)
context = bwrap.getExecutionContext()
work_dir = tempfile.mkdtemp()
ansible_dir = tempfile.mkdtemp()
ssh_agent = SshAgent()
self.addCleanup(ssh_agent.stop)
ssh_agent.start()
po = context.getPopen(work_dir=work_dir,
ansible_dir=ansible_dir,
ssh_auth_sock=ssh_agent.env['SSH_AUTH_SOCK'])
leak_time = 60
# Use hexadecimal notation to avoid false-positive
true_proc = po(['bash', '-c', 'sleep 0x%X & disown' % leak_time])
self.assertEqual(0, true_proc.wait())
cmdline = "sleep\x000x%X\x00" % leak_time
for x in iterate_timeout(30, "process to exit"):
try:
sleep_proc = []
for pid in os.listdir("/proc"):
if os.path.isfile("/proc/%s/cmdline" % pid):
with open("/proc/%s/cmdline" % pid) as f:
if f.read() == cmdline:
sleep_proc.append(pid)
if not sleep_proc:
break
except FileNotFoundError:
pass
except ProcessLookupError:
pass
time.sleep(1)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_bubblewrap.py
|
test_bubblewrap.py
|
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zuul.lib import yamlutil
from tests.base import BaseTestCase
import testtools
class TestYamlDumper(BaseTestCase):
def test_load_normal_data(self):
expected = {'foo': 'bar'}
data = 'foo: bar\n'
out = yamlutil.safe_load(data)
self.assertEqual(out, expected)
out = yamlutil.encrypted_load(data)
self.assertEqual(out, expected)
def test_load_encrypted_data(self):
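# 'YmFy' is the base64 encoding of b'bar'; the ciphertext
# attribute should hold the decoded bytes.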
expected = {'foo': yamlutil.EncryptedPKCS1_OAEP('YmFy')}
self.assertEqual(expected['foo'].ciphertext, b'bar')
data = "foo: !encrypted/pkcs1-oaep YmFy\n"
out = yamlutil.encrypted_load(data)
self.assertEqual(out, expected)
with testtools.ExpectedException(
yamlutil.yaml.constructor.ConstructorError):
out = yamlutil.safe_load(data)
def test_dump_normal_data(self):
data = {'foo': 'bar'}
expected = 'foo: bar\n'
out = yamlutil.safe_dump(data, default_flow_style=False)
self.assertEqual(out, expected)
out = yamlutil.encrypted_dump(data, default_flow_style=False)
self.assertEqual(out, expected)
def test_dump_encrypted_data(self):
data = {'foo': yamlutil.EncryptedPKCS1_OAEP('YmFy')}
self.assertEqual(data['foo'].ciphertext, b'bar')
expected = "foo: !encrypted/pkcs1-oaep YmFy\n"
out = yamlutil.encrypted_dump(data, default_flow_style=False)
self.assertEqual(out, expected)
with testtools.ExpectedException(
yamlutil.yaml.representer.RepresenterError):
out = yamlutil.safe_dump(data, default_flow_style=False)
def test_ansible_dumper(self):
data = {'foo': 'bar'}
data = yamlutil.mark_strings_unsafe(data)
expected = "foo: !unsafe 'bar'\n"
yaml_out = yamlutil.ansible_unsafe_dump(data, default_flow_style=False)
self.assertEqual(yaml_out, expected)
data = {'foo': {'bar': 'baz'}, 'list': ['bar', 1, 3.0, True, None]}
data = yamlutil.mark_strings_unsafe(data)
expected = """\
foo:
bar: !unsafe 'baz'
list:
- !unsafe 'bar'
- 1
- 3.0
- true
- null
"""
yaml_out = yamlutil.ansible_unsafe_dump(data, default_flow_style=False)
self.assertEqual(yaml_out, expected)
def test_ansible_dumper_with_aliases(self):
foo = {'bar': 'baz'}
data = {'foo1': foo, 'foo2': foo}
expected = """\
foo1: &id001
bar: baz
foo2: *id001
"""
yaml_out = yamlutil.ansible_unsafe_dump(data, default_flow_style=False)
self.assertEqual(yaml_out, expected)
def test_ansible_dumper_ignore_aliases(self):
foo = {'bar': 'baz'}
data = {'foo1': foo, 'foo2': foo}
expected = """\
foo1:
bar: baz
foo2:
bar: baz
"""
yaml_out = yamlutil.ansible_unsafe_dump(
data,
ignore_aliases=True,
default_flow_style=False)
self.assertEqual(yaml_out, expected)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_yamlutil.py
|
test_yamlutil.py
|
# Copyright 2019 Red Hat
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import os
import git
import yaml
import socket
import time
from zuul.lib import strings
from zuul.zk.layout import LayoutState
from tests.base import random_sha1, simple_layout, skipIfMultiScheduler
from tests.base import ZuulTestCase, ZuulWebFixture
from testtools.matchers import MatchesRegex
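# Sentinel layout state used as a fallback so the reconfiguration-time
# comparisons in the tests below always have a baseline.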
EMPTY_LAYOUT_STATE = LayoutState("", "", 0, None, {}, -1)
class TestGitlabWebhook(ZuulTestCase):
config_file = 'zuul-gitlab-driver.conf'
def setUp(self):
super().setUp()
# Start the web server
self.web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
host = '127.0.0.1'
# Wait until web server is started
while True:
port = self.web.port
try:
with socket.create_connection((host, port)):
break
except ConnectionRefusedError:
pass
self.fake_gitlab.setZuulWebPort(port)
def tearDown(self):
super(TestGitlabWebhook, self).tearDown()
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_webhook(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent(),
use_zuulweb=False,
project='org/project')
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_webhook_via_zuulweb(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent(),
use_zuulweb=True,
project='org/project')
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
class TestGitlabDriver(ZuulTestCase):
config_file = 'zuul-gitlab-driver.conf'
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_opened(self):
description = "This is the\nMR description."
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A', description=description)
self.fake_gitlab.emitEvent(
A.getMergeRequestOpenedEvent(), project='org/project')
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
job = self.getJobFromHistory('project-test2')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.sha), zuulvars['patchset'])
self.assertEqual(str(A.sha), zuulvars['commit_id'])
self.assertEqual('master', zuulvars['branch'])
self.assertEqual(f'{self.fake_gitlab._test_baseurl}/'
'org/project/merge_requests/1',
zuulvars['items'][0]['change_url'])
self.assertEqual(zuulvars["message"], strings.b64encode(description))
self.assertEqual(2, len(self.history))
self.assertEqual(2, len(A.notes))
self.assertEqual(
A.notes[0]['body'], "Starting check jobs.")
self.assertThat(
A.notes[1]['body'],
MatchesRegex(r'.*project-test1.*SUCCESS.*', re.DOTALL))
self.assertThat(
A.notes[1]['body'],
MatchesRegex(r'.*project-test2.*SUCCESS.*', re.DOTALL))
self.assertTrue(A.approved)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_opened_incomplete(self):
now = time.monotonic()
complete_at = now + 3
with self.fake_gitlab.enable_delayed_complete_mr(complete_at):
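# Until complete_at, the fake returns incomplete MR data; the
# driver should keep re-fetching the MR (see the get_mr stat
# check below).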
description = "This is the\nMR description."
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A', description=description)
self.fake_gitlab.emitEvent(
A.getMergeRequestOpenedEvent(), project='org/project')
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
self.assertTrue(self.fake_gitlab._test_web_server.stats["get_mr"] > 2)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_updated(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
mr_tip1_sha = A.sha
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.assertHistory(
[
{'name': 'project-test1', 'changes': '1,%s' % mr_tip1_sha},
{'name': 'project-test2', 'changes': '1,%s' % mr_tip1_sha},
], ordered=False
)
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
mr_tip2_sha = A.sha
self.waitUntilSettled()
self.assertEqual(4, len(self.history))
self.assertHistory(
[
{'name': 'project-test1', 'changes': '1,%s' % mr_tip1_sha},
{'name': 'project-test2', 'changes': '1,%s' % mr_tip1_sha},
{'name': 'project-test1', 'changes': '1,%s' % mr_tip2_sha},
{'name': 'project-test2', 'changes': '1,%s' % mr_tip2_sha}
], ordered=False
)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_approved(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestApprovedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.fake_gitlab.emitEvent(A.getMergeRequestUnapprovedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
job = self.getJobFromHistory('project-test-approval')
zuulvars = job.parameters['zuul']
self.assertEqual('check-approval', zuulvars['pipeline'])
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_updated_during_build(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
old = A.sha
A.addCommit()
new = A.sha
self.assertNotEqual(old, new)
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
# The MR must not be approved: the tested commit isn't the current one
self.assertFalse(A.approved)
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(4, len(self.history))
self.assertTrue(A.approved)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_labeled(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestLabeledEvent(
add_labels=('label1', 'label2')))
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
self.fake_gitlab.emitEvent(A.getMergeRequestLabeledEvent(
add_labels=('gateit', )))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
A.labels = ['verified']
self.fake_gitlab.emitEvent(A.getMergeRequestLabeledEvent(
remove_labels=('verified', )))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_merged(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestMergedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertHistory([{'name': 'project-promote'}])
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_push_does_not_reconfigure(self):
# Test that the push event that follows a merge doesn't
# needlessly trigger reconfiguration.
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
state1 = self.scheds.first.sched.local_layout_state.get("tenant-one")
self.fake_gitlab.emitEvent(A.getMergeRequestMergedEvent())
self.fake_gitlab.emitEvent(A.getMergeRequestMergedPushEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.assertHistory([{'name': 'project-post-job'},
{'name': 'project-promote'}
], ordered=False)
state2 = self.scheds.first.sched.local_layout_state.get("tenant-one")
self.assertEqual(state1, state2)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_push_does_reconfigure(self):
# Test that the push event that follows a merge does
# trigger reconfiguration if .zuul.yaml is changed.
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
state1 = self.scheds.first.sched.local_layout_state.get("tenant-one")
self.fake_gitlab.emitEvent(A.getMergeRequestMergedEvent())
self.fake_gitlab.emitEvent(A.getMergeRequestMergedPushEvent(
modified_files=['.zuul.yaml']))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.assertHistory([{'name': 'project-post-job'},
{'name': 'project-promote'}
], ordered=False)
state2 = self.scheds.first.sched.local_layout_state.get("tenant-one")
self.assertNotEqual(state1, state2)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_updated_builds_aborted(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
mr_tip1_sha = A.sha
self.executor_server.hold_jobs_in_build = True
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
mr_tip2_sha = A.sha
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory(
[
{'name': 'project-test1', 'result': 'ABORTED',
'changes': '1,%s' % mr_tip1_sha},
{'name': 'project-test2', 'result': 'ABORTED',
'changes': '1,%s' % mr_tip1_sha},
{'name': 'project-test1', 'changes': '1,%s' % mr_tip2_sha},
{'name': 'project-test2', 'changes': '1,%s' % mr_tip2_sha}
], ordered=False
)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_commented(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.fake_gitlab.emitEvent(
A.getMergeRequestCommentedEvent('I like that change'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
self.fake_gitlab.emitEvent(
A.getMergeRequestCommentedEvent('recheck'))
self.waitUntilSettled()
self.assertEqual(4, len(self.history))
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
@skipIfMultiScheduler()
# This test fails reproducibly with multiple schedulers because
# the fake_gitlab._test_baseurl used in the assertion for the
# change_url doesn't match.
# An explanation for this would be that each scheduler (and the
# test case itself) uses a different (fake) Gitlab connection.
# However, the interesting part is that only this test fails,
# although there are other gitlab tests with a similar assertion.
# Apart from that, I'm wondering why this test fails with multiple
# schedulers in the first place, as each scheduler should have a
# different gitlab connection than the test case itself.
def test_ref_updated(self):
event = self.fake_gitlab.getPushEvent('org/project')
expected_newrev = event[1]['after']
expected_oldrev = event[1]['before']
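# The push payload carries the revisions in its 'before'/'after'
# fields; they should surface as oldrev/newrev in the zuul vars.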
self.fake_gitlab.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-post-job').result)
job = self.getJobFromHistory('project-post-job')
zuulvars = job.parameters['zuul']
self.assertEqual('refs/heads/master', zuulvars['ref'])
self.assertEqual('post', zuulvars['pipeline'])
self.assertEqual('project-post-job', zuulvars['job'])
self.assertEqual('master', zuulvars['branch'])
self.assertEqual(
f'{self.fake_gitlab._test_baseurl}/org/project/tree/'
f'{zuulvars["newrev"]}',
zuulvars['change_url'])
self.assertEqual(expected_newrev, zuulvars['newrev'])
self.assertEqual(expected_oldrev, zuulvars['oldrev'])
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_ref_created(self):
self.create_branch('org/project', 'stable-1.0')
path = os.path.join(self.upstream_root, 'org/project')
repo = git.Repo(path)
newrev = repo.commit('refs/heads/stable-1.0').hexsha
event = self.fake_gitlab.getPushEvent(
'org/project', branch='refs/heads/stable-1.0',
before='0' * 40, after=newrev)
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.fake_gitlab.emitEvent(event)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
self.assertEqual(1, len(self.history))
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-post-job').result)
job = self.getJobFromHistory('project-post-job')
zuulvars = job.parameters['zuul']
self.assertEqual('refs/heads/stable-1.0', zuulvars['ref'])
self.assertEqual('post', zuulvars['pipeline'])
self.assertEqual('project-post-job', zuulvars['job'])
self.assertEqual('stable-1.0', zuulvars['branch'])
self.assertEqual(newrev, zuulvars['newrev'])
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_ref_deleted(self):
event = self.fake_gitlab.getPushEvent(
'org/project', 'stable-1.0', after='0' * 40)
self.fake_gitlab.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_tag_created(self):
path = os.path.join(self.upstream_root, 'org/project')
repo = git.Repo(path)
repo.create_tag('1.0')
tagsha = repo.tags['1.0'].commit.hexsha
event = self.fake_gitlab.getGitTagEvent(
'org/project', '1.0', tagsha)
self.fake_gitlab.emitEvent(event)
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(
'SUCCESS',
self.getJobFromHistory('project-tag-job').result)
job = self.getJobFromHistory('project-tag-job')
zuulvars = job.parameters['zuul']
self.assertEqual('refs/tags/1.0', zuulvars['ref'])
self.assertEqual('tag', zuulvars['pipeline'])
self.assertEqual('project-tag-job', zuulvars['job'])
self.assertEqual(tagsha, zuulvars['newrev'])
self.assertEqual(tagsha, zuulvars['commit_id'])
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_pull_request_with_dyn_reconf(self):
path = os.path.join(self.upstream_root, 'org/project')
zuul_yaml = [
{'job': {
'name': 'project-test3',
'run': 'job.yaml'
}},
{'project': {
'check': {
'jobs': [
'project-test3'
]
}
}}
]
playbook = "- hosts: all\n tasks: []"
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A',
base_sha=git.Repo(path).head.object.hexsha)
A.addCommit(
{'.zuul.yaml': yaml.dump(zuul_yaml),
'job.yaml': playbook}
)
A.addCommit({"dummy.file": ""})
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test3').result)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_pull_request_with_dyn_reconf_alt(self):
with self.fake_gitlab.enable_uncomplete_mr():
zuul_yaml = [
{'job': {
'name': 'project-test3',
'run': 'job.yaml'
}},
{'project': {
'check': {
'jobs': [
'project-test3'
]
}
}}
]
playbook = "- hosts: all\n tasks: []"
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A')
A.addCommit(
{'.zuul.yaml': yaml.dump(zuul_yaml),
'job.yaml': playbook}
)
A.addCommit({"dummy.file": ""})
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test3').result)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_ref_updated_and_tenant_reconfigure(self):
self.waitUntilSettled()
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
zuul_yaml = [
{'job': {
'name': 'project-post-job2',
'run': 'job.yaml'
}},
{'project': {
'post': {
'jobs': [
'project-post-job2'
]
}
}}
]
playbook = "- hosts: all\n tasks: []"
self.create_commit(
'org/project',
{'.zuul.yaml': yaml.dump(zuul_yaml),
'job.yaml': playbook},
message='Add InRepo configuration'
)
event = self.fake_gitlab.getPushEvent('org/project',
modified_files=['.zuul.yaml'])
self.fake_gitlab.emitEvent(event)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
self.assertHistory(
[{'name': 'project-post-job'},
{'name': 'project-post-job2'},
], ordered=False
)
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab')
def test_crd_independent(self):
# Create a change in project1 that a project2 change will depend on
A = self.fake_gitlab.openFakeMergeRequest(
'org/project1', 'master', 'A')
# Create a commit in B that sets the dependency on A
msg = "Depends-On: %s" % A.url
B = self.fake_gitlab.openFakeMergeRequest(
'org/project2', 'master', 'B', description=msg)
# Make an event to re-use
self.fake_gitlab.emitEvent(B.getMergeRequestOpenedEvent())
self.waitUntilSettled()
# The changes for the job from project2 should include the project1
# MR content
changes = self.getJobFromHistory(
'project2-test', 'org/project2').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.sha,
B.number,
B.sha))
# There should be no more changes in the queue
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@simple_layout('layouts/requirements-gitlab.yaml', driver='gitlab')
def test_state_require(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project1', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
# Close the MR
A.closeMergeRequest()
# A recheck will not trigger the job
self.fake_gitlab.emitEvent(
A.getMergeRequestCommentedEvent('recheck'))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
# Merge the MR
A.mergeMergeRequest()
# A recheck will not trigger the job
self.fake_gitlab.emitEvent(
A.getMergeRequestCommentedEvent('recheck'))
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
# Re-open the MR
A.reopenMergeRequest()
# A recheck will trigger the job
self.fake_gitlab.emitEvent(
A.getMergeRequestCommentedEvent('recheck'))
self.waitUntilSettled()
self.assertEqual(2, len(self.history))
@simple_layout('layouts/requirements-gitlab.yaml', driver='gitlab')
def test_approval_require(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project2', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
A.approved = True
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
A.approved = False
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/requirements-gitlab.yaml', driver='gitlab')
def test_approval_require_community_edition(self):
with self.fake_gitlab.enable_community_edition():
A = self.fake_gitlab.openFakeMergeRequest(
'org/project2', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
A.approved = True
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
A.approved = False
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/requirements-gitlab.yaml', driver='gitlab')
def test_label_require(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project3', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
A.labels = ['gateit', 'prio:low']
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(0, len(self.history))
A.labels = ['gateit', 'prio:low', 'another_label']
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
@simple_layout('layouts/gitlab-label-add-remove.yaml', driver='gitlab')
def test_label_add_remove(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project1', 'master', 'A')
A.labels = ['removeme1', 'removeme2']
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual(set(A.labels), {'addme1', 'addme2'})
@simple_layout('layouts/merging-gitlab.yaml', driver='gitlab')
def test_merge_action_in_independent(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project1', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(1, len(self.history))
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test').result)
self.assertEqual('merged', A.state)
@simple_layout('layouts/merging-gitlab.yaml', driver='gitlab')
def test_merge_action_in_dependent(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project2', 'master', 'A')
A.merge_status = 'cannot_be_merged'
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
# canMerge is not validated
self.assertEqual(0, len(self.history))
# Set Merge request can be merged
A.merge_status = 'can_be_merged'
self.fake_gitlab.emitEvent(A.getMergeRequestUpdatedEvent())
self.waitUntilSettled()
# canMerge is validated
self.assertEqual(1, len(self.history))
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test').result)
self.assertEqual('merged', A.state)
@simple_layout('layouts/merging-gitlab-squash-merge.yaml', driver='gitlab')
def test_merge_squash(self):
A = self.fake_gitlab.openFakeMergeRequest(
'org/project1', 'master', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
# canMerge is validated
self.assertEqual(1, len(self.history))
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test').result)
self.assertEqual('merged', A.state)
self.assertTrue(A.squash_merge)
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab')
def test_crd_dependent(self):
# Create a change in project3 that a project4 change will depend on
A = self.fake_gitlab.openFakeMergeRequest(
'org/project3', 'master', 'A')
A.approved = True
# Create a change B that sets the dependency on A
msg = "Depends-On: %s" % A.url
B = self.fake_gitlab.openFakeMergeRequest(
'org/project4', 'master', 'B', description=msg)
# Emit B opened event
event = B.getMergeRequestOpenedEvent()
self.fake_gitlab.emitEvent(event)
self.waitUntilSettled()
# B cannot be merged (not approved)
self.assertEqual(0, len(self.history))
# Approve B
B.approved = True
# And send the event
self.fake_gitlab.emitEvent(event)
self.waitUntilSettled()
# The changes for the job from project4 should include the project3
# MR content
changes = self.getJobFromHistory(
'project4-test', 'org/project4').changes
self.assertEqual(changes, "%s,%s %s,%s" % (A.number,
A.sha,
B.number,
B.sha))
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_timer_event(self):
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('org/common-config',
'layouts/timer-gitlab.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
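# Give the timer trigger time to fire at least once before
# checking the queued builds.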
time.sleep(2)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.executor_server.hold_jobs_in_build = False
# Stop queuing timer-triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('org/common-config',
'layouts/no-timer-gitlab.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-bitrot', result='SUCCESS',
ref='refs/heads/master'),
], ordered=False)
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab')
def test_api_token(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project1')
project_git_url = self.fake_gitlab.real_getGitUrl(project)
# cloneurl created from config 'server' should be used
# without credentials
self.assertEqual(f"{self.fake_gitlab._test_baseurl}/org/project1.git",
project_git_url)
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab2')
def test_api_token_cloneurl(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project1')
project_git_url = self.fake_gitlab2.real_getGitUrl(project)
# cloneurl from config file should be used as it defines token name and
# secret
self.assertEqual("http://myusername:2222@gitlab/org/project1.git",
project_git_url)
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab3')
def test_api_token_name_cloneurl(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project1')
project_git_url = self.fake_gitlab3.real_getGitUrl(project)
# cloneurl from the config file should be used as it defines the token
# name and secret, even when token name and token secret are also
# configured separately
self.assertEqual("http://myusername:2222@gitlabthree/org/project1.git",
project_git_url)
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab4')
def test_api_token_name(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project1')
project_git_url = self.fake_gitlab4.real_getGitUrl(project)
# cloneurl is not set, generate one from token name, token secret and
# server
self.assertEqual("http://tokenname4:444@localhost:"
f"{self.fake_gitlab4._test_web_server.port}"
"/org/project1.git",
project_git_url)
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab5')
def test_api_token_name_cloneurl_server(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project1')
project_git_url = self.fake_gitlab5.real_getGitUrl(project)
# cloneurl defines a URL without credentials. As the token name is
# set, the token name and secret are included in the cloneurl; the
# 'server' setting is overridden
self.assertEqual("http://tokenname5:555@gitlabfivvve/org/project1.git",
project_git_url)
@simple_layout('layouts/files-gitlab.yaml', driver='gitlab')
def test_changed_file_match_filter(self):
path = os.path.join(self.upstream_root, 'org/project')
base_sha = git.Repo(path).head.object.hexsha
files = {'{:03d}.txt'.format(n): 'test' for n in range(300)}
files["foobar-requires"] = "test"
files["to-be-removed"] = "test"
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A', files=files, base_sha=base_sha)
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
# project-test1 and project-test2 should be run
self.assertEqual(2, len(self.history))
@simple_layout('layouts/files-gitlab.yaml', driver='gitlab')
def test_changed_and_reverted_file_not_match_filter(self):
path = os.path.join(self.upstream_root, 'org/project')
base_sha = git.Repo(path).head.object.hexsha
files = {'{:03d}.txt'.format(n): 'test' for n in range(300)}
files["foobar-requires"] = "test"
files["to-be-removed"] = "test"
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A', files=files, base_sha=base_sha)
A.addCommit(delete_files=['to-be-removed'])
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
# Only project-test1 should run, because the to-be-removed file is
# reverted and thus no longer among the changed files that would
# trigger project-test2
self.assertEqual(1, len(self.history))
class TestGitlabUnprotectedBranches(ZuulTestCase):
config_file = 'zuul-gitlab-driver.conf'
tenant_config_file = 'config/unprotected-branches-gitlab/main.yaml'
@skipIfMultiScheduler()
# This test fails with multiple schedulers depending on which
# scheduler did the tenant reconfiguration first. As the
# assertions are all done on the objects from scheduler-0, they
# will fail if scheduler-1 did the reconfig first.
# To make this work with multiple schedulers, we might want to wait
# until all schedulers have completed their tenant reconfiguration.
def test_unprotected_branches(self):
tenant = self.scheds.first.sched.abide.tenants\
.get('tenant-one')
project1 = tenant.untrusted_projects[0]
project2 = tenant.untrusted_projects[1]
tpc1 = tenant.project_configs[project1.canonical_name]
tpc2 = tenant.project_configs[project2.canonical_name]
# project1 should have parsed master
self.assertIn('master', tpc1.parsed_branch_config.keys())
# project2 should have no parsed branch
self.assertEqual(0, len(tpc2.parsed_branch_config.keys()))
# now enable branch protection and trigger reload
self.fake_gitlab.protectBranch('org', 'project2', 'master')
pevent = self.fake_gitlab.getPushEvent(project='org/project2')
self.fake_gitlab.emitEvent(pevent)
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
tpc1 = tenant.project_configs[project1.canonical_name]
tpc2 = tenant.project_configs[project2.canonical_name]
# project1 and project2 should have parsed master now
self.assertIn('master', tpc1.parsed_branch_config.keys())
self.assertIn('master', tpc2.parsed_branch_config.keys())
def test_filtered_branches_in_build(self):
"""
Tests unprotected branches are filtered in builds if excluded
"""
self.executor_server.keep_jobdir = True
# Enable branch protection on org/project2@master
self.create_branch('org/project2', 'feat-x')
self.fake_gitlab.protectBranch('org', 'project2', 'master',
protected=True)
# Enable branch protection on org/project3@stable. We'll use an MR on
# this branch as a depends-on to validate that the stable branch,
# which is not protected in org/project2, is not filtered out.
self.create_branch('org/project3', 'stable')
self.fake_gitlab.protectBranch('org', 'project3', 'stable',
protected=True)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_gitlab.openFakeMergeRequest('org/project3', 'stable',
'A')
msg = "Depends-On: %s" % A.url
B = self.fake_gitlab.openFakeMergeRequest('org/project2', 'master',
'B', description=msg)
self.fake_gitlab.emitEvent(B.getMergeRequestOpenedEvent())
self.waitUntilSettled()
build = self.history[0]
path = os.path.join(
build.jobdir.src_root, 'gitlab', 'org/project2')
build_repo = git.Repo(path)
branches = [x.name for x in build_repo.branches]
self.assertNotIn('feat-x', branches)
self.assertHistory([
dict(name='used-job', result='SUCCESS',
changes="%s,%s %s,%s" % (A.number, A.sha,
B.number, B.sha)),
])
def test_unfiltered_branches_in_build(self):
"""
Tests unprotected branches are not filtered in builds if not excluded
"""
self.executor_server.keep_jobdir = True
# Enable branch protection on org/project1@master
self.create_branch('org/project1', 'feat-x')
self.fake_gitlab.protectBranch('org', 'project1', 'master',
protected=True)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_gitlab.openFakeMergeRequest('org/project1', 'master',
'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
build = self.history[0]
path = os.path.join(
build.jobdir.src_root, 'gitlab', 'org/project1')
build_repo = git.Repo(path)
branches = [x.name for x in build_repo.branches]
self.assertIn('feat-x', branches)
self.assertHistory([
dict(name='project-test', result='SUCCESS',
changes="%s,%s" % (A.number, A.sha)),
])
def test_unprotected_push(self):
"""Test that unprotected pushes don't cause tenant reconfigurations"""
# Prepare repo with an initial commit
A = self.fake_gitlab.openFakeMergeRequest('org/project2', 'master',
'A')
zuul_yaml = [
{'job': {
'name': 'used-job2',
'run': 'playbooks/used-job.yaml'
}},
{'project': {
'check': {
'jobs': [
'used-job2'
]
}
}}
]
A.addCommit({'zuul.yaml': yaml.dump(zuul_yaml)})
A.mergeMergeRequest()
# Do a push on top of A
pevent = self.fake_gitlab.getPushEvent(project='org/project2',
before=A.sha,
branch='refs/heads/master')
# record previous tenant reconfiguration time, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
self.fake_gitlab.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# We don't expect a reconfiguration because the push was to an
# unprotected branch
self.assertEqual(old, new)
# now enable branch protection and trigger the push event again
self.fake_gitlab.protectBranch('org', 'project2', 'master')
self.fake_gitlab.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# We now expect that zuul reconfigured itself
self.assertLess(old, new)
def test_protected_branch_delete(self):
"""Test that protected branch deletes trigger a tenant reconfig"""
# Prepare repo with an initial commit and enable branch protection
self.fake_gitlab.protectBranch('org', 'project2', 'master')
self.fake_gitlab.emitEvent(
self.fake_gitlab.getPushEvent(
project='org/project2', branch='refs/heads/master'))
A = self.fake_gitlab.openFakeMergeRequest('org/project2', 'master',
'A')
A.mergeMergeRequest()
# add a spare branch so that the project is not empty after master gets
# deleted.
self.create_branch('org/project2', 'feat-x')
self.fake_gitlab.protectBranch('org', 'project2', 'feat-x',
protected=False)
self.fake_gitlab.emitEvent(
self.fake_gitlab.getPushEvent(
project='org/project2', branch='refs/heads/feat-x'))
self.waitUntilSettled()
# record previous tenant reconfiguration time, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
# Delete the branch
self.fake_gitlab.deleteBranch('org', 'project2', 'master')
pevent = self.fake_gitlab.getPushEvent(project='org/project2',
before=A.sha,
after='0' * 40,
branch='refs/heads/master')
self.fake_gitlab.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
# We now expect that zuul reconfigured itself as we deleted a protected
# branch
self.assertLess(old, new)
# This test verifies that an MR is considered in case it was created
# for a branch that had just been set to protected before a tenant
# reconfiguration took place.
def test_reconfigure_on_pr_to_new_protected_branch(self):
self.create_branch('org/project2', 'release')
self.create_branch('org/project2', 'feature')
self.fake_gitlab.protectBranch('org', 'project2', 'master')
self.fake_gitlab.protectBranch('org', 'project2', 'release',
protected=False)
self.fake_gitlab.protectBranch('org', 'project2', 'feature',
protected=False)
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.fake_gitlab.protectBranch('org', 'project2', 'release')
self.executor_server.hold_jobs_in_build = True
A = self.fake_gitlab.openFakeMergeRequest(
'org/project2', 'release', 'A')
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('used-job').result)
job = self.getJobFromHistory('used-job')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.sha), zuulvars['patchset'])
self.assertEqual('release', zuulvars['branch'])
self.assertEqual(1, len(self.history))
def _test_push_event_reconfigure(self, project, branch,
expect_reconfigure=False,
old_sha=None, new_sha=None,
modified_files=None,
removed_files=None):
pevent = self.fake_gitlab.getPushEvent(
project=project,
branch='refs/heads/%s' % branch,
before=old_sha,
after=new_sha,
# Forward modified_files so the callers' file hints take effect;
# removed_files is left unforwarded as the fake event helper may
# not accept it (assumption).
modified_files=modified_files)
# record previous tenant reconfiguration time, which may not be set
old = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
self.waitUntilSettled()
self.fake_gitlab.emitEvent(pevent)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state.get(
'tenant-one', EMPTY_LAYOUT_STATE)
if expect_reconfigure:
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
else:
# Timestamps should be equal as no reconfiguration shall happen
self.assertEqual(old, new)
def test_push_event_reconfigure_complex_branch(self):
branch = 'feature/somefeature'
project = 'org/project2'
# prepare an existing branch
self.create_branch(project, branch)
self.fake_gitlab.protectBranch(*project.split('/'), branch,
protected=False)
self.fake_gitlab.emitEvent(
self.fake_gitlab.getPushEvent(
project,
branch='refs/heads/%s' % branch))
self.waitUntilSettled()
A = self.fake_gitlab.openFakeMergeRequest(project, branch, 'A')
old_sha = A.sha
A.mergeMergeRequest()
new_sha = random_sha1()
# branch is not protected, no reconfiguration even if config file
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=False,
old_sha=old_sha,
new_sha=new_sha,
modified_files=['zuul.yaml'])
# branch is not protected: no reconfiguration
self.fake_gitlab.deleteBranch(*project.split('/'), branch)
self._test_push_event_reconfigure(project, branch,
expect_reconfigure=False,
old_sha=new_sha,
new_sha='0' * 40,
removed_files=['zuul.yaml'])
class TestGitlabDriverNoPool(ZuulTestCase):
config_file = 'zuul-gitlab-driver-no-pool.conf'
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_opened(self):
description = "This is the\nMR description."
A = self.fake_gitlab.openFakeMergeRequest(
'org/project', 'master', 'A', description=description)
self.fake_gitlab.emitEvent(
A.getMergeRequestOpenedEvent(), project='org/project')
self.waitUntilSettled()
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test1').result)
self.assertEqual('SUCCESS',
self.getJobFromHistory('project-test2').result)
job = self.getJobFromHistory('project-test2')
zuulvars = job.parameters['zuul']
self.assertEqual(str(A.number), zuulvars['change'])
self.assertEqual(str(A.sha), zuulvars['patchset'])
self.assertEqual('master', zuulvars['branch'])
self.assertEqual(f'{self.fake_gitlab._test_baseurl}/'
'org/project/merge_requests/1',
zuulvars['items'][0]['change_url'])
self.assertEqual(zuulvars["message"], strings.b64encode(description))
self.assertEqual(2, len(self.history))
self.assertEqual(2, len(A.notes))
self.assertEqual(
A.notes[0]['body'], "Starting check jobs.")
self.assertThat(
A.notes[1]['body'],
MatchesRegex(r'.*project-test1.*SUCCESS.*', re.DOTALL))
self.assertThat(
A.notes[1]['body'],
MatchesRegex(r'.*project-test2.*SUCCESS.*', re.DOTALL))
self.assertTrue(A.approved)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_gitlab_driver.py
|
test_gitlab_driver.py
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tests.base import ZuulTestCase, ZuulGithubAppTestCase
from zuul.driver.zuul.zuulmodel import ZuulTriggerEvent
class TestZuulTriggerParentChangeEnqueued(ZuulTestCase):
tenant_config_file = 'config/zuultrigger/parent-change-enqueued/main.yaml'
def test_zuul_trigger_parent_change_enqueued(self):
"Test Zuul trigger event: parent-change-enqueued"
# This test has the following three changes:
# B1 -> A; B2 -> A
# When A is enqueued in the gate, B1 and B2 should both attempt
# to be enqueued in both pipelines. B1 should end up in check
# and B2 in gate because of differing pipeline requirements.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'B1')
B2 = self.fake_gerrit.addFakeChange('org/project', 'master', 'B2')
A.addApproval('Code-Review', 2)
B1.addApproval('Code-Review', 2)
B2.addApproval('Code-Review', 2)
A.addApproval('Verified', 1, username="for-check") # reqd by check
A.addApproval('Verified', 1, username="for-gate") # reqd by gate
B1.addApproval('Verified', 1, username="for-check") # go to check
B2.addApproval('Verified', 1, username="for-gate") # go to gate
B1.addApproval('Approved', 1)
B2.addApproval('Approved', 1)
B1.setDependsOn(A, 1)
B2.setDependsOn(A, 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
# Jobs are being held in build to make sure that 3,1 has time
# to enqueue behind 1,1 so that the test is more
# deterministic.
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.history), 3)
for job in self.history:
if job.changes == '1,1':
self.assertEqual(job.name, 'project-gate')
elif job.changes == '1,1 2,1':
self.assertEqual(job.name, 'project-check')
elif job.changes == '1,1 3,1':
self.assertEqual(job.name, 'project-gate')
else:
raise Exception("Unknown job")
# Now directly enqueue a change into check. As no pipeline reacts
# to parent-change-enqueued events from the check pipeline, no
# parent-change-enqueued event is expected.
_add_trigger_event = self.scheds.first.sched.addTriggerEvent
def addTriggerEvent(driver_name, event):
self.assertNotIsInstance(event, ZuulTriggerEvent)
_add_trigger_event(driver_name, event)
with mock.patch.object(
self.scheds.first.sched, "addTriggerEvent", addTriggerEvent
):
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addApproval('Verified', 1, username="for-check")
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
D.addApproval('Verified', 1, username="for-check")
D.setDependsOn(C, 1)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 4)
class TestZuulTriggerParentChangeEnqueuedGithub(ZuulGithubAppTestCase):
tenant_config_file = \
'config/zuultrigger/parent-change-enqueued-github/main.yaml'
config_file = 'zuul-github-driver.conf'
def test_zuul_trigger_parent_change_enqueued(self):
"Test Zuul trigger event: parent-change-enqueued"
# This test has the following three changes:
# B1 -> A; B2 -> A
# When A is enqueued in the gate, B1 and B2 should both attempt
# to be enqueued in both pipelines. B1 should end up in check
# and B2 in gate because of differing pipeline requirements.
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
msg = "Depends-On: https://github.com/org/project/pull/%s" % A.number
B1 = self.fake_github.openFakePullRequest(
'org/project', 'master', 'B1', body=msg)
B2 = self.fake_github.openFakePullRequest(
'org/project', 'master', 'B2', body=msg)
A.addReview('derp', 'APPROVED')
B1.addReview('derp', 'APPROVED')
B2.addReview('derp', 'APPROVED')
A.addLabel('for-gate') # required by gate
A.addLabel('for-check') # required by check
B1.addLabel('for-check') # should go to check
B2.addLabel('for-gate') # should go to gate
# In this case we have two installations
# 1: org/common-config, org/project (used by tenant-one and tenant-two)
# 2: org2/project (only used by tenant-two)
        # In order to track accesses to the installations, enable
        # client recording in the fake github.
self.fake_github.record_clients = True
self.fake_github.emitEvent(A.getReviewAddedEvent('approved'))
# Jobs are being held in build to make sure that 3,1 has time
# to enqueue behind 1,1 so that the test is more
# deterministic.
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.history), 3)
for job in self.history:
if job.changes == '1,{}'.format(A.head_sha):
self.assertEqual(job.name, 'project-gate')
elif job.changes == '1,{} 2,{}'.format(A.head_sha, B1.head_sha):
self.assertEqual(job.name, 'project-check')
elif job.changes == '1,{} 3,{}'.format(A.head_sha, B2.head_sha):
self.assertEqual(job.name, 'project-gate')
else:
raise Exception("Unknown job")
        # Now directly enqueue a change into the check pipeline. Since
        # no pipeline reacts to parent-change-enqueued events
        # originating from check, no parent-change-enqueued event is
        # expected.
self.waitUntilSettled()
_add_trigger_event = self.scheds.first.sched.addTriggerEvent
def addTriggerEvent(driver_name, event):
self.assertNotIsInstance(event, ZuulTriggerEvent)
_add_trigger_event(driver_name, event)
with mock.patch.object(
self.scheds.first.sched, "addTriggerEvent", addTriggerEvent
):
C = self.fake_github.openFakePullRequest(
'org/project', 'master', 'C'
)
C.addLabel('for-check') # should go to check
msg = "Depends-On: https://github.com/org/project1/pull/{}".format(
C.number
)
D = self.fake_github.openFakePullRequest(
'org/project', 'master', 'D', body=msg)
D.addLabel('for-check') # should go to check
self.fake_github.emitEvent(C.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(len(self.history), 4)
        # After recording was enabled, the installation containing
        # org2/project should not have been contacted.
gh_manager = self.fake_github._github_client_manager
inst_id_to_check = gh_manager.installation_map['org2/project']
inst_clients = [x for x in gh_manager.recorded_clients
if x._inst_id == inst_id_to_check]
self.assertEqual(len(inst_clients), 0)
class TestZuulTriggerProjectChangeMerged(ZuulTestCase):
tenant_config_file = 'config/zuultrigger/project-change-merged/main.yaml'
def test_zuul_trigger_project_change_merged(self):
# This test has the following three changes:
# A, B, C; B conflicts with A, but C does not.
# When A is merged, B and C should be checked for conflicts,
# and B should receive a -1.
        # D and E are used to repeat the test in the second part, but
        # are defined here so that they end up in the trigger cache.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project', 'master', 'D')
E = self.fake_gerrit.addFakeChange('org/project', 'master', 'E')
A.addPatchset({'conflict': 'foo'})
B.addPatchset({'conflict': 'bar'})
D.addPatchset({'conflict2': 'foo'})
E.addPatchset({'conflict2': 'bar'})
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project-gate')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 1)
self.assertEqual(C.reported, 0)
self.assertEqual(D.reported, 0)
self.assertEqual(E.reported, 0)
self.assertIn(
"Merge Failed.\n\nThis change or one of its cross-repo "
"dependencies was unable to be automatically merged with the "
"current state of its repository. Please rebase the change and "
"upload a new patchset.",
B.messages[0])
self.assertIn(
'Error merging gerrit/org/project for 2,2',
B.messages[0])
self.assertTrue("project:{org/project} status:open" in
self.fake_gerrit.queries)
# Ensure the gerrit driver has updated its cache after the
# previous comments were left:
self.fake_gerrit.addEvent(A.getChangeCommentEvent(2))
self.fake_gerrit.addEvent(B.getChangeCommentEvent(2))
self.waitUntilSettled()
# Reconfigure and run the test again. This is a regression
# check to make sure that we don't end up with a stale trigger
# cache that has references to projects from the old
# configuration.
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
D.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, 'project-gate')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 1)
self.assertEqual(C.reported, 0)
self.assertEqual(D.reported, 2)
self.assertEqual(E.reported, 1)
self.assertIn(
"Merge Failed.\n\nThis change or one of its cross-repo "
"dependencies was unable to be automatically merged with the "
"current state of its repository. Please rebase the change and "
"upload a new patchset.",
E.messages[0])
self.assertIn(
'Error merging gerrit/org/project for 5,2',
E.messages[0])
self.assertIn("project:{org/project} status:open",
self.fake_gerrit.queries)
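# --- Illustrative sketch (hypothetical, not the actual fixture in
# config/zuultrigger/parent-change-enqueued/): a pipeline reacting to
# the zuul driver's parent-change-enqueued event is configured roughly
# like this, where "pipeline" names the pipeline the parent change was
# enqueued into:
#
#   - pipeline:
#       name: check
#       manager: independent
#       trigger:
#         zuul:
#           - event: parent-change-enqueued
#             pipeline: gate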
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_zuultrigger.py
|
test_zuultrigger.py
|
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from tests.base import ZuulTestCase
class BaseTestPrometheus(ZuulTestCase):
config_file = 'zuul-prometheus.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
def get_path(self, path):
return requests.get(
"http://localhost:%d%s" % (
self.scheds.first.sched.monitoring_server.port,
path))
def get_metrics(self, path=''):
metrics = {}
r = self.get_path(path)
for line in r.text.split('\n'):
if not line or line.startswith("#"):
continue
try:
key, value = line.split()
except ValueError:
continue
metrics[key] = value
return metrics
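    # For reference, the exposition text parsed above looks roughly
    # like the following (values are illustrative):
    #
    #   # HELP process_open_fds Number of open file descriptors.
    #   # TYPE process_open_fds gauge
    #   process_open_fds 24.0
    #
    # Comment lines and any line that does not split into exactly two
    # whitespace-separated fields are skipped.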
class TestPrometheus(BaseTestPrometheus):
def test_prometheus_process_metrics(self):
metrics = self.get_metrics()
self.assertIn("process_resident_memory_bytes", metrics)
self.assertIn("process_open_fds", metrics)
metrics = self.get_metrics('/metrics')
self.assertIn("process_resident_memory_bytes", metrics)
self.assertIn("process_open_fds", metrics)
def test_health(self):
r = self.get_path('/health/live')
self.assertEqual(r.status_code, 200)
r = self.get_path('/health/ready')
self.assertEqual(r.status_code, 200)
r = self.get_path('/health/status')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, 'RUNNING')
r = self.get_path('/dne')
self.assertEqual(r.status_code, 404)
self.scheds.first.sched.component_info.state = \
self.scheds.first.sched.component_info.INITIALIZING
r = self.get_path('/health/live')
self.assertEqual(r.status_code, 200)
r = self.get_path('/health/ready')
self.assertEqual(r.status_code, 503)
r = self.get_path('/health/status')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, 'INITIALIZING')
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_prometheus.py
|
test_prometheus.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import subprocess
from tests.base import ZuulTestCase
from zuul.executor.server import SshAgent
class TestSshAgent(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_ssh_agent(self):
# Need a private key to add
env_copy = dict(os.environ)
        # If DISPLAY or SSH_ASKPASS is set, ssh-add may prompt for a
        # passphrase, surprising interactive test runners
if 'DISPLAY' in env_copy:
del env_copy['DISPLAY']
if 'SSH_ASKPASS' in env_copy:
del env_copy['SSH_ASKPASS']
agent = SshAgent()
agent.start()
env_copy.update(agent.env)
pub_key_file = '{}.pub'.format(self.private_key_file)
pub_key = None
with open(pub_key_file) as pub_key_f:
pub_key = pub_key_f.read().split('== ')[0]
agent.add(self.private_key_file)
keys = agent.list()
self.assertEqual(1, len(keys))
self.assertEqual(keys[0].split('== ')[0], pub_key)
agent.remove(self.private_key_file)
keys = agent.list()
self.assertEqual([], keys)
agent.stop()
# Agent is now dead and thus this should fail
with open('/dev/null') as devnull:
self.assertRaises(subprocess.CalledProcessError,
subprocess.check_call,
['ssh-add', self.private_key_file],
env=env_copy,
stderr=devnull)
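# --- Illustrative usage sketch of the SshAgent API exercised above
# (a sketch; '/path/to/id_rsa' is a hypothetical key path):
#
#   agent = SshAgent()
#   agent.start()                      # spawns ssh-agent, fills agent.env
#   try:
#       agent.add('/path/to/id_rsa')   # load a private key
#       print(agent.list())            # public keys currently loaded
#       agent.remove('/path/to/id_rsa')
#   finally:
#       agent.stop()                   # kill the agent process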
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_ssh_agent.py
|
test_ssh_agent.py
|
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tests.base import iterate_timeout, ZuulTestCase
import zuul.lib.tracing as tracing
from opentelemetry import trace
def attributes_to_dict(attrlist):
ret = {}
for attr in attrlist:
ret[attr.key] = attr.value.string_value
return ret
class TestTracing(ZuulTestCase):
config_file = 'zuul-tracing.conf'
tenant_config_file = "config/single-tenant/main.yaml"
    def _waitForSpans(self, *span_names, timeout=60):
for _ in iterate_timeout(timeout, "requests to arrive"):
test_requests = [
r for r in self.otlp.requests
if r.resource_spans[0].scope_spans[0].spans[0].name
in span_names
]
if len(test_requests) == len(span_names):
return test_requests
def test_tracing_api(self):
tracer = trace.get_tracer("zuul")
# We have a lot of timestamps stored as floats, so make sure
# our root span is a ZuulSpan that can handle that input.
span_info = tracing.startSavedSpan('parent-trace',
start_time=time.time(),
attributes={'startattr': 'bar'},
include_attributes=True)
# Simulate a reconstructed root span
span = tracing.restoreSpan(span_info)
# Within the root span, use the more typical OpenTelemetry
# context manager api.
with trace.use_span(span):
with tracer.start_span('child1-trace') as child1_span:
link = trace.Link(child1_span.context,
attributes={'relationship': 'prev'})
# Make sure that we can manually start and stop a child span,
# and that it is a ZuulSpan as well.
with trace.use_span(span):
child = tracer.start_span('child2-trace', start_time=time.time(),
links=[link])
child.end(end_time=time.time())
# Make sure that we can start a child span from a span
# context and not a full span:
span_context = tracing.getSpanContext(span)
with tracing.startSpanInContext(span_context, 'child3-trace') as child:
child.end(end_time=time.time())
# End our root span manually.
tracing.endSavedSpan(span_info, end_time=time.time(),
attributes={'endattr': 'baz'})
test_requests = self._waitForSpans(
"parent-trace", "child1-trace", "child2-trace", "child3-trace")
req1 = test_requests[0]
self.log.debug("Received:\n%s", req1)
attrs = attributes_to_dict(req1.resource_spans[0].resource.attributes)
self.assertEqual({"service.name": "zuultest"}, attrs)
self.assertEqual("zuul",
req1.resource_spans[0].scope_spans[0].scope.name)
span1 = req1.resource_spans[0].scope_spans[0].spans[0]
self.assertEqual("child1-trace", span1.name)
req2 = test_requests[1]
self.log.debug("Received:\n%s", req2)
span2 = req2.resource_spans[0].scope_spans[0].spans[0]
self.assertEqual("child2-trace", span2.name)
self.assertEqual(span2.links[0].span_id, span1.span_id)
attrs = attributes_to_dict(span2.links[0].attributes)
self.assertEqual({"relationship": "prev"}, attrs)
req3 = test_requests[2]
self.log.debug("Received:\n%s", req3)
span3 = req3.resource_spans[0].scope_spans[0].spans[0]
self.assertEqual("child3-trace", span3.name)
req4 = test_requests[3]
self.log.debug("Received:\n%s", req4)
span4 = req4.resource_spans[0].scope_spans[0].spans[0]
self.assertEqual("parent-trace", span4.name)
attrs = attributes_to_dict(span4.attributes)
self.assertEqual({"startattr": "bar",
"endattr": "baz"}, attrs)
self.assertEqual(span1.trace_id, span4.trace_id)
self.assertEqual(span2.trace_id, span4.trace_id)
self.assertEqual(span3.trace_id, span4.trace_id)
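    # --- Illustrative summary of the saved-span pattern exercised
    # above (a sketch of the flow, not additional assertions): one
    # component starts a span and persists its serialized form, and
    # another component later restores and ends it.
    #
    #   span_info = tracing.startSavedSpan('my-span',
    #                                      start_time=time.time())
    #   ...                              # persist span_info with the work
    #   span = tracing.restoreSpan(span_info)
    #   with trace.use_span(span):
    #       ...                          # children attach to the parent
    #   tracing.endSavedSpan(span_info, end_time=time.time())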
def test_tracing_api_null(self):
tracer = trace.get_tracer("zuul")
# Test that restoring spans and span contexts works with
# null values.
span_info = None
# Simulate a reconstructed root span from a null value
span = tracing.restoreSpan(span_info)
# Within the root span, use the more typical OpenTelemetry
# context manager api.
with trace.use_span(span):
with tracer.start_span('child1-trace') as child1_span:
link = trace.Link(child1_span.context,
attributes={'relationship': 'prev'})
# Make sure that we can manually start and stop a child span,
# and that it is a ZuulSpan as well.
with trace.use_span(span):
child = tracer.start_span('child2-trace', start_time=time.time(),
links=[link])
child.end(end_time=time.time())
# Make sure that we can start a child span from a null span
# context:
span_context = None
with tracing.startSpanInContext(span_context, 'child3-trace') as child:
child.end(end_time=time.time())
# End our root span manually.
span.end(end_time=time.time())
test_requests = self._waitForSpans(
"child1-trace", "child2-trace", "child3-trace")
req1 = test_requests[0]
self.log.debug("Received:\n%s", req1)
attrs = attributes_to_dict(req1.resource_spans[0].resource.attributes)
self.assertEqual({"service.name": "zuultest"}, attrs)
self.assertEqual("zuul",
req1.resource_spans[0].scope_spans[0].scope.name)
span1 = req1.resource_spans[0].scope_spans[0].spans[0]
self.assertEqual("child1-trace", span1.name)
req2 = test_requests[1]
self.log.debug("Received:\n%s", req2)
span2 = req2.resource_spans[0].scope_spans[0].spans[0]
self.assertEqual("child2-trace", span2.name)
self.assertEqual(span2.links[0].span_id, span1.span_id)
attrs = attributes_to_dict(span2.links[0].attributes)
self.assertEqual({"relationship": "prev"}, attrs)
req3 = test_requests[2]
self.log.debug("Received:\n%s", req3)
span3 = req3.resource_spans[0].scope_spans[0].spans[0]
self.assertEqual("child3-trace", span3.name)
self.assertNotEqual(span1.trace_id, span2.trace_id)
self.assertNotEqual(span2.trace_id, span3.trace_id)
self.assertNotEqual(span1.trace_id, span3.trace_id)
def test_tracing(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
for _ in iterate_timeout(60, "request to arrive"):
if len(self.otlp.requests) >= 2:
break
buildset = self.getSpan('BuildSet')
self.log.debug("Received:\n%s", buildset)
item = self.getSpan('QueueItem')
self.log.debug("Received:\n%s", item)
merge_job = self.getSpan('Merge')
self.log.debug("Received:\n%s", merge_job)
node_request = self.getSpan('RequestNodes')
self.log.debug("Received:\n%s", node_request)
build = self.getSpan('Build')
self.log.debug("Received:\n%s", build)
jobexec = self.getSpan('JobExecution')
self.log.debug("Received:\n%s", jobexec)
self.assertEqual(item.trace_id, buildset.trace_id)
self.assertEqual(item.trace_id, node_request.trace_id)
self.assertEqual(item.trace_id, build.trace_id)
self.assertNotEqual(item.span_id, jobexec.span_id)
self.assertTrue(buildset.start_time_unix_nano >=
item.start_time_unix_nano)
self.assertTrue(buildset.end_time_unix_nano <=
item.end_time_unix_nano)
self.assertTrue(merge_job.start_time_unix_nano >=
buildset.start_time_unix_nano)
self.assertTrue(merge_job.end_time_unix_nano <=
buildset.end_time_unix_nano)
self.assertEqual(jobexec.parent_span_id,
build.span_id)
self.assertEqual(node_request.parent_span_id,
buildset.span_id)
self.assertEqual(build.parent_span_id,
buildset.span_id)
self.assertEqual(merge_job.parent_span_id,
buildset.span_id)
self.assertEqual(buildset.parent_span_id,
item.span_id)
item_attrs = attributes_to_dict(item.attributes)
self.assertTrue(item_attrs['ref_number'] == "1")
self.assertTrue(item_attrs['ref_patchset'] == "1")
self.assertTrue('zuul_event_id' in item_attrs)
def getSpan(self, name):
for req in self.otlp.requests:
span = req.resource_spans[0].scope_spans[0].spans[0]
if span.name == name:
return span
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_tracing.py
|
test_tracing.py
|
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import ZuulTestCase, iterate_timeout
class TestTimerTwoTenants(ZuulTestCase):
tenant_config_file = 'config/timer-two-tenant/main.yaml'
def test_timer_two_tenants(self):
# The pipeline triggers every second. Wait until we have 4
# jobs (2 from each tenant).
for _ in iterate_timeout(60, 'jobs started'):
if len(self.history) >= 4:
break
tenant_one_projects = set()
tenant_two_projects = set()
for h in self.history:
            if h.parameters['zuul']['tenant'] == 'tenant-one':
                tenant_one_projects.add(h.parameters['items'][0]['project'])
            if h.parameters['zuul']['tenant'] == 'tenant-two':
                tenant_two_projects.add(h.parameters['items'][0]['project'])
# Verify that the right job ran in the right tenant
self.assertEqual(tenant_one_projects, {'org/project1'})
self.assertEqual(tenant_two_projects, {'org/project2'})
# Stop running timer jobs so the assertions don't race.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
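# --- Illustrative sketch (hypothetical; the real fixture lives in
# config/timer-two-tenant/): a timer-triggered pipeline uses a
# cron-like expression, and a six-field form with a seconds field is
# what makes the "triggers every second" behavior above possible:
#
#   - pipeline:
#       name: periodic
#       manager: independent
#       trigger:
#         timer:
#           - time: '* * * * * *'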
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_timer_driver.py
|
test_timer_driver.py
|
# Copyright (c) 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tests.base import ZuulGithubAppTestCase, ZuulTestCase, simple_layout
class TestGithubRequirements(ZuulTestCase):
"""Test pipeline and trigger requirements"""
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_status(self):
"Test pipeline requirement: status"
project = 'org/project1'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No status from zuul so should not be enqueued
self.assertEqual(len(self.history), 0)
# An error status should not cause it to be enqueued
self.fake_github.setCommitStatus(project, A.head_sha, 'error',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A success status goes in
self.fake_github.setCommitStatus(project, A.head_sha, 'success',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project1-pipeline')
# Trigger regex matched status
self.fake_github.emitEvent(A.getCommentAddedEvent('test regex'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, 'project1-pipeline')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_trigger_require_status(self):
"Test trigger requirement: status"
project = 'org/project1'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('trigger me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No status from zuul so should not be enqueued
self.assertEqual(len(self.history), 0)
# An error status should not cause it to be enqueued
self.fake_github.setCommitStatus(project, A.head_sha, 'error',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A success status goes in
self.fake_github.setCommitStatus(project, A.head_sha, 'success',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project1-pipeline')
self.fake_github.emitEvent(A.getCommentAddedEvent('trigger regex'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, 'project1-pipeline')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_trigger_on_status(self):
"Test trigger on: status"
project = 'org/project2'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# Create second PR which contains the head of A in its history. Zuul
# should not get disturbed by the existence of this one.
self.fake_github.openFakePullRequest(
project, 'master', 'A', base_sha=A.head_sha)
# An error status should not cause it to be enqueued
self.fake_github.setCommitStatus(project, A.head_sha, 'error',
context='tenant-one/check')
self.fake_github.emitEvent(A.getCommitStatusEvent('tenant-one/check',
state='error'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A success status from unknown user should not cause it to be
# enqueued
self.fake_github.setCommitStatus(project, A.head_sha, 'success',
context='tenant-one/check',
user='foo')
self.fake_github.emitEvent(A.getCommitStatusEvent('tenant-one/check',
state='success',
user='foo'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A success status from zuul goes in
self.fake_github.setCommitStatus(project, A.head_sha, 'success',
context='tenant-one/check')
self.fake_github.emitEvent(A.getCommitStatusEvent('tenant-one/check'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project2-trigger')
# An error status for a different context should not cause it to be
# enqueued
self.fake_github.setCommitStatus(project, A.head_sha, 'error',
context='tenant-one/gate')
self.fake_github.emitEvent(A.getCommitStatusEvent('tenant-one/gate',
state='error'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# A success status with a regex match goes in
self.fake_github.emitEvent(A.getCommitStatusEvent('cooltest',
user='other-ci'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, 'project2-trigger')
@simple_layout("layouts/requirements-github.yaml", driver="github")
def test_trigger_on_check_run(self):
"""Test trigger on: check_run"""
project = "org/project15"
A = self.fake_github.openFakePullRequest(project, "master", "A")
# A check_run request with a different name should not cause it to be
# enqueued.
self.fake_github.emitEvent(
A.getCheckRunRequestedEvent("tenant-one/different-check")
)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A check_run request with the correct name, but for a different app
# should not cause it to be enqueued.
self.fake_github.emitEvent(
A.getCheckRunRequestedEvent("tenant-one/check", app="other-ci")
)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A check_run request with the correct name for the correct app should
# cause it to be enqueued.
self.fake_github.emitEvent(
A.getCheckRunRequestedEvent("tenant-one/check"))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, "project15-check-run")
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_review_username(self):
"Test pipeline requirement: review username"
A = self.fake_github.openFakePullRequest('org/project3', 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No approval from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
# Add an approved review from derp
A.addReview('derp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project3-reviewusername')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_review_state(self):
"Test pipeline requirement: review state"
A = self.fake_github.openFakePullRequest('org/project4', 'master', 'A')
# Add derp to writers
A.writers.extend(('derp', 'werp'))
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No positive review from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
# A negative review from derp should not cause it to be enqueued
A.addReview('derp', 'CHANGES_REQUESTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A negative review from werp should not cause it to be enqueued
A.addReview('werp', 'CHANGES_REQUESTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
        # A positive review from nobody should not cause it to be enqueued
A.addReview('nobody', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review from derp should still be blocked by the
# negative review from werp
A.addReview('derp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review from werp should cause it to be enqueued
A.addReview('werp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project4-reviewreq')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_review_user_state(self):
"Test pipeline requirement: review state from user"
A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
# Add derp and herp to writers
A.writers.extend(('derp', 'herp'))
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No positive review from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
# A negative review from derp should not cause it to be enqueued
A.addReview('derp', 'CHANGES_REQUESTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
        # A positive review from nobody should not cause it to be enqueued
A.addReview('nobody', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review from herp (a writer) should not cause it to be
# enqueued
A.addReview('herp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review from derp should cause it to be enqueued
A.addReview('derp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project5-reviewuserstate')
# TODO: Implement reject on approval username/state
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_review_latest_user_state(self):
"Test pipeline requirement: review state from user"
A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
# Add derp and herp to writers
A.writers.extend(('derp', 'herp'))
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No positive review from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
# The first negative review from derp should not cause it to be
# enqueued
A.addReview('derp', 'CHANGES_REQUESTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review from derp should cause it to be enqueued
A.addReview('derp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project5-reviewuserstate')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_review_write_perms(self):
"Test pipeline requirement: review from user with write"
A = self.fake_github.openFakePullRequest('org/project4', 'master', 'A')
# Add herp to admins
A.admins.append('herp')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No positive review from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
        # The first review is from a reader, so the change should not
        # be enqueued
A.addReview('derp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review from herp should cause it to be enqueued
A.addReview('herp', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project4-reviewreq')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_review_comment_masked(self):
"Test pipeline requirement: review comments on top of votes"
A = self.fake_github.openFakePullRequest('org/project5', 'master', 'A')
# Add derp to writers
A.writers.append('derp')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No positive review from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
# The first negative review from derp should not cause it to be
# enqueued
A.addReview('derp', 'CHANGES_REQUESTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review is required, so provide it
A.addReview('derp', 'APPROVED')
# Add a comment review on top to make sure we can still enqueue
A.addReview('derp', 'COMMENTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project5-reviewuserstate')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_require_review_newer_than(self):
A = self.fake_github.openFakePullRequest('org/project6', 'master', 'A')
# Add derp and herp to writers
A.writers.extend(('derp', 'herp'))
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No positive review from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
# Add a too-old positive review, should not be enqueued
submitted_at = time.time() - 72 * 60 * 60
A.addReview('derp', 'APPROVED',
submitted_at)
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Add a recent positive review
submitted_at = time.time() - 12 * 60 * 60
A.addReview('derp', 'APPROVED', submitted_at)
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project6-newerthan')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_require_review_older_than(self):
A = self.fake_github.openFakePullRequest('org/project7', 'master', 'A')
# Add derp and herp to writers
A.writers.extend(('derp', 'herp'))
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No positive review from derp so should not be enqueued
self.assertEqual(len(self.history), 0)
# Add a too-new positive, should not be enqueued
submitted_at = time.time() - 12 * 60 * 60
A.addReview('derp', 'APPROVED',
submitted_at)
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Add an old enough positive, should enqueue
submitted_at = time.time() - 72 * 60 * 60
A.addReview('herp', 'APPROVED', submitted_at)
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project7-olderthan')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_require_open(self):
A = self.fake_github.openFakePullRequest('org/project8', 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# PR is open, we should have enqueued
self.assertEqual(len(self.history), 1)
# close the PR and try again
A.state = 'closed'
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# PR is closed, should not trigger
self.assertEqual(len(self.history), 1)
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_reject_open(self):
A = self.fake_github.openFakePullRequest('org/project13', 'master',
'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# PR is open, we should not have enqueued
self.assertEqual(len(self.history), 0)
# close the PR and try again
A.state = 'closed'
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# PR is closed, should trigger
self.assertEqual(len(self.history), 1)
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_require_current(self):
A = self.fake_github.openFakePullRequest('org/project9', 'master',
'A')
# A sync event that we will keep submitting to trigger
sync = A.getPullRequestSynchronizeEvent()
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# PR head is current should enqueue
self.assertEqual(len(self.history), 1)
        # Add a commit to the PR and re-issue the original sync event
A.addCommit()
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# Event hash is not current, should not trigger
self.assertEqual(len(self.history), 1)
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_reject_current(self):
A = self.fake_github.openFakePullRequest('org/project14', 'master',
'A')
# A sync event that we will keep submitting to trigger
sync = A.getPullRequestSynchronizeEvent()
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# PR head is current, should not enqueue
self.assertEqual(len(self.history), 0)
        # Add a commit to the PR and re-issue the original sync event
A.addCommit()
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# Event hash is not current, should trigger
self.assertEqual(len(self.history), 1)
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_require_draft(self):
A = self.fake_github.openFakePullRequest('org/project17', 'master',
'A', draft=True)
# A sync event that we will keep submitting to trigger
sync = A.getPullRequestSynchronizeEvent()
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# PR is a draft, should enqueue
self.assertEqual(len(self.history), 1)
# Make the PR not a draft
A.draft = False
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# PR is not a draft, should not enqueue
self.assertEqual(len(self.history), 1)
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_reject_draft(self):
A = self.fake_github.openFakePullRequest('org/project18', 'master',
'A', draft=True)
# A sync event that we will keep submitting to trigger
sync = A.getPullRequestSynchronizeEvent()
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# PR is a draft, should not enqueue
self.assertEqual(len(self.history), 0)
# Make the PR not a draft
A.draft = False
self.fake_github.emitEvent(sync)
self.waitUntilSettled()
# PR is not a draft, should enqueue
self.assertEqual(len(self.history), 1)
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_require_label(self):
"Test pipeline requirement: label"
A = self.fake_github.openFakePullRequest('org/project10', 'master',
'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No label so should not be enqueued
self.assertEqual(len(self.history), 0)
# A derp label should not cause it to be enqueued
A.addLabel('derp')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# An approved label goes in
A.addLabel('approved')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project10-label')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_reject_label(self):
"Test pipeline reject: label"
A = self.fake_github.openFakePullRequest('org/project11', 'master',
'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No label so should not be enqueued
self.assertEqual(len(self.history), 0)
# A do-not-merge label should not cause it to be enqueued
A.addLabel('do-not-merge')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# An approved label should still not enqueue due to d-n-m
A.addLabel('approved')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Remove do-not-merge should enqueue
A.removeLabel('do-not-merge')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project11-label')
@simple_layout('layouts/requirements-github.yaml', driver='github')
def test_pipeline_reject_status(self):
"Test pipeline reject: status"
project = 'org/project12'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# Set rejected error status
self.fake_github.setCommitStatus(project, A.head_sha, 'error',
context='tenant-one/check')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent('test me')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# Status should cause it to be rejected
self.assertEqual(len(self.history), 0)
# Test that also the regex matched pipeline doesn't trigger
self.fake_github.emitEvent(A.getCommentAddedEvent('test regex'))
self.waitUntilSettled()
# Status should cause it to be rejected
self.assertEqual(len(self.history), 0)
self.fake_github.setCommitStatus(project, A.head_sha, 'success',
context='tenant-one/check')
# Now that status is not error, it should be enqueued
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'project12-status')
# Test that also the regex matched pipeline triggers now
self.fake_github.emitEvent(A.getCommentAddedEvent('test regex'))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, 'project12-status')
class TestGithubAppRequirements(ZuulGithubAppTestCase):
"""Test pipeline and trigger requirements with app authentication"""
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
@simple_layout("layouts/requirements-github.yaml", driver="github")
def test_pipeline_require_check_run(self):
"Test pipeline requirement: status (reported via a check run)"
project = "org/project16"
github = self.fake_github.getGithubClient()
repo = github.repo_from_project(project)
A = self.fake_github.openFakePullRequest(project, "master", "A")
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent("trigger me")
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
# No status from zuul, so nothing should be enqueued
self.assertEqual(len(self.history), 0)
# An error check run should also not cause it to be enqueued
repo.create_check_run(
A.head_sha,
"tenant-one/check",
conclusion="failure",
app="check-run",
)
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A success check run goes in, ready to be enqueued
repo.create_check_run(
A.head_sha,
"tenant-one/check",
conclusion="success",
app="check-run",
)
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
class TestGithubTriggerRequirements(ZuulTestCase):
"""Test pipeline and trigger requirements"""
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
@simple_layout('layouts/github-trigger-requirements.yaml', driver='github')
def test_require_status(self):
# Test trigger require-status
jobname = 'require-status'
project = 'org/project'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent(f'test {jobname}')
# No status from zuul so should not be enqueued
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# An error status should not cause it to be enqueued
self.fake_github.setCommitStatus(project, A.head_sha, 'error',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A success status goes in
self.fake_github.setCommitStatus(project, A.head_sha, 'success',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/github-trigger-requirements.yaml', driver='github')
def test_reject_status(self):
# Test trigger reject-status
jobname = 'reject-status'
project = 'org/project'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent(f'test {jobname}')
# No status from zuul so should be enqueued
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
# A failure status should not cause it to be enqueued
self.fake_github.setCommitStatus(project, A.head_sha, 'failure',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# A success status goes in
self.fake_github.setCommitStatus(project, A.head_sha, 'success',
context='tenant-one/check')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, jobname)
@simple_layout('layouts/github-trigger-requirements.yaml', driver='github')
def test_require_review(self):
# Test trigger require-review
jobname = 'require-review'
project = 'org/project'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
A.writers.extend(('maintainer',))
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent(f'test {jobname}')
# No review so should not be enqueued
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
        # A changes-requested review should not cause it to be enqueued
A.addReview('maintainer', 'CHANGES_REQUESTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# A positive review goes in
A.addReview('maintainer', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/github-trigger-requirements.yaml', driver='github')
def test_reject_review(self):
# Test trigger reject-review
jobname = 'reject-review'
project = 'org/project'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
A.writers.extend(('maintainer',))
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent(f'test {jobname}')
# No review so should be enqueued
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
        # A changes-requested review should not cause it to be enqueued
A.addReview('maintainer', 'CHANGES_REQUESTED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# A positive review goes in
A.addReview('maintainer', 'APPROVED')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, jobname)
@simple_layout('layouts/github-trigger-requirements.yaml', driver='github')
def test_require_label(self):
# Test trigger require-label
jobname = 'require-label'
project = 'org/project'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent(f'test {jobname}')
# No label so should not be enqueued
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
        # A random label should not cause it to be enqueued
A.addLabel('foobar')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# An approved label goes in
A.addLabel('approved')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
@simple_layout('layouts/github-trigger-requirements.yaml', driver='github')
def test_reject_label(self):
# Test trigger reject-label
jobname = 'reject-label'
project = 'org/project'
A = self.fake_github.openFakePullRequest(project, 'master', 'A')
# A comment event that we will keep submitting to trigger
comment = A.getCommentAddedEvent(f'test {jobname}')
# No label so should be enqueued
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, jobname)
# A rejected label should not cause it to be enqueued
A.addLabel('rejected')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
# Any other label, it goes in
A.removeLabel('rejected')
A.addLabel('okay')
self.fake_github.emitEvent(comment)
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, jobname)
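# --- Illustrative sketch (hypothetical; the real layouts live in
# layouts/requirements-github.yaml and
# layouts/github-trigger-requirements.yaml): pipeline requirements of
# the kinds exercised above are expressed roughly like this in a Zuul
# layout (exact attribute spellings may differ from the fixtures):
#
#   - pipeline:
#       name: check
#       manager: independent
#       require:
#         github:
#           status: 'zuul:tenant-one/check:success'
#           label: approved
#           review:
#             - type: approved
#               permission: write
#       reject:
#         github:
#           label: do-not-merge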
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_github_requirements.py
|
test_github_requirements.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import os
import subprocess
import tempfile
from zuul.lib import encryption
from tests.base import BaseTestCase
class TestEncryption(BaseTestCase):
def setUp(self):
super(TestEncryption, self).setUp()
self.private, self.public = encryption.generate_rsa_keypair()
        # Because we set delete to False when using NamedTemporaryFile
        # below, we need to stick our usage of temporary files in the
        # NestedTempfile fixture, ensuring everything gets cleaned up
        # when it is done.
self.useFixture(fixtures.NestedTempfile())
def test_serialization(self):
"Verify key serialization"
pem_private = encryption.serialize_rsa_private_key(self.private)
private2, public2 = encryption.deserialize_rsa_keypair(pem_private)
# cryptography public / private key objects don't implement
# equality testing, so we make sure they have the same numbers.
self.assertEqual(self.private.private_numbers(),
private2.private_numbers())
self.assertEqual(self.public.public_numbers(),
public2.public_numbers())
def test_pkcs1_oaep(self):
"Verify encryption and decryption"
orig_plaintext = b"some text to encrypt"
ciphertext = encryption.encrypt_pkcs1_oaep(orig_plaintext, self.public)
plaintext = encryption.decrypt_pkcs1_oaep(ciphertext, self.private)
self.assertEqual(orig_plaintext, plaintext)
def test_openssl_pkcs1_oaep(self):
"Verify that we can decrypt something encrypted with OpenSSL"
orig_plaintext = b"some text to encrypt"
pem_public = encryption.serialize_rsa_public_key(self.public)
public_file = tempfile.NamedTemporaryFile(delete=False)
try:
public_file.write(pem_public)
public_file.close()
p = subprocess.Popen(['openssl', 'rsautl', '-encrypt',
'-oaep', '-pubin', '-inkey',
public_file.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate(orig_plaintext)
ciphertext = stdout
finally:
os.unlink(public_file.name)
plaintext = encryption.decrypt_pkcs1_oaep(ciphertext, self.private)
self.assertEqual(orig_plaintext, plaintext)
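# --- Illustrative usage sketch of the helpers exercised above (a
# sketch; see zuul.lib.encryption for the real signatures):
#
#   private, public = encryption.generate_rsa_keypair()
#   ciphertext = encryption.encrypt_pkcs1_oaep(b'secret', public)
#   assert encryption.decrypt_pkcs1_oaep(ciphertext, private) == b'secret'
#
# Note: 'openssl rsautl' is deprecated in OpenSSL 3.x; on such systems
# the equivalent command (an assumption about the local OpenSSL, not a
# requirement of this test) is:
#
#   openssl pkeyutl -encrypt -pubin -inkey <pubkey.pem> \
#       -pkeyopt rsa_padding_mode:oaep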
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_encryption.py
|
test_encryption.py
|
# Copyright 2021 BMW Group
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
import threading
import testtools
from zuul import model
from zuul.driver import Driver, TriggerInterface
from zuul.lib.connections import ConnectionRegistry
from zuul.zk import ZooKeeperClient, event_queues, sharding
from tests.base import BaseTestCase, iterate_timeout
class EventQueueBaseTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.setupZK()
self.zk_client = ZooKeeperClient(
self.zk_chroot_fixture.zk_hosts,
tls_cert=self.zk_chroot_fixture.zookeeper_cert,
tls_key=self.zk_chroot_fixture.zookeeper_key,
tls_ca=self.zk_chroot_fixture.zookeeper_ca
)
self.addCleanup(self.zk_client.disconnect)
self.zk_client.connect()
self.connections = ConnectionRegistry()
self.addCleanup(self.connections.stop)
class DummyEvent(model.AbstractEvent):
def toDict(self):
return {}
def updateFromDict(self):
pass
@classmethod
def fromDict(cls, d):
return cls()
class DummyEventQueue(event_queues.ZooKeeperEventQueue):
def put(self, event):
self._put(event.toDict())
def __iter__(self):
for data, ack_ref, _ in self._iterEvents():
event = DummyEvent.fromDict(data)
event.ack_ref = ack_ref
yield event
class TestEventQueue(EventQueueBaseTestCase):
def setUp(self):
super().setUp()
self.queue = DummyEventQueue(self.zk_client, "root")
def test_missing_ack_ref(self):
# Every event from a ZK event queue should have an ack_ref
# attached to it when it is deserialized; ensure that an error
# is raised if we try to ack an event without one.
with testtools.ExpectedException(RuntimeError):
self.queue.ack(DummyEvent())
def test_double_ack(self):
# Test that if we ack an event twice, an exception isn't
# raised.
self.queue.put(DummyEvent())
self.assertEqual(len(self.queue), 1)
event = next(iter(self.queue))
self.queue.ack(event)
self.assertEqual(len(self.queue), 0)
# Should not raise an exception
self.queue.ack(event)
def test_invalid_json_ignored(self):
# Test that invalid json is automatically removed.
event_path = self.queue._put({})
self.zk_client.client.set(event_path, b"{ invalid")
self.assertEqual(len(self.queue), 1)
self.assertEqual(list(self.queue._iterEvents()), [])
self.assertEqual(len(self.queue), 0)
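    # --- Illustrative lifecycle of the ZooKeeper-backed queues under
    # test (a sketch; producer and consumer may be separate processes,
    # and handle() is a hypothetical consumer):
    #
    #   queue.put(event)        # serialize the event into a ZK node
    #   for event in queue:     # deserialize; attaches event.ack_ref
    #       handle(event)
    #       queue.ack(event)    # delete the ZK node; acking twice is safe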
class DummyTriggerEvent(model.TriggerEvent):
pass
class DummyDriver(Driver, TriggerInterface):
name = driver_name = "dummy"
def getTrigger(self, connection, config=None):
pass
def getTriggerSchema(self):
pass
def getTriggerEventClass(self):
return DummyTriggerEvent
class TestTriggerEventQueue(EventQueueBaseTestCase):
def setUp(self):
super().setUp()
self.driver = DummyDriver()
self.connections.registerDriver(self.driver)
def test_sharded_tenant_trigger_events(self):
# Test enqueue/dequeue of the tenant trigger event queue.
queue = event_queues.TenantTriggerEventQueue(
self.zk_client, self.connections, "tenant"
)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
event = DummyTriggerEvent()
data = {'test': ''.join(
random.choice(string.ascii_letters + string.digits)
for x in range(sharding.NODE_BYTE_SIZE_LIMIT * 2))}
event.data = data
queue.put(self.driver.driver_name, event)
queue.put(self.driver.driver_name, event)
self.assertEqual(len(queue), 2)
self.assertTrue(queue.hasEvents())
processed = 0
for event in queue:
self.assertIsInstance(event, DummyTriggerEvent)
processed += 1
self.assertEqual(processed, 2)
self.assertEqual(len(queue), 2)
self.assertTrue(queue.hasEvents())
acked = 0
for event in queue:
queue.ack(event)
self.assertEqual(event.data, data)
acked += 1
self.assertEqual(acked, 2)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
def test_lost_sharded_tenant_trigger_events(self):
# Test cleanup when we write out side-channel data but not an
# event.
queue = event_queues.TenantTriggerEventQueue(
self.zk_client, self.connections, "tenant"
)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
event = DummyTriggerEvent()
data = {'test': ''.join(
random.choice(string.ascii_letters + string.digits)
for x in range(sharding.NODE_BYTE_SIZE_LIMIT * 2))}
event.data = data
queue.put(self.driver.driver_name, event)
queue.put(self.driver.driver_name, event)
self.assertEqual(len(queue), 2)
self.assertTrue(queue.hasEvents())
# Delete the first event (but not its sharded data)
events = list(queue)
self.zk_client.client.delete(events[0].ack_ref.path)
# There should still be 2 data nodes
self.assertEqual(len(
self.zk_client.client.get_children(queue.data_root)), 2)
# Clean up lost data
queue.cleanup(age=0)
# There should only be one data node remaining
self.assertEqual(len(
self.zk_client.client.get_children(queue.data_root)), 1)
# Ack the second event
queue.ack(events[1])
# Assert there are no side channel data nodes
self.assertEqual(len(
self.zk_client.client.get_children(queue.data_root)), 0)
def test_tenant_trigger_events(self):
# Test enqueue/dequeue of the tenant trigger event queue.
queue = event_queues.TenantTriggerEventQueue(
self.zk_client, self.connections, "tenant"
)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
event = DummyTriggerEvent()
queue.put(self.driver.driver_name, event)
queue.put(self.driver.driver_name, event)
self.assertEqual(len(queue), 2)
self.assertTrue(queue.hasEvents())
processed = 0
for event in queue:
self.assertIsInstance(event, DummyTriggerEvent)
processed += 1
self.assertEqual(processed, 2)
self.assertEqual(len(queue), 2)
self.assertTrue(queue.hasEvents())
acked = 0
for event in queue:
queue.ack(event)
acked += 1
self.assertEqual(acked, 2)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
def test_pipeline_trigger_events(self):
# Test enqueue/dequeue of pipeline-specific trigger event
# queues.
registry = event_queues.PipelineTriggerEventQueue.createRegistry(
self.zk_client, self.connections
)
queue = registry["tenant"]["pipeline"]
self.assertIsInstance(queue, event_queues.TriggerEventQueue)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
event = DummyTriggerEvent()
queue.put(self.driver.driver_name, event)
self.assertEqual(len(queue), 1)
self.assertTrue(queue.hasEvents())
other_queue = registry["other_tenant"]["pipeline"]
self.assertEqual(len(other_queue), 0)
self.assertFalse(other_queue.hasEvents())
acked = 0
for event in queue:
self.assertIsInstance(event, DummyTriggerEvent)
queue.ack(event)
acked += 1
self.assertEqual(acked, 1)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
class TestManagementEventQueue(EventQueueBaseTestCase):
def test_management_events(self):
# Test enqueue/dequeue of the tenant management event queue.
queue = event_queues.TenantManagementEventQueue(
self.zk_client, "tenant")
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
event = model.ReconfigureEvent()
result_future = queue.put(event, needs_result=False)
self.assertIsNone(result_future)
result_future = queue.put(event)
self.assertIsNotNone(result_future)
self.assertEqual(len(queue), 2)
self.assertTrue(queue.hasEvents())
self.assertFalse(result_future.wait(0.1))
acked = 0
for event in queue:
self.assertIsInstance(event, model.ReconfigureEvent)
queue.ack(event)
acked += 1
self.assertEqual(acked, 2)
self.assertTrue(result_future.wait(5))
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
def test_management_event_error(self):
# Test that management event errors are reported.
queue = event_queues.TenantManagementEventQueue(
self.zk_client, "tenant")
event = model.ReconfigureEvent()
result_future = queue.put(event)
acked = 0
for event in queue:
event.traceback = "hello traceback"
queue.ack(event)
acked += 1
self.assertEqual(acked, 1)
with testtools.ExpectedException(RuntimeError, msg="hello traceback"):
self.assertFalse(result_future.wait(5))
def test_event_merge(self):
# Test that similar management events (eg, reconfiguration of
# two projects) can be merged.
queue = event_queues.TenantManagementEventQueue(
self.zk_client, "tenant")
event = model.TenantReconfigureEvent("tenant", "project", "master")
queue.put(event, needs_result=False)
event = model.TenantReconfigureEvent("tenant", "other", "branch")
queue.put(event, needs_result=False)
self.assertEqual(len(queue), 2)
events = list(queue)
self.assertEqual(len(events), 1)
event = events[0]
self.assertEqual(len(event.merged_events), 1)
self.assertEqual(
event.project_branches,
set([("project", "master"), ("other", "branch")])
)
queue.ack(event)
self.assertFalse(queue.hasEvents())
def test_event_ltime(self):
tenant_queue = event_queues.TenantManagementEventQueue(
self.zk_client, "tenant")
registry = event_queues.PipelineManagementEventQueue.createRegistry(
self.zk_client
)
event = model.ReconfigureEvent()
tenant_queue.put(event, needs_result=False)
self.assertTrue(tenant_queue.hasEvents())
pipeline_queue = registry["tenant"]["pipeline"]
self.assertIsInstance(
pipeline_queue, event_queues.ManagementEventQueue
)
processed_events = 0
for event in tenant_queue:
processed_events += 1
event_ltime = event.zuul_event_ltime
self.assertGreater(event_ltime, -1)
# Forward event to pipeline management event queue
pipeline_queue.put(event)
self.assertEqual(processed_events, 1)
self.assertTrue(pipeline_queue.hasEvents())
processed_events = 0
for event in pipeline_queue:
pipeline_queue.ack(event)
processed_events += 1
self.assertEqual(event.zuul_event_ltime, event_ltime)
self.assertEqual(processed_events, 1)
def test_pipeline_management_events(self):
# Test that when a management event is forwarded from the
# tenant to a pipeline-specific queue, it is not
# prematurely acked and the future returns correctly.
tenant_queue = event_queues.TenantManagementEventQueue(
self.zk_client, "tenant")
registry = event_queues.PipelineManagementEventQueue.createRegistry(
self.zk_client
)
event = model.PromoteEvent('tenant', 'check', ['1234,1'])
result_future = tenant_queue.put(event, needs_result=False)
self.assertIsNone(result_future)
result_future = tenant_queue.put(event)
self.assertIsNotNone(result_future)
self.assertEqual(len(tenant_queue), 2)
self.assertTrue(tenant_queue.hasEvents())
pipeline_queue = registry["tenant"]["pipeline"]
self.assertIsInstance(
pipeline_queue, event_queues.ManagementEventQueue
)
acked = 0
for event in tenant_queue:
self.assertIsInstance(event, model.PromoteEvent)
# Forward event to pipeline management event queue
pipeline_queue.put(event)
tenant_queue.ackWithoutResult(event)
acked += 1
self.assertEqual(acked, 2)
# Event was just forwarded and since we expect a result, the
# future should not be completed yet.
self.assertFalse(result_future.wait(0.1))
self.assertEqual(len(tenant_queue), 0)
self.assertFalse(tenant_queue.hasEvents())
self.assertEqual(len(pipeline_queue), 2)
self.assertTrue(pipeline_queue.hasEvents())
acked = 0
for event in pipeline_queue:
self.assertIsInstance(event, model.PromoteEvent)
pipeline_queue.ack(event)
acked += 1
self.assertEqual(acked, 2)
self.assertTrue(result_future.wait(5))
self.assertEqual(len(pipeline_queue), 0)
self.assertFalse(pipeline_queue.hasEvents())
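# Sketch of the forwarding pattern exercised above (shown as
# comments so the suite is unchanged):
#   for event in tenant_queue:
#       pipeline_queue.put(event)
#       tenant_queue.ackWithoutResult(event)
# Only the pipeline consumer's ack() completes the caller's future.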
def test_management_events_client(self):
# Test management events from a second client
queue = event_queues.TenantManagementEventQueue(
self.zk_client, "tenant")
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
# This client will submit a reconfigure event and wait for it.
external_client = ZooKeeperClient(
self.zk_chroot_fixture.zk_hosts,
tls_cert=self.zk_chroot_fixture.zookeeper_cert,
tls_key=self.zk_chroot_fixture.zookeeper_key,
tls_ca=self.zk_chroot_fixture.zookeeper_ca)
self.addCleanup(external_client.disconnect)
external_client.connect()
external_queue = event_queues.TenantManagementEventQueue(
external_client, "tenant")
event = model.ReconfigureEvent()
result_future = external_queue.put(event)
self.assertIsNotNone(result_future)
self.assertEqual(len(queue), 1)
self.assertTrue(queue.hasEvents())
self.assertFalse(result_future.wait(0.1))
acked = 0
for event in queue:
self.assertIsInstance(event, model.ReconfigureEvent)
queue.ack(event)
acked += 1
self.assertEqual(acked, 1)
self.assertTrue(result_future.wait(5))
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
def test_management_events_client_disconnect(self):
# Test management events from a second client which
# disconnects before the event is complete.
queue = event_queues.TenantManagementEventQueue(
self.zk_client, "tenant")
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
# This client will submit a reconfigure event and disconnect
# before it's complete.
external_client = ZooKeeperClient(
self.zk_chroot_fixture.zk_hosts,
tls_cert=self.zk_chroot_fixture.zookeeper_cert,
tls_key=self.zk_chroot_fixture.zookeeper_key,
tls_ca=self.zk_chroot_fixture.zookeeper_ca)
self.addCleanup(external_client.disconnect)
external_client.connect()
external_queue = event_queues.TenantManagementEventQueue(
external_client, "tenant")
# Submit the event
event = model.ReconfigureEvent()
result_future = external_queue.put(event)
self.assertIsNotNone(result_future)
# Make sure the event is in the queue and the result node exists
self.assertEqual(len(queue), 1)
self.assertTrue(queue.hasEvents())
self.assertFalse(result_future.wait(0.1))
self.assertEqual(len(
self.zk_client.client.get_children('/zuul/results/management')), 1)
# Disconnect the originating client
external_client.disconnect()
# Ensure the result node is gone
self.assertEqual(len(
self.zk_client.client.get_children('/zuul/results/management')), 0)
# Process the event
acked = 0
for event in queue:
self.assertIsInstance(event, model.ReconfigureEvent)
queue.ack(event)
acked += 1
# Make sure the event has been processed and we didn't
# re-create the result node.
self.assertEqual(acked, 1)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
self.assertEqual(len(
self.zk_client.client.get_children('/zuul/results/management')), 0)
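# The result node for a pending management event appears to live only
# as long as the submitting client's ZooKeeper session (the disconnect
# test above relies on this), so a later ack must not re-create it.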
class TestResultEventQueue(EventQueueBaseTestCase):
def test_pipeline_result_events(self):
# Test enqueue/dequeue of result events.
registry = event_queues.PipelineResultEventQueue.createRegistry(
self.zk_client
)
queue = registry["tenant"]["pipeline"]
self.assertIsInstance(queue, event_queues.PipelineResultEventQueue)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
event = model.BuildStartedEvent(
"build", "buildset", "job", "build_request_path", {})
queue.put(event)
self.assertEqual(len(queue), 1)
self.assertTrue(queue.hasEvents())
other_queue = registry["other_tenant"]["pipeline"]
self.assertEqual(len(other_queue), 0)
self.assertFalse(other_queue.hasEvents())
acked = 0
for event in queue:
self.assertIsInstance(event, model.BuildStartedEvent)
queue.ack(event)
acked += 1
self.assertEqual(acked, 1)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
class TestEventWatchers(EventQueueBaseTestCase):
def setUp(self):
super().setUp()
self.driver = DummyDriver()
self.connections.registerDriver(self.driver)
def _wait_for_event(self, event):
for _ in iterate_timeout(5, "event set"):
if event.is_set():
break
def test_tenant_event_watcher(self):
event = threading.Event()
event_queues.EventWatcher(self.zk_client, event.set)
management_queue = (
event_queues.TenantManagementEventQueue.createRegistry(
self.zk_client)
)
trigger_queue = event_queues.TenantTriggerEventQueue.createRegistry(
self.zk_client, self.connections
)
self.assertFalse(event.is_set())
management_queue["tenant"].put(model.ReconfigureEvent(),
needs_result=False)
self._wait_for_event(event)
event.clear()
trigger_queue["tenant"].put(self.driver.driver_name,
DummyTriggerEvent())
self._wait_for_event(event)
def test_pipeline_event_watcher(self):
event = threading.Event()
event_queues.EventWatcher(self.zk_client, event.set)
management_queues = (
event_queues.PipelineManagementEventQueue.createRegistry(
self.zk_client
)
)
trigger_queues = event_queues.PipelineTriggerEventQueue.createRegistry(
self.zk_client, self.connections
)
result_queues = event_queues.PipelineResultEventQueue.createRegistry(
self.zk_client
)
self.assertFalse(event.is_set())
management_queues["tenant"]["check"].put(model.ReconfigureEvent())
self._wait_for_event(event)
event.clear()
trigger_queues["tenant"]["gate"].put(self.driver.driver_name,
DummyTriggerEvent())
self._wait_for_event(event)
event.clear()
result_event = model.BuildStartedEvent(
"build", "buildset", "job", "build_request_path", {})
result_queues["other-tenant"]["post"].put(result_event)
self._wait_for_event(event)
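# EventWatcher takes any zero-argument callback; these tests pass
# threading.Event.set so they can block on wake-ups with
# iterate_timeout. A real consumer would presumably pass its own wake
# function instead (an assumption based on the interface shown here).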
def test_pipeline_event_watcher_recreate(self):
event = threading.Event()
watcher = event_queues.EventWatcher(self.zk_client, event.set)
management_queues = (
event_queues.PipelineManagementEventQueue.createRegistry(
self.zk_client
)
)
self.assertFalse(event.is_set())
management_queues["tenant"]["check"].put(model.ReconfigureEvent())
self._wait_for_event(event)
# Wait for the watch to be fully established to avoid race
# conditions, since the event watcher will also ensure that the
# trigger and result event paths exist.
for _ in iterate_timeout(5, "all watches to be established"):
if watcher.watched_pipelines:
break
self.zk_client.client.delete(
event_queues.PIPELINE_NAME_ROOT.format(
tenant="tenant", pipeline="check"), recursive=True)
event.clear()
management_queues["tenant"]["check"].initialize()
management_queues["tenant"]["check"].put(model.ReconfigureEvent())
self._wait_for_event(event)
class TestConnectionEventQueue(EventQueueBaseTestCase):
def test_connection_events(self):
# Test enqueue/dequeue of the connection event queue.
queue = event_queues.ConnectionEventQueue(self.zk_client, "dummy")
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
payload = {"message": "hello world!"}
queue.put(payload)
queue.put(payload)
self.assertEqual(len(queue), 2)
self.assertTrue(queue.hasEvents())
acked = 0
for event in queue:
self.assertIsInstance(event, model.ConnectionEvent)
self.assertEqual(event, payload)
queue.ack(event)
acked += 1
self.assertEqual(acked, 2)
self.assertEqual(len(queue), 0)
self.assertFalse(queue.hasEvents())
def test_event_watch(self):
# Test the registered function is called on new events.
queue = event_queues.ConnectionEventQueue(self.zk_client, "dummy")
event = threading.Event()
queue.registerEventWatch(event.set)
self.assertFalse(event.is_set())
queue.put({"message": "hello world!"})
for _ in iterate_timeout(5, "event set"):
if event.is_set():
break
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_event_queues.py
|
test_event_queues.py
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import yaml
from tests.base import ZuulTestCase, simple_layout
class TestGitDriver(ZuulTestCase):
config_file = 'zuul-git-driver.conf'
tenant_config_file = 'config/git-driver/main.yaml'
def setUp(self):
super(TestGitDriver, self).setUp()
self.git_connection = self.scheds.first.sched.connections\
.getSource('git').connection
def setup_config(self, config_file: str):
config = super(TestGitDriver, self).setup_config(config_file)
config.set('connection git', 'baseurl', self.upstream_root)
return config
def test_basic(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# Check that we have the git source for common-config and the
# gerrit source for the project.
self.assertEqual('git', tenant.config_projects[0].source.name)
self.assertEqual('common-config', tenant.config_projects[0].name)
self.assertEqual('gerrit', tenant.untrusted_projects[0].source.name)
self.assertEqual('org/project', tenant.untrusted_projects[0].name)
# The configuration for this test is accessed via the git
# driver (in common-config), rather than the gerrit driver, so
# if the job runs, it worked.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(A.reported, 1)
def test_config_refreshed(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual(A.reported, 1)
self.assertEqual(self.history[0].name, 'project-test1')
# Update zuul.yaml to force a tenant reconfiguration
path = os.path.join(self.upstream_root, 'common-config', 'zuul.yaml')
with open(path, 'r') as f:
config = yaml.safe_load(f)
change = {
'name': 'org/project',
'check': {
'jobs': [
'project-test2'
]
}
}
config[4]['project'] = change
files = {'zuul.yaml': yaml.dump(config)}
self.addCommitToRepo(
'common-config', 'Change zuul.yaml configuration', files)
# Wait for the tenant reconfiguration to happen
count = self.waitForEvent()
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(A.reported, 1)
# We make sure the new job has run
self.assertEqual(self.history[1].name, 'project-test2')
# Stop the git watcher so we can merge several commits at once.
# We want to verify that config changes are detected for commits
# in the range oldrev..newrev.
self.scheds.first.sched.connections.getSource('git').connection\
.watcher_thread._pause = True
# Add a config change
change = {
'name': 'org/project',
'check': {
'jobs': [
'project-test1'
]
}
}
config[4]['project'] = change
files = {'zuul.yaml': yaml.dump(config)}
self.addCommitToRepo(
'common-config', 'Change zuul.yaml configuration', files)
# Add two other changes
self.addCommitToRepo(
'common-config', 'Adding f1',
{'f1': "Content"})
self.addCommitToRepo(
'common-config', 'Adding f2',
{'f2': "Content"})
# Restart the git watcher
self.scheds.first.sched.connections.getSource('git').connection\
.watcher_thread._pause = False
# Wait for the tenant reconfiguration to happen
self.waitForEvent(count)
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 3)
self.assertEqual(A.reported, 1)
# We make sure the new job has run
self.assertEqual(self.history[2].name, 'project-test1')
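# Conceptual summary of what this test verifies (not the driver's
# actual code): after the pause, the watcher compares its stored ref
# SHAs against the repo and emits an event covering oldrev..newrev,
# so a config change anywhere in that commit range still triggers a
# tenant reconfiguration.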
def ensure_watcher_has_context(self):
# Make sure the watcher has read the initial ref SHAs
delay = 0.1
max_delay = 1
while not self.git_connection.watcher_thread.projects_refs:
time.sleep(delay)
max_delay -= delay
if max_delay <= 0:
raise Exception("Timeout waiting for initial read")
return self.git_connection.watcher_thread._event_count
def waitForEvent(self, initial_count=0):
delay = 0.1
max_delay = 5
while self.git_connection.watcher_thread._event_count <= initial_count:
time.sleep(delay)
max_delay -= delay
if max_delay <= 0:
raise Exception("Timeout waiting for event")
return self.git_connection.watcher_thread._event_count
@simple_layout('layouts/basic-git.yaml', driver='git')
def test_ref_updated_event(self):
count = self.ensure_watcher_has_context()
# Add a commit to trigger a ref-updated event
self.addCommitToRepo(
'org/project', 'A change for ref-updated', {'f1': 'Content'})
# Wait for the git watcher to detect the ref-update event
self.waitForEvent(count)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual('SUCCESS',
self.getJobFromHistory('post-job').result)
@simple_layout('layouts/basic-git.yaml', driver='git')
def test_ref_created(self):
count = self.ensure_watcher_has_context()
# Tag HEAD to trigger a ref-updated event
self.addTagToRepo(
'org/project', 'atag', 'HEAD')
# Wait for the git watcher to detect the ref-update event
self.waitForEvent(count)
self.waitUntilSettled()
self.assertEqual(len(self.history), 1)
self.assertEqual('SUCCESS',
self.getJobFromHistory('tag-job').result)
@simple_layout('layouts/basic-git.yaml', driver='git')
def test_ref_deleted(self):
count = self.ensure_watcher_has_context()
# Delete the default tag 'init' to trigger a ref-updated event
self.delTagFromRepo(
'org/project', 'init')
# Wait for the git watcher to detect the ref-update event
self.waitForEvent(count)
self.waitUntilSettled()
# Make sure no job has run, as ignore-deletes is True by default
self.assertEqual(len(self.history), 0)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_git_driver.py
|
test_git_driver.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import (
ZuulTestCase,
simple_layout,
skipIfMultiScheduler,
)
class TestGerritLegacyCRD(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_crd_gate(self):
"Test cross-repo dependencies"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
AM2 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'AM2')
AM1 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'AM1')
AM2.setMerged()
AM1.setMerged()
BM2 = self.fake_gerrit.addFakeChange('org/project2', 'master', 'BM2')
BM1 = self.fake_gerrit.addFakeChange('org/project2', 'master', 'BM1')
BM2.setMerged()
BM1.setMerged()
# A -> AM1 -> AM2
# B -> BM1 -> BM2
# A Depends-On: B
# M2 is here to make sure it is never queried. If it is, it
# means zuul is walking down the entire history of merged
# changes.
B.setDependsOn(BM1, 1)
BM1.setDependsOn(BM2, 1)
A.setDependsOn(AM1, 1)
AM1.setDependsOn(AM2, 1)
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(AM2.queried, 0)
self.assertEqual(BM2.queried, 0)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 1,1')
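# The Depends-On footer used throughout these tests has the form
# "<subject>\n\nDepends-On: <change-id>\n". A hypothetical builder:
#   def _depends_on(subject, *ids):
#       return subject + '\n\n' + ''.join(
#           'Depends-On: %s\n' % i for i in ids)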
def test_crd_branch(self):
"Test cross-repo dependencies in multiple branches"
self.create_branch('org/project2', 'mp')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C1 = self.fake_gerrit.addFakeChange('org/project2', 'mp', 'C1')
C2 = self.fake_gerrit.addFakeChange('org/project2', 'mp', 'C2',
status='ABANDONED')
C1.data['id'] = B.data['id']
C2.data['id'] = B.data['id']
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C1.addApproval('Code-Review', 2)
# A Depends-On: B+C1
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
C1.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C1.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C1.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 3,1 1,1')
def test_crd_multiline(self):
"Test multiple depends-on lines in commit"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
# A Depends-On: B+C
A.data['commitMessage'] = '%s\n\nDepends-On: %s\nDepends-On: %s\n' % (
A.subject, B.data['id'], C.data['id'])
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
C.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 3,1 1,1')
def test_crd_unshared_gate(self):
"Test cross-repo dependencies in unshared gate queues"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
# A and B do not share a queue; make sure that A is unable to
# enqueue B (and therefore, A is unable to be enqueued).
B.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(len(self.history), 0)
# Enqueue and merge B alone.
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
# Now that B is merged, A should be able to be enqueued and
# merged.
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 3)
def test_crd_gate_reverse(self):
"Test reverse cross-repo dependencies"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.executor_server.hold_jobs_in_build = True
A.addApproval('Approved', 1)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 1,1')
def test_crd_cycle(self):
"Test cross-repo dependency cycles"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
B.addApproval('Approved', 1)
# A -> B -> A (via commit-depends)
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
def test_crd_gate_unknown(self):
"Test unknown projects in dependent pipeline"
self.init_repo("org/unknown", tag='init')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/unknown', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
B.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# Unknown projects cannot share a queue with any other project
# since they have no jobs in common (they have no jobs at all).
# Changes which depend on changes in unknown projects should not
# be processed in a dependent pipeline.
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(len(self.history), 0)
# Simulate change B being gated outside this layout. Set the
# change merged before submitting the event so that when the
# event triggers a gerrit query to update the change, we get
# the information that it was merged.
B.setMerged()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Now that B is merged, A should be able to be enqueued and
# merged.
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 3)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 0)
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
self.executor_server.hold_jobs_in_build = True
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
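# Note: the "changes" strings asserted in these tests are
# space-separated "<change-number>,<patchset>" entries in the order
# the changes are applied, so '2,1 1,1' means the dependency (change
# 2, patchset 1) is prepared ahead of change 1.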
def test_crd_check_git_depends(self):
"Test single-repo dependencies in independent pipelines"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
# Add two git-dependent changes and make sure they both report
# success.
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(self.history[0].changes, '1,1')
self.assertEqual(self.history[-1].changes, '1,1 2,1')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
self.assertIn('Build succeeded', B.messages[0])
def test_crd_check_duplicate(self):
"Test duplicate check in independent pipelines"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes...
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...make sure the live one is not duplicated...
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...but the non-live one can be.
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 3)
# Release jobs in order to avoid races with change A jobs
# finishing before change B jobs.
self.orderedRelease()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(self.history[0].changes, '1,1 2,1')
self.assertEqual(self.history[1].changes, '1,1')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
self.assertIn('Build succeeded', B.messages[0])
def _test_crd_check_reconfiguration(self, project1, project2):
"Test cross-repo dependencies re-enqueued in independent pipelines"
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange(project1, 'master', 'A')
B = self.fake_gerrit.addFakeChange(project2, 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Make sure the items still share a change queue, and the
# first one is not live.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
for item in queue.queue:
self.assertEqual(item.queue, first_item.queue)
self.assertFalse(first_item.live)
self.assertTrue(queue.queue[1].live)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@skipIfMultiScheduler()
def test_crd_check_reconfiguration(self):
self._test_crd_check_reconfiguration('org/project1', 'org/project2')
@skipIfMultiScheduler()
def test_crd_undefined_project(self):
"""Test that undefined projects in dependencies are handled for
independent pipelines"""
# This is a hack for the fake Gerrit, as it implies repo
# creation upon the creation of any change.
self.init_repo("org/unknown", tag='init')
self._test_crd_check_reconfiguration('org/project1', 'org/unknown')
@simple_layout('layouts/ignore-dependencies.yaml')
def test_crd_check_ignore_dependencies(self):
"Test cross-repo dependencies can be ignored"
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
# C git-depends on B
C.setDependsOn(B, 1)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Make sure none of the items share a change queue, and all
# are live.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(len(check_pipeline.queues), 3)
self.assertEqual(len(check_pipeline.getAllItems()), 3)
for item in check_pipeline.getAllItems():
self.assertTrue(item.live)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(C.reported, 1)
# Each job should have tested exactly one change
for job in self.history:
self.assertEqual(len(job.changes.split()), 1)
@simple_layout('layouts/three-projects.yaml')
def test_crd_check_transitive(self):
"Test transitive cross-repo dependencies"
# Specifically, if A -> B -> C, and C gets a new patchset and
# A gets a new patchset, ensure the test of A,2 includes B,1
# and C,2 (not C,1 which would indicate stale data in the
# cache for B).
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
# B Depends-On: C
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, C.data['id'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,1 2,1 1,1')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,1 2,1')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,1')
C.addPatchset()
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,2')
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,2 2,1 1,2')
def test_crd_check_unknown(self):
"Test unknown projects in independent pipeline"
self.init_repo("org/unknown", tag='init')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/unknown', 'master', 'D')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
# Make sure zuul has seen an event on B.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 0)
def test_crd_cycle_join(self):
"Test an updated change creates a cycle"
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
# Create B->A
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['id'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Dep is there so zuul should have reported on B
self.assertEqual(B.reported, 1)
# Update A to add A->B (a cycle).
A.addPatchset()
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['id'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
# Dependency cycle injected so zuul should have reported again on A
self.assertEqual(A.reported, 2)
# Now if we update B to remove the depends-on, everything
# should be okay. B; A->B
B.addPatchset()
B.data['commitMessage'] = '%s\n' % (B.subject,)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
# Cycle was removed so now zuul should have reported again on A
self.assertEqual(A.reported, 3)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(B.reported, 2)
class TestGerritLegacyCRDWeb(TestGerritLegacyCRD):
config_file = 'zuul-gerrit-web.conf'
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_gerrit_legacy_crd.py
|
test_gerrit_legacy_crd.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Wikimedia Foundation Inc.
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import configparser
import multiprocessing
import os
import re
import time
from unittest import mock
from tests.base import (
BaseTestCase,
ZuulTestCase,
AnsibleZuulTestCase,
FIXTURE_DIR,
simple_layout,
iterate_timeout
)
from zuul.executor.sensors.startingbuilds import StartingBuildsSensor
from zuul.executor.sensors.ram import RAMSensor
from zuul.executor.server import squash_variables
from zuul.model import NodeSet, Group
class TestExecutorRepos(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
log = logging.getLogger("zuul.test.executor")
def assertRepoState(self, repo, state, project, build, number):
if 'branch' in state:
self.assertFalse(repo.head.is_detached,
'Project %s commit for build %s #%s should '
'not have a detached HEAD' % (
project, build, number))
self.assertEqual(repo.active_branch.name,
state['branch'],
'Project %s commit for build %s #%s should '
'be on the correct branch' % (
project, build, number))
# Remote 'origin' needs to be kept intact with a bogus URL
self.assertEqual(repo.remotes.origin.url, 'file:///dev/null')
self.assertIn(state['branch'], repo.remotes.origin.refs)
if 'commit' in state:
self.assertEqual(state['commit'],
str(repo.commit('HEAD')),
'Project %s commit for build %s #%s should '
'be correct' % (
project, build, number))
ref = repo.commit('HEAD')
repo_messages = set(
[c.message.strip() for c in repo.iter_commits(ref)])
if 'present' in state:
for change in state['present']:
msg = '%s-1' % change.subject
self.assertTrue(msg in repo_messages,
'Project %s for build %s #%s should '
'have change %s' % (
project, build, number, change.subject))
if 'absent' in state:
for change in state['absent']:
msg = '%s-1' % change.subject
self.assertTrue(msg not in repo_messages,
'Project %s for build %s #%s should '
'not have change %s' % (
project, build, number, change.subject))
def assertBuildStates(self, states, projects):
for number, build in enumerate(self.builds):
work = build.getWorkspaceRepos(projects)
state = states[number]
for project in projects:
self.assertRepoState(work[project], state[project],
project, build, number)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
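# The "states" lists passed to assertBuildStates are ordered to match
# self.builds; each entry maps a canonical project name to the
# expected workspace state (branch, commit, and which changes must be
# present or absent).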
@simple_layout('layouts/repo-checkout-two-project.yaml')
def test_one_branch(self):
self.executor_server.hold_jobs_in_build = True
p1 = 'review.example.com/org/project1'
p2 = 'review.example.com/org/project2'
projects = [p1, p2]
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(2, len(self.builds), "Two builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
{p1: dict(present=[A], absent=[B], branch='master'),
p2: dict(commit=str(upstream[p2].commit('master')),
branch='master'),
},
{p1: dict(present=[A], absent=[B], branch='master'),
p2: dict(present=[B], absent=[A], branch='master'),
},
]
self.assertBuildStates(states, projects)
@simple_layout('layouts/repo-checkout-four-project.yaml')
def test_multi_branch(self):
self.executor_server.hold_jobs_in_build = True
p1 = 'review.example.com/org/project1'
p2 = 'review.example.com/org/project2'
p3 = 'review.example.com/org/project3'
p4 = 'review.example.com/org/project4'
projects = [p1, p2, p3, p4]
self.create_branch('org/project2', 'stable/havana')
self.create_branch('org/project4', 'stable/havana')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'stable/havana',
'B')
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(3, len(self.builds), "Three builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
{p1: dict(present=[A], absent=[B, C], branch='master'),
p2: dict(commit=str(upstream[p2].commit('master')),
branch='master'),
p3: dict(commit=str(upstream[p3].commit('master')),
branch='master'),
p4: dict(commit=str(upstream[p4].commit('master')),
branch='master'),
},
{p1: dict(present=[A], absent=[B, C], branch='master'),
p2: dict(present=[B], absent=[A, C], branch='stable/havana'),
p3: dict(commit=str(upstream[p3].commit('master')),
branch='master'),
p4: dict(commit=str(upstream[p4].commit('stable/havana')),
branch='stable/havana'),
},
{p1: dict(present=[A], absent=[B, C], branch='master'),
p2: dict(commit=str(upstream[p2].commit('master')),
branch='master'),
p3: dict(present=[C], absent=[A, B], branch='master'),
p4: dict(commit=str(upstream[p4].commit('master')),
branch='master'),
},
]
self.assertBuildStates(states, projects)
@simple_layout('layouts/repo-checkout-six-project.yaml')
def test_project_override(self):
self.executor_server.hold_jobs_in_build = True
p1 = 'review.example.com/org/project1'
p2 = 'review.example.com/org/project2'
p3 = 'review.example.com/org/project3'
p4 = 'review.example.com/org/project4'
p5 = 'review.example.com/org/project5'
p6 = 'review.example.com/org/project6'
projects = [p1, p2, p3, p4, p5, p6]
self.create_branch('org/project3', 'stable/havana')
self.create_branch('org/project4', 'stable/havana')
self.create_branch('org/project6', 'stable/havana')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
D = self.fake_gerrit.addFakeChange('org/project3', 'stable/havana',
'D')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
D.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(4, len(self.builds), "Four builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
{p1: dict(present=[A], absent=[B, C, D], branch='master'),
p2: dict(commit=str(upstream[p2].commit('master')),
branch='master'),
p3: dict(commit=str(upstream[p3].commit('master')),
branch='master'),
p4: dict(commit=str(upstream[p4].commit('master')),
branch='master'),
p5: dict(commit=str(upstream[p5].commit('master')),
branch='master'),
p6: dict(commit=str(upstream[p6].commit('master')),
branch='master'),
},
{p1: dict(present=[A, B], absent=[C, D], branch='master'),
p2: dict(commit=str(upstream[p2].commit('master')),
branch='master'),
p3: dict(commit=str(upstream[p3].commit('master')),
branch='master'),
p4: dict(commit=str(upstream[p4].commit('master')),
branch='master'),
p5: dict(commit=str(upstream[p5].commit('master')),
branch='master'),
p6: dict(commit=str(upstream[p6].commit('master')),
branch='master'),
},
{p1: dict(present=[A, B], absent=[C, D], branch='master'),
p2: dict(present=[C], absent=[A, B, D], branch='master'),
p3: dict(commit=str(upstream[p3].commit('master')),
branch='master'),
p4: dict(commit=str(upstream[p4].commit('master')),
branch='master'),
p5: dict(commit=str(upstream[p5].commit('master')),
branch='master'),
p6: dict(commit=str(upstream[p6].commit('master')),
branch='master'),
},
{p1: dict(present=[A, B], absent=[C, D], branch='master'),
p2: dict(present=[C], absent=[A, B, D], branch='master'),
p3: dict(present=[D], absent=[A, B, C],
branch='stable/havana'),
p4: dict(commit=str(upstream[p4].commit('master')),
branch='master'),
p5: dict(commit=str(upstream[p5].commit('master')),
branch='master'),
p6: dict(commit=str(upstream[p6].commit('stable/havana')),
branch='stable/havana'),
},
]
self.assertBuildStates(states, projects)
def test_periodic_override(self):
# This test can not use simple_layout because it must start
# with a configuration which does not include a
# timer-triggered job so that we have an opportunity to set
# the hold flag before the first job.
# This tests that we can override the branch in a timer
# trigger (mostly to ensure backwards compatibility for jobs).
self.executor_server.hold_jobs_in_build = True
p1 = 'review.example.com/org/project1'
projects = [p1]
self.create_branch('org/project1', 'stable/havana')
# Start timer trigger - also org/project
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-timer-override.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
time.sleep(5)
self.waitUntilSettled()
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-no-timer-override.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.assertEqual(1, len(self.builds), "One build is running")
upstream = self.getUpstreamRepos(projects)
states = [
{p1: dict(commit=str(upstream[p1].commit('stable/havana')),
branch='stable/havana'),
},
]
self.assertBuildStates(states, projects)
def test_periodic(self):
# This test can not use simple_layout because it must start
# with a configuration which does not include a
# timer-triggered job so that we have an opportunity to set
# the hold flag before the first job.
self.executor_server.hold_jobs_in_build = True
# Start timer trigger - also org/project
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
p1 = 'review.example.com/org/project1'
projects = [p1]
self.create_branch('org/project1', 'stable/havana')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable/havana'))
self.waitUntilSettled()
# The pipeline triggers every second, so we should have seen
# several by now.
time.sleep(5)
self.waitUntilSettled()
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
# second to settle.
time.sleep(1)
self.waitUntilSettled()
self.assertEqual(2, len(self.builds), "Two builds are running")
upstream = self.getUpstreamRepos(projects)
states = [
{p1: dict(commit=str(upstream[p1].commit('stable/havana')),
branch='stable/havana'),
},
{p1: dict(commit=str(upstream[p1].commit('master')),
branch='master'),
},
]
if self.builds[0].parameters['zuul']['ref'] == 'refs/heads/master':
states = list(reversed(states))
self.assertBuildStates(states, projects)
@simple_layout('layouts/repo-checkout-post.yaml')
def test_post_and_master_checkout(self):
self.executor_server.hold_jobs_in_build = True
p1 = "review.example.com/org/project1"
p2 = "review.example.com/org/project2"
projects = [p1, p2]
upstream = self.getUpstreamRepos(projects)
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
event = A.getRefUpdatedEvent()
A.setMerged()
A_commit = str(upstream[p1].commit('master'))
self.log.debug("A commit: %s" % A_commit)
# Add another commit to the repo that merged right after this
# one to make sure that our post job runs with the one that we
# intended rather than simply the current repo state.
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
parent='refs/changes/01/1/1')
B.setMerged()
B_commit = str(upstream[p1].commit('master'))
self.log.debug("B commit: %s" % B_commit)
self.fake_gerrit.addEvent(event)
self.waitUntilSettled()
states = [
{p1: dict(commit=A_commit,
present=[A], absent=[B], branch='master'),
p2: dict(commit=str(upstream[p2].commit('master')),
absent=[A, B], branch='master'),
},
]
self.assertBuildStates(states, projects)
@simple_layout('layouts/repo-checkout-tag.yaml')
def test_tag_checkout(self):
self.executor_server.hold_jobs_in_build = True
p1 = "review.example.com/org/project1"
p2 = "review.example.com/org/project2"
projects = [p1, p2]
upstream = self.getUpstreamRepos(projects)
self.create_branch('org/project2', 'stable/havana')
files = {'README': 'tagged readme'}
self.addCommitToRepo('org/project2', 'tagged commit',
files, branch='stable/havana', tag='test-tag')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
states = [
{p1: dict(present=[A], branch='master'),
p2: dict(commit=str(upstream[p2].commit('test-tag')),
absent=[A]),
},
]
self.assertBuildStates(states, projects)
class TestAnsibleJob(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def run_job(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
return list(self.executor_server.job_workers.values())[0]
def test_host_keys(self):
self.fake_nodepool.host_keys = ['fake-host-key']
job = self.run_job()
keys = job.host_list[0]['host_keys']
self.assertEqual(keys[0], '127.0.0.1 fake-host-key')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_host_keys_connection_port(self):
# Test with custom connection_port set
self.fake_nodepool.host_keys = ['fake-host-key']
self.fake_nodepool.connection_port = 22022
job = self.run_job()
keys = job.host_list[0]['host_keys']
self.assertEqual(keys[0], '[127.0.0.1]:22022 fake-host-key')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_no_host_keys_connection_port(self):
# Test with no host keys
self.fake_nodepool.host_keys = []
self.fake_nodepool.connection_port = 22022
job = self.run_job()
keys = job.host_list[0]['host_keys']
self.assertEqual(keys, [])
self.assertEqual(
job.host_list[0]['host_vars']['ansible_ssh_common_args'],
'-o StrictHostKeyChecking=false')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_no_shell_type(self):
# Test without shell type set
job = self.run_job()
host = job.host_list[0]
self.assertNotIn('ansible_shell_type', host['host_vars'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_custom_shell_type(self):
# Test with custom shell type set.
self.fake_nodepool.shell_type = 'cmd'
job = self.run_job()
host = job.host_list[0]
self.assertIn('ansible_shell_type', host['host_vars'])
self.assertEqual(
host['host_vars']['ansible_shell_type'],
'cmd')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
class TestExecutorHostname(ZuulTestCase):
config_file = 'zuul-executor-hostname.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
def test_executor_hostname(self):
self.assertEqual('test-executor-hostname.example.com',
self.executor_server.hostname)
class TestStartingBuildsSensor(ZuulTestCase):
config_file = 'zuul.conf'
tenant_config_file = 'config/governor/main.yaml'
def test_default_case(self):
# Given
cores = multiprocessing.cpu_count()
# When
sensor = StartingBuildsSensor(None, cores * 2.5, None)
# Then
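        # The sensor doubles its default limit on small machines
        # (<= 4 cores); larger machines use the base value.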
coefficient = 2 if multiprocessing.cpu_count() <= 4 else 1
max_default = int(cores * 2.5 * coefficient)
self.assertEqual(sensor.max_starting_builds, max_default)
self.assertEqual(sensor.min_starting_builds, max(int(cores / 2), 1))
def test_configuration_not_exists(self):
# Given
cores = multiprocessing.cpu_count()
# When
sensor = StartingBuildsSensor(None, cores * 2.5, self.config)
# Then
coefficient = 2 if multiprocessing.cpu_count() <= 4 else 1
max_default = int(cores * 2.5 * coefficient)
self.assertEqual(sensor.max_starting_builds, max_default)
self.assertEqual(sensor.min_starting_builds, max(int(cores / 2), 1))
def test_configuration_override(self):
# Given
cores = multiprocessing.cpu_count()
self.config.set('executor', 'max_starting_builds', '5')
# When
sensor = StartingBuildsSensor(None, cores * 2.5, self.config)
# Then
self.assertEqual(sensor.max_starting_builds, 5)
self.assertEqual(sensor.min_starting_builds, min(
max(int(cores / 2), 1), sensor.max_starting_builds))
def test_configuration_override_affecting_min(self):
# Given
cores = multiprocessing.cpu_count()
self.config.set('executor', 'max_starting_builds', '1')
# When
sensor = StartingBuildsSensor(None, cores * 2.5, self.config)
# Then
self.assertEqual(sensor.max_starting_builds, 1)
self.assertEqual(sensor.min_starting_builds, 1)
class TestGovernor(ZuulTestCase):
config_file = 'zuul-executor-governor.conf'
tenant_config_file = 'config/governor/main.yaml'
@mock.patch('os.getloadavg')
@mock.patch('psutil.virtual_memory')
def test_load_governor(self, vm_mock, loadavg_mock):
class Dummy(object):
pass
ram = Dummy()
ram.percent = 20.0 # 20% used
ram.total = 8 * 1024 * 1024 * 1024 # 8GiB
vm_mock.return_value = ram
loadavg_mock.return_value = (0.0, 0.0, 0.0)
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
# fake the load to be higher than permitted
fake_load = multiprocessing.cpu_count() * 2.6
loadavg_mock.return_value = (fake_load, fake_load, fake_load)
self.executor_server.manageLoad()
self.assertFalse(self.executor_server.accepting_work)
@mock.patch('os.getloadavg')
@mock.patch('psutil.virtual_memory')
def test_ram_governor(self, vm_mock, loadavg_mock):
class Dummy(object):
pass
ram = Dummy()
ram.percent = 20.0 # 20% used
ram.total = 8 * 1024 * 1024 * 1024 # 8GiB
vm_mock.return_value = ram
loadavg_mock.return_value = (0.0, 0.0, 0.0)
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
ram.percent = 99.0 # 99% used
self.executor_server.manageLoad()
self.assertFalse(self.executor_server.accepting_work)
@mock.patch('os.getloadavg')
@mock.patch('psutil.virtual_memory')
def test_ram_cgroup_governor(self, vm_mock, loadavg_mock):
class Dummy(object):
pass
ram = Dummy()
ram.percent = 20.0 # 20% used
ram.total = 8 * 1024 * 1024 * 1024 # 8GiB
vm_mock.return_value = ram
loadavg_mock.return_value = (0.0, 0.0, 0.0)
# Set no cgroup limit
ram_sensor = [x for x in self.executor_server.sensors
if isinstance(x, RAMSensor)][0]
ram_sensor.cgroup_stats_file = os.path.join(
FIXTURE_DIR, 'cgroup', 'memory.stat.nolimit')
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
# Set cgroup limit 5GiB and ram usage 20%
ram_sensor.cgroup_stats_file = os.path.join(
FIXTURE_DIR, 'cgroup', 'memory.stat.ok')
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
# Set cgroup limit 5GiB and ram usage 96%
ram_sensor.cgroup_stats_file = os.path.join(
FIXTURE_DIR, 'cgroup', 'memory.stat.bad')
self.executor_server.manageLoad()
self.assertFalse(self.executor_server.accepting_work)
@mock.patch('os.getloadavg')
@mock.patch('os.statvfs')
def test_hdd_governor(self, statvfs_mock, loadavg_mock):
class Dummy(object):
pass
hdd = Dummy()
hdd.f_frsize = 4096
hdd.f_blocks = 120920708
hdd.f_bfree = 95716701
statvfs_mock.return_value = hdd # 20.84% used
loadavg_mock.return_value = (0.0, 0.0, 0.0)
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
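        # The pct_used_hdd stat is reported in hundredths of a percent,
        # so 20.84% shows up as 2084.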
self.assertReportedStat(
'zuul.executor.test-executor-hostname_example_com.pct_used_hdd',
value='2084', kind='g')
hdd.f_bfree = 5716701
statvfs_mock.return_value = hdd # 95.27% used
self.executor_server.manageLoad()
self.assertFalse(self.executor_server.accepting_work)
self.assertReportedStat(
'zuul.executor.test-executor-hostname_example_com.pct_used_hdd',
value='9527', kind='g')
@mock.patch('os.getloadavg')
def test_pause_governor(self, loadavg_mock):
loadavg_mock.return_value = (0.0, 0.0, 0.0)
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
self.executor_server.pause_sensor.pause = True
self.executor_server.manageLoad()
self.assertFalse(self.executor_server.accepting_work)
def waitForExecutorBuild(self, jobname):
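        # Poll (with a 30 second timeout) until the named build exists,
        # has a registered job worker, and that worker has started.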
self.log.debug("Waiting for %s to start", jobname)
timeout = time.time() + 30
build = None
while time.time() < timeout and not build:
for b in self.builds:
if b.name == jobname:
build = b
break
time.sleep(0.1)
self.log.debug("Found build %s", jobname)
build_id = build.uuid
while (time.time() < timeout and
build_id not in self.executor_server.job_workers):
time.sleep(0.1)
worker = self.executor_server.job_workers[build_id]
self.log.debug("Found worker %s", jobname)
while (time.time() < timeout and
not worker.started):
time.sleep(0.1)
self.log.debug("Worker for %s started: %s", jobname, worker.started)
return build
@mock.patch('os.getloadavg')
def test_slow_start(self, loadavg_mock):
loadavg_mock.return_value = (0.0, 0.0, 0.0)
def _set_starting_builds(min, max):
for sensor in self.executor_server.sensors:
if isinstance(sensor, StartingBuildsSensor):
sensor.min_starting_builds = min
sensor.max_starting_builds = max
# Note: This test relies on the fact that manageLoad is only
# run at specific points. Several times in the test we check
# that manageLoad has disabled or enabled job acceptance based
# on the number of "starting" jobs. Some of those jobs may
# have actually moved past the "starting" phase and are
# actually "running". But because manageLoad hasn't run
# again, it still uses the old values. Keep this in mind when
# double checking its calculations.
#
# Disable the periodic governor runs to make sure they don't
        # interfere (only possible if the test runs longer than 10
# seconds).
self.executor_server.governor_stop_event.set()
self.executor_server.hold_jobs_in_build = True
_set_starting_builds(1, 1)
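        # With the limit at a single starting build, the first job should
        # saturate the sensor and stop us from accepting more work.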
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
build1 = self.waitForExecutorBuild('test1')
# With one job (test1) being started, we should no longer
# be accepting new work
self.assertFalse(self.executor_server.accepting_work)
self.assertEqual(len(self.executor_server.job_workers), 1)
# Allow enough starting builds for the test to complete.
_set_starting_builds(1, 3)
# We must wait for build1 to enter a waiting state otherwise
# the subsequent release() is a noop and the build is never
# released. We don't use waitUntilSettled as that requires
# the other two builds to start which can't happen while we
# don't accept jobs.
for x in iterate_timeout(30, "build1 is waiting"):
if build1.waiting:
break
build1.release()
for x in iterate_timeout(30, "Wait for build1 to complete"):
if build1.uuid not in self.executor_server.job_workers:
break
self.executor_server.manageLoad()
# This manageLoad call has determined that there are 0 workers
# running, so our full complement of 3 starting builds is
# available. It will re-register for work and pick up the
# next two jobs.
self.waitForExecutorBuild('test2')
self.waitForExecutorBuild('test3')
# When each of these jobs started, they caused manageLoad to
# be called, the second invocation calculated that there were
# 2 workers running, so our starting build limit was reduced
# to 1. Usually it will calculate that there are 2 starting
# builds, but theoretically it could count only 1 if the first
# build manages to leave the starting phase before the second
# build starts. It should always count the second build as
# starting. As long as at least one build is still in the
# starting phase, this will exceed the limit and unregister.
self.assertFalse(self.executor_server.accepting_work)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.executor_server.manageLoad()
self.assertTrue(self.executor_server.accepting_work)
class TestLineMapping(AnsibleZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
tenant_config_file = 'config/line-mapping/main.yaml'
def test_line_mapping(self):
header = 'add something to the top\n'
footer = 'this is the change\n'
with open(os.path.join(FIXTURE_DIR,
'config/line-mapping/git/',
'org_project/README')) as f:
content = f.read()
# The change under test adds a line to the end.
file_dict = {'README': content + footer}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
# An intervening change adds a line to the top.
file_dict = {'README': header + content}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict)
B.setMerged()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('file-comments').result,
'SUCCESS')
self.assertEqual(len(A.comments), 2)
comments = sorted(A.comments, key=lambda x: x['line'])
self.assertEqual(comments[0],
{'file': 'README',
'line': 14,
'message': 'interesting comment',
'reviewer': {'email': '[email protected]',
'name': 'Zuul',
'username': 'jenkins'}}
)
self.assertEqual(
comments[1],
{
"file": "README",
"line": 14,
"message": "That's a cool section",
"range": {
"end_character": 26,
"end_line": 14,
"start_character": 0,
"start_line": 12
},
"reviewer": {
"email": "[email protected]",
"name": "Zuul",
"username": "jenkins"
}
}
)
class ExecutorFactsMixin:
# These should be overridden in child classes.
tenant_config_file = 'config/executor-facts/main.yaml'
ansible_major_minor = 'X.Y'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def test_datetime_fact(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
self.assertEqual(self.getJobFromHistory('datetime-fact').result,
'SUCCESS')
j = json.loads(self._get_file(self.history[0],
'work/logs/job-output.json'))
date_time = \
j[0]['plays'][0]['tasks'][0]['hosts']['localhost']['date_time']
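        # Zuul exposes a filtered copy of Ansible's date_time fact; 18 is
        # the expected number of retained fields.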
self.assertEqual(18, len(date_time))
build = self.getJobFromHistory('datetime-fact', result='SUCCESS')
with open(build.jobdir.job_output_file) as f:
output = f.read()
self.assertIn(f'Ansible version={self.ansible_major_minor}',
output)
class TestExecutorFacts6(AnsibleZuulTestCase, ExecutorFactsMixin):
tenant_config_file = 'config/executor-facts/main6.yaml'
ansible_major_minor = '2.13'
class TestExecutorFacts8(AnsibleZuulTestCase, ExecutorFactsMixin):
tenant_config_file = 'config/executor-facts/main8.yaml'
ansible_major_minor = '2.15'
class AnsibleCallbackConfigsMixin:
config_file = 'zuul-executor-ansible-callback.conf'
# These should be overridden in child classes.
tenant_config_file = 'config/ansible-callbacks/main.yaml'
ansible_major_minor = 'X.Y'
def test_ansible_callback_config(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
callbacks = [
'callback_test_callback',
'callback_nocows = True',
'callback_nocows = False',
'callback_\\nnocows = True',
'callback_\\nnocows = False',
'callback_ansible_interpolation'
]
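        # The names embedding "\n" verify that unusual callback section
        # names are written to ansible.cfg verbatim rather than being
        # interpreted as new config lines.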
p = os.path.join(self.getJobFromHistory('callback-test').jobdir.root,
'ansible/playbook_0/ansible.cfg')
self.assertEqual(self.getJobFromHistory('callback-test').result,
'SUCCESS')
c = configparser.ConfigParser(interpolation=None)
c.read(p)
for callback in callbacks:
self.assertIn(callback, c.sections())
self.assertIn('test_field', c['callback_ansible_interpolation'])
self.assertIn('test-%-value',
c['callback_ansible_interpolation']['test_field'])
self.assertIn('file_name', c['callback_test_callback'])
self.assertEqual('callback-success',
c['callback_test_callback']['file_name'])
callback_result_file = os.path.join(
self.getJobFromHistory('callback-test').jobdir.root,
'trusted/project_0/review.example.com/',
'common-config/playbooks/callback_plugins/',
c['callback_test_callback']['file_name'])
self.assertTrue(os.path.isfile(callback_result_file))
build = self.getJobFromHistory('callback-test', result='SUCCESS')
with open(build.jobdir.job_output_file) as f:
output = f.read()
self.assertIn(f'Ansible version={self.ansible_major_minor}',
output)
class TestAnsibleCallbackConfigs6(AnsibleZuulTestCase,
AnsibleCallbackConfigsMixin):
config_file = 'zuul-executor-ansible-callback.conf'
tenant_config_file = 'config/ansible-callbacks/main6.yaml'
ansible_major_minor = '2.13'
class TestAnsibleCallbackConfigs8(AnsibleZuulTestCase,
AnsibleCallbackConfigsMixin):
config_file = 'zuul-executor-ansible-callback.conf'
tenant_config_file = 'config/ansible-callbacks/main8.yaml'
ansible_major_minor = '2.15'
class TestExecutorEnvironment(AnsibleZuulTestCase):
tenant_config_file = 'config/zuul-environment-filter/main.yaml'
@mock.patch.dict('os.environ', {'ZUUL_TEST_VAR': 'some-value',
'TEST_VAR': 'not-empty'})
def test_zuul_environment_filter(self):
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
self.assertEqual(
self.getJobFromHistory('zuul-environment-filter').result,
'SUCCESS')
class TestExecutorStart(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def setup_config(self, config_file: str):
config = super(TestExecutorStart, self).setup_config(config_file)
self.junk_dir = os.path.join(self.jobdir_root, 'junk')
os.makedirs(self.junk_dir)
return config
def test_executor_start(self):
self.assertFalse(os.path.exists(self.junk_dir))
class TestExecutorExtraPackages(AnsibleZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
test_package = 'pywinrm'
def setUp(self):
super(TestExecutorExtraPackages, self).setUp()
import subprocess
ansible_manager = self.executor_server.ansible_manager
for version in ansible_manager._supported_versions:
command = [ansible_manager.getAnsibleCommand(version, 'pip'),
'uninstall', '-y', self.test_package]
            # We want to error if the uninstall fails, as the test below
            # relies on the package being absent in order to be properly
            # exercised.
s = subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.log.info(s.stdout)
self.assertEqual(s.returncode, 0)
@mock.patch('zuul.lib.ansible.ManagedAnsible.extra_packages',
new_callable=mock.PropertyMock)
def test_extra_packages(self, mock_extra_packages):
mock_extra_packages.return_value = [self.test_package]
ansible_manager = self.executor_server.ansible_manager
self.assertFalse(ansible_manager.validate())
ansible_manager.install()
self.assertTrue(ansible_manager.validate())
class TestVarSquash(BaseTestCase):
def test_squash_variables(self):
# Test that we correctly squash job variables
nodeset = NodeSet()
nodes = [
{'name': 'node1', 'host_vars': {
'host': 'node1_host',
'extra': 'node1_extra',
}},
{'name': 'node2', 'host_vars': {
'host': 'node2_host',
'extra': 'node2_extra',
}},
]
nodeset.addGroup(Group('group1', ['node1']))
nodeset.addGroup(Group('group2', ['node2']))
groupvars = {
'group1': {
'host': 'group1_host',
'group': 'group1_group',
'extra': 'group1_extra',
},
'group2': {
'host': 'group2_host',
'group': 'group2_group',
'extra': 'group2_extra',
},
'all': {
'all2': 'groupvar_all2',
}
}
jobvars = {
'host': 'jobvar_host',
'group': 'jobvar_group',
'all': 'jobvar_all',
'extra': 'jobvar_extra',
}
extravars = {
'extra': 'extravar_extra',
}
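        # Expected precedence, lowest to highest: job vars, group vars,
        # host vars, extra vars.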
out = squash_variables(
nodes, nodeset, jobvars, groupvars, extravars)
expected = {
'node1': {
'all': 'jobvar_all',
'all2': 'groupvar_all2',
'group': 'group1_group',
'host': 'node1_host',
'extra': 'extravar_extra'},
'node2': {
'all': 'jobvar_all',
'all2': 'groupvar_all2',
'group': 'group2_group',
'host': 'node2_host',
'extra': 'extravar_extra'},
}
self.assertEqual(out, expected)
class TestExecutorFailure(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
@mock.patch('zuul.executor.server.ExecutorServer.executeJob')
def test_executor_job_start_failure(self, execute_job_mock):
execute_job_mock.side_effect = Exception('Failed to start')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertTrue(re.search(
'- project-merge .* ERROR',
A.messages[-1]))
def test_executor_transient_error(self):
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
with mock.patch('zuul.merger.merger.Merger.updateRepo') as update_mock:
update_mock.side_effect = IOError("Transient error")
self.executor_api.release()
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['gate']
items = pipeline.getAllItems()
self.assertEqual(len(items), 1)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
retry_builds = items[0].current_build_set.retry_builds
self.assertIn("project-merge", retry_builds)
build_retries = retry_builds["project-merge"]
self.assertEqual(len(build_retries), 1)
self.assertIsNotNone(build_retries[0].error_detail)
self.assertTrue(
build_retries[0].error_detail.startswith(
"Failed to update project"))
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_executor.py
|
test_executor.py
|
# Copyright 2018 Red Hat, Inc.
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import os
import sys
import subprocess
import time
import configparser
import datetime
import dateutil.tz
import uuid
import fixtures
import jwt
import testtools
import sqlalchemy
from zuul.zk import ZooKeeperClient
from zuul.zk.locks import SessionAwareLock
from zuul.cmd.client import parse_cutoff
from tests.base import BaseTestCase, ZuulTestCase
from tests.base import FIXTURE_DIR, iterate_timeout
from kazoo.exceptions import NoNodeError
class BaseClientTestCase(BaseTestCase):
config_file = 'zuul.conf'
config_with_zk = True
def setUp(self):
super(BaseClientTestCase, self).setUp()
self.test_root = self.useFixture(fixtures.TempDir(
rootdir=os.environ.get("ZUUL_TEST_ROOT"))).path
self.config = configparser.ConfigParser()
self.config.read(os.path.join(FIXTURE_DIR, self.config_file))
if self.config_with_zk:
self.config_add_zk()
def config_add_zk(self):
self.setupZK()
self.config.add_section('zookeeper')
self.config.set('zookeeper', 'hosts', self.zk_chroot_fixture.zk_hosts)
self.config.set('zookeeper', 'session_timeout', '30')
self.config.set('zookeeper', 'tls_cert',
self.zk_chroot_fixture.zookeeper_cert)
self.config.set('zookeeper', 'tls_key',
self.zk_chroot_fixture.zookeeper_key)
self.config.set('zookeeper', 'tls_ca',
self.zk_chroot_fixture.zookeeper_ca)
class TestTenantValidationClient(BaseClientTestCase):
config_with_zk = True
def test_client_tenant_conf_check(self):
self.config.set(
'scheduler', 'tenant_config',
os.path.join(FIXTURE_DIR, 'config/tenant-parser/simple.yaml'))
with open(os.path.join(self.test_root, 'tenant_ok.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'tenant_ok.conf'),
'tenant-conf-check'], stdout=subprocess.PIPE)
p.communicate()
self.assertEqual(p.returncode, 0, 'The command must exit 0')
self.config.set(
'scheduler', 'tenant_config',
os.path.join(FIXTURE_DIR, 'config/tenant-parser/invalid.yaml'))
with open(os.path.join(self.test_root, 'tenant_ko.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'tenant_ko.conf'),
'tenant-conf-check'], stdout=subprocess.PIPE)
out, _ = p.communicate()
self.assertEqual(p.returncode, 1, "The command must exit 1")
self.assertIn(
b"expected a dictionary for dictionary", out,
"Expected error message not found")
class TestWebTokenClient(BaseClientTestCase):
config_file = 'zuul-admin-web.conf'
def test_no_authenticator(self):
"""Test that token generation is not possible without authenticator"""
old_conf = io.StringIO()
self.config.write(old_conf)
self.config.remove_section('auth zuul_operator')
with open(os.path.join(self.test_root,
'no_zuul_operator.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'no_zuul_operator.conf'),
'create-auth-token',
'--auth-config', 'zuul_operator',
'--user', 'marshmallow_man',
'--tenant', 'tenant_one', ],
stdout=subprocess.PIPE)
out, _ = p.communicate()
old_conf.seek(0)
self.config = configparser.ConfigParser()
self.config.read_file(old_conf)
self.assertEqual(p.returncode, 1, 'The command must exit 1')
def test_unsupported_driver(self):
"""Test that token generation is not possible with wrong driver"""
old_conf = io.StringIO()
self.config.write(old_conf)
self.config.add_section('auth someauth')
self.config.set('auth someauth', 'driver', 'RS256withJWKS')
with open(os.path.join(self.test_root, 'JWKS.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'JWKS.conf'),
'create-auth-token',
'--auth-config', 'someauth',
'--user', 'marshmallow_man',
'--tenant', 'tenant_one', ],
stdout=subprocess.PIPE)
out, _ = p.communicate()
old_conf.seek(0)
self.config = configparser.ConfigParser()
self.config.read_file(old_conf)
self.assertEqual(p.returncode, 1, 'The command must exit 1')
def test_token_generation(self):
"""Test token generation"""
with open(os.path.join(self.test_root, 'good.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'good.conf'),
'create-auth-token',
             '--auth-config', 'zuul_operator',
'--user', 'marshmallow_man',
'--tenant', 'tenant_one', ],
stdout=subprocess.PIPE)
now = time.time()
out, _ = p.communicate()
self.assertEqual(p.returncode, 0, 'The command must exit 0')
self.assertTrue(out.startswith(b"Bearer "), out)
# there is a trailing carriage return in the output
token = jwt.decode(out[len("Bearer "):-1],
key=self.config.get(
'auth zuul_operator',
'secret'),
algorithms=[self.config.get(
'auth zuul_operator',
'driver')],
audience=self.config.get(
'auth zuul_operator',
'client_id'),)
self.assertEqual('marshmallow_man', token.get('sub'))
self.assertEqual('zuul_operator', token.get('iss'))
self.assertEqual('zuul.example.com', token.get('aud'))
admin_tenants = token.get('zuul', {}).get('admin', [])
self.assertTrue('tenant_one' in admin_tenants, admin_tenants)
# allow one minute for the process to run
self.assertTrue(580 <= int(token['exp']) - now < 660,
(token['exp'], now))
class TestKeyOperations(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_export_import(self):
# Test a round trip export/import of keys
export_root = os.path.join(self.test_root, 'export')
config_file = os.path.join(self.test_root, 'zuul.conf')
with open(config_file, 'w') as f:
self.config.write(f)
# Save a copy of the keys in ZK
old_data = self.getZKTree('/keystorage')
# Export keys
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'export-keys', export_root],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
self.assertEqual(p.returncode, 0)
# Delete keys from ZK
self.zk_client.client.delete('/keystorage', recursive=True)
# Make sure it's really gone
with testtools.ExpectedException(NoNodeError):
self.getZKTree('/keystorage')
# Import keys
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'import-keys', export_root],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
self.assertEqual(p.returncode, 0)
# Make sure the new data matches the original
new_data = self.getZKTree('/keystorage')
self.assertEqual(new_data, old_data)
def test_copy_delete(self):
config_file = os.path.join(self.test_root, 'zuul.conf')
with open(config_file, 'w') as f:
self.config.write(f)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'copy-keys',
'gerrit', 'org/project',
'gerrit', 'neworg/newproject',
],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
self.assertEqual(p.returncode, 0)
data = self.getZKTree('/keystorage')
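        # Project names are URL-quoted within keystorage paths, so
        # "org/project" appears as "org%2Fproject".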
self.assertEqual(
data['/keystorage/gerrit/org/org%2Fproject/secrets'],
data['/keystorage/gerrit/neworg/neworg%2Fnewproject/secrets'])
self.assertEqual(
data['/keystorage/gerrit/org/org%2Fproject/ssh'],
data['/keystorage/gerrit/neworg/neworg%2Fnewproject/ssh'])
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-keys',
'gerrit', 'org/project',
],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
self.assertEqual(p.returncode, 0)
data = self.getZKTree('/keystorage')
self.assertIsNone(
data.get('/keystorage/gerrit/org/org%2Fproject/secrets'))
self.assertIsNone(
data.get('/keystorage/gerrit/org/org%2Fproject/ssh'))
self.assertIsNone(
data.get('/keystorage/gerrit/org/org%2Fproject'))
# Ensure that deleting one project in a tree doesn't remove other
# projects in that tree.
self.assertIsNotNone(
data.get('/keystorage/gerrit/org/org%2Fproject1'))
self.assertIsNotNone(
data.get('/keystorage/gerrit/org/org%2Fproject2'))
self.assertIsNotNone(
data.get('/keystorage/gerrit/org'))
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-keys',
'gerrit', 'org/project1',
],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
self.assertEqual(p.returncode, 0)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-keys',
'gerrit', 'org/project2',
],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
self.assertEqual(p.returncode, 0)
data = self.getZKTree('/keystorage')
# Ensure that the last project being removed also removes its
# org prefix entry.
self.assertIsNone(
data.get('/keystorage/gerrit/org/org%2Fproject1'))
self.assertIsNone(
data.get('/keystorage/gerrit/org/org%2Fproject2'))
self.assertIsNone(
data.get('/keystorage/gerrit/org'))
class TestOfflineZKOperations(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def shutdown(self):
pass
def assertFinalState(self):
pass
def assertCleanShutdown(self):
pass
def test_delete_state(self):
# Shut everything down (as much as possible) to reduce
# logspam and errors.
ZuulTestCase.shutdown(self)
# Re-start the client connection because we need one for the
# test.
self.zk_client = ZooKeeperClient.fromConfig(self.config)
self.zk_client.connect()
config_file = os.path.join(self.test_root, 'zuul.conf')
with open(config_file, 'w') as f:
self.config.write(f)
# Save a copy of the keys in ZK
old_data = self.getZKTree('/keystorage')
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-state',
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(b'yes\n')
self.log.debug(out.decode('utf8'))
# Make sure the keys are still around
new_data = self.getZKTree('/keystorage')
self.assertEqual(new_data, old_data)
# Make sure we really deleted everything
with testtools.ExpectedException(NoNodeError):
self.getZKTree('/zuul')
self.zk_client.disconnect()
class TestOnlineZKOperations(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def assertSQLState(self):
pass
def _test_delete_pipeline(self, pipeline):
sched = self.scheds.first.sched
tenant = sched.abide.tenants['tenant-one']
# Force a reconfiguration due to a config change (so that the
# tenant trigger event queue gets a minimum timestamp set)
file_dict = {'zuul.yaml': ''}
        M = self.fake_gerrit.addFakeChange('org/project', 'master', 'M',
                                           files=file_dict)
M.setMerged()
self.fake_gerrit.addEvent(M.getChangeMergedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
if pipeline == 'check':
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
else:
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# Lock the check pipeline so we don't process the event we're
# about to submit (it should go into the pipeline trigger event
# queue and stay there while we delete the pipeline state).
# This way we verify that events arrived before the deletion
# still work.
plock = SessionAwareLock(
self.zk_client.client,
f"/zuul/locks/pipeline/{tenant.name}/{pipeline}")
plock.acquire(blocking=True, timeout=None)
try:
self.log.debug('Got pipeline lock')
# Add a new event while our old last reconfigure time is
# in place.
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
if pipeline == 'check':
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
else:
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
# Wait until it appears in the pipeline trigger event queue
self.log.debug('Waiting for event')
for x in iterate_timeout(30, 'trigger event queue has events'):
if sched.pipeline_trigger_events[
tenant.name][pipeline].hasEvents():
break
self.log.debug('Got event')
except Exception:
plock.release()
raise
# Grab the run handler lock so that we will continue to avoid
# further processing of the event after we release the
# pipeline lock (which the delete command needs to acquire).
sched.run_handler_lock.acquire()
try:
plock.release()
self.log.debug('Got run lock')
config_file = os.path.join(self.test_root, 'zuul.conf')
with open(config_file, 'w') as f:
self.config.write(f)
# Make sure the pipeline exists
self.getZKTree(
f'/zuul/tenant/{tenant.name}/pipeline/{pipeline}/item')
# Save the old layout uuid
tenant = sched.abide.tenants[tenant.name]
old_layout_uuid = tenant.layout.uuid
self.log.debug('Deleting pipeline state')
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-pipeline-state',
tenant.name, pipeline,
],
stdout=subprocess.PIPE)
# Delete the pipeline state
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
self.assertEqual(p.returncode, 0, 'The command must exit 0')
# Make sure it's deleted
with testtools.ExpectedException(NoNodeError):
self.getZKTree(
f'/zuul/tenant/{tenant.name}/pipeline/{pipeline}/item')
# Make sure the change list is re-created
self.getZKTree(
f'/zuul/tenant/{tenant.name}/pipeline/{pipeline}/change_list')
finally:
sched.run_handler_lock.release()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='2,1'),
dict(name='project-merge', result='SUCCESS', changes='3,1'),
dict(name='project-test1', result='SUCCESS', changes='3,1'),
dict(name='project-test2', result='SUCCESS', changes='3,1'),
], ordered=False)
tenant = sched.abide.tenants[tenant.name]
new_layout_uuid = tenant.layout.uuid
self.assertEqual(old_layout_uuid, new_layout_uuid)
self.assertEqual(tenant.layout.pipelines[pipeline].state.layout_uuid,
old_layout_uuid)
def test_delete_pipeline_check(self):
self._test_delete_pipeline('check')
def test_delete_pipeline_gate(self):
self._test_delete_pipeline('gate')
class TestDBPruneParse(BaseTestCase):
def test_db_prune_parse(self):
now = datetime.datetime(year=2023, month=5, day=28,
hour=22, minute=15, second=1,
tzinfo=dateutil.tz.tzutc())
reference = datetime.datetime(year=2022, month=5, day=28,
hour=22, minute=15, second=1,
tzinfo=dateutil.tz.tzutc())
# Test absolute times
self.assertEqual(
reference,
parse_cutoff(now, '2022-05-28 22:15:01 UTC', None))
self.assertEqual(
reference,
parse_cutoff(now, '2022-05-28 22:15:01', None))
# Test relative times
self.assertEqual(reference,
parse_cutoff(now, None, '8760h'))
self.assertEqual(reference,
parse_cutoff(now, None, '365d'))
with testtools.ExpectedException(RuntimeError):
self.assertEqual(reference,
parse_cutoff(now, None, '1y'))
class DBPruneTestCase(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
# This should be larger than the limit size in sqlconnection
num_buildsets = 55
def _createBuildset(self, update_time):
connection = self.scheds.first.sched.sql.connection
buildset_uuid = uuid.uuid4().hex
event_id = uuid.uuid4().hex
with connection.getSession() as db:
start_time = update_time - datetime.timedelta(seconds=1)
end_time = update_time
db_buildset = db.createBuildSet(
uuid=buildset_uuid,
tenant='tenant-one',
pipeline='check',
project='org/project',
change='1',
patchset='1',
ref='refs/changes/1',
oldrev='',
newrev='',
branch='master',
zuul_ref='Zref',
ref_url='http://gerrit.example.com/1',
event_id=event_id,
event_timestamp=update_time,
updated=update_time,
first_build_start_time=start_time,
last_build_end_time=end_time,
result='SUCCESS',
)
for build_num in range(2):
build_uuid = uuid.uuid4().hex
db_build = db_buildset.createBuild(
uuid=build_uuid,
job_name=f'job{build_num}',
start_time=start_time,
end_time=end_time,
result='SUCCESS',
voting=True,
)
for art_num in range(2):
db_build.createArtifact(
name=f'artifact{art_num}',
url='http://example.com',
)
for provides_num in range(2):
db_build.createProvides(
name=f'item{provides_num}',
)
for event_num in range(2):
db_build.createBuildEvent(
event_type=f'event{event_num}',
event_time=start_time,
)
def _query(self, db, model):
table = model.__table__
q = db.session().query(model).order_by(table.c.id.desc())
try:
return q.all()
except sqlalchemy.orm.exc.NoResultFound:
return []
def _getBuildsets(self, db):
return self._query(db, db.connection.buildSetModel)
def _getBuilds(self, db):
return self._query(db, db.connection.buildModel)
def _getProvides(self, db):
return self._query(db, db.connection.providesModel)
def _getArtifacts(self, db):
return self._query(db, db.connection.artifactModel)
def _getBuildEvents(self, db):
return self._query(db, db.connection.buildEventModel)
def _setup(self):
config_file = os.path.join(self.test_root, 'zuul.conf')
with open(config_file, 'w') as f:
self.config.write(f)
update_time = (datetime.datetime.utcnow() -
datetime.timedelta(minutes=self.num_buildsets))
for x in range(self.num_buildsets):
update_time = update_time + datetime.timedelta(minutes=1)
self._createBuildset(update_time)
connection = self.scheds.first.sched.sql.connection
with connection.getSession() as db:
buildsets = self._getBuildsets(db)
builds = self._getBuilds(db)
artifacts = self._getArtifacts(db)
provides = self._getProvides(db)
events = self._getBuildEvents(db)
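        # Each buildset carries 2 builds, and each build carries
        # 2 artifacts, 2 provides entries and 2 events.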
self.assertEqual(len(buildsets), self.num_buildsets)
self.assertEqual(len(builds), 2 * self.num_buildsets)
self.assertEqual(len(artifacts), 4 * self.num_buildsets)
self.assertEqual(len(provides), 4 * self.num_buildsets)
self.assertEqual(len(events), 4 * self.num_buildsets)
for build in builds:
self.log.debug("Build %s %s %s",
build, build.start_time, build.end_time)
return config_file
def test_db_prune_before(self):
# Test pruning buildsets before a specific date
config_file = self._setup()
connection = self.scheds.first.sched.sql.connection
# Builds are reverse ordered; 0 is most recent
buildsets = connection.getBuildsets()
start_time = buildsets[0].first_build_start_time
self.log.debug("Cutoff %s", start_time)
# Use the default batch size (omit --batch-size arg)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'prune-database',
'--before', str(start_time),
],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
with connection.getSession() as db:
buildsets = self._getBuildsets(db)
builds = self._getBuilds(db)
artifacts = self._getArtifacts(db)
provides = self._getProvides(db)
events = self._getBuildEvents(db)
for build in builds:
self.log.debug("Build %s %s %s",
build, build.start_time, build.end_time)
self.assertEqual(len(buildsets), 1)
self.assertEqual(len(builds), 2)
self.assertEqual(len(artifacts), 4)
self.assertEqual(len(provides), 4)
self.assertEqual(len(events), 4)
def test_db_prune_older_than(self):
# Test pruning buildsets older than a relative time
config_file = self._setup()
connection = self.scheds.first.sched.sql.connection
# We use 0d as the relative time here since the earliest we
# support is 1d and that's tricky in unit tests. The
# prune_before test handles verifying that we don't just
# always delete everything.
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'prune-database',
'--older-than', '0d',
'--batch-size', '5',
],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
with connection.getSession() as db:
buildsets = self._getBuildsets(db)
builds = self._getBuilds(db)
artifacts = self._getArtifacts(db)
provides = self._getProvides(db)
events = self._getBuildEvents(db)
self.assertEqual(len(buildsets), 0)
self.assertEqual(len(builds), 0)
self.assertEqual(len(artifacts), 0)
self.assertEqual(len(provides), 0)
self.assertEqual(len(events), 0)
class TestDBPruneMysql(DBPruneTestCase):
config_file = 'zuul-sql-driver-mysql.conf'
class TestDBPrunePostgres(DBPruneTestCase):
config_file = 'zuul-sql-driver-postgres.conf'
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_client.py
|
test_client.py
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Rackspace Australia
# Copyright 2021-2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import urllib.parse
import socket
import textwrap
import time
import jwt
import sys
import subprocess
import threading
from unittest import skip
import requests
from zuul.lib.statsd import normalize_statsd_name
from zuul.zk.locks import tenant_write_lock
import zuul.web
from tests.base import ZuulTestCase, AnsibleZuulTestCase
from tests.base import ZuulWebFixture, FIXTURE_DIR, iterate_timeout
from tests.base import simple_layout
class FakeConfig(object):
def __init__(self, config):
self.config = config or {}
def has_option(self, section, option):
return option in self.config.get(section, {})
def get(self, section, option):
return self.config.get(section, {}).get(option)
class BaseWithWeb(ZuulTestCase):
config_ini_data = {}
def startWebServer(self):
self.zuul_ini_config = FakeConfig(self.config_ini_data)
# Start the web server
self.web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root,
info=zuul.model.WebInfo.fromConfig(
self.zuul_ini_config)))
self.executor_server.hold_jobs_in_build = True
self.host = 'localhost'
self.port = self.web.port
# Wait until web server is started
while True:
try:
with socket.create_connection((self.host, self.port)):
break
            except ConnectionRefusedError:
                time.sleep(0.1)
self.base_url = "http://{host}:{port}".format(
host=self.host, port=self.port)
def get_url(self, url, *args, **kwargs):
return requests.get(
urllib.parse.urljoin(self.base_url, url), *args, **kwargs)
def post_url(self, url, *args, **kwargs):
return requests.post(
urllib.parse.urljoin(self.base_url, url), *args, **kwargs)
def delete_url(self, url, *args, **kwargs):
return requests.delete(
urllib.parse.urljoin(self.base_url, url), *args, **kwargs)
def options_url(self, url, *args, **kwargs):
return requests.options(
urllib.parse.urljoin(self.base_url, url), *args, **kwargs)
class BaseTestWeb(BaseWithWeb):
tenant_config_file = 'config/single-tenant/main.yaml'
def setUp(self):
super(BaseTestWeb, self).setUp()
self.startWebServer()
def add_base_changes(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
def tearDown(self):
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
super(BaseTestWeb, self).tearDown()
class TestWeb(BaseTestWeb):
def test_web_index(self):
"Test that we can retrieve the index page"
resp = self.get_url('api')
data = resp.json()
# no point checking the whole thing; just make sure _something_ we
# expect is here
self.assertIn('info', data)
def test_web_status(self):
"Test that we can retrieve JSON status info"
self.add_base_changes()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('project-merge')
self.waitUntilSettled()
resp = self.get_url("api/tenant/tenant-one/status")
self.assertIn('Content-Length', resp.headers)
self.assertIn('Content-Type', resp.headers)
self.assertEqual(
'application/json; charset=utf-8', resp.headers['Content-Type'])
self.assertIn('Access-Control-Allow-Origin', resp.headers)
self.assertIn('Cache-Control', resp.headers)
self.assertIn('Last-Modified', resp.headers)
self.assertTrue(resp.headers['Last-Modified'].endswith(' GMT'))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
data = resp.json()
status_jobs = []
self.assertEqual(
data["connection_event_queues"]["gerrit"]["length"], 0)
for p in data['pipelines']:
self.assertEqual(p["trigger_events"], 0)
self.assertEqual(p["result_events"], 0)
self.assertEqual(p["management_events"], 0)
self.assertIn('manager', p, p)
self.assertTrue(len(p.get('triggers', [])) > 0, p)
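            # Dependent pipelines (gate, conflict) have an active window
            # (default 20); other pipelines report a window of 0.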
for q in p['change_queues']:
if p['name'] in ['gate', 'conflict']:
self.assertEqual(q['window'], 20)
else:
self.assertEqual(q['window'], 0)
# This test uses unbranched queues so validate that the branch
# information is missing.
self.assertIsNone(q['branch'])
for head in q['heads']:
for change in head:
self.assertIn(
'review.example.com/org/project',
change['project_canonical'])
self.assertTrue(change['active'])
self.assertIn(change['id'], ('1,1', '2,1', '3,1'))
for job in change['jobs']:
status_jobs.append(job)
self.assertEqual('project-merge', status_jobs[0]['name'])
# TODO(mordred) pull uuids from self.builds
self.assertEqual(
'stream/{uuid}?logfile=console.log'.format(
uuid=status_jobs[0]['uuid']),
status_jobs[0]['url'])
self.assertEqual(
'finger://{hostname}/{uuid}'.format(
hostname=self.executor_server.hostname,
uuid=status_jobs[0]['uuid']),
status_jobs[0]['finger_url'])
self.assertEqual(
'https://zuul.example.com/t/tenant-one/build/{uuid}'.format(
uuid=status_jobs[0]['uuid']),
status_jobs[0]['report_url'])
self.assertEqual('project-test1', status_jobs[1]['name'])
self.assertEqual(
'stream/{uuid}?logfile=console.log'.format(
uuid=status_jobs[1]['uuid']),
status_jobs[1]['url'])
self.assertEqual(
'finger://{hostname}/{uuid}'.format(
hostname=self.executor_server.hostname,
uuid=status_jobs[1]['uuid']),
status_jobs[1]['finger_url'])
self.assertEqual(
'https://zuul.example.com/t/tenant-one/build/{uuid}'.format(
uuid=status_jobs[1]['uuid']),
status_jobs[1]['report_url'])
self.assertEqual('project-test2', status_jobs[2]['name'])
self.assertEqual(
'stream/{uuid}?logfile=console.log'.format(
uuid=status_jobs[2]['uuid']),
status_jobs[2]['url'])
self.assertEqual(
'finger://{hostname}/{uuid}'.format(
hostname=self.executor_server.hostname,
uuid=status_jobs[2]['uuid']),
status_jobs[2]['finger_url'])
self.assertEqual(
'https://zuul.example.com/t/tenant-one/build/{uuid}'.format(
uuid=status_jobs[2]['uuid']),
status_jobs[2]['report_url'])
# check job dependencies
self.assertIsNotNone(status_jobs[0]['dependencies'])
self.assertIsNotNone(status_jobs[1]['dependencies'])
self.assertIsNotNone(status_jobs[2]['dependencies'])
self.assertEqual(len(status_jobs[0]['dependencies']), 0)
self.assertEqual(len(status_jobs[1]['dependencies']), 1)
self.assertEqual(len(status_jobs[2]['dependencies']), 1)
self.assertIn('project-merge', status_jobs[1]['dependencies'])
self.assertIn('project-merge', status_jobs[2]['dependencies'])
hostname = normalize_statsd_name(socket.getfqdn())
self.assertReportedStat(
f'zuul.web.server.{hostname}.threadpool.idle', kind='g')
self.assertReportedStat(
f'zuul.web.server.{hostname}.threadpool.queue', kind='g')
def test_web_components(self):
"Test that we can retrieve the list of connected components"
resp = self.get_url("api/components")
data = resp.json()
# The list should contain one of each kind: executor, scheduler, web
self.assertEqual(len(data), 3)
self.assertEqual(len(data["executor"]), 1)
self.assertEqual(len(data["scheduler"]), self.scheduler_count)
self.assertEqual(len(data["web"]), 1)
# Each component should contain hostname and state information
for key in ["hostname", "state", "version"]:
self.assertIn(key, data["executor"][0])
self.assertIn(key, data["scheduler"][0])
self.assertIn(key, data["web"][0])
def test_web_tenants(self):
"Test that we can retrieve JSON status info"
# Disable tenant list caching
self.web.web.api.cache_expiry = 0
self.add_base_changes()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('project-merge')
self.waitUntilSettled()
resp = self.get_url("api/tenants")
self.assertIn('Content-Length', resp.headers)
self.assertIn('Content-Type', resp.headers)
self.assertEqual(
'application/json; charset=utf-8', resp.headers['Content-Type'])
# self.assertIn('Access-Control-Allow-Origin', resp.headers)
# self.assertIn('Cache-Control', resp.headers)
# self.assertIn('Last-Modified', resp.headers)
data = resp.json()
self.assertEqual('tenant-one', data[0]['name'])
self.assertEqual(3, data[0]['projects'])
self.assertEqual(3, data[0]['queue'])
# release jobs and check if the queue size is 0
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
data = self.get_url("api/tenants").json()
self.assertEqual('tenant-one', data[0]['name'])
self.assertEqual(3, data[0]['projects'])
self.assertEqual(0, data[0]['queue'])
# test that non-live items are not counted
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
        B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
data = self.get_url("api/tenants").json()
self.assertEqual('tenant-one', data[0]['name'])
self.assertEqual(3, data[0]['projects'])
self.assertEqual(1, data[0]['queue'])
def test_web_connections_list(self):
data = self.get_url('api/connections').json()
connection = {
'driver': 'gerrit',
'name': 'gerrit',
'baseurl': 'https://review.example.com',
'canonical_hostname': 'review.example.com',
'server': 'review.example.com',
'ssh_server': 'review.example.com',
'port': 29418,
}
self.assertEqual([connection], data)
def test_web_bad_url(self):
# do we redirect to index.html
resp = self.get_url("status/foo")
self.assertEqual(200, resp.status_code)
def test_web_find_change(self):
# can we filter by change id
self.add_base_changes()
data = self.get_url("api/tenant/tenant-one/status/change/1,1").json()
self.assertEqual(1, len(data), data)
self.assertEqual("org/project", data[0]['project'])
data = self.get_url("api/tenant/tenant-one/status/change/2,1").json()
self.assertEqual(1, len(data), data)
self.assertEqual("org/project1", data[0]['project'], data)
@simple_layout('layouts/nodeset-alternatives.yaml')
def test_web_find_job_nodeset_alternatives(self):
# test a complex nodeset
data = self.get_url('api/tenant/tenant-one/job/test-job').json()
self.assertEqual([
{'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': [],
'cleanup_run': [],
'deduplicate': 'auto',
'dependencies': [],
'description': None,
'extra_variables': {},
'files': [],
'final': False,
'group_variables': {},
'host_variables': {},
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'name': 'test-job',
'nodeset_alternatives': [{'alternatives': [],
'groups': [],
'name': 'fast-nodeset',
'nodes': [{'aliases': [],
'comment': None,
'hold_job': None,
'id': None,
'label': 'fast-label',
'name': 'controller',
'requestor': None,
'state': 'unknown',
'tenant_name': None,
'user_data': None}]},
{'alternatives': [],
'groups': [],
'name': '',
'nodes': [{'aliases': [],
'comment': None,
'hold_job': None,
'id': None,
'label': 'slow-label',
'name': 'controller',
'requestor': None,
'state': 'unknown',
'tenant_name': None,
'user_data': None}]}],
'override_checkout': None,
'parent': 'base',
'post_review': None,
'post_run': [],
'pre_run': [],
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [{'implicit': True,
'project_canonical_name':
'review.example.com/org/common-config',
'target_name': 'common-config',
'type': 'zuul'}],
'run': [],
'semaphores': [],
'source_context': {'branch': 'master',
'path': 'zuul.yaml',
'project': 'org/common-config'},
'tags': [],
'timeout': None,
'variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang',
}], data)
def test_web_find_job(self):
# can we fetch the variants for a single job
data = self.get_url('api/tenant/tenant-one/job/project-test1').json()
common_config_role = {
'implicit': True,
'project_canonical_name': 'review.example.com/common-config',
'target_name': 'common-config',
'type': 'zuul',
}
source_ctx = {
'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config',
}
run = [{
'path': 'playbooks/project-test1.yaml',
'roles': [{
'implicit': True,
'project_canonical_name': 'review.example.com/common-config',
'target_name': 'common-config',
'type': 'zuul'
}],
'secrets': [],
'semaphores': [],
'source_context': source_ctx,
}]
self.assertEqual([
{
'name': 'project-test1',
'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 4,
'branches': [],
'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'final': False,
'nodeset': {
'alternatives': [],
'groups': [],
'name': '',
'nodes': [{'comment': None,
'hold_job': None,
'id': None,
'label': 'label1',
'name': 'controller',
'aliases': [],
'requestor': None,
'state': 'unknown',
'tenant_name': None,
'user_data': None}],
},
'override_checkout': None,
'parent': 'base',
'post_review': None,
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [common_config_role],
'run': run,
'pre_run': [],
'post_run': [],
'cleanup_run': [],
'semaphores': [],
'source_context': source_ctx,
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang'
}, {
'name': 'project-test1',
'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': ['stable'],
'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'final': False,
'nodeset': {
'alternatives': [],
'groups': [],
'name': '',
'nodes': [{'comment': None,
'hold_job': None,
'id': None,
'label': 'label2',
'name': 'controller',
'aliases': [],
'requestor': None,
'state': 'unknown',
'tenant_name': None,
'user_data': None}],
},
'override_checkout': None,
'parent': 'base',
'post_review': None,
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [common_config_role],
'run': run,
'pre_run': [],
'post_run': [],
'cleanup_run': [],
'semaphores': [],
'source_context': source_ctx,
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': 'stable',
'voting': True,
'workspace_scheme': 'golang'
}], data)
data = self.get_url('api/tenant/tenant-one/job/test-job').json()
run[0]['path'] = 'playbooks/project-merge.yaml'
self.assertEqual([
{
'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': [],
'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
'final': False,
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'name': 'test-job',
'override_checkout': None,
'parent': 'base',
'post_review': None,
'protected': None,
'provides': [],
'required_projects': [
{'override_branch': None,
'override_checkout': None,
'project_name': 'review.example.com/org/project'}],
'requires': [],
'roles': [common_config_role],
'run': run,
'pre_run': [],
'post_run': [],
'cleanup_run': [],
'semaphores': [],
'source_context': source_ctx,
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang'
}], data)
def test_find_job_complete_playbooks(self):
# can we fetch the variants for a single job
data = self.get_url('api/tenant/tenant-one/job/complete-job').json()
def expected_pb(path):
return {
'path': path,
'roles': [{
'implicit': True,
'project_canonical_name':
'review.example.com/common-config',
'target_name': 'common-config',
'type': 'zuul'
}],
'secrets': [],
'semaphores': [],
'source_context': {
'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config',
}
}
self.assertEqual([
expected_pb("playbooks/run.yaml")
], data[0]['run'])
self.assertEqual([
expected_pb("playbooks/pre-run.yaml")
], data[0]['pre_run'])
self.assertEqual([
expected_pb("playbooks/post-run-01.yaml"),
expected_pb("playbooks/post-run-02.yaml")
], data[0]['post_run'])
self.assertEqual([
expected_pb("playbooks/cleanup-run.yaml")
], data[0]['cleanup_run'])
def test_web_nodes_list(self):
# can we fetch the nodes list
self.add_base_changes()
data = self.get_url('api/tenant/tenant-one/nodes').json()
self.assertGreater(len(data), 0)
self.assertEqual("test-provider", data[0]["provider"])
self.assertEqual("label1", data[0]["type"])
def test_web_labels_list(self):
# can we fetch the labels list
data = self.get_url('api/tenant/tenant-one/labels').json()
expected_list = [{'name': 'label1'}]
self.assertEqual(expected_list, data)
def test_web_pipeline_list(self):
# can we fetch the list of pipelines
data = self.get_url('api/tenant/tenant-one/pipelines').json()
gerrit_trigger = {'name': 'gerrit', 'driver': 'gerrit'}
timer_trigger = {'name': 'timer', 'driver': 'timer'}
expected_list = [
{'name': 'check', 'triggers': [gerrit_trigger]},
{'name': 'gate', 'triggers': [gerrit_trigger]},
{'name': 'post', 'triggers': [gerrit_trigger]},
{'name': 'periodic', 'triggers': [timer_trigger]},
]
self.assertEqual(expected_list, data)
def test_web_project_list(self):
# can we fetch the list of projects
data = self.get_url('api/tenant/tenant-one/projects').json()
expected_list = [
{'name': 'common-config', 'type': 'config'},
{'name': 'org/project', 'type': 'untrusted'},
{'name': 'org/project1', 'type': 'untrusted'},
{'name': 'org/project2', 'type': 'untrusted'}
]
for p in expected_list:
p["canonical_name"] = "review.example.com/%s" % p["name"]
p["connection_name"] = "gerrit"
self.assertEqual(expected_list, data)
def test_web_project_get(self):
# can we fetch project details
data = self.get_url(
'api/tenant/tenant-one/project/org/project1').json()
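        # Expected job variants for org/project1: each pipeline entry is
        # a list of variant lists, and the check and gate pipelines share
        # the same four jobs (project-merge plus three jobs that depend
        # on it).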
jobs = [[{'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': [],
'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
'final': False,
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'name': 'project-merge',
'override_checkout': None,
'parent': 'base',
'post_review': None,
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [],
'run': [],
'pre_run': [],
'post_run': [],
'cleanup_run': [],
'semaphores': [],
'source_context': {
'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config'},
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang'}],
[{'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': [],
'deduplicate': 'auto',
'dependencies': [{'name': 'project-merge',
'soft': False}],
'description': None,
'files': [],
'final': False,
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'name': 'project-test1',
'override_checkout': None,
'parent': 'base',
'post_review': None,
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [],
'run': [],
'pre_run': [],
'post_run': [],
'cleanup_run': [],
'semaphores': [],
'source_context': {
'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config'},
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang'}],
[{'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': [],
'deduplicate': 'auto',
'dependencies': [{'name': 'project-merge',
'soft': False}],
'description': None,
'files': [],
'final': False,
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'name': 'project-test2',
'override_checkout': None,
'parent': 'base',
'post_review': None,
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [],
'run': [],
'pre_run': [],
'post_run': [],
'cleanup_run': [],
'semaphores': [],
'source_context': {
'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config'},
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang'}],
[{'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': [],
'deduplicate': 'auto',
'dependencies': [{'name': 'project-merge',
'soft': False}],
'description': None,
'files': [],
'final': False,
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'name': 'project1-project2-integration',
'override_checkout': None,
'parent': 'base',
'post_review': None,
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [],
'run': [],
'pre_run': [],
'post_run': [],
'cleanup_run': [],
'semaphores': [],
'source_context': {
'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config'},
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang'}]]
self.assertEqual(
{
'canonical_name': 'review.example.com/org/project1',
'connection_name': 'gerrit',
'name': 'org/project1',
'metadata': {
'is_template': False,
'default_branch': 'master',
'merge_mode': 'merge-resolve',
'queue_name': 'integrated',
},
'configs': [{
'source_context': {'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config'},
'is_template': False,
'templates': [],
'default_branch': None,
'queue_name': 'integrated',
'merge_mode': None,
'pipelines': [{
'name': 'check',
'jobs': jobs,
}, {
'name': 'gate',
'jobs': jobs,
}, {'name': 'post',
'jobs': [[
{'abstract': False,
'ansible_split_streams': None,
'ansible_version': None,
'attempts': 3,
'branches': [],
'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
'final': False,
'intermediate': False,
'irrelevant_files': [],
'match_on_config_updates': True,
'name': 'project-post',
'override_checkout': None,
'parent': 'base',
'post_review': None,
'post_run': [],
'cleanup_run': [],
'pre_run': [],
'protected': None,
'provides': [],
'required_projects': [],
'requires': [],
'roles': [],
'run': [],
'semaphores': [],
'source_context': {'branch': 'master',
'path': 'zuul.yaml',
'project': 'common-config'},
'tags': [],
'timeout': None,
'variables': {},
'extra_variables': {},
'group_variables': {},
'host_variables': {},
'variant_description': '',
'voting': True,
'workspace_scheme': 'golang'}
]],
}
]
}]
}, data)
def test_web_keys(self):
with open(os.path.join(FIXTURE_DIR, 'public.pem'), 'rb') as f:
public_pem = f.read()
resp = self.get_url("api/tenant/tenant-one/key/org/project.pub")
self.assertEqual(resp.content, public_pem)
self.assertIn('text/plain', resp.headers.get('Content-Type'))
resp = self.get_url("api/tenant/non-tenant/key/org/project.pub")
self.assertEqual(404, resp.status_code)
resp = self.get_url("api/tenant/tenant-one/key/org/no-project.pub")
self.assertEqual(404, resp.status_code)
with open(os.path.join(FIXTURE_DIR, 'ssh.pub'), 'rb') as f:
public_ssh = f.read()
resp = self.get_url("api/tenant/tenant-one/project-ssh-key/"
"org/project.pub")
self.assertEqual(resp.content, public_ssh)
self.assertIn('text/plain', resp.headers.get('Content-Type'))
def test_web_404_on_unknown_tenant(self):
resp = self.get_url("api/tenant/non-tenant/status")
self.assertEqual(404, resp.status_code)
def test_autohold_info_404_on_invalid_id(self):
resp = self.get_url("api/tenant/tenant-one/autohold/12345")
self.assertEqual(404, resp.status_code)
def test_autohold_delete_401_on_invalid_id(self):
resp = self.delete_url("api/tenant/tenant-one/autohold/12345")
self.assertEqual(401, resp.status_code)
def test_autohold_info(self):
self.addAutohold('tenant-one', 'review.example.com/org/project',
'project-test2', '.*', 'reason text', 1, 600)
        # Use the autohold-list API to retrieve the request ID
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
request_id = autohold_requests[0]['id']
# Now try the autohold-info API
resp = self.get_url("api/tenant/tenant-one/autohold/%s" % request_id)
self.assertEqual(200, resp.status_code, resp.text)
request = resp.json()
self.assertEqual(request_id, request['id'])
self.assertEqual('tenant-one', request['tenant'])
self.assertIn('org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual(".*", request['ref_filter'])
self.assertEqual(1, request['max_count'])
self.assertEqual(0, request['current_count'])
self.assertEqual("reason text", request['reason'])
self.assertEqual([], request['nodes'])
        # Scoping the request to tenant-two should return "not found"
resp = self.get_url("api/tenant/tenant-two/autohold/%s" % request_id)
self.assertEqual(404, resp.status_code, resp.text)
def test_autohold_list(self):
"""test listing autoholds through zuul-web"""
self.addAutohold('tenant-one', 'review.example.com/org/project',
'project-test2', '.*', 'reason text', 1, 600)
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
ah_request = autohold_requests[0]
self.assertEqual('tenant-one', ah_request['tenant'])
self.assertIn('org/project', ah_request['project'])
self.assertEqual('project-test2', ah_request['job'])
self.assertEqual(".*", ah_request['ref_filter'])
self.assertEqual(1, ah_request['max_count'])
self.assertEqual(0, ah_request['current_count'])
self.assertEqual("reason text", ah_request['reason'])
self.assertEqual([], ah_request['nodes'])
# filter by project
resp = self.get_url(
"api/tenant/tenant-one/autohold?project=org/project2")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertEqual([], autohold_requests)
resp = self.get_url(
"api/tenant/tenant-one/autohold?project=org/project")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
ah_request = autohold_requests[0]
self.assertEqual('tenant-one', ah_request['tenant'])
self.assertIn('org/project', ah_request['project'])
self.assertEqual('project-test2', ah_request['job'])
self.assertEqual(".*", ah_request['ref_filter'])
self.assertEqual(1, ah_request['max_count'])
self.assertEqual(0, ah_request['current_count'])
self.assertEqual("reason text", ah_request['reason'])
self.assertEqual([], ah_request['nodes'])
# Unknown tenants return 404
resp = self.get_url(
"api/tenant/tenant-fifty/autohold")
self.assertEqual(404, resp.status_code, resp.text)
def test_admin_routes_404_by_default(self):
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(404, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(404, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(404, resp.status_code)
def test_jobs_list(self):
jobs = self.get_url("api/tenant/tenant-one/jobs").json()
self.assertEqual(len(jobs), 10)
resp = self.get_url("api/tenant/non-tenant/jobs")
self.assertEqual(404, resp.status_code)
def test_jobs_list_variants(self):
resp = self.get_url("api/tenant/tenant-one/jobs").json()
for job in resp:
if job['name'] in ["base", "noop"]:
variants = None
elif job['name'] == 'project-test1':
variants = [
{'parent': 'base'},
{'branches': ['stable'], 'parent': 'base'},
]
else:
variants = [{'parent': 'base'}]
self.assertEqual(variants, job.get('variants'))
def test_jobs_list_tags(self):
resp = self.get_url("api/tenant/tenant-one/jobs").json()
post_job = None
for job in resp:
if job['name'] == 'project-post':
post_job = job
break
self.assertIsNotNone(post_job)
self.assertEqual(['post'], post_job.get('tags'))
def test_web_job_noop(self):
job = self.get_url("api/tenant/tenant-one/job/noop").json()
self.assertEqual("noop", job[0]["name"])
@simple_layout('layouts/special-characters-job.yaml')
def test_web_job_special_characters(self):
job = self.get_url("api/tenant/tenant-one/job/a%40b%2Fc").json()
self.assertEqual("a@b/c", job[0]["name"])
def test_freeze_jobs(self):
        # Test that we can get a list of the jobs for a given
        # project+pipeline+branch.
resp = self.get_url(
"api/tenant/tenant-one/pipeline/check"
"/project/org/project1/branch/master/freeze-jobs")
freeze_jobs = [{
'name': 'project-merge',
'dependencies': [],
}, {
'name': 'project-test1',
'dependencies': [{
'name': 'project-merge',
'soft': False,
}],
}, {
'name': 'project-test2',
'dependencies': [{
'name': 'project-merge',
'soft': False,
}],
}, {
'name': 'project1-project2-integration',
'dependencies': [{
'name': 'project-merge',
'soft': False,
}],
}]
self.assertEqual(freeze_jobs, resp.json())
def test_freeze_jobs_set_includes_all_jobs(self):
        # When freezing a job set, we want to include all jobs, even those
        # with matcher requirements (such as required files), since we
        # can't evaluate those matchers ahead of time.
resp = self.get_url(
"api/tenant/tenant-one/pipeline/gate"
"/project/org/project/branch/master/freeze-jobs")
expected = {
'name': 'project-testfile',
'dependencies': [{
'name': 'project-merge',
'soft': False,
}],
}
self.assertIn(expected, resp.json())
def test_freeze_job(self):
resp = self.get_url(
"api/tenant/tenant-one/pipeline/check"
"/project/org/project1/branch/master/freeze-job/"
"project-test1")
job_params = {
'job': 'project-test1',
'ansible_split_streams': None,
'ansible_version': '6',
'timeout': None,
'post_timeout': None,
'items': [],
'projects': [],
'branch': 'master',
'cleanup_playbooks': [],
'nodeset': {
'alternatives': [],
'groups': [],
'name': '',
'nodes': [
{'aliases': [],
'comment': None,
'hold_job': None,
'id': None,
'label': 'label1',
'name': 'controller',
'requestor': None,
'state': 'unknown',
'tenant_name': None,
'user_data': None}]},
'override_branch': None,
'override_checkout': None,
'merge_repo_state_ref': None,
'extra_repo_state_ref': None,
'playbooks': [{
'connection': 'gerrit',
'project': 'common-config',
'branch': 'master',
'trusted': True,
'roles': [{
'target_name': 'common-config',
'type': 'zuul',
'project_canonical_name':
'review.example.com/common-config',
'implicit': True,
'project_default_branch': 'master',
'connection': 'gerrit',
'project': 'common-config',
}],
'secrets': {},
'semaphores': [],
'path': 'playbooks/project-test1.yaml',
}],
'pre_playbooks': [],
'post_playbooks': [],
'ssh_keys': [],
'vars': {},
'extra_vars': {},
'host_vars': {},
'group_vars': {},
'secret_vars': None,
'zuul': {
'_inheritance_path': [
'<Job base branches: None source: '
'common-config/zuul.yaml@master#53>',
'<Job project-test1 branches: None source: '
'common-config/zuul.yaml@master#66>',
'<Job project-test1 branches: None source: '
'common-config/zuul.yaml@master#138>'],
'build': '00000000000000000000000000000000',
'buildset': None,
'branch': 'master',
'ref': None,
'pipeline': 'check',
'post_review': False,
'job': 'project-test1',
'voting': True,
'project': {
'name': 'org/project1',
'short_name': 'project1',
'canonical_hostname': 'review.example.com',
'canonical_name': 'review.example.com/org/project1',
'src_dir': 'src/review.example.com/org/project1',
},
'tenant': 'tenant-one',
'timeout': None,
'jobtags': [],
'branch': 'master',
'projects': {},
'items': [],
'change_url': None,
'child_jobs': [],
'event_id': None,
},
'workspace_scheme': 'golang',
}
self.assertEqual(job_params, resp.json())
@simple_layout('layouts/noop-job.yaml')
def test_freeze_noop_job(self):
resp = self.get_url(
"api/tenant/tenant-one/pipeline/gate"
"/project/org/noop-project/branch/master/freeze-job/"
"noop")
job_params = {
'ansible_split_streams': None,
'ansible_version': '6',
'branch': 'master',
'extra_vars': {},
'group_vars': {},
'host_vars': {},
'items': [],
'job': 'noop',
'nodeset': {'alternatives': [],
'groups': [], 'name': '', 'nodes': []},
'override_branch': None,
'override_checkout': None,
'post_timeout': None,
'projects': [],
'merge_repo_state_ref': None,
'extra_repo_state_ref': None,
'secret_vars': None,
'ssh_keys': [],
'timeout': None,
'vars': {},
'workspace_scheme': 'golang',
'zuul': {
'_inheritance_path': [
'<Job noop branches: None source: None#0>',
'<Job noop branches: None source: '
'org/common-config/zuul.yaml@master#22>'],
'branch': 'master',
'build': '00000000000000000000000000000000',
'buildset': None,
'change_url': None,
'child_jobs': [],
'event_id': None,
'items': [],
'job': 'noop',
'jobtags': [],
'pipeline': 'gate',
'post_review': False,
'project': {
'canonical_hostname': 'review.example.com',
'canonical_name': 'review.example.com/org/noop-project',
'name': 'org/noop-project',
'short_name': 'noop-project',
'src_dir': 'src/review.example.com/org/noop-project'},
'projects': {},
'ref': None,
'tenant': 'tenant-one',
'timeout': None,
'voting': True}}
self.assertEqual(job_params, resp.json())
class TestWebStatusDisplayBranch(BaseTestWeb):
tenant_config_file = 'config/change-queues/main.yaml'
def add_changes(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
D = self.fake_gerrit.addFakeChange('org/project4', 'master', 'D')
D.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
def test_web_status_display_branch(self):
"Test that we can retrieve JSON status info with branch name"
self.add_changes()
self.waitUntilSettled()
resp = self.get_url("api/tenant/tenant-one/status")
self.executor_server.release()
data = resp.json()
for p in data['pipelines']:
if p['name'] == 'gate':
for q in p['change_queues']:
# 'per-branch: true' is configured for all gate queues
self.assertEqual(q['branch'], 'master')
class TestWebMultiTenant(BaseTestWeb):
tenant_config_file = 'config/multi-tenant/main.yaml'
def test_tenant_reconfigure_command(self):
        # The 'zuul-scheduler tenant-reconfigure' and full-reconfigure
        # commands are used to correct problems, and as such they clear the
# branch cache. Until the reconfiguration is complete,
# zuul-web will be unable to load configuration for any tenant
# which has projects that have been cleared from the branch
# cache. This test verifies that we retry that operation
# after encountering missing branch errors.
sched = self.scheds.first.sched
web = self.web.web
# Don't perform any automatic config updates on zuul web so
# that we can control the sequencing.
self.web.web._system_config_running = False
self.web.web.system_config_cache_wake_event.set()
self.web.web.system_config_thread.join()
first_state = sched.tenant_layout_state.get('tenant-one')
self.assertEqual(first_state,
web.local_layout_state.get('tenant-one'))
data = self.get_url('api/tenant/tenant-one/jobs').json()
self.assertEqual(len(data), 4)
# Reconfigure tenant-one so that the layout state will be
# different and we can start a layout update in zuul-web
# later.
self.log.debug("Reconfigure tenant-one")
self.scheds.first.tenantReconfigure(['tenant-one'])
self.waitUntilSettled()
self.log.debug("Done reconfigure tenant-one")
second_state = sched.tenant_layout_state.get('tenant-one')
self.assertEqual(second_state,
sched.local_layout_state.get('tenant-one'))
self.assertEqual(first_state,
web.local_layout_state.get('tenant-one'))
self.log.debug("Grab write lock for tenant-two")
with tenant_write_lock(self.zk_client, 'tenant-two') as lock:
            # Start a reconfiguration of tenant-two and allow it to
            # proceed past the point where the branch cache is cleared,
            # until it is waiting on the lock we hold.
self.scheds.first.tenantReconfigure(
['tenant-two'], command_socket=True)
for _ in iterate_timeout(30, "reconfiguration to start"):
if 'RECONFIG' in lock.contenders():
break
# Now that the branch cache is cleared as part of the
# tenant-two reconfiguration, allow zuul-web to
# reconfigure tenant-one. This should produce an error
# because of the missing branch cache.
self.log.debug("Web update layout 1")
self.web.web.updateSystemConfig()
self.assertFalse(self.web.web.updateLayout())
self.log.debug("Web update layout done")
self.assertEqual(second_state,
sched.local_layout_state.get('tenant-one'))
self.assertEqual(first_state,
web.local_layout_state.get('tenant-one'))
# Make sure we can still access tenant-one's config via
# zuul-web
data = self.get_url('api/tenant/tenant-one/jobs').json()
self.assertEqual(len(data), 4)
self.log.debug("Release write lock for tenant-two")
for _ in iterate_timeout(30, "reconfiguration to finish"):
if 'RECONFIG' not in lock.contenders():
break
self.log.debug("Web update layout 2")
self.web.web.updateSystemConfig()
self.web.web.updateLayout()
self.log.debug("Web update layout done")
# Depending on tenant order, we may need to run one more time
self.log.debug("Web update layout 3")
self.web.web.updateSystemConfig()
self.assertTrue(self.web.web.updateLayout())
self.log.debug("Web update layout done")
self.assertEqual(second_state,
sched.local_layout_state.get('tenant-one'))
self.assertEqual(second_state,
web.local_layout_state.get('tenant-one'))
data = self.get_url('api/tenant/tenant-one/jobs').json()
self.assertEqual(len(data), 4)
def test_web_labels_allowed_list(self):
labels = ["tenant-one-label", "fake", "tenant-two-label"]
self.fake_nodepool.registerLauncher(labels, "FakeLauncher2")
# Tenant-one has label restriction in place on tenant-two
res = self.get_url('api/tenant/tenant-one/labels').json()
self.assertEqual([{'name': 'fake'}, {'name': 'tenant-one-label'}], res)
# Tenant-two has label restriction in place on tenant-one
expected = ["label1", "fake", "tenant-two-label"]
res = self.get_url('api/tenant/tenant-two/labels').json()
self.assertEqual(
list(map(lambda x: {'name': x}, sorted(expected))), res)
def test_tenant_add_remove(self):
"Test that tenants are correctly added/removed to/from the layout"
# Disable tenant list caching
self.web.web.api.cache_expiry = 0
resp = self.get_url("api/tenants")
data = resp.json()
self.assertEqual(sorted(d["name"] for d in data),
sorted(["tenant-one", "tenant-two", "tenant-three"]))
self.newTenantConfig('config/multi-tenant/main-reconfig.yaml')
self.scheds.first.smartReconfigure(command_socket=True)
self.waitUntilSettled()
for _ in iterate_timeout(
10, "tenants to be updated from zuul-web"):
if ('tenant-three' not in self.web.web.local_layout_state and
'tenant-four' in self.web.web.local_layout_state):
break
self.assertNotIn('tenant-three', self.web.web.abide.tenants)
self.assertIn('tenant-four', self.web.web.abide.tenants)
resp = self.get_url("api/tenants")
data = resp.json()
self.assertEqual(sorted(d["name"] for d in data),
sorted(["tenant-one", "tenant-two", "tenant-four"]))
class TestWebGlobalSemaphores(BaseTestWeb):
tenant_config_file = 'config/global-semaphores-config/main.yaml'
def test_web_semaphores(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertBuilds([
dict(name='test-global-semaphore', changes='1,1'),
dict(name='test-common-semaphore', changes='1,1'),
dict(name='test-project1-semaphore', changes='1,1'),
dict(name='test-global-semaphore', changes='2,1'),
dict(name='test-common-semaphore', changes='2,1'),
dict(name='test-project2-semaphore', changes='2,1'),
])
tenant1_buildset_uuid = self.builds[0].parameters['zuul']['buildset']
data = self.get_url('api/tenant/tenant-one/semaphores').json()
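        # tenant-one should see detailed holder information for its own
        # builds, but only an aggregate count for holders in other
        # tenants.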
expected = [
{'name': 'common-semaphore',
'global': False,
'max': 10,
'holders': {
'count': 1,
'this_tenant': [
{'buildset_uuid': tenant1_buildset_uuid,
'job_name': 'test-common-semaphore'}
],
'other_tenants': 0
}},
{'name': 'global-semaphore',
'global': True,
'max': 100,
'holders': {
'count': 2,
'this_tenant': [
{'buildset_uuid': tenant1_buildset_uuid,
'job_name': 'test-global-semaphore'}
],
'other_tenants': 1
}},
{'name': 'project1-semaphore',
'global': False,
'max': 11,
'holders': {
'count': 1,
'this_tenant': [
{'buildset_uuid': tenant1_buildset_uuid,
'job_name': 'test-project1-semaphore'}
],
'other_tenants': 0
}}
]
self.assertEqual(expected, data)
class TestEmptyConfig(BaseTestWeb):
tenant_config_file = 'config/empty-config/main.yaml'
def test_empty_config_startup(self):
# Test that we can bootstrap a tenant with an empty config
resp = self.get_url("api/tenant/tenant-one/jobs").json()
self.assertEqual(len(resp), 1)
self.commitConfigUpdate(
'common-config',
'config/empty-config/git/common-config/new-zuul.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
layout_scheduler = self.scheds.first.sched.local_layout_state.get(
'tenant-one')
for _ in iterate_timeout(10, "local layout of zuul-web to be updated"):
layout_web = self.web.web.local_layout_state.get('tenant-one')
if layout_web == layout_scheduler:
break
resp = self.get_url("api/tenant/tenant-one/jobs").json()
self.assertEqual(len(resp), 3)
class TestWebSecrets(BaseTestWeb):
tenant_config_file = 'config/secrets/main.yaml'
def test_web_find_job_secret(self):
data = self.get_url('api/tenant/tenant-one/job/project1-secret').json()
run = data[0]['run']
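        # Only the secret's name and its alias are exposed through the
        # job API.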
secret = {'name': 'project1_secret', 'alias': 'secret_name'}
self.assertEqual([secret], run[0]['secrets'])
def test_freeze_job_redacted(self):
# Test that ssh_keys and secrets are redacted
resp = self.get_url(
"api/tenant/tenant-one/pipeline/check"
"/project/org/project1/branch/master/freeze-job/"
"project1-secret").json()
self.assertEqual(
{'secret_name': 'REDACTED'}, resp['playbooks'][0]['secrets'])
self.assertEqual('REDACTED', resp['ssh_keys'][0])
class TestInfo(BaseTestWeb):
config_file = 'zuul-sql-driver-mysql.conf'
def setUp(self):
super(TestInfo, self).setUp()
web_config = self.config_ini_data.get('web', {})
self.websocket_url = web_config.get('websocket_url')
self.stats_url = web_config.get('stats_url')
statsd_config = self.config_ini_data.get('statsd', {})
self.stats_prefix = statsd_config.get('prefix')
def _expected_info(self):
return {
"info": {
"capabilities": {
"job_history": True,
"auth": {
"realms": {},
"default_realm": None,
"read_protected": False,
}
},
"stats": {
"url": self.stats_url,
"prefix": self.stats_prefix,
"type": "graphite",
},
"websocket_url": self.websocket_url,
}
}
def test_info(self):
info = self.get_url("api/info").json()
self.assertEqual(
info, self._expected_info())
def test_tenant_info(self):
info = self.get_url("api/tenant/tenant-one/info").json()
expected_info = self._expected_info()
expected_info['info']['tenant'] = 'tenant-one'
self.assertEqual(
info, expected_info)
class TestWebCapabilitiesInfo(TestInfo):
config_file = 'zuul-admin-web-oidc.conf'
def _expected_info(self):
info = super(TestWebCapabilitiesInfo, self)._expected_info()
info['info']['capabilities']['auth'] = {
'realms': {
'myOIDC1': {
'authority': 'http://oidc1',
'client_id': 'zuul',
'type': 'JWT',
'scope': 'openid profile',
'driver': 'OpenIDConnect',
'load_user_info': True,
},
'myOIDC2': {
'authority': 'http://oidc2',
'client_id': 'zuul',
'type': 'JWT',
'scope': 'openid profile email special-scope',
'driver': 'OpenIDConnect',
'load_user_info': True,
},
'zuul.example.com': {
'authority': 'zuul_operator',
'client_id': 'zuul.example.com',
'type': 'JWT',
'driver': 'HS256',
}
},
'default_realm': 'myOIDC1',
'read_protected': False,
}
return info
class TestTenantAuthRealmInfo(TestWebCapabilitiesInfo):
tenant_config_file = 'config/authorization/rules-templating/main.yaml'
def test_tenant_info(self):
expected_info = self._expected_info()
info = self.get_url("api/tenant/tenant-zero/info").json()
expected_info['info']['tenant'] = 'tenant-zero'
expected_info['info']['capabilities']['auth']['default_realm'] =\
'myOIDC1'
self.assertEqual(expected_info,
info,
info)
info = self.get_url("api/tenant/tenant-one/info").json()
expected_info['info']['tenant'] = 'tenant-one'
expected_info['info']['capabilities']['auth']['default_realm'] =\
'myOIDC1'
self.assertEqual(expected_info,
info,
info)
info = self.get_url("api/tenant/tenant-two/info").json()
expected_info['info']['tenant'] = 'tenant-two'
expected_info['info']['capabilities']['auth']['default_realm'] =\
'myOIDC2'
self.assertEqual(expected_info,
info,
info)
class TestRootAuth(TestWebCapabilitiesInfo):
tenant_config_file = 'config/authorization/api-root/main.yaml'
def test_info(self):
# This overrides the test in TestInfo
expected_info = self._expected_info()
info = self.get_url("api/info").json()
expected_info['info']['capabilities']['auth']['default_realm'] =\
'myOIDC2'
self.assertEqual(expected_info, info)
def test_tenant_info(self):
expected_info = self._expected_info()
info = self.get_url("api/tenant/tenant-zero/info").json()
expected_info['info']['tenant'] = 'tenant-zero'
expected_info['info']['capabilities']['auth']['default_realm'] =\
'myOIDC1'
self.assertEqual(expected_info,
info,
info)
info = self.get_url("api/tenant/tenant-one/info").json()
expected_info['info']['tenant'] = 'tenant-one'
expected_info['info']['capabilities']['auth']['default_realm'] =\
'myOIDC1'
self.assertEqual(expected_info,
info,
info)
info = self.get_url("api/tenant/tenant-two/info").json()
expected_info['info']['tenant'] = 'tenant-two'
expected_info['info']['capabilities']['auth']['default_realm'] =\
'myOIDC2'
self.assertEqual(expected_info,
info,
info)
class TestTenantInfoConfigBroken(BaseTestWeb):
tenant_config_file = 'config/broken/main.yaml'
def test_tenant_info_broken_config(self):
config_errors = self.get_url(
"api/tenant/tenant-broken/config-errors").json()
self.assertEqual(
len(config_errors), 2)
self.assertEqual(
config_errors[0]['source_context']['project'], 'org/project3')
self.assertIn('Zuul encountered an error while accessing the repo '
'org/project3',
config_errors[0]['error'])
self.assertEqual(
config_errors[1]['source_context']['project'], 'org/project2')
self.assertEqual(
config_errors[1]['source_context']['branch'], 'master')
self.assertEqual(
config_errors[1]['source_context']['path'], '.zuul.yaml')
self.assertIn('Zuul encountered a syntax error',
config_errors[1]['error'])
resp = self.get_url("api/tenant/non-tenant/config-errors")
self.assertEqual(404, resp.status_code)
class TestBrokenConfigCache(BaseWithWeb):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_broken_config_cache(self):
# Delete the cached config files from ZK to simulate a
# scheduler encountering an error in reconfiguration.
path = '/zuul/config/cache/review.example.com%2Forg%2Fproject'
self.assertIsNotNone(self.zk_client.client.exists(path))
self.zk_client.client.delete(path, recursive=True)
self.startWebServer()
config_errors = self.get_url(
"api/tenant/tenant-one/config-errors").json()
self.assertIn('Configuration files missing',
config_errors[0]['error'])
class TestWebSocketInfo(TestInfo):
config_ini_data = {
'web': {
'websocket_url': 'wss://ws.example.com'
}
}
class TestGraphiteUrl(TestInfo):
config_ini_data = {
'statsd': {
'prefix': 'example'
},
'web': {
'stats_url': 'https://graphite.example.com',
}
}
class TestBuildInfo(BaseTestWeb):
config_file = 'zuul-sql-driver-mysql.conf'
tenant_config_file = 'config/sql-driver/main.yaml'
def test_web_list_builds(self):
# Generate some build records in the db.
self.add_base_changes()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
builds = self.get_url("api/tenant/tenant-one/builds").json()
self.assertEqual(len(builds), 6)
uuid = builds[0]['uuid']
build = self.get_url("api/tenant/tenant-one/build/%s" % uuid).json()
self.assertEqual(build['job_name'], builds[0]['job_name'])
resp = self.get_url("api/tenant/tenant-one/build/1234")
self.assertEqual(404, resp.status_code)
builds_query = self.get_url("api/tenant/tenant-one/builds?"
"project=org/project&"
"project=org/project1").json()
self.assertEqual(len(builds_query), 6)
self.assertEqual(builds_query[0]['nodeset'], 'test-nodeset')
resp = self.get_url("api/tenant/non-tenant/builds")
self.assertEqual(404, resp.status_code)
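        # Exercise the idx_min/idx_max pagination filters using the
        # lowest and highest build ids returned above.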
extrema = [int(builds[-1]['_id']), int(builds[0]['_id'])]
idx_min = min(extrema)
idx_max = max(extrema)
builds_query = self.get_url("api/tenant/tenant-one/builds?"
"idx_max=%i" % idx_min).json()
self.assertEqual(len(builds_query), 1, builds_query)
builds_query = self.get_url("api/tenant/tenant-one/builds?"
"idx_min=%i" % idx_min).json()
self.assertEqual(len(builds_query), len(builds), builds_query)
builds_query = self.get_url("api/tenant/tenant-one/builds?"
"idx_max=%i" % idx_max).json()
self.assertEqual(len(builds_query), len(builds), builds_query)
builds_query = self.get_url("api/tenant/tenant-one/builds?"
"idx_min=%i" % idx_max).json()
self.assertEqual(len(builds_query), 1, builds_query)
def test_web_list_skipped_builds(self):
# Test the exclude_result filter
# Generate some build records in the db.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.executor_server.failJob('project-merge', A)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
builds = self.get_url("api/tenant/tenant-one/builds").json()
builds.sort(key=lambda x: x['job_name'])
self.assertEqual(len(builds), 3)
self.assertEqual(builds[0]['job_name'], 'project-merge')
self.assertEqual(builds[1]['job_name'], 'project-test1')
self.assertEqual(builds[2]['job_name'], 'project-test2')
self.assertEqual(builds[0]['result'], 'FAILURE')
self.assertEqual(builds[1]['result'], 'SKIPPED')
self.assertEqual(builds[2]['result'], 'SKIPPED')
self.assertIsNone(builds[0]['error_detail'])
self.assertEqual(builds[1]['error_detail'],
'Skipped due to failed job project-merge')
self.assertEqual(builds[2]['error_detail'],
'Skipped due to failed job project-merge')
builds = self.get_url("api/tenant/tenant-one/builds?"
"exclude_result=SKIPPED").json()
self.assertEqual(len(builds), 1)
self.assertEqual(builds[0]['job_name'], 'project-merge')
self.assertEqual(builds[0]['result'], 'FAILURE')
builds = self.get_url("api/tenant/tenant-one/builds?"
"result=SKIPPED&result=FAILURE").json()
builds.sort(key=lambda x: x['job_name'])
self.assertEqual(len(builds), 3)
self.assertEqual(builds[0]['job_name'], 'project-merge')
self.assertEqual(builds[1]['job_name'], 'project-test1')
self.assertEqual(builds[2]['job_name'], 'project-test2')
self.assertEqual(builds[0]['result'], 'FAILURE')
self.assertEqual(builds[1]['result'], 'SKIPPED')
self.assertEqual(builds[2]['result'], 'SKIPPED')
def test_web_badge(self):
# Generate some build records in the db.
self.add_base_changes()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
        # Now request the badge for the buildsets
result = self.get_url("api/tenant/tenant-one/badge")
self.log.error(result.content)
result.raise_for_status()
self.assertTrue(result.text.startswith('<svg '))
self.assertIn('passing', result.text)
# Generate a failing record
self.executor_server.hold_jobs_in_build = True
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.failJob('project-merge', C)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
        # Request the badge again for the buildsets
result = self.get_url("api/tenant/tenant-one/badge")
self.log.error(result.content)
result.raise_for_status()
self.assertTrue(result.text.startswith('<svg '))
self.assertIn('failing', result.text)
def test_web_list_buildsets(self):
# Generate some build records in the db.
self.add_base_changes()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
buildsets = self.get_url("api/tenant/tenant-one/buildsets").json()
self.assertEqual(2, len(buildsets))
project_bs = [x for x in buildsets if x["project"] == "org/project"][0]
buildset = self.get_url(
"api/tenant/tenant-one/buildset/%s" % project_bs['uuid']).json()
self.assertEqual(3, len(buildset["builds"]))
project_test1_build = [x for x in buildset["builds"]
if x["job_name"] == "project-test1"][0]
self.assertEqual('SUCCESS', project_test1_build['result'])
project_test2_build = [x for x in buildset["builds"]
if x["job_name"] == "project-test2"][0]
self.assertEqual('SUCCESS', project_test2_build['result'])
project_merge_build = [x for x in buildset["builds"]
if x["job_name"] == "project-merge"][0]
self.assertEqual('SUCCESS', project_merge_build['result'])
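        # Exercise the idx_min/idx_max pagination filters for buildsets
        # in the same way as for builds.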
extrema = [int(buildsets[-1]['_id']), int(buildsets[0]['_id'])]
idx_min = min(extrema)
idx_max = max(extrema)
buildsets_query = self.get_url("api/tenant/tenant-one/buildsets?"
"idx_max=%i" % idx_min).json()
self.assertEqual(len(buildsets_query), 1, buildsets_query)
buildsets_query = self.get_url("api/tenant/tenant-one/buildsets?"
"idx_min=%i" % idx_min).json()
self.assertEqual(len(buildsets_query), len(buildsets), buildsets_query)
buildsets_query = self.get_url("api/tenant/tenant-one/buildsets?"
"idx_max=%i" % idx_max).json()
self.assertEqual(len(buildsets_query), len(buildsets), buildsets_query)
buildsets_query = self.get_url("api/tenant/tenant-one/buildsets?"
"idx_min=%i" % idx_max).json()
self.assertEqual(len(buildsets_query), 1, buildsets_query)
@simple_layout('layouts/empty-check.yaml')
def test_build_error(self):
conf = textwrap.dedent(
"""
- job:
name: test-job
run: playbooks/dne.yaml
- project:
name: org/project
check:
jobs:
- test-job
""")
file_dict = {'.zuul.yaml': conf}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
builds = self.get_url("api/tenant/tenant-one/builds").json()
self.assertIn('Unable to find playbook',
builds[0]['error_detail'])
class TestArtifacts(BaseTestWeb, AnsibleZuulTestCase):
config_file = 'zuul-sql-driver-mysql.conf'
tenant_config_file = 'config/sql-driver/main.yaml'
def test_artifacts(self):
# Generate some build records in the db.
self.add_base_changes()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
build_query = self.get_url("api/tenant/tenant-one/builds?"
"project=org/project&"
"job_name=project-test1").json()
self.assertEqual(len(build_query), 1)
self.assertEqual(len(build_query[0]['artifacts']), 3)
arts = build_query[0]['artifacts']
arts.sort(key=lambda x: x['name'])
self.assertEqual(build_query[0]['artifacts'], [
{'url': 'http://example.com/docs',
'name': 'docs'},
{'url': 'http://logs.example.com/build/relative/docs',
'name': 'relative',
'metadata': {'foo': 'bar'}},
{'url': 'http://example.com/tarball',
'name': 'tarball'},
])
def test_buildset_artifacts(self):
self.add_base_changes()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
buildsets = self.get_url("api/tenant/tenant-one/buildsets").json()
project_bs = [x for x in buildsets if x["project"] == "org/project"][0]
buildset = self.get_url(
"api/tenant/tenant-one/buildset/%s" % project_bs['uuid']).json()
self.assertEqual(3, len(buildset["builds"]))
test1_build = [x for x in buildset["builds"]
if x["job_name"] == "project-test1"][0]
arts = test1_build['artifacts']
arts.sort(key=lambda x: x['name'])
self.assertEqual([
{'url': 'http://example.com/docs',
'name': 'docs'},
{'url': 'http://logs.example.com/build/relative/docs',
'name': 'relative',
'metadata': {'foo': 'bar'}},
{'url': 'http://example.com/tarball',
'name': 'tarball'},
], test1_build['artifacts'])
class TestTenantScopedWebApi(BaseTestWeb):
config_file = 'zuul-admin-web.conf'
def test_admin_routes_no_token(self):
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
def test_bad_key_JWT_token(self):
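        # Sign the token with a key that does not match the configured
        # secret, so every admin endpoint must reject it with a 401.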
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='OnlyZuulNoDana',
algorithm='HS256')
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
headers={'Authorization': 'Bearer %s' % token},
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
def test_bad_format_JWT_token(self):
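        # A syntactically invalid token must be rejected with a 401 as
        # well.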
token = 'thisisnotwhatatokenshouldbelike'
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
headers={'Authorization': 'Bearer %s' % token},
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
def test_expired_JWT_token(self):
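        # The 'exp' claim is set one hour in the past, so this otherwise
        # valid token is expired and must be rejected with a 401.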
authz = {'iss': 'zuul_operator',
'sub': 'testuser',
'aud': 'zuul.example.com',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) - 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
headers={'Authorization': 'Bearer %s' % token},
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
def test_valid_JWT_bad_tenants(self):
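        # The token is valid but only grants admin rights on unrelated
        # tenants, so requests against tenant-one must yield a 403.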
authz = {'iss': 'zuul_operator',
'sub': 'testuser',
'aud': 'zuul.example.com',
'zuul': {
'admin': ['tenant-six', 'tenant-ten', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
headers={'Authorization': 'Bearer %s' % token},
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(403, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(403, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(403, resp.status_code)
        # For autohold-delete, we must first make sure that an autohold
        # exists before attempting the delete.
good_authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {'admin': ['tenant-one', ]},
'exp': int(time.time()) + 3600}
args = {"reason": "some reason",
"count": 1,
'job': 'project-test2',
'change': None,
'ref': None,
'node_hold_expiration': None}
good_token = jwt.encode(good_authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(
'api/tenant/tenant-one/project/org/project/autohold',
headers={'Authorization': 'Bearer %s' % good_token},
json=args)
self.assertEqual(200, req.status_code, req.text)
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
request = autohold_requests[0]
resp = self.delete_url(
"api/tenant/tenant-one/autohold/%s" % request['id'],
headers={'Authorization': 'Bearer %s' % token})
self.assertEqual(403, resp.status_code)
def _test_autohold(self, args, code=200):
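        # Shared helper: POST an autohold request with a valid admin
        # token, assert the expected status code and, on success, return
        # the request as reported by the autohold-list API.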
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(
'api/tenant/tenant-one/project/org/project/autohold',
headers={'Authorization': 'Bearer %s' % token},
json=args)
self.assertEqual(code, req.status_code, req.text)
if code != 200:
return
data = req.json()
self.assertEqual(True, data)
# Check result
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
request = autohold_requests[0]
return request
def test_autohold_default_ref(self):
"""Test that autohold can be set through the admin web interface
with default ref values"""
args = {"reason": "some reason",
"count": 1,
'job': 'project-test2',
'change': None,
'ref': None,
'node_hold_expiration': None}
request = self._test_autohold(args)
self.assertEqual('tenant-one', request['tenant'])
# The matcher expects a canonical project name
self.assertEqual('review.example.com/org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual(".*", request['ref_filter'])
self.assertEqual("some reason", request['reason'])
self.assertEqual(1, request['max_count'])
def test_autohold_change(self):
"""Test that autohold can be set through the admin web interface
with a change supplied"""
args = {"reason": "some reason",
"count": 1,
'job': 'project-test2',
'change': '1',
'ref': None,
'node_hold_expiration': None}
request = self._test_autohold(args)
self.assertEqual('tenant-one', request['tenant'])
# The matcher expects a canonical project name
self.assertEqual('review.example.com/org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual("refs/changes/01/1/.*", request['ref_filter'])
self.assertEqual("some reason", request['reason'])
self.assertEqual(1, request['max_count'])
def test_autohold_ref(self):
"""Test that autohold can be set through the admin web interface
        with a ref supplied"""
args = {"reason": "some reason",
"count": 1,
'job': 'project-test2',
'change': None,
'ref': 'refs/tags/foo',
'node_hold_expiration': None}
request = self._test_autohold(args)
self.assertEqual('tenant-one', request['tenant'])
# The matcher expects a canonical project name
self.assertEqual('review.example.com/org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual("refs/tags/foo", request['ref_filter'])
self.assertEqual("some reason", request['reason'])
self.assertEqual(1, request['max_count'])
def test_autohold_change_and_ref(self):
"""Test that autohold can be set through the admin web interface
with a change supplied"""
args = {"reason": "some reason",
"count": 1,
'job': 'project-test2',
'change': '1',
'ref': 'refs/tags/foo',
'node_hold_expiration': None}
self._test_autohold(args, code=400)
def _init_autohold_delete(self, authz):
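        # Shared helper: encode the given authz claims, create one
        # autohold request and return its ID along with the token.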
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
self.addAutohold('tenant-one', 'review.example.com/org/project',
'project-test2', '.*', 'reason text', 1, 600)
        # Use the autohold-list API to retrieve the request ID
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
request_id = autohold_requests[0]['id']
return request_id, token
def test_autohold_delete_wrong_tenant(self):
"""Make sure authorization rules are applied"""
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
request_id, _ = self._init_autohold_delete(authz)
# now try the autohold-delete API
bad_authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-two', ]
},
'exp': int(time.time()) + 3600}
bad_token = jwt.encode(bad_authz, key='NoDanaOnlyZuul',
algorithm='HS256')
resp = self.delete_url(
"api/tenant/tenant-one/autohold/%s" % request_id,
headers={'Authorization': 'Bearer %s' % bad_token})
# Throw a "Forbidden" error, because user is authenticated but not
# authorized for tenant-one
self.assertEqual(403, resp.status_code, resp.text)
def test_autohold_delete_invalid_id(self):
"""Make sure authorization rules are applied"""
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
bad_token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
resp = self.delete_url(
"api/tenant/tenant-one/autohold/invalidid",
headers={'Authorization': 'Bearer %s' % bad_token})
self.assertEqual(404, resp.status_code, resp.text)
def test_autohold_delete(self):
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
request_id, token = self._init_autohold_delete(authz)
resp = self.delete_url(
"api/tenant/tenant-one/autohold/%s" % request_id,
headers={'Authorization': 'Bearer %s' % token})
self.assertEqual(204, resp.status_code, resp.text)
# autohold-list should be empty now
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertEqual([], autohold_requests)
def _test_enqueue(self, use_trigger=False):
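        # Enqueue change 1,1 into the gate pipeline via the REST API,
        # optionally passing the deprecated 'trigger' parameter.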
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
path = "api/tenant/%(tenant)s/project/%(project)s/enqueue"
enqueue_args = {'tenant': 'tenant-one',
'project': 'org/project', }
change = {'change': '1,1',
'pipeline': 'gate', }
if use_trigger:
change['trigger'] = 'gerrit'
req = self.post_url(path % enqueue_args,
headers={'Authorization': 'Bearer %s' % token},
json=change)
# The JSON returned is the same as the client's output
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertEqual(True, data)
self.waitUntilSettled()
def test_enqueue_with_deprecated_trigger(self):
"""Test that the admin web interface can enqueue a change"""
# TODO(mhu) remove in a few releases
self._test_enqueue(use_trigger=True)
def test_enqueue(self):
"""Test that the admin web interface can enqueue a change"""
self._test_enqueue()
def _test_enqueue_ref(self, use_trigger=False):
"""Test that the admin web interface can enqueue a ref"""
p = "review.example.com/org/project"
upstream = self.getUpstreamRepos([p])
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setMerged()
A_commit = str(upstream[p].commit('master'))
self.log.debug("A commit: %s" % A_commit)
path = "api/tenant/%(tenant)s/project/%(project)s/enqueue"
enqueue_args = {'tenant': 'tenant-one',
'project': 'org/project', }
ref = {'ref': 'master',
'oldrev': '90f173846e3af9154517b88543ffbd1691f31366',
'newrev': A_commit,
'pipeline': 'post', }
if use_trigger:
ref['trigger'] = 'gerrit'
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(path % enqueue_args,
headers={'Authorization': 'Bearer %s' % token},
json=ref)
self.assertEqual(200, req.status_code, req.text)
# The JSON returned is the same as the client's output
data = req.json()
self.assertEqual(True, data)
self.waitUntilSettled()
def test_enqueue_ref_with_deprecated_trigger(self):
"""Test that the admin web interface can enqueue a ref"""
# TODO(mhu) remove in a few releases
self._test_enqueue_ref(use_trigger=True)
def test_enqueue_ref(self):
"""Test that the admin web interface can enqueue a ref"""
self._test_enqueue_ref()
def test_dequeue(self):
"""Test that the admin web interface can dequeue a change"""
start_builds = len(self.builds)
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.executor_server.hold_jobs_in_build = True
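        # Switch to a layout with a timer-triggered periodic pipeline so
        # that a build is enqueued without any change involved.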
self.commitConfigUpdate('common-config', 'layouts/timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
for _ in iterate_timeout(30, 'Wait for a build on hold'):
if len(self.builds) > start_builds:
break
self.waitUntilSettled()
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
path = "api/tenant/%(tenant)s/project/%(project)s/dequeue"
dequeue_args = {'tenant': 'tenant-one',
'project': 'org/project', }
change = {'ref': 'refs/heads/stable',
'pipeline': 'periodic', }
req = self.post_url(path % dequeue_args,
headers={'Authorization': 'Bearer %s' % token},
json=change)
# The JSON returned is the same as the client's output
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertEqual(True, data)
self.waitUntilSettled()
self.commitConfigUpdate('common-config',
'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
def test_OPTIONS(self):
"""Ensure that protected endpoints handle CORS preflight requests
properly"""
        # Note that the tenant and project names are not relevant here.
        # The client is just checking what the endpoint allows.
endpoints = [
{'action': 'promote',
'path': 'api/tenant/my-tenant/promote',
'allowed_methods': ['POST', ]},
{'action': 'enqueue',
'path': 'api/tenant/my-tenant/project/my-project/enqueue',
'allowed_methods': ['POST', ]},
{'action': 'enqueue_ref',
'path': 'api/tenant/my-tenant/project/my-project/enqueue',
'allowed_methods': ['POST', ]},
{'action': 'autohold',
'path': 'api/tenant/my-tenant/project/my-project/autohold',
'allowed_methods': ['GET', 'POST', ]},
{'action': 'autohold_by_request_id',
'path': 'api/tenant/my-tenant/autohold/123',
'allowed_methods': ['GET', 'DELETE', ]},
{'action': 'dequeue',
'path': 'api/tenant/my-tenant/project/my-project/enqueue',
'allowed_methods': ['POST', ]},
{'action': 'authorizations',
'path': 'api/tenant/my-tenant/authorizations',
'allowed_methods': ['GET', ]},
]
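        # Each preflight response must advertise OPTIONS plus the
        # endpoint's own allowed methods, along with permissive CORS
        # headers.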
for endpoint in endpoints:
preflight = self.options_url(
endpoint['path'],
headers={'Access-Control-Request-Method': 'GET',
'Access-Control-Request-Headers': 'Authorization'})
self.assertEqual(
204,
preflight.status_code,
"%s failed: %s" % (endpoint['action'], preflight.text))
self.assertEqual(
'*',
preflight.headers.get('Access-Control-Allow-Origin'),
"%s failed: %s" % (endpoint['action'], preflight.headers))
self.assertEqual(
'Authorization, Content-Type',
preflight.headers.get('Access-Control-Allow-Headers'),
"%s failed: %s" % (endpoint['action'], preflight.headers))
allowed_methods = preflight.headers.get(
'Access-Control-Allow-Methods').split(', ')
self.assertTrue(
'OPTIONS' in allowed_methods,
"%s has allowed methods: %s" % (endpoint['action'],
allowed_methods))
for method in endpoint['allowed_methods']:
self.assertTrue(
method in allowed_methods,
"%s has allowed methods: %s,"
" expected: %s" % (endpoint['action'],
allowed_methods,
endpoint['allowed_methods']))
def test_promote(self):
"""Test that a change can be promoted via the admin web interface"""
# taken from test_client_promote in test_scheduler
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
enqueue_times = {}
for item in items:
enqueue_times[str(item.change)] = item.enqueue_time
# REST API
args = {'pipeline': 'gate',
'changes': ['2,1', '3,1']}
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(
'api/tenant/tenant-one/promote',
headers={'Authorization': 'Bearer %s' % token},
json=args)
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertEqual(True, data)
# ensure that enqueue times are durable
items = tenant.layout.pipelines['gate'].getAllItems()
for item in items:
self.assertEqual(
enqueue_times[str(item.change)], item.enqueue_time)
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertTrue(self.builds[0].hasChanges(B))
self.assertFalse(self.builds[0].hasChanges(A))
self.assertFalse(self.builds[0].hasChanges(C))
self.assertTrue(self.builds[2].hasChanges(B))
self.assertTrue(self.builds[2].hasChanges(C))
self.assertFalse(self.builds[2].hasChanges(A))
self.assertTrue(self.builds[4].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(C))
self.assertTrue(self.builds[4].hasChanges(A))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
self.assertEqual(len(self.history), 10)
def test_promote_no_change(self):
"""Test that jobs are not unecessarily restarted when promoting"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
enqueue_times = {}
for item in items:
enqueue_times[str(item.change)] = item.enqueue_time
# REST API
args = {'pipeline': 'gate',
'changes': ['1,1', '2,1', '3,1']}
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(
'api/tenant/tenant-one/promote',
headers={'Authorization': 'Bearer %s' % token},
json=args)
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertEqual(True, data)
# ensure that enqueue times are durable
items = tenant.layout.pipelines['gate'].getAllItems()
for item in items:
self.assertEqual(
enqueue_times[str(item.change)], item.enqueue_time)
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertTrue(self.builds[0].hasChanges(A))
self.assertFalse(self.builds[0].hasChanges(B))
self.assertFalse(self.builds[0].hasChanges(C))
self.assertTrue(self.builds[2].hasChanges(A))
self.assertTrue(self.builds[2].hasChanges(B))
self.assertFalse(self.builds[2].hasChanges(C))
self.assertTrue(self.builds[4].hasChanges(A))
self.assertTrue(self.builds[4].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(C))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
# The promote should be a noop, so no canceled jobs
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 0)
self.assertEqual(len(self.history), 9)
def test_promote_check(self):
"""Test that a change can be promoted via the admin web interface"""
self.executor_server.hold_jobs_in_build = True
# Make a patch series so that we have some non-live items in
# the pipeline and we can make sure they are not promoted.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.setDependsOn(B, 1)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = [i for i in tenant.layout.pipelines['check'].getAllItems()
if i.live]
enqueue_times = {}
for item in items:
enqueue_times[str(item.change)] = item.enqueue_time
# REST API
args = {'pipeline': 'check',
'changes': ['2,1', '3,1']}
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(
'api/tenant/tenant-one/promote',
headers={'Authorization': 'Bearer %s' % token},
json=args)
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertEqual(True, data)
self.waitUntilSettled()
# ensure that enqueue times are durable
items = [i for i in tenant.layout.pipelines['check'].getAllItems()
if i.live]
for item in items:
self.assertEqual(
enqueue_times[str(item.change)], item.enqueue_time)
# We can't reliably test for side effects in the check
# pipeline since the change queues are independent, so we
# directly examine the queues.
queue_items = [(item.change.number, item.live) for item in
tenant.layout.pipelines['check'].getAllItems()]
expected = [('1', False),
('2', True),
('1', False),
('2', False),
('3', True),
('1', True)]
self.assertEqual(expected, queue_items)
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release()
self.waitUntilSettled()
# No jobs should be canceled
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 0)
self.assertEqual(len(self.history), 9)
def test_tenant_authorizations_override(self):
"""Test that user gets overriden tenant authz if allowed"""
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one'],
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.get_url(
'api/tenant/tenant-one/authorizations',
headers={'Authorization': 'Bearer %s' % token})
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertTrue('zuul' in data)
self.assertTrue(data['zuul']['admin'], data)
self.assertTrue(data['zuul']['scope'] == ['tenant-one'], data)
# change tenant
authz['zuul']['admin'] = ['tenant-whatever', ]
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.get_url(
'api/tenant/tenant-one/authorizations',
headers={'Authorization': 'Bearer %s' % token})
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertTrue('zuul' in data)
self.assertTrue(data['zuul']['admin'] is False, data)
self.assertTrue(data['zuul']['scope'] == ['tenant-one'], data)
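# For reference, the authorizations endpoint exercised above returns a
# JSON body of roughly this shape (a sketch inferred from the assertions):
#
#   {"zuul": {"admin": true, "scope": ["tenant-one"]}}
#
# where "admin" flips to false when the token's override is not honored.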
class TestTenantScopedWebApiWithAuthRules(BaseTestWeb):
config_file = 'zuul-admin-web-no-override.conf'
tenant_config_file = 'config/authorization/single-tenant/main.yaml'
def test_override_not_allowed(self):
"""Test that authz cannot be overriden if config does not allow it"""
args = {"reason": "some reason",
"count": 1,
'job': 'project-test2',
'change': None,
'ref': None,
'node_hold_expiration': None}
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(
'api/tenant/tenant-one/project/org/project/autohold',
headers={'Authorization': 'Bearer %s' % token},
json=args)
self.assertEqual(401, req.status_code, req.text)
def test_tenant_level_rule(self):
"""Test that authz rules defined at tenant level are checked"""
path = "api/tenant/%(tenant)s/project/%(project)s/enqueue"
def _test_project_enqueue_with_authz(i, project, authz, expected):
f_ch = self.fake_gerrit.addFakeChange(project, 'master',
'%s %i' % (project, i))
f_ch.addApproval('Code-Review', 2)
f_ch.addApproval('Approved', 1)
change = {'trigger': 'gerrit',
'change': '%i,1' % i,
'pipeline': 'gate', }
enqueue_args = {'tenant': 'tenant-one',
'project': project, }
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(path % enqueue_args,
headers={'Authorization': 'Bearer %s' % token},
json=change)
self.assertEqual(expected, req.status_code, req.text)
self.waitUntilSettled()
i = 0
for p in ['org/project', 'org/project1', 'org/project2']:
i += 1
# Authorized sub
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'venkman',
'exp': int(time.time()) + 3600}
_test_project_enqueue_with_authz(i, p, authz, 200)
i += 1
# Unauthorized sub
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'vigo',
'exp': int(time.time()) + 3600}
_test_project_enqueue_with_authz(i, p, authz, 403)
i += 1
# unauthorized issuer
authz = {'iss': 'columbia.edu',
'aud': 'zuul.example.com',
'sub': 'stantz',
'exp': int(time.time()) + 3600}
_test_project_enqueue_with_authz(i, p, authz, 401)
self.waitUntilSettled()
def test_group_rule(self):
"""Test a group rule"""
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
A.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'melnitz',
'groups': ['ghostbusters', 'secretary'],
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
path = "api/tenant/%(tenant)s/project/%(project)s/enqueue"
enqueue_args = {'tenant': 'tenant-one',
'project': 'org/project2', }
change = {'trigger': 'gerrit',
'change': '1,1',
'pipeline': 'gate', }
req = self.post_url(path % enqueue_args,
headers={'Authorization': 'Bearer %s' % token},
json=change)
self.assertEqual(200, req.status_code, req.text)
self.waitUntilSettled()
def test_depth_claim_rule(self):
"""Test a rule based on a complex claim"""
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'zeddemore',
'vehicle': {
'car': 'ecto-1'},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
path = "api/tenant/%(tenant)s/project/%(project)s/enqueue"
enqueue_args = {'tenant': 'tenant-one',
'project': 'org/project', }
change = {'trigger': 'gerrit',
'change': '1,1',
'pipeline': 'gate', }
req = self.post_url(path % enqueue_args,
headers={'Authorization': 'Bearer %s' % token},
json=change)
self.assertEqual(200, req.status_code, req.text)
self.waitUntilSettled()
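    # The fixture presumably matches the nested claim with an
    # authorization rule along these lines (a sketch only; see Zuul's
    # authorization-rule documentation for the exact syntax):
    #
    #   - authorization-rule:
    #       name: ecto-one_rule
    #       conditions:
    #         - vehicle:
    #             car: ecto-1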
def test_user_actions_action_override(self):
"""Test that user with 'zuul.admin' claim does NOT get it back"""
admin_tenants = ['tenant-zero', ]
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {'admin': admin_tenants},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.get_url('/api/tenant/tenant-one/authorizations',
headers={'Authorization': 'Bearer %s' % token})
self.assertEqual(401, req.status_code, req.text)
def test_user_actions(self):
"""Test that users get the right 'zuul.actions' trees"""
users = [
{'authz': {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'vigo'},
'zuul.admin': []},
{'authz': {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'venkman'},
'zuul.admin': ['tenant-one', ]},
{'authz': {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'stantz'},
'zuul.admin': []},
{'authz': {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'zeddemore',
'vehicle': {
'car': 'ecto-1'
}},
'zuul.admin': ['tenant-one', ]},
{'authz': {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'melnitz',
'groups': ['secretary', 'ghostbusters']},
'zuul.admin': ['tenant-one', ]},
]
for test_user in users:
authz = test_user['authz']
authz['exp'] = int(time.time()) + 3600
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.get_url('/api/tenant/tenant-one/authorizations',
headers={'Authorization': 'Bearer %s' % token})
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertTrue('zuul' in data,
"%s got %s" % (authz['sub'], data))
self.assertTrue('admin' in data['zuul'],
"%s got %s" % (authz['sub'], data))
self.assertEqual('tenant-one' in test_user['zuul.admin'],
data['zuul']['admin'],
"%s got %s" % (authz['sub'], data))
self.assertEqual(['tenant-one', ],
data['zuul']['scope'],
"%s got %s" % (authz['sub'], data))
req = self.get_url('/api/tenant/unknown/authorizations',
headers={'Authorization': 'Bearer %s' % token})
self.assertEqual(404, req.status_code, req.text)
def test_authorizations_no_header(self):
"""Test that missing Authorization header results in HTTP 401"""
req = self.get_url('/api/tenant/tenant-one/authorizations')
self.assertEqual(401, req.status_code, req.text)
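# A minimal sketch (not used by the tests above) of how a resource server
# validates the bearer tokens built throughout this module with the
# standard PyJWT API; Zuul's real check additionally enforces issuer
# configuration and, optionally, max_validity_time.
def _decode_zuul_token(token, key='NoDanaOnlyZuul'):
    # Raises a jwt.InvalidTokenError subclass on a bad signature, a
    # wrong audience, or an expired 'exp' claim.
    return jwt.decode(token, key=key, algorithms=['HS256'],
                      audience='zuul.example.com')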
class TestTenantScopedWebApiTokenWithExpiry(BaseTestWeb):
config_file = 'zuul-admin-web-token-expiry.conf'
def test_iat_claim_mandatory(self):
"""Test that the 'iat' claim is mandatory when
max_validity_time is set"""
authz = {'iss': 'zuul_operator',
'sub': 'testuser',
'aud': 'zuul.example.com',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
headers={'Authorization': 'Bearer %s' % token},
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
def test_token_from_the_future(self):
authz = {'iss': 'zuul_operator',
'sub': 'testuser',
'aud': 'zuul.example.com',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 7200,
'iat': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
headers={'Authorization': 'Bearer %s' % token},
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
def test_token_expired(self):
authz = {'iss': 'zuul_operator',
'sub': 'testuser',
'aud': 'zuul.example.com',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
        # Sleep long enough for the token to exceed the configured
        # max_validity_time.
        time.sleep(10)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
headers={'Authorization': 'Bearer %s' % token},
json={'job': 'project-test1',
'count': 1,
'reason': 'because',
'node_hold_expiration': 36000})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'change': '2,1',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/enqueue",
headers={'Authorization': 'Bearer %s' % token},
json={'trigger': 'gerrit',
'ref': 'abcd',
'newrev': 'aaaa',
'oldrev': 'bbbb',
'pipeline': 'check'})
self.assertEqual(401, resp.status_code)
def test_autohold(self):
"""Test that autohold can be set through the admin web interface"""
args = {"reason": "some reason",
"count": 1,
'job': 'project-test2',
'change': None,
'ref': None,
'node_hold_expiration': None}
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ],
},
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
req = self.post_url(
'api/tenant/tenant-one/project/org/project/autohold',
headers={'Authorization': 'Bearer %s' % token},
json=args)
self.assertEqual(200, req.status_code, req.text)
data = req.json()
self.assertEqual(True, data)
# Check result
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
ah_request = autohold_requests[0]
self.assertEqual('tenant-one', ah_request['tenant'])
self.assertIn('org/project', ah_request['project'])
self.assertEqual('project-test2', ah_request['job'])
self.assertEqual(".*", ah_request['ref_filter'])
self.assertEqual("some reason", ah_request['reason'])
class TestHeldAttributeInBuildInfo(BaseTestWeb):
config_file = 'zuul-sql-driver-mysql.conf'
tenant_config_file = 'config/sql-driver/main.yaml'
def test_autohold_and_retrieve_held_build_info(self):
"""Ensure the "held" attribute can be used to filter builds"""
self.addAutohold('tenant-one', 'review.example.com/org/project',
'project-test2', '.*', 'reason text', 1, 600)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test2', B)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
all_builds_resp = self.get_url("api/tenant/tenant-one/builds?"
"project=org/project")
held_builds_resp = self.get_url("api/tenant/tenant-one/builds?"
"project=org/project&"
"held=1")
self.assertEqual(200,
all_builds_resp.status_code,
all_builds_resp.text)
self.assertEqual(200,
held_builds_resp.status_code,
held_builds_resp.text)
all_builds = all_builds_resp.json()
held_builds = held_builds_resp.json()
self.assertEqual(len(held_builds), 1, all_builds)
held_build = held_builds[0]
self.assertEqual('project-test2', held_build['job_name'], held_build)
self.assertEqual(True, held_build['held'], held_build)
class TestWebMulti(BaseTestWeb):
config_file = 'zuul-gerrit-ssh.conf'
def test_web_connections_list_multi(self):
data = self.get_url('api/connections').json()
port = self.web.connections.connections['gerrit'].web_server.port
url = f'http://localhost:{port}'
gerrit_connection = {
'driver': 'gerrit',
'name': 'gerrit',
'baseurl': url,
'canonical_hostname': 'review.example.com',
'server': 'review.example.com',
'ssh_server': 'ssh-review.example.com',
'port': 29418,
}
github_connection = {
'baseurl': 'https://api.github.com',
'canonical_hostname': 'github.com',
'driver': 'github',
'name': 'github',
'server': 'github.com',
'repo_cache': None,
}
self.assertEqual([gerrit_connection, github_connection], data)
# TODO Remove this class once REST support is removed from Zuul CLI
class TestCLIViaWebApi(BaseTestWeb):
config_file = 'zuul-admin-web.conf'
def test_autohold(self):
"""Test that autohold can be set with the CLI through REST"""
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'autohold', '--reason', 'some reason',
'--tenant', 'tenant-one', '--project', 'org/project',
'--job', 'project-test2', '--count', '1'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output[0])
# Check result
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
request = autohold_requests[0]
self.assertEqual('tenant-one', request['tenant'])
self.assertIn('org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual(".*", request['ref_filter'])
self.assertEqual("some reason", request['reason'])
self.assertEqual(1, request['max_count'])
def test_enqueue(self):
"""Test that the CLI can enqueue a change via REST"""
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'enqueue', '--tenant', 'tenant-one',
'--project', 'org/project',
'--pipeline', 'gate', '--change', '1,1'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output[0])
self.waitUntilSettled()
def test_enqueue_ref(self):
"""Test that the CLI can enqueue a ref via REST"""
p = "review.example.com/org/project"
upstream = self.getUpstreamRepos([p])
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setMerged()
A_commit = str(upstream[p].commit('master'))
self.log.debug("A commit: %s" % A_commit)
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'enqueue-ref', '--tenant', 'tenant-one',
'--project', 'org/project',
'--pipeline', 'post', '--ref', 'master',
'--oldrev', '90f173846e3af9154517b88543ffbd1691f31366',
'--newrev', A_commit],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output[0])
self.waitUntilSettled()
def test_dequeue(self):
"""Test that the CLI can dequeue a change via REST"""
start_builds = len(self.builds)
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
for _ in iterate_timeout(30, 'Wait for a build on hold'):
if len(self.builds) > start_builds:
break
self.waitUntilSettled()
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'dequeue', '--tenant', 'tenant-one', '--project', 'org/project',
'--pipeline', 'periodic', '--ref', 'refs/heads/stable'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output[0])
self.waitUntilSettled()
self.commitConfigUpdate('common-config',
'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
def test_promote(self):
"Test that the RPC client can promote a change"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
enqueue_times = {}
for item in items:
enqueue_times[str(item.change)] = item.enqueue_time
# Promote B and C using the cli
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'promote', '--tenant', 'tenant-one',
'--pipeline', 'gate', '--changes', '2,1', '3,1'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output[0])
self.waitUntilSettled()
# ensure that enqueue times are durable
items = tenant.layout.pipelines['gate'].getAllItems()
for item in items:
self.assertEqual(
enqueue_times[str(item.change)], item.enqueue_time)
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertTrue(self.builds[0].hasChanges(B))
self.assertFalse(self.builds[0].hasChanges(A))
self.assertFalse(self.builds[0].hasChanges(C))
self.assertTrue(self.builds[2].hasChanges(B))
self.assertTrue(self.builds[2].hasChanges(C))
self.assertFalse(self.builds[2].hasChanges(A))
self.assertTrue(self.builds[4].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(C))
self.assertTrue(self.builds[4].hasChanges(A))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
class TestWebStartup(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
config_ini_data = {}
def _start_web(self):
# Start the web server
self.web = ZuulWebFixture(
self.changes, self.config, self.additional_event_queues,
self.upstream_root, self.poller_events,
self.git_url_with_auth, self.addCleanup, self.test_root,
info=zuul.model.WebInfo.fromConfig(self.zuul_ini_config))
self.useFixture(self.web)
def get_url(self, url, *args, **kwargs):
return requests.get(
urllib.parse.urljoin(self.base_url, url), *args, **kwargs)
def createScheduler(self):
pass
def realCreateScheduler(self):
super().createScheduler()
@skip("This test is not reliable in the gate")
def test_web_startup(self):
self.zuul_ini_config = FakeConfig(self.config_ini_data)
self.web = None
t = threading.Thread(target=self._start_web)
t.daemon = True
t.start()
for _ in iterate_timeout(30, 'Wait for web to begin startup'):
if self.web and getattr(self.web, 'web', None):
break
self.web.web.system_config_cache_wake_event.wait()
self.realCreateScheduler()
self.scheds.execute(
lambda app: app.start(self.validate_tenants))
t.join()
self.host = 'localhost'
# Wait until web server is started
while True:
if self.web is None:
time.sleep(0.1)
continue
self.port = self.web.port
try:
with socket.create_connection((self.host, self.port)):
break
except ConnectionRefusedError:
pass
self.base_url = "http://{host}:{port}".format(
host=self.host, port=self.port)
# If the config didn't load correctly, we won't have the jobs
jobs = self.get_url("api/tenant/tenant-one/jobs").json()
self.assertEqual(len(jobs), 10)
class TestWebUnprotectedBranches(BaseWithWeb):
config_file = 'zuul-github-driver.conf'
tenant_config_file = 'config/unprotected-branches/main.yaml'
def test_no_protected_branches(self):
"""Regression test to check that zuul-web doesn't display
config errors when no protected branch exists."""
self.startWebServer()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
project2 = tenant.untrusted_projects[1]
tpc2 = tenant.project_configs[project2.canonical_name]
# project2 should have no parsed branch
self.assertEqual(0, len(tpc2.parsed_branch_config.keys()))
# Zuul-web should not display any config errors
config_errors = self.get_url(
"api/tenant/tenant-one/config-errors").json()
self.assertEqual(len(config_errors), 0)
class TestWebApiAccessRules(BaseTestWeb):
# Test read-level access restrictions
config_file = 'zuul-admin-web.conf'
tenant_config_file = 'config/access-rules/main.yaml'
routes = [
'/api/connections',
'/api/components',
'/api/tenants',
'/api/authorizations',
'/api/tenant/{tenant}/status',
'/api/tenant/{tenant}/status/change/{change}',
'/api/tenant/{tenant}/jobs',
'/api/tenant/{tenant}/job/{job_name}',
'/api/tenant/{tenant}/projects',
'/api/tenant/{tenant}/project/{project}',
('/api/tenant/{tenant}/pipeline/{pipeline}/'
'project/{project}/branch/{branch}/freeze-jobs'),
'/api/tenant/{tenant}/pipelines',
'/api/tenant/{tenant}/semaphores',
'/api/tenant/{tenant}/labels',
'/api/tenant/{tenant}/nodes',
'/api/tenant/{tenant}/key/{project}.pub',
'/api/tenant/{tenant}/project-ssh-key/{project}.pub',
'/api/tenant/{tenant}/console-stream',
'/api/tenant/{tenant}/badge',
'/api/tenant/{tenant}/builds',
'/api/tenant/{tenant}/build/{uuid}',
'/api/tenant/{tenant}/buildsets',
'/api/tenant/{tenant}/buildset/{uuid}',
'/api/tenant/{tenant}/config-errors',
'/api/tenant/{tenant}/authorizations',
'/api/tenant/{tenant}/project/{project}/autohold',
'/api/tenant/{tenant}/autohold',
'/api/tenant/{tenant}/autohold/{request_id}',
'/api/tenant/{tenant}/autohold/{request_id}',
'/api/tenant/{tenant}/project/{project}/enqueue',
'/api/tenant/{tenant}/project/{project}/dequeue',
'/api/tenant/{tenant}/promote',
]
info_routes = [
'/api/info',
'/api/tenant/{tenant}/info',
]
def test_read_routes_no_token(self):
for route in self.routes:
url = route.format(tenant='tenant-one',
project='org/project',
change='1,1',
job_name='testjob',
pipeline='check',
branch='master',
uuid='1',
request_id='1')
resp = self.get_url(url)
self.assertEqual(
401,
resp.status_code,
"get %s failed: %s" % (url, resp.text))
def test_read_info_routes_no_token(self):
for route in self.info_routes:
url = route.format(tenant='tenant-one',
project='org/project',
change='1,1',
job_name='testjob',
pipeline='check',
branch='master',
uuid='1',
request_id='1')
resp = self.get_url(url)
self.assertEqual(
200,
resp.status_code,
"get %s failed: %s" % (url, resp.text))
info = resp.json()
self.assertTrue(
info['info']['capabilities']['auth']['read_protected'])
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_web.py
|
test_web.py
|
# Copyright 2019 Red Hat, Inc.
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import json
import queue
import threading
import uuid
from unittest import mock
import testtools
from zuul import model
from zuul.lib import yamlutil as yaml
from zuul.model import BuildRequest, HoldRequest, MergeRequest
from zuul.zk import ZooKeeperClient
from zuul.zk.blob_store import BlobStore
from zuul.zk.branch_cache import BranchCache
from zuul.zk.change_cache import (
AbstractChangeCache,
ChangeKey,
ConcurrentUpdateError,
)
from zuul.zk.config_cache import SystemConfigCache, UnparsedConfigCache
from zuul.zk.exceptions import LockException
from zuul.zk.executor import ExecutorApi
from zuul.zk.job_request_queue import JobRequestEvent
from zuul.zk.merger import MergerApi
from zuul.zk.layout import LayoutStateStore, LayoutState
from zuul.zk.locks import locked
from zuul.zk.nodepool import ZooKeeperNodepool
from zuul.zk.sharding import (
RawShardIO,
BufferedShardReader,
BufferedShardWriter,
NODE_BYTE_SIZE_LIMIT,
)
from zuul.zk.components import (
BaseComponent, ComponentRegistry, ExecutorComponent, COMPONENT_REGISTRY
)
from tests.base import (
BaseTestCase, HoldableExecutorApi, HoldableMergerApi,
iterate_timeout
)
from zuul.zk.zkobject import (
ShardedZKObject, ZKObject, ZKContext
)
from zuul.zk.locks import tenant_write_lock
from kazoo.exceptions import ZookeeperError, OperationTimeoutError, NoNodeError
class ZooKeeperBaseTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.setupZK()
self.zk_client = ZooKeeperClient(
self.zk_chroot_fixture.zk_hosts,
tls_cert=self.zk_chroot_fixture.zookeeper_cert,
tls_key=self.zk_chroot_fixture.zookeeper_key,
tls_ca=self.zk_chroot_fixture.zookeeper_ca)
self.addCleanup(self.zk_client.disconnect)
self.zk_client.connect()
self.component_registry = ComponentRegistry(self.zk_client)
# We don't have any other component to initialize the global
# registry in these tests, so we do it ourselves.
COMPONENT_REGISTRY.create(self.zk_client)
class TestZookeeperClient(ZooKeeperBaseTestCase):
def test_ltime(self):
ltime = self.zk_client.getCurrentLtime()
self.assertGreaterEqual(ltime, 0)
self.assertIsInstance(ltime, int)
self.assertGreater(self.zk_client.getCurrentLtime(), ltime)
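# getCurrentLtime() is strictly increasing because it is presumably
# derived from a ZooKeeper transaction id (zxid), which the cluster
# increments on every write; the test above relies on exactly that
# monotonicity.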
class TestNodepool(ZooKeeperBaseTestCase):
def setUp(self):
super().setUp()
self.zk_nodepool = ZooKeeperNodepool(self.zk_client)
def _createRequest(self):
req = HoldRequest()
req.count = 1
req.reason = 'some reason'
req.expiration = 1
return req
def test_hold_requests_api(self):
# Test no requests returns empty list
self.assertEqual([], self.zk_nodepool.getHoldRequests())
# Test get on non-existent request is None
self.assertIsNone(self.zk_nodepool.getHoldRequest('anything'))
# Test creating a new request
req1 = self._createRequest()
self.zk_nodepool.storeHoldRequest(req1)
self.assertIsNotNone(req1.id)
self.assertEqual(1, len(self.zk_nodepool.getHoldRequests()))
# Test getting the request
req2 = self.zk_nodepool.getHoldRequest(req1.id)
self.assertEqual(req1.toDict(), req2.toDict())
# Test updating the request
req2.reason = 'a new reason'
self.zk_nodepool.storeHoldRequest(req2)
req2 = self.zk_nodepool.getHoldRequest(req2.id)
self.assertNotEqual(req1.reason, req2.reason)
# Test lock operations
self.zk_nodepool.lockHoldRequest(req2, blocking=False)
with testtools.ExpectedException(
LockException, "Timeout trying to acquire lock .*"
):
self.zk_nodepool.lockHoldRequest(req2, blocking=True, timeout=2)
self.zk_nodepool.unlockHoldRequest(req2)
self.assertIsNone(req2.lock)
# Test deleting the request
self.zk_nodepool.deleteHoldRequest(req1)
self.assertEqual([], self.zk_nodepool.getHoldRequests())
class TestSharding(ZooKeeperBaseTestCase):
def test_reader(self):
shard_io = RawShardIO(self.zk_client.client, "/test/shards")
self.assertEqual(len(shard_io._shards), 0)
with BufferedShardReader(
self.zk_client.client, "/test/shards"
) as shard_reader:
self.assertEqual(shard_reader.read(), b"")
shard_io.write(b"foobar")
self.assertEqual(len(shard_io._shards), 1)
self.assertEqual(shard_io.read(), b"foobar")
def test_writer(self):
shard_io = RawShardIO(self.zk_client.client, "/test/shards")
self.assertEqual(len(shard_io._shards), 0)
with BufferedShardWriter(
self.zk_client.client, "/test/shards"
) as shard_writer:
shard_writer.write(b"foobar")
self.assertEqual(len(shard_io._shards), 1)
self.assertEqual(shard_io.read(), b"foobar")
def test_truncate(self):
shard_io = RawShardIO(self.zk_client.client, "/test/shards")
shard_io.write(b"foobar")
self.assertEqual(len(shard_io._shards), 1)
with BufferedShardWriter(
self.zk_client.client, "/test/shards"
) as shard_writer:
shard_writer.truncate(0)
self.assertEqual(len(shard_io._shards), 0)
def test_shard_bytes_limit(self):
with BufferedShardWriter(
self.zk_client.client, "/test/shards"
) as shard_writer:
shard_writer.write(b"x" * (NODE_BYTE_SIZE_LIMIT + 1))
shard_writer.flush()
self.assertEqual(len(shard_writer.raw._shards), 2)
def test_json(self):
data = {"key": "value"}
with BufferedShardWriter(
self.zk_client.client, "/test/shards"
) as shard_io:
shard_io.write(json.dumps(data).encode("utf8"))
with BufferedShardReader(
self.zk_client.client, "/test/shards"
) as shard_io:
self.assertDictEqual(json.load(shard_io), data)
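# Back-of-the-envelope check of the splitting behaviour exercised above
# (a sketch; it assumes shards are cut from the uncompressed payload,
# which is what test_shard_bytes_limit demonstrates):
def _expected_shard_count(payload_len, limit=NODE_BYTE_SIZE_LIMIT):
    # Ceiling division: one znode per started chunk of `limit` bytes.
    return max(1, -(-payload_len // limit))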
class TestUnparsedConfigCache(ZooKeeperBaseTestCase):
def setUp(self):
super().setUp()
self.config_cache = UnparsedConfigCache(self.zk_client)
def test_files_cache(self):
master_files = self.config_cache.getFilesCache("project", "master")
with self.config_cache.readLock("project"):
self.assertEqual(len(master_files), 0)
with self.config_cache.writeLock("project"):
master_files["/path/to/file"] = "content"
with self.config_cache.readLock("project"):
self.assertEqual(master_files["/path/to/file"], "content")
self.assertEqual(len(master_files), 1)
with self.config_cache.writeLock("project"):
master_files.clear()
self.assertEqual(len(master_files), 0)
def test_valid_for(self):
tpc = model.TenantProjectConfig("project")
tpc.extra_config_files = {"foo.yaml", "bar.yaml"}
tpc.extra_config_dirs = {"foo.d/", "bar.d/"}
master_files = self.config_cache.getFilesCache("project", "master")
self.assertFalse(master_files.isValidFor(tpc, min_ltime=-1))
master_files.setValidFor(tpc.extra_config_files, tpc.extra_config_dirs,
ltime=1)
self.assertTrue(master_files.isValidFor(tpc, min_ltime=-1))
tpc.extra_config_files = set()
tpc.extra_config_dirs = set()
self.assertTrue(master_files.isValidFor(tpc, min_ltime=-1))
self.assertFalse(master_files.isValidFor(tpc, min_ltime=2))
tpc.extra_config_files = {"bar.yaml"}
tpc.extra_config_dirs = {"bar.d/"}
# Valid for subset
self.assertTrue(master_files.isValidFor(tpc, min_ltime=-1))
tpc.extra_config_files = {"foo.yaml", "bar.yaml"}
tpc.extra_config_dirs = {"foo.d/", "bar.d/", "other.d/"}
# Invalid for additional dirs
self.assertFalse(master_files.isValidFor(tpc, min_ltime=-1))
self.assertFalse(master_files.isValidFor(tpc, min_ltime=2))
tpc.extra_config_files = {"foo.yaml", "bar.yaml", "other.yaml"}
tpc.extra_config_dirs = {"foo.d/", "bar.d/"}
# Invalid for additional files
self.assertFalse(master_files.isValidFor(tpc, min_ltime=-1))
self.assertFalse(master_files.isValidFor(tpc, min_ltime=2))
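    # Summarizing the semantics exercised above: a files cache is valid
    # for a TenantProjectConfig iff the cached extra files/dirs are a
    # superset of the ones the TPC asks for AND the cache ltime is at
    # least min_ltime.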
def test_cache_ltime(self):
cache = self.config_cache.getFilesCache("project", "master")
self.assertEqual(cache.ltime, -1)
cache.setValidFor(set(), set(), ltime=1)
self.assertEqual(cache.ltime, 1)
def test_branch_cleanup(self):
master_files = self.config_cache.getFilesCache("project", "master")
release_files = self.config_cache.getFilesCache("project", "release")
master_files["/path/to/file"] = "content"
release_files["/path/to/file"] = "content"
self.config_cache.clearCache("project", "master")
self.assertEqual(len(master_files), 0)
self.assertEqual(len(release_files), 1)
def test_project_cleanup(self):
master_files = self.config_cache.getFilesCache("project", "master")
stable_files = self.config_cache.getFilesCache("project", "stable")
other_files = self.config_cache.getFilesCache("other", "master")
self.assertEqual(len(master_files), 0)
self.assertEqual(len(stable_files), 0)
master_files["/path/to/file"] = "content"
stable_files["/path/to/file"] = "content"
other_files["/path/to/file"] = "content"
self.assertEqual(len(master_files), 1)
self.assertEqual(len(stable_files), 1)
self.assertEqual(len(other_files), 1)
self.config_cache.clearCache("project")
self.assertEqual(len(master_files), 0)
self.assertEqual(len(stable_files), 0)
self.assertEqual(len(other_files), 1)
class TestComponentRegistry(ZooKeeperBaseTestCase):
def setUp(self):
super().setUp()
self.second_zk_client = ZooKeeperClient(
self.zk_chroot_fixture.zk_hosts,
tls_cert=self.zk_chroot_fixture.zookeeper_cert,
tls_key=self.zk_chroot_fixture.zookeeper_key,
tls_ca=self.zk_chroot_fixture.zookeeper_ca,
)
self.addCleanup(self.second_zk_client.disconnect)
self.second_zk_client.connect()
self.second_component_registry = ComponentRegistry(
self.second_zk_client)
def assertComponentAttr(self, component_name, attr_name,
attr_value, timeout=10):
for _ in iterate_timeout(
timeout,
f"{component_name} in cache has {attr_name} set to {attr_value}",
):
components = list(self.second_component_registry.all(
component_name))
if (
len(components) > 0 and
getattr(components[0], attr_name) == attr_value
):
break
def assertComponentState(self, component_name, state, timeout=10):
return self.assertComponentAttr(
component_name, "state", state, timeout
)
def assertComponentStopped(self, component_name, timeout=10):
for _ in iterate_timeout(
timeout, f"{component_name} in cache is stopped"
):
components = list(self.second_component_registry.all(
component_name))
if len(components) == 0:
break
def test_component_registry(self):
self.component_info = ExecutorComponent(self.zk_client, 'test')
self.component_info.register()
self.assertComponentState("executor", BaseComponent.STOPPED)
self.zk_client.client.stop()
self.assertComponentStopped("executor")
self.zk_client.client.start()
self.assertComponentState("executor", BaseComponent.STOPPED)
self.component_info.state = self.component_info.RUNNING
self.assertComponentState("executor", BaseComponent.RUNNING)
self.log.debug("DISCONNECT")
self.second_zk_client.client.stop()
self.second_zk_client.client.start()
self.log.debug("RECONNECT")
self.component_info.state = self.component_info.PAUSED
self.assertComponentState("executor", BaseComponent.PAUSED)
# Make sure the registry didn't create any read/write
# component objects that re-registered themselves.
components = list(self.second_component_registry.all('executor'))
self.assertEqual(len(components), 1)
self.component_info.state = self.component_info.RUNNING
self.assertComponentState("executor", BaseComponent.RUNNING)
class TestExecutorApi(ZooKeeperBaseTestCase):
def test_build_request(self):
# Test the lifecycle of a build request
request_queue = queue.Queue()
event_queue = queue.Queue()
# A callback closure for the request queue
def rq_put():
request_queue.put(None)
# and the event queue
def eq_put(br, e):
event_queue.put((br, e))
# Simulate the client side
client = ExecutorApi(self.zk_client)
# Simulate the server side
server = ExecutorApi(self.zk_client,
build_request_callback=rq_put,
build_event_callback=eq_put)
# Scheduler submits request
request = BuildRequest(
"A", None, None, "job", "tenant", "pipeline", '1')
client.submit(request, {'job': 'test'})
request_queue.get(timeout=30)
# Executor receives request
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
params = client.getParams(a)
self.assertEqual(params, {'job': 'test'})
client.clearParams(a)
params = client.getParams(a)
self.assertIsNone(params)
# Executor locks request
self.assertTrue(server.lock(a, blocking=False))
a.state = BuildRequest.RUNNING
server.update(a)
self.assertEqual(client.get(a.path).state, BuildRequest.RUNNING)
# Executor should see no pending requests
reqs = list(server.next())
self.assertEqual(len(reqs), 0)
# Executor pauses build
a.state = BuildRequest.PAUSED
server.update(a)
self.assertEqual(client.get(a.path).state, BuildRequest.PAUSED)
# Scheduler resumes build
self.assertTrue(event_queue.empty())
sched_a = client.get(a.path)
client.requestResume(sched_a)
(build_request, event) = event_queue.get(timeout=30)
self.assertEqual(build_request, a)
self.assertEqual(event, JobRequestEvent.RESUMED)
# Executor resumes build
a.state = BuildRequest.RUNNING
server.update(a)
server.fulfillResume(a)
self.assertEqual(client.get(a.path).state, BuildRequest.RUNNING)
# Scheduler cancels build
self.assertTrue(event_queue.empty())
sched_a = client.get(a.path)
client.requestCancel(sched_a)
(build_request, event) = event_queue.get(timeout=30)
self.assertEqual(build_request, a)
self.assertEqual(event, JobRequestEvent.CANCELED)
# Executor aborts build
a.state = BuildRequest.COMPLETED
server.update(a)
server.fulfillCancel(a)
server.unlock(a)
self.assertEqual(client.get(a.path).state, BuildRequest.COMPLETED)
# Scheduler removes build request on completion
client.remove(sched_a)
self.assertEqual(set(self.getZKPaths('/zuul/executor')),
set(['/zuul/executor/unzoned',
'/zuul/executor/unzoned/locks',
'/zuul/executor/unzoned/params',
'/zuul/executor/unzoned/requests',
'/zuul/executor/unzoned/result-data',
'/zuul/executor/unzoned/results',
'/zuul/executor/unzoned/waiters']))
self.assertEqual(self.getZKWatches(), {})
def test_build_request_remove(self):
# Test the scheduler forcibly removing a request (perhaps the
# tenant is being deleted, so there will be no result queue).
request_queue = queue.Queue()
event_queue = queue.Queue()
def rq_put():
request_queue.put(None)
def eq_put(br, e):
event_queue.put((br, e))
# Simulate the client side
client = ExecutorApi(self.zk_client)
# Simulate the server side
server = ExecutorApi(self.zk_client,
build_request_callback=rq_put,
build_event_callback=eq_put)
# Scheduler submits request
request = BuildRequest(
"A", None, None, "job", "tenant", "pipeline", '1')
client.submit(request, {})
request_queue.get(timeout=30)
# Executor receives request
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
# Executor locks request
self.assertTrue(server.lock(a, blocking=False))
a.state = BuildRequest.RUNNING
server.update(a)
self.assertEqual(client.get(a.path).state, BuildRequest.RUNNING)
# Executor should see no pending requests
reqs = list(server.next())
self.assertEqual(len(reqs), 0)
self.assertTrue(event_queue.empty())
# Scheduler rudely removes build request
sched_a = client.get(a.path)
client.remove(sched_a)
# Make sure it shows up as deleted
(build_request, event) = event_queue.get(timeout=30)
self.assertEqual(build_request, a)
self.assertEqual(event, JobRequestEvent.DELETED)
# Executor should not write anything else since the request
# was deleted.
def test_build_request_hold(self):
# Test that we can hold a build request in "queue"
request_queue = queue.Queue()
event_queue = queue.Queue()
def rq_put():
request_queue.put(None)
def eq_put(br, e):
event_queue.put((br, e))
# Simulate the client side
client = HoldableExecutorApi(self.zk_client)
client.hold_in_queue = True
# Simulate the server side
server = ExecutorApi(self.zk_client,
build_request_callback=rq_put,
build_event_callback=eq_put)
# Scheduler submits request
request = BuildRequest(
"A", None, None, "job", "tenant", "pipeline", '1')
client.submit(request, {})
request_queue.get(timeout=30)
# Executor receives nothing
reqs = list(server.next())
self.assertEqual(len(reqs), 0)
# Test releases hold
a = client.get(request.path)
self.assertEqual(a.uuid, 'A')
a.state = BuildRequest.REQUESTED
client.update(a)
# Executor receives request
request_queue.get(timeout=30)
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
# The rest is redundant.
def test_nonexistent_lock(self):
request_queue = queue.Queue()
event_queue = queue.Queue()
def rq_put():
request_queue.put(None)
def eq_put(br, e):
event_queue.put((br, e))
# Simulate the client side
client = ExecutorApi(self.zk_client)
# Scheduler submits request
request = BuildRequest(
"A", None, None, "job", "tenant", "pipeline", '1')
client.submit(request, {})
sched_a = client.get(request.path)
# Simulate the server side
server = ExecutorApi(self.zk_client,
build_request_callback=rq_put,
build_event_callback=eq_put)
exec_a = server.get(request.path)
client.remove(sched_a)
# Try to lock a request that was just removed
self.assertFalse(server.lock(exec_a))
def test_efficient_removal(self):
# Test that we don't try to lock a removed request
request_queue = queue.Queue()
event_queue = queue.Queue()
def rq_put():
request_queue.put(None)
def eq_put(br, e):
event_queue.put((br, e))
# Simulate the client side
client = ExecutorApi(self.zk_client)
# Scheduler submits two requests
request_a = BuildRequest(
"A", None, None, "job", "tenant", "pipeline", '1')
client.submit(request_a, {})
request_b = BuildRequest(
"B", None, None, "job", "tenant", "pipeline", '2')
client.submit(request_b, {})
sched_b = client.get(request_b.path)
request_c = BuildRequest(
"C", None, None, "job", "tenant", "pipeline", '3')
client.submit(request_c, {})
sched_c = client.get(request_c.path)
# Simulate the server side
server = ExecutorApi(self.zk_client,
build_request_callback=rq_put,
build_event_callback=eq_put)
count = 0
for exec_request in server.next():
count += 1
if count == 1:
# Someone starts the second request and client removes
# the third request all while we're processing the first.
sched_b.state = sched_b.RUNNING
client.update(sched_b)
client.remove(sched_c)
for _ in iterate_timeout(30, "cache to be up-to-date"):
if (len(server.zone_queues[None]._cached_requests) == 2):
break
# Make sure we only got the first request
self.assertEqual(count, 1)
def test_lost_build_requests(self):
# Test that lostBuildRequests() returns unlocked running build
# requests
executor_api = ExecutorApi(self.zk_client)
br = BuildRequest(
"A", "zone", None, "job", "tenant", "pipeline", '1')
executor_api.submit(br, {})
br = BuildRequest(
"B", None, None, "job", "tenant", "pipeline", '1')
executor_api.submit(br, {})
path_b = br.path
br = BuildRequest(
"C", "zone", None, "job", "tenant", "pipeline", '1')
executor_api.submit(br, {})
path_c = br.path
br = BuildRequest(
"D", "zone", None, "job", "tenant", "pipeline", '1')
executor_api.submit(br, {})
path_d = br.path
br = BuildRequest(
"E", "zone", None, "job", "tenant", "pipeline", '1')
executor_api.submit(br, {})
path_e = br.path
b = executor_api.get(path_b)
c = executor_api.get(path_c)
d = executor_api.get(path_d)
e = executor_api.get(path_e)
# Make sure the get() method used the correct zone keys
self.assertEqual(set(executor_api.zone_queues.keys()), {"zone", None})
b.state = BuildRequest.RUNNING
executor_api.update(b)
c.state = BuildRequest.RUNNING
executor_api.lock(c)
executor_api.update(c)
d.state = BuildRequest.COMPLETED
executor_api.update(d)
e.state = BuildRequest.PAUSED
executor_api.update(e)
# Wait until the latest state transition is reflected in the Executor
# APIs cache. Using a DataWatch for this purpose could lead to race
# conditions depending on which DataWatch is executed first. The
# DataWatch might be triggered for the correct event, but the cache
# might still be outdated as the DataWatch that updates the cache
# itself wasn't triggered yet.
b_cache = executor_api.zone_queues[None]._cached_requests
e_cache = executor_api.zone_queues['zone']._cached_requests
for _ in iterate_timeout(30, "cache to be up-to-date"):
if (b_cache[path_b].state == BuildRequest.RUNNING and
e_cache[path_e].state == BuildRequest.PAUSED):
break
        # lostRequests() should only return requests which are running or
        # paused but not locked by any executor; in this case builds b and e.
lost_build_requests = list(executor_api.lostRequests())
self.assertEqual(2, len(lost_build_requests))
self.assertEqual(b.path, lost_build_requests[0].path)
def test_lost_build_request_params(self):
# Test cleaning up orphaned request parameters
executor_api = ExecutorApi(self.zk_client)
br = BuildRequest(
"A", "zone", None, "job", "tenant", "pipeline", '1')
executor_api.submit(br, {})
params_root = executor_api.zone_queues['zone'].PARAM_ROOT
self.assertEqual(len(executor_api._getAllRequestIds()), 1)
self.assertEqual(len(
self.zk_client.client.get_children(params_root)), 1)
# Delete the request but not the params
self.zk_client.client.delete(br.path)
self.assertEqual(len(executor_api._getAllRequestIds()), 0)
self.assertEqual(len(
self.zk_client.client.get_children(params_root)), 1)
# Clean up leaked params
executor_api.cleanup(0)
self.assertEqual(len(
self.zk_client.client.get_children(params_root)), 0)
def test_existing_build_request(self):
# Test that an executor sees an existing build request when
# coming online
# Test the lifecycle of a build request
request_queue = queue.Queue()
event_queue = queue.Queue()
# A callback closure for the request queue
def rq_put():
request_queue.put(None)
# and the event queue
def eq_put(br, e):
event_queue.put((br, e))
# Simulate the client side
client = ExecutorApi(self.zk_client)
client.submit(
BuildRequest(
"A", None, None, "job", "tenant", "pipeline", '1'), {})
# Simulate the server side
server = ExecutorApi(self.zk_client,
build_request_callback=rq_put,
build_event_callback=eq_put)
# Scheduler submits request
request_queue.get(timeout=30)
# Executor receives request
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
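# For orientation, the BuildRequest lifecycle exercised by the tests
# above (a sketch of the states actually touched, not an exhaustive
# state machine):
#
#   (HOLD) -> REQUESTED --lock--> RUNNING <-> PAUSED
#   RUNNING -> COMPLETED --unlock--> removed by the scheduler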
class TestMergerApi(ZooKeeperBaseTestCase):
def _assertEmptyRoots(self, client):
self.assertEqual(self.getZKPaths(client.REQUEST_ROOT), [])
self.assertEqual(self.getZKPaths(client.PARAM_ROOT), [])
self.assertEqual(self.getZKPaths(client.RESULT_ROOT), [])
self.assertEqual(self.getZKPaths(client.RESULT_DATA_ROOT), [])
self.assertEqual(self.getZKPaths(client.WAITER_ROOT), [])
self.assertEqual(self.getZKPaths(client.LOCK_ROOT), [])
self.assertEqual(self.getZKWatches(), {})
def test_merge_request(self):
# Test the lifecycle of a merge request
request_queue = queue.Queue()
# A callback closure for the request queue
def rq_put():
request_queue.put(None)
# Simulate the client side
client = MergerApi(self.zk_client)
# Simulate the server side
server = MergerApi(self.zk_client,
merge_request_callback=rq_put)
# Scheduler submits request
payload = {'merge': 'test'}
request = MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
)
client.submit(request, payload)
request_queue.get(timeout=30)
# Merger receives request
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
params = client.getParams(a)
self.assertEqual(params, payload)
client.clearParams(a)
params = client.getParams(a)
self.assertIsNone(params)
# Merger locks request
self.assertTrue(server.lock(a, blocking=False))
a.state = MergeRequest.RUNNING
server.update(a)
self.assertEqual(client.get(a.path).state, MergeRequest.RUNNING)
# Merger should see no pending requests
reqs = list(server.next())
self.assertEqual(len(reqs), 0)
# Merger removes and unlocks merge request on completion
server.remove(a)
server.unlock(a)
self._assertEmptyRoots(client)
def test_merge_request_hold(self):
# Test that we can hold a merge request in "queue"
request_queue = queue.Queue()
def rq_put():
request_queue.put(None)
# Simulate the client side
client = HoldableMergerApi(self.zk_client)
client.hold_in_queue = True
# Simulate the server side
server = MergerApi(self.zk_client,
merge_request_callback=rq_put)
# Scheduler submits request
payload = {'merge': 'test'}
client.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
request_queue.get(timeout=30)
# Merger receives nothing
reqs = list(server.next())
self.assertEqual(len(reqs), 0)
# Test releases hold
# We have to get a new merge_request object to update it.
a = client.get(f"{client.REQUEST_ROOT}/A")
self.assertEqual(a.uuid, 'A')
a.state = MergeRequest.REQUESTED
client.update(a)
# Merger receives request
request_queue.get(timeout=30)
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
server.remove(a)
        # The rest of the lifecycle is covered by test_merge_request above.
self._assertEmptyRoots(client)
def test_merge_request_result(self):
# Test the lifecycle of a merge request
request_queue = queue.Queue()
# A callback closure for the request queue
def rq_put():
request_queue.put(None)
# Simulate the client side
client = MergerApi(self.zk_client)
# Simulate the server side
server = MergerApi(self.zk_client,
merge_request_callback=rq_put)
# Scheduler submits request
payload = {'merge': 'test'}
future = client.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload, needs_result=True)
request_queue.get(timeout=30)
# Merger receives request
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
# Merger locks request
self.assertTrue(server.lock(a, blocking=False))
a.state = MergeRequest.RUNNING
server.update(a)
self.assertEqual(client.get(a.path).state, MergeRequest.RUNNING)
# Merger reports result
result_data = {'result': 'ok'}
server.reportResult(a, result_data)
self.assertEqual(set(self.getZKPaths(client.RESULT_ROOT)),
set(['/zuul/merger/results/A']))
self.assertEqual(set(self.getZKPaths(client.RESULT_DATA_ROOT)),
set(['/zuul/merger/result-data/A',
'/zuul/merger/result-data/A/0000000000']))
self.assertEqual(self.getZKPaths(client.WAITER_ROOT),
['/zuul/merger/waiters/A'])
# Merger removes and unlocks merge request on completion
server.remove(a)
server.unlock(a)
# Scheduler awaits result
self.assertTrue(future.wait())
self.assertEqual(future.data, result_data)
self._assertEmptyRoots(client)
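
    # From the scheduler's perspective the result flow above reduces to a
    # few calls (a sketch; the full server-side handshake is what this
    # test verifies):
    #
    #   future = client.submit(request, payload, needs_result=True)
    #   ...                       # merger runs and calls reportResult()
    #   if future.wait():         # blocks until the result is written
    #       result = future.data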
def test_lost_merge_request_params(self):
# Test cleaning up orphaned request parameters
merger_api = MergerApi(self.zk_client)
# Scheduler submits request
payload = {'merge': 'test'}
merger_api.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
path_a = '/'.join([merger_api.REQUEST_ROOT, 'A'])
params_root = merger_api.PARAM_ROOT
self.assertEqual(len(merger_api._getAllRequestIds()), 1)
self.assertEqual(len(
self.zk_client.client.get_children(params_root)), 1)
# Delete the request but not the params
self.zk_client.client.delete(path_a)
self.assertEqual(len(merger_api._getAllRequestIds()), 0)
self.assertEqual(len(
self.zk_client.client.get_children(params_root)), 1)
# Clean up leaked params
merger_api.cleanup(0)
self.assertEqual(len(
self.zk_client.client.get_children(params_root)), 0)
self._assertEmptyRoots(merger_api)
def test_lost_merge_request_result(self):
# Test that we can clean up orphaned merge results
request_queue = queue.Queue()
# A callback closure for the request queue
def rq_put():
request_queue.put(None)
# Simulate the client side
client = MergerApi(self.zk_client)
# Simulate the server side
server = MergerApi(self.zk_client,
merge_request_callback=rq_put)
# Scheduler submits request
payload = {'merge': 'test'}
future = client.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload, needs_result=True)
request_queue.get(timeout=30)
# Merger receives request
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
# Merger locks request
self.assertTrue(server.lock(a, blocking=False))
a.state = MergeRequest.RUNNING
server.update(a)
self.assertEqual(client.get(a.path).state, MergeRequest.RUNNING)
# Merger reports result
result_data = {'result': 'ok'}
server.reportResult(a, result_data)
# Merger removes and unlocks merge request on completion
server.remove(a)
server.unlock(a)
self.assertEqual(set(self.getZKPaths(client.RESULT_ROOT)),
set(['/zuul/merger/results/A']))
self.assertEqual(set(self.getZKPaths(client.RESULT_DATA_ROOT)),
set(['/zuul/merger/result-data/A',
'/zuul/merger/result-data/A/0000000000']))
self.assertEqual(self.getZKPaths(client.WAITER_ROOT),
['/zuul/merger/waiters/A'])
# Scheduler "disconnects"
self.zk_client.client.delete(future._waiter_path)
# Find orphaned results
client.cleanup(age=0)
self._assertEmptyRoots(client)
def test_nonexistent_lock(self):
request_queue = queue.Queue()
def rq_put():
request_queue.put(None)
# Simulate the client side
client = MergerApi(self.zk_client)
# Scheduler submits request
payload = {'merge': 'test'}
client.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
client_a = client.get(f"{client.REQUEST_ROOT}/A")
# Simulate the server side
server = MergerApi(self.zk_client,
merge_request_callback=rq_put)
server_a = list(server.next())[0]
client.remove(client_a)
# Try to lock a request that was just removed
self.assertFalse(server.lock(server_a))
self._assertEmptyRoots(client)
def test_efficient_removal(self):
# Test that we don't try to lock a removed request
request_queue = queue.Queue()
event_queue = queue.Queue()
def rq_put():
request_queue.put(None)
def eq_put(br, e):
event_queue.put((br, e))
# Simulate the client side
client = MergerApi(self.zk_client)
# Scheduler submits three requests
payload = {'merge': 'test'}
client.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
client.submit(MergeRequest(
uuid='B',
job_type=MergeRequest.MERGE,
build_set_uuid='BB',
tenant_name='tenant',
pipeline_name='check',
event_id='2',
), payload)
client_b = client.get(f"{client.REQUEST_ROOT}/B")
client.submit(MergeRequest(
uuid='C',
job_type=MergeRequest.MERGE,
build_set_uuid='CC',
tenant_name='tenant',
pipeline_name='check',
event_id='2',
), payload)
client_c = client.get(f"{client.REQUEST_ROOT}/C")
# Simulate the server side
server = MergerApi(self.zk_client,
merge_request_callback=rq_put)
count = 0
for merge_request in server.next():
count += 1
if count == 1:
# Someone starts the second request and client removes
# the third request all while we're processing the first.
client_b.state = client_b.RUNNING
client.update(client_b)
client.remove(client_c)
for _ in iterate_timeout(30, "cache to be up-to-date"):
if (len(server._cached_requests) == 2):
break
# Make sure we only got the first request
self.assertEqual(count, 1)
def test_leaked_lock(self):
client = MergerApi(self.zk_client)
# Manually create a lock with no underlying request
self.zk_client.client.create(f"{client.LOCK_ROOT}/A", b'')
client.cleanup(0)
self._assertEmptyRoots(client)
def test_lost_merge_requests(self):
# Test that lostMergeRequests() returns unlocked running merge
# requests
merger_api = MergerApi(self.zk_client)
payload = {'merge': 'test'}
merger_api.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
merger_api.submit(MergeRequest(
uuid='B',
job_type=MergeRequest.MERGE,
build_set_uuid='BB',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
merger_api.submit(MergeRequest(
uuid='C',
job_type=MergeRequest.MERGE,
build_set_uuid='CC',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
merger_api.submit(MergeRequest(
uuid='D',
job_type=MergeRequest.MERGE,
build_set_uuid='DD',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
b = merger_api.get(f"{merger_api.REQUEST_ROOT}/B")
c = merger_api.get(f"{merger_api.REQUEST_ROOT}/C")
d = merger_api.get(f"{merger_api.REQUEST_ROOT}/D")
b.state = MergeRequest.RUNNING
merger_api.update(b)
merger_api.lock(c)
c.state = MergeRequest.RUNNING
merger_api.update(c)
d.state = MergeRequest.COMPLETED
merger_api.update(d)
        # Wait until the latest state transition is reflected in the
        # MergerApi's cache. Using a DataWatch for this purpose could lead
        # to race conditions depending on which DataWatch is executed
        # first. The DataWatch might be triggered for the correct event,
        # but the cache might still be outdated as the DataWatch that
        # updates the cache itself wasn't triggered yet.
cache = merger_api._cached_requests
for _ in iterate_timeout(30, "cache to be up-to-date"):
if (cache[b.path].state == MergeRequest.RUNNING and
cache[c.path].state == MergeRequest.RUNNING):
break
        # The lostRequests method should only return merges which are
        # running but not locked by any merger, in this case merge b
lost_merge_requests = list(merger_api.lostRequests())
self.assertEqual(1, len(lost_merge_requests))
self.assertEqual(b.path, lost_merge_requests[0].path)
# This test does not clean them up, so we can't assert empty roots
def test_existing_merge_request(self):
# Test that a merger sees an existing merge request when
# coming online
request_queue = queue.Queue()
# A callback closure for the request queue
def rq_put():
request_queue.put(None)
# Simulate the client side
client = MergerApi(self.zk_client)
payload = {'merge': 'test'}
client.submit(MergeRequest(
uuid='A',
job_type=MergeRequest.MERGE,
build_set_uuid='AA',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
# Simulate the server side
server = MergerApi(self.zk_client,
merge_request_callback=rq_put)
        # Wait for the server's callback to fire for the request that was
        # submitted before it came online
request_queue.get(timeout=30)
# Merger receives request
reqs = list(server.next())
self.assertEqual(len(reqs), 1)
a = reqs[0]
self.assertEqual(a.uuid, 'A')
client.remove(a)
self._assertEmptyRoots(client)
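

# Both the executor and merger APIs above share the same notion of a "lost"
# request: lostRequests() yields requests whose state claims a worker owns
# them (RUNNING, or PAUSED for builds) but which no worker currently holds
# locked, so that cleanup code can deal with them.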
class TestLocks(ZooKeeperBaseTestCase):
def test_locking_ctx(self):
lock = self.zk_client.client.Lock("/lock")
with locked(lock) as ctx_lock:
self.assertIs(lock, ctx_lock)
self.assertTrue(lock.is_acquired)
self.assertFalse(lock.is_acquired)
def test_already_locked_ctx(self):
lock = self.zk_client.client.Lock("/lock")
other_lock = self.zk_client.client.Lock("/lock")
other_lock.acquire()
with testtools.ExpectedException(
LockException, "Failed to acquire lock .*"
):
with locked(lock, blocking=False):
pass
self.assertFalse(lock.is_acquired)
def test_unlock_exception(self):
lock = self.zk_client.client.Lock("/lock")
with testtools.ExpectedException(RuntimeError):
with locked(lock):
self.assertTrue(lock.is_acquired)
raise RuntimeError
self.assertFalse(lock.is_acquired)
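

# A minimal usage sketch (not run as a test) for the locked() helper
# exercised above; assumes a connected kazoo client as used throughout
# this file.
def _example_locked_usage(zk_client):
    lock = zk_client.client.Lock("/example/lock")
    with locked(lock, blocking=False):
        pass  # lock.is_acquired is True inside the block
    # The lock is released on exit, even if the body raised; a contended
    # non-blocking acquire raises LockException instead of entering.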
class TestLayoutStore(ZooKeeperBaseTestCase):
def test_layout_state(self):
store = LayoutStateStore(self.zk_client, lambda: None)
layout_uuid = uuid.uuid4().hex
branch_cache_min_ltimes = {
"gerrit": 123,
"github": 456,
}
state = LayoutState("tenant", "hostname", 0, layout_uuid,
branch_cache_min_ltimes, -1)
store["tenant"] = state
self.assertEqual(state, store["tenant"])
self.assertNotEqual(state.ltime, -1)
self.assertNotEqual(store["tenant"].ltime, -1)
self.assertEqual(store["tenant"].branch_cache_min_ltimes,
branch_cache_min_ltimes)
def test_ordering(self):
layout_uuid = uuid.uuid4().hex
state_one = LayoutState("tenant", "hostname", 1, layout_uuid,
{}, -1, ltime=1)
state_two = LayoutState("tenant", "hostname", 2, layout_uuid,
{}, -1, ltime=2)
self.assertGreater(state_two, state_one)
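
    # LayoutState instances are ordered so that the later state compares
    # greater; in the assertion above state_two is newer on every axis
    # (higher ltime and reconfiguration counter), so state_two > state_one.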
def test_cleanup(self):
store = LayoutStateStore(self.zk_client, lambda: None)
        min_ltimes = defaultdict(lambda: -1)
min_ltimes['foo'] = 1
state_one = LayoutState("tenant", "hostname", 1, uuid.uuid4().hex,
{}, -1, ltime=1)
state_two = LayoutState("tenant", "hostname", 2, uuid.uuid4().hex,
{}, -1, ltime=2)
store.setMinLtimes(state_one, min_ltimes)
store.setMinLtimes(state_two, min_ltimes)
store['tenant'] = state_one
# Run with the default delay of 5 minutes; nothing should be deleted.
store.cleanup()
self.assertEqual(store.get('tenant'), state_one)
self.assertIsNotNone(
self.zk_client.client.exists(
f'/zuul/layout-data/{state_one.uuid}'))
self.assertIsNotNone(
self.zk_client.client.exists(
f'/zuul/layout-data/{state_two.uuid}'))
# Run again with immediate deletion
store.cleanup(delay=0)
self.assertEqual(store.get('tenant'), state_one)
self.assertIsNotNone(
self.zk_client.client.exists(
f'/zuul/layout-data/{state_one.uuid}'))
self.assertIsNone(
self.zk_client.client.exists(
f'/zuul/layout-data/{state_two.uuid}'))
class TestSystemConfigCache(ZooKeeperBaseTestCase):
def setUp(self):
super().setUp()
self.config_cache = SystemConfigCache(self.zk_client, lambda: None)
def test_set_get(self):
uac = model.UnparsedAbideConfig()
uac.tenants = {"foo": "bar"}
uac.authz_rules = ["bar", "foo"]
attrs = model.SystemAttributes.fromDict({
"use_relative_priority": True,
"max_hold_expiration": 7200,
"default_hold_expiration": 3600,
"default_ansible_version": "6",
"web_root": "/web/root",
"web_status_url": "/web/status",
"websocket_url": "/web/socket",
})
self.config_cache.set(uac, attrs)
uac_cached, cached_attrs = self.config_cache.get()
self.assertEqual(uac.uuid, uac_cached.uuid)
self.assertEqual(uac.tenants, uac_cached.tenants)
self.assertEqual(uac.authz_rules, uac_cached.authz_rules)
self.assertEqual(attrs, cached_attrs)
def test_cache_empty(self):
with testtools.ExpectedException(RuntimeError):
self.config_cache.get()
def test_ltime(self):
uac = model.UnparsedAbideConfig()
attrs = model.SystemAttributes()
self.assertEqual(self.config_cache.ltime, -1)
self.config_cache.set(uac, attrs)
self.assertGreater(self.config_cache.ltime, -1)
self.assertEqual(uac.ltime, self.config_cache.ltime)
old_ltime = self.config_cache.ltime
self.config_cache.set(uac, attrs)
self.assertGreater(self.config_cache.ltime, old_ltime)
self.assertEqual(uac.ltime, self.config_cache.ltime)
cache_uac, _ = self.config_cache.get()
self.assertEqual(uac.ltime, cache_uac.ltime)
def test_valid(self):
uac = model.UnparsedAbideConfig()
attrs = model.SystemAttributes()
self.assertFalse(self.config_cache.is_valid)
self.config_cache.set(uac, attrs)
self.assertTrue(self.config_cache.is_valid)
class DummyChange:
def __init__(self, project, data=None):
self.uid = uuid.uuid4().hex
self.project = project
self.cache_stat = None
if data is not None:
self.deserialize(data)
@property
def cache_version(self):
return -1 if self.cache_stat is None else self.cache_stat.version
def serialize(self):
d = self.__dict__.copy()
d.pop('cache_stat')
return d
def deserialize(self, data):
self.__dict__.update(data)
def getRelatedChanges(self, sched, relevant):
return
class DummyChangeCache(AbstractChangeCache):
CHANGE_TYPE_MAP = {
"DummyChange": DummyChange,
}
class DummySource:
def getProject(self, project_name):
return project_name
def getChange(self, change_key):
return DummyChange('project')
class DummyConnections:
def getSource(self, name):
return DummySource()
class DummyScheduler:
def __init__(self):
self.connections = DummyConnections()
class DummyConnection:
def __init__(self):
self.connection_name = "DummyConnection"
self.source = DummySource()
self.sched = DummyScheduler()
class TestChangeCache(ZooKeeperBaseTestCase):
def setUp(self):
super().setUp()
self.cache = DummyChangeCache(self.zk_client, DummyConnection())
def test_insert(self):
change_foo = DummyChange("project", {"foo": "bar"})
change_bar = DummyChange("project", {"bar": "foo"})
key_foo = ChangeKey('conn', 'project', 'change', 'foo', '1')
key_bar = ChangeKey('conn', 'project', 'change', 'bar', '1')
self.cache.set(key_foo, change_foo)
self.cache.set(key_bar, change_bar)
self.assertEqual(self.cache.get(key_foo), change_foo)
self.assertEqual(self.cache.get(key_bar), change_bar)
compressed_size, uncompressed_size = self.cache.estimateDataSize()
self.assertTrue(compressed_size != uncompressed_size != 0)
def test_update(self):
change = DummyChange("project", {"foo": "bar"})
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
self.cache.set(key, change)
change.number = 123
self.cache.set(key, change, change.cache_version)
# The change instance must stay the same
updated_change = self.cache.get(key)
self.assertIs(change, updated_change)
self.assertEqual(change.number, 123)
def test_delete(self):
change = DummyChange("project", {"foo": "bar"})
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
self.cache.set(key, change)
self.cache.delete(key)
self.assertIsNone(self.cache.get(key))
        # Deleting a non-existent key should not raise an exception
invalid_key = ChangeKey('conn', 'project', 'change', 'invalid', '1')
self.cache.delete(invalid_key)
def test_concurrent_delete(self):
change = DummyChange("project", {"foo": "bar"})
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
self.cache.set(key, change)
old_version = change.cache_version
# Simulate someone updating the cache after we decided to
# delete the entry
self.cache.set(key, change, old_version)
self.assertNotEqual(old_version, change.cache_version)
self.cache.delete(key, old_version)
# The change should still be in the cache
self.assertIsNotNone(self.cache.get(key))
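
    # The version argument turns delete into a compare-and-set; a sketch of
    # the race it guards against, using the names from this test:
    #
    #   old = change.cache_version
    #   cache.set(key, change, old)    # a concurrent writer bumps version
    #   cache.delete(key, old)         # stale version: delete is a no-op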
def test_prune(self):
change1 = DummyChange("project", {"foo": "bar"})
change2 = DummyChange("project", {"foo": "baz"})
key1 = ChangeKey('conn', 'project', 'change', 'foo', '1')
key2 = ChangeKey('conn', 'project', 'change', 'foo', '2')
self.cache.set(key1, change1)
self.cache.set(key2, change2)
self.cache.prune([key1], max_age=0)
self.assertIsNotNone(self.cache.get(key1))
self.assertIsNone(self.cache.get(key2))
def test_concurrent_update(self):
change = DummyChange("project", {"foo": "bar"})
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
self.cache.set(key, change)
# Attempt to update with the old change stat
with testtools.ExpectedException(ConcurrentUpdateError):
self.cache.set(key, change, change.cache_version - 1)
def test_change_update_retry(self):
change = DummyChange("project", {"foobar": 0})
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
self.cache.set(key, change)
# Update the change so we have a new cache stat.
change.foobar = 1
self.cache.set(key, change, change.cache_version)
self.assertEqual(self.cache.get(key).foobar, 1)
def updater(c):
c.foobar += 1
# Change the cache stat so the change is considered outdated and we
# need to retry because of a concurrent update error.
change.cache_stat = model.CacheStat(change.cache_stat.key,
uuid.uuid4().hex,
change.cache_version - 1,
change.cache_stat.mzxid - 1,
0, 0, 0)
updated_change = self.cache.updateChangeWithRetry(
key, change, updater)
self.assertEqual(updated_change.foobar, 2)
def test_cache_sync(self):
other_cache = DummyChangeCache(self.zk_client, DummyConnection())
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
change = DummyChange("project", {"foo": "bar"})
self.cache.set(key, change)
self.assertIsNotNone(other_cache.get(key))
change_other = other_cache.get(key)
change_other.number = 123
other_cache.set(key, change_other, change_other.cache_version)
for _ in iterate_timeout(10, "update to propagate"):
if getattr(change, "number", None) == 123:
break
other_cache.delete(key)
self.assertIsNone(self.cache.get(key))
def test_cache_sync_on_start(self):
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
change = DummyChange("project", {"foo": "bar"})
self.cache.set(key, change)
change.number = 123
self.cache.set(key, change, change.cache_version)
other_cache = DummyChangeCache(self.zk_client, DummyConnection())
other_cache.cleanup()
other_cache.cleanup()
self.assertIsNotNone(other_cache.get(key))
def test_cleanup(self):
change = DummyChange("project", {"foo": "bar"})
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
self.cache.set(key, change)
self.cache.cleanup()
self.assertEqual(len(self.cache._data_cleanup_candidates), 0)
self.assertEqual(
len(self.zk_client.client.get_children(self.cache.data_root)), 1)
change.number = 123
self.cache.set(key, change, change.cache_version)
self.cache.cleanup()
self.assertEqual(len(self.cache._data_cleanup_candidates), 1)
self.assertEqual(
len(self.zk_client.client.get_children(self.cache.data_root)), 2)
self.cache.cleanup()
self.assertEqual(len(self.cache._data_cleanup_candidates), 0)
self.assertEqual(
len(self.zk_client.client.get_children(self.cache.data_root)), 1)
def test_watch_cleanup(self):
change = DummyChange("project", {"foo": "bar"})
key = ChangeKey('conn', 'project', 'change', 'foo', '1')
self.cache.set(key, change)
for _ in iterate_timeout(10, "watch to be registered"):
if change.cache_stat.key._hash in self.cache._watched_keys:
break
self.cache.delete(key)
self.assertIsNone(self.cache.get(key))
for _ in iterate_timeout(10, "watch to be removed"):
if change.cache_stat.key._hash not in self.cache._watched_keys:
break
class DummyZKObjectMixin:
_retry_interval = 0.1
def getPath(self):
return f'/zuul/pipeline/{self.name}'
def serialize(self, context):
d = {'name': self.name,
'foo': self.foo}
return json.dumps(d).encode('utf-8')
class DummyZKObject(DummyZKObjectMixin, ZKObject):
pass
class DummyShardedZKObject(DummyZKObjectMixin, ShardedZKObject):
pass
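

# The dummy classes above supply the two pieces the ZKObject machinery
# needs from a subclass: getPath() to locate the znode and serialize() to
# produce its content; creation, refresh, update and deletion all come
# from the ZKObject/ShardedZKObject base classes exercised below.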
class TestZKObject(ZooKeeperBaseTestCase):
def _test_zk_object(self, zkobject_class):
stop_event = threading.Event()
self.zk_client.client.create('/zuul/pipeline', makepath=True)
# Create a new object
tenant_name = 'fake_tenant'
with tenant_write_lock(self.zk_client, tenant_name) as lock:
context = ZKContext(self.zk_client, lock, stop_event, self.log)
pipeline1 = zkobject_class.new(context,
name=tenant_name,
foo='bar')
self.assertEqual(pipeline1.foo, 'bar')
compressed_size, uncompressed_size = pipeline1.estimateDataSize()
self.assertTrue(compressed_size != uncompressed_size != 0)
# Load an object from ZK (that we don't already have)
with tenant_write_lock(self.zk_client, tenant_name) as lock:
context = ZKContext(self.zk_client, lock, stop_event, self.log)
pipeline2 = zkobject_class.fromZK(context,
'/zuul/pipeline/fake_tenant')
self.assertEqual(pipeline2.foo, 'bar')
compressed_size, uncompressed_size = pipeline2.estimateDataSize()
self.assertTrue(compressed_size != uncompressed_size != 0)
# Test that nested ZKObject sizes are summed up correctly
p1_compressed, p1_uncompressed = pipeline1.estimateDataSize()
p2_compressed, p2_uncompressed = pipeline2.estimateDataSize()
pipeline2._set(other=pipeline1)
compressed_size, uncompressed_size = pipeline2.estimateDataSize()
self.assertEqual(compressed_size, p1_compressed + p2_compressed)
self.assertEqual(uncompressed_size, p1_uncompressed + p2_uncompressed)
def get_ltime(obj):
zstat = self.zk_client.client.exists(obj.getPath())
return zstat.last_modified_transaction_id
# Update an object
with (tenant_write_lock(self.zk_client, tenant_name) as lock,
ZKContext(
self.zk_client, lock, stop_event, self.log) as context):
ltime1 = get_ltime(pipeline1)
pipeline1.updateAttributes(context, foo='qux')
self.assertEqual(pipeline1.foo, 'qux')
ltime2 = get_ltime(pipeline1)
self.assertNotEqual(ltime1, ltime2)
# This should not produce an unnecessary write
pipeline1.updateAttributes(context, foo='qux')
ltime3 = get_ltime(pipeline1)
self.assertEqual(ltime2, ltime3)
# Update an object using an active context
with (tenant_write_lock(self.zk_client, tenant_name) as lock,
ZKContext(
self.zk_client, lock, stop_event, self.log) as context):
ltime1 = get_ltime(pipeline1)
with pipeline1.activeContext(context):
pipeline1.foo = 'baz'
self.assertEqual(pipeline1.foo, 'baz')
ltime2 = get_ltime(pipeline1)
self.assertNotEqual(ltime1, ltime2)
# This should not produce an unnecessary write
with pipeline1.activeContext(context):
pipeline1.foo = 'baz'
ltime3 = get_ltime(pipeline1)
self.assertEqual(ltime2, ltime3)
# Update of object w/o active context should not work
with testtools.ExpectedException(Exception):
pipeline1.foo = 'nope'
self.assertEqual(pipeline1.foo, 'baz')
# Refresh an existing object
with (tenant_write_lock(self.zk_client, tenant_name) as lock,
ZKContext(
self.zk_client, lock, stop_event, self.log) as context):
pipeline2.refresh(context)
self.assertEqual(pipeline2.foo, 'baz')
# Delete an object
with (tenant_write_lock(self.zk_client, tenant_name) as lock,
ZKContext(self.zk_client, lock, stop_event, self.log) as context):
self.assertIsNotNone(self.zk_client.client.exists(
'/zuul/pipeline/fake_tenant'))
pipeline2.delete(context)
self.assertIsNone(self.zk_client.client.exists(
'/zuul/pipeline/fake_tenant'))
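
    # The two update styles exercised above, side by side (a sketch):
    #
    #   pipeline.updateAttributes(context, foo='qux')   # single write
    #
    #   with pipeline.activeContext(context):
    #       pipeline.foo = 'baz'                        # batched; written
    #                                                   # on context exit
    #
    # Both skip the ZooKeeper write entirely when nothing changed, which
    # is what the ltime comparisons above verify.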
def _test_zk_object_exception(self, zkobject_class):
# Exercise the exception handling in the _save method
stop_event = threading.Event()
self.zk_client.client.create('/zuul/pipeline', makepath=True)
# Create a new object
tenant_name = 'fake_tenant'
class ZKFailsOnUpdate:
def delete(self, *args, **kw):
raise ZookeeperError()
def set(self, *args, **kw):
raise ZookeeperError()
class FailsOnce:
def __init__(self, real_client):
self.count = 0
self._real_client = real_client
def create(self, *args, **kw):
return self._real_client.create(*args, **kw)
def delete(self, *args, **kw):
self.count += 1
if self.count < 2:
raise OperationTimeoutError()
return self._real_client.delete(*args, **kw)
def set(self, *args, **kw):
self.count += 1
if self.count < 2:
raise OperationTimeoutError()
return self._real_client.set(*args, **kw)
# Fail an update
with (tenant_write_lock(self.zk_client, tenant_name) as lock,
ZKContext(
self.zk_client, lock, stop_event, self.log) as context):
pipeline1 = zkobject_class.new(context,
name=tenant_name,
foo='one')
self.assertEqual(pipeline1.foo, 'one')
# Simulate a fatal ZK exception
context.client = ZKFailsOnUpdate()
with testtools.ExpectedException(ZookeeperError):
pipeline1.updateAttributes(context, foo='two')
# We should still have the old attribute
self.assertEqual(pipeline1.foo, 'one')
# Any other error is retryable
context.client = FailsOnce(self.zk_client.client)
pipeline1.updateAttributes(context, foo='three')
# This time it should be updated
self.assertEqual(pipeline1.foo, 'three')
# Repeat test using an active context
context.client = ZKFailsOnUpdate()
with testtools.ExpectedException(ZookeeperError):
with pipeline1.activeContext(context):
pipeline1.foo = 'four'
self.assertEqual(pipeline1.foo, 'three')
context.client = FailsOnce(self.zk_client.client)
with pipeline1.activeContext(context):
pipeline1.foo = 'five'
self.assertEqual(pipeline1.foo, 'five')
def test_zk_object(self):
self._test_zk_object(DummyZKObject)
def test_sharded_zk_object(self):
self._test_zk_object(DummyShardedZKObject)
def test_zk_object_exception(self):
self._test_zk_object_exception(DummyZKObject)
def test_sharded_zk_object_exception(self):
self._test_zk_object_exception(DummyShardedZKObject)
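

# Failure semantics verified above, in short: a fatal ZookeeperError
# aborts the save and leaves the in-memory object on its last known-good
# state, while transient errors such as OperationTimeoutError are retried
# until the write succeeds.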
class TestBranchCache(ZooKeeperBaseTestCase):
def test_branch_cache_protected_then_all(self):
conn = DummyConnection()
cache = BranchCache(self.zk_client, conn, self.component_registry)
test_data = {
'project1': {
'all': ['protected1', 'protected2',
'unprotected1', 'unprotected2'],
'protected': ['protected1', 'protected2'],
},
}
# Test a protected-only query followed by all
cache.setProjectBranches('project1', True,
test_data['project1']['protected'])
self.assertEqual(
sorted(cache.getProjectBranches('project1', True)),
test_data['project1']['protected']
)
self.assertRaises(
LookupError,
lambda: cache.getProjectBranches('project1', False)
)
cache.setProjectBranches('project1', False,
test_data['project1']['all'])
self.assertEqual(
sorted(cache.getProjectBranches('project1', True)),
test_data['project1']['protected']
)
self.assertEqual(
sorted(cache.getProjectBranches('project1', False)),
test_data['project1']['all']
)
def test_branch_cache_all_then_protected(self):
conn = DummyConnection()
cache = BranchCache(self.zk_client, conn, self.component_registry)
test_data = {
'project1': {
'all': ['protected1', 'protected2',
'unprotected1', 'unprotected2'],
'protected': ['protected1', 'protected2'],
},
}
self.assertRaises(
LookupError,
lambda: cache.getProjectBranches('project1', True)
)
self.assertRaises(
LookupError,
lambda: cache.getProjectBranches('project1', False)
)
# Test the other order; all followed by protected-only
cache.setProjectBranches('project1', False,
test_data['project1']['all'])
self.assertRaises(
LookupError,
lambda: cache.getProjectBranches('project1', True)
)
self.assertEqual(
sorted(cache.getProjectBranches('project1', False)),
test_data['project1']['all']
)
cache.setProjectBranches('project1', True,
test_data['project1']['protected'])
self.assertEqual(
sorted(cache.getProjectBranches('project1', True)),
test_data['project1']['protected']
)
self.assertEqual(
sorted(cache.getProjectBranches('project1', False)),
test_data['project1']['all']
)
def test_branch_cache_change_protected(self):
conn = DummyConnection()
cache = BranchCache(self.zk_client, conn, self.component_registry)
data1 = {
'project1': {
'all': ['newbranch', 'protected'],
'protected': ['protected'],
},
}
data2 = {
'project1': {
'all': ['newbranch', 'protected'],
'protected': ['newbranch', 'protected'],
},
}
# Create a new unprotected branch
cache.setProjectBranches('project1', False,
data1['project1']['all'])
cache.setProjectBranches('project1', True,
data1['project1']['protected'])
self.assertEqual(
cache.getProjectBranches('project1', True),
data1['project1']['protected']
)
self.assertEqual(
sorted(cache.getProjectBranches('project1', False)),
data1['project1']['all']
)
# Change it to protected
cache.setProtected('project1', 'newbranch', True)
self.assertEqual(
sorted(cache.getProjectBranches('project1', True)),
data2['project1']['protected']
)
self.assertEqual(
sorted(cache.getProjectBranches('project1', False)),
data2['project1']['all']
)
# Change it back
cache.setProtected('project1', 'newbranch', False)
self.assertEqual(
sorted(cache.getProjectBranches('project1', True)),
data1['project1']['protected']
)
self.assertEqual(
sorted(cache.getProjectBranches('project1', False)),
data1['project1']['all']
)
def test_branch_cache_lookup_error(self):
# Test that a missing branch cache entry results in a LookupError
conn = DummyConnection()
cache = BranchCache(self.zk_client, conn, self.component_registry)
self.assertRaises(
LookupError,
lambda: cache.getProjectBranches('project1', True)
)
self.assertIsNone(
cache.getProjectBranches('project1', True, default=None)
)
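

# The branch cache keeps "protected only" and "all branches" results
# separately; querying a flavor that was never populated raises
# LookupError (or returns the caller-supplied default), forcing callers
# back to the source instead of trusting incomplete data.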
class TestConfigurationErrorList(ZooKeeperBaseTestCase):
def test_config_error_list(self):
stop_event = threading.Event()
self.zk_client.client.create('/zuul/pipeline', makepath=True)
source_context = model.SourceContext(
'cname', 'project', 'connection', 'branch', 'test', True)
m1 = yaml.Mark('name', 0, 0, 0, '', 0)
m2 = yaml.Mark('name', 1, 0, 0, '', 0)
start_mark = model.ZuulMark(m1, m2, 'hello')
# Create a new object
with (tenant_write_lock(self.zk_client, 'test') as lock,
ZKContext(
self.zk_client, lock, stop_event, self.log) as context):
pipeline = DummyZKObject.new(context, name="test", foo="bar")
e1 = model.ConfigurationError(
source_context, start_mark, "Test error1")
e2 = model.ConfigurationError(
source_context, start_mark, "Test error2")
with pipeline.activeContext(context):
path = '/zuul/pipeline/test/errors'
el1 = model.ConfigurationErrorList.new(
context, errors=[e1, e2], _path=path)
el2 = model.ConfigurationErrorList.fromZK(
context, path, _path=path)
self.assertEqual(el1.errors, el2.errors)
self.assertFalse(el1 is el2)
self.assertEqual(el1.errors[0], el2.errors[0])
self.assertEqual(el1.errors[0], e1)
self.assertNotEqual(e1, e2)
self.assertEqual([e1, e2], [e1, e2])
class TestBlobStore(ZooKeeperBaseTestCase):
def test_blob_store(self):
stop_event = threading.Event()
self.zk_client.client.create('/zuul/pipeline', makepath=True)
# Create a new object
tenant_name = 'fake_tenant'
start_ltime = self.zk_client.getCurrentLtime()
with (tenant_write_lock(self.zk_client, tenant_name) as lock,
ZKContext(
self.zk_client, lock, stop_event, self.log) as context):
bs = BlobStore(context)
with testtools.ExpectedException(KeyError):
bs.get('nope')
path = bs.put(b'something')
self.assertEqual(bs.get(path), b'something')
self.assertEqual([x for x in bs], [path])
self.assertEqual(len(bs), 1)
self.assertTrue(path in bs)
self.assertFalse('nope' in bs)
self.assertTrue(bs._checkKey(path))
self.assertFalse(bs._checkKey('nope'))
cur_ltime = self.zk_client.getCurrentLtime()
self.assertEqual(bs.getKeysLastUsedBefore(cur_ltime), {path})
self.assertEqual(bs.getKeysLastUsedBefore(start_ltime), set())
bs.delete(path, cur_ltime)
with testtools.ExpectedException(KeyError):
bs.get(path)
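

# A usage sketch (not run as a test) for the BlobStore API exercised
# above; put() returns the key under which the blob was stored and get()
# retrieves it. Assumes a ZKContext constructed the same way as in the
# test.
def _example_blob_store(context):
    bs = BlobStore(context)
    key = bs.put(b'something')
    assert bs.get(key) == b'something'
    assert key in bs and len(bs) == 1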
class TestPipelineInit(ZooKeeperBaseTestCase):
# Test the initialize-on-refresh code paths of various pipeline objects
def test_pipeline_state_new_object(self):
# Test the initialize-on-refresh code path with no existing object
tenant = model.Tenant('tenant')
pipeline = model.Pipeline('gate', tenant)
layout = model.Layout(tenant)
tenant.layout = layout
pipeline.state = model.PipelineState.create(
pipeline, pipeline.state)
context = ZKContext(self.zk_client, None, None, self.log)
pipeline.state.refresh(context)
self.assertTrue(self.zk_client.client.exists(pipeline.state.getPath()))
self.assertEqual(pipeline.state.layout_uuid, layout.uuid)
def test_pipeline_state_existing_object(self):
# Test the initialize-on-refresh code path with a pre-existing object
tenant = model.Tenant('tenant')
pipeline = model.Pipeline('gate', tenant)
layout = model.Layout(tenant)
tenant.layout = layout
pipeline.manager = mock.Mock()
pipeline.state = model.PipelineState.create(
pipeline, pipeline.state)
pipeline.change_list = model.PipelineChangeList.create(
pipeline)
context = ZKContext(self.zk_client, None, None, self.log)
# We refresh the change list here purely for the side effect
# of creating the pipeline state object with no data (the list
# is a subpath of the state object).
pipeline.change_list.refresh(context)
pipeline.state.refresh(context)
self.assertTrue(
self.zk_client.client.exists(pipeline.change_list.getPath()))
self.assertTrue(self.zk_client.client.exists(pipeline.state.getPath()))
self.assertEqual(pipeline.state.layout_uuid, layout.uuid)
def test_pipeline_change_list_new_object(self):
# Test the initialize-on-refresh code path with no existing object
tenant = model.Tenant('tenant')
pipeline = model.Pipeline('gate', tenant)
layout = model.Layout(tenant)
tenant.layout = layout
pipeline.state = model.PipelineState.create(
pipeline, pipeline.state)
pipeline.change_list = model.PipelineChangeList.create(
pipeline)
context = ZKContext(self.zk_client, None, None, self.log)
pipeline.change_list.refresh(context)
self.assertTrue(
self.zk_client.client.exists(pipeline.change_list.getPath()))
pipeline.manager = mock.Mock()
pipeline.state.refresh(context)
self.assertEqual(pipeline.state.layout_uuid, layout.uuid)
def test_pipeline_change_list_new_object_without_lock(self):
# Test the initialize-on-refresh code path if we don't have
# the lock. This should fail.
tenant = model.Tenant('tenant')
pipeline = model.Pipeline('gate', tenant)
layout = model.Layout(tenant)
tenant.layout = layout
pipeline.state = model.PipelineState.create(
pipeline, pipeline.state)
pipeline.change_list = model.PipelineChangeList.create(
pipeline)
context = ZKContext(self.zk_client, None, None, self.log)
with testtools.ExpectedException(NoNodeError):
pipeline.change_list.refresh(context, allow_init=False)
self.assertIsNone(
self.zk_client.client.exists(pipeline.change_list.getPath()))
pipeline.manager = mock.Mock()
pipeline.state.refresh(context)
self.assertEqual(pipeline.state.layout_uuid, layout.uuid)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_zk.py
|
test_zk.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import logging
import textwrap
import testtools
import voluptuous as vs
from collections import defaultdict
from configparser import ConfigParser
from zuul import model
from zuul.lib.ansible import AnsibleManager
from zuul.configloader import (
AuthorizationRuleParser, ConfigLoader, safe_load_yaml
)
from zuul.model import Abide, MergeRequest, SourceContext
from zuul.zk.locks import tenant_read_lock
from tests.base import iterate_timeout, ZuulTestCase, simple_layout
class TestConfigLoader(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_update_system_config(self):
"""Test if the system config can be updated without a scheduler."""
sched = self.scheds.first.sched
# Get the current system config before instantiating a ConfigLoader.
unparsed_abide, zuul_globals = sched.system_config_cache.get()
ansible_manager = AnsibleManager(
default_version=zuul_globals.default_ansible_version)
loader = ConfigLoader(
sched.connections, self.zk_client, zuul_globals, sched.statsd,
keystorage=sched.keystore)
abide = Abide()
loader.loadTPCs(abide, unparsed_abide)
loader.loadAuthzRules(abide, unparsed_abide)
for tenant_name in unparsed_abide.tenants:
tlock = tenant_read_lock(self.zk_client, tenant_name)
# Consider all caches valid (min. ltime -1)
min_ltimes = defaultdict(lambda: defaultdict(lambda: -1))
with tlock:
tenant = loader.loadTenant(
abide, tenant_name, ansible_manager, unparsed_abide,
min_ltimes=min_ltimes)
self.assertEqual(tenant.name, tenant_name)
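

# The sequence above (loadTPCs, loadAuthzRules, then loadTenant under a
# tenant read lock) mirrors what a scheduler does when priming its local
# layout; passing min_ltimes of -1 marks every cache entry as valid (see
# the comment above), so no fresh cat jobs are required.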
class TenantParserTestCase(ZuulTestCase):
create_project_keys = True
CONFIG_SET = set(['pipeline', 'job', 'semaphore', 'project',
'project-template', 'nodeset', 'secret', 'queue'])
UNTRUSTED_SET = CONFIG_SET - set(['pipeline'])
def setupAllProjectKeys(self, config: ConfigParser):
for project in ['common-config', 'org/project1', 'org/project2']:
self.setupProjectKeys('gerrit', project)
class TestTenantSimple(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/simple.yaml'
def test_tenant_simple(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
project = tenant.config_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.CONFIG_SET, tpc.load_classes)
project = tenant.untrusted_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET, tpc.load_classes)
project = tenant.untrusted_projects[1]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET, tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
project1_config = tenant.layout.project_configs.get(
'review.example.com/org/project1')
self.assertTrue('common-config-job' in
project1_config[0].pipelines['check'].job_list.jobs)
self.assertTrue('project1-job' in
project1_config[1].pipelines['check'].job_list.jobs)
project2_config = tenant.layout.project_configs.get(
'review.example.com/org/project2')
self.assertTrue('common-config-job' in
project2_config[0].pipelines['check'].job_list.jobs)
self.assertTrue('project2-job' in
project2_config[1].pipelines['check'].job_list.jobs)
def test_cache(self):
# A full reconfiguration should issue cat jobs for all repos
with self.assertLogs('zuul.TenantParser', level='DEBUG') as full_logs:
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.log.debug("Full reconfigure logs:")
for x in full_logs.output:
self.log.debug(x)
self.assertRegexInList(
r'Submitting cat job (.*?) for gerrit common-config master',
full_logs.output)
self.assertRegexInList(
r'Submitting cat job (.*?) for gerrit org/project1 master',
full_logs.output)
self.assertRegexInList(
r'Submitting cat job (.*?) for gerrit org/project2 master',
full_logs.output)
self.assertRegexNotInList(
r'Using files from cache',
full_logs.output)
first = self.scheds.first
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
if state_one:
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one):
break
self.log.debug("Freeze scheduler-1")
# Start the log context manager for the update test below now,
# so that it's already in place when we release the second
# scheduler lock.
with self.assertLogs('zuul.TenantParser', level='DEBUG'
) as update_logs:
lock1 = second.sched.layout_update_lock
lock2 = second.sched.run_handler_lock
with lock1, lock2:
# A tenant reconfiguration should use the cache except for the
# updated project.
file_dict = {'zuul.d/test.yaml': ''}
# Now start a second log context manager just for the
# tenant reconfig test
with self.assertLogs('zuul.TenantParser', level='DEBUG') \
as tenant_logs:
A = self.fake_gerrit.addFakeChange(
'org/project1', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled(matcher=[first])
self.log.debug("Tenant reconfigure logs:")
for x in tenant_logs.output:
self.log.debug(x)
self.assertRegexNotInList(
r'Submitting cat job (.*?) for '
r'gerrit common-config master',
tenant_logs.output)
self.assertRegexInList(
r'Submitting cat job (.*?) for '
r'gerrit org/project1 master',
tenant_logs.output)
self.assertRegexNotInList(
r'Submitting cat job (.*?) for '
r'gerrit org/project2 master',
tenant_logs.output)
self.assertRegexNotInList(
r'Using files from cache',
tenant_logs.output)
# A layout update should use the unparsed config cache
# except for what needs to be refreshed from the files
# cache in ZK.
self.log.debug("Thaw scheduler-1")
self.waitUntilSettled()
self.log.debug("Layout update logs:")
for x in update_logs.output:
self.log.debug(x)
self.assertRegexNotInList(
r'Submitting cat job',
update_logs.output)
self.assertRegexNotInList(
r'Using files from cache for project '
r'review.example.com/common-config @master.*',
update_logs.output)
self.assertRegexInList(
r'Using files from cache for project '
r'review.example.com/org/project1 @master.*',
update_logs.output)
self.assertRegexNotInList(
r'Using files from cache for project '
r'review.example.com/org/project2 @master.*',
update_logs.output)
def test_cache_new_branch(self):
first = self.scheds.first
lock1 = first.sched.layout_update_lock
        lock2 = first.sched.run_handler_lock
        with lock1, lock2:
self.create_branch('org/project1', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'stable'))
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
if state_one:
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one):
break
self.waitUntilSettled()
def test_variant_description(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
job = tenant.layout.jobs.get("project2-job")
self.assertEqual(job[0].variant_description, "")
self.assertEqual(job[1].variant_description, "stable")
def test_merge_anchor(self):
to_parse = textwrap.dedent(
"""
- job:
name: job1
vars: &docker_vars
registry: 'registry.example.org'
- job:
name: job2
vars:
<<: &buildenv_vars
image_name: foo
<<: *docker_vars
- job:
name: job3
vars:
<<: *buildenv_vars
<<: *docker_vars
""")
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
project = tenant.config_projects[0]
source_context = SourceContext(
project.canonical_name, project.name, project.connection_name,
'master', 'zuul.yaml', True)
data = safe_load_yaml(to_parse, source_context)
self.assertEqual(len(data), 3)
job_vars = [i['job']['vars'] for i in data]
# Test that merging worked
self.assertEqual(job_vars, [
{'registry': 'registry.example.org'},
{'registry': 'registry.example.org', 'image_name': 'foo'},
{'registry': 'registry.example.org', 'image_name': 'foo'},
])
def test_deny_localhost_nodeset(self):
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: localhost
nodes:
- name: localhost
label: ubuntu
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# No job should have run due to the change introducing a config error
self.assertHistory([])
self.assertTrue(A.reported)
self.assertTrue("Nodes named 'localhost' are not allowed."
in A.messages[0])
in_repo_conf = textwrap.dedent(
"""
- nodeset:
name: localhost-group
nodes:
- name: ubuntu
label: ubuntu
groups:
- name: localhost
nodes: ubuntu
""")
file_dict = {'zuul.yaml': in_repo_conf}
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# No job should have run due to the change introducing a config error
self.assertHistory([])
self.assertTrue(B.reported)
self.assertTrue("Groups named 'localhost' are not allowed."
in B.messages[0])
class TestTenantOverride(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/override.yaml'
def test_tenant_override(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2', 'org/project4'],
[x.name for x in tenant.untrusted_projects])
project = tenant.config_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.CONFIG_SET, tpc.load_classes)
project = tenant.untrusted_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
tpc.load_classes)
project = tenant.untrusted_projects[1]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(set(['job']), tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
project1_config = tenant.layout.project_configs.get(
'review.example.com/org/project1')
self.assertTrue('common-config-job' in
project1_config[0].pipelines['check'].job_list.jobs)
self.assertFalse('project1-job' in
project1_config[0].pipelines['check'].job_list.jobs)
project2_config = tenant.layout.project_configs.get(
'review.example.com/org/project2')
self.assertTrue('common-config-job' in
project2_config[0].pipelines['check'].job_list.jobs)
self.assertFalse('project2-job' in
project2_config[0].pipelines['check'].job_list.jobs)
class TestTenantGroups(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups.yaml'
def test_tenant_groups(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
project = tenant.config_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.CONFIG_SET, tpc.load_classes)
project = tenant.untrusted_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
tpc.load_classes)
project = tenant.untrusted_projects[1]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
project1_config = tenant.layout.project_configs.get(
'review.example.com/org/project1')
self.assertTrue('common-config-job' in
project1_config[0].pipelines['check'].job_list.jobs)
self.assertFalse('project1-job' in
project1_config[0].pipelines['check'].job_list.jobs)
project2_config = tenant.layout.project_configs.get(
'review.example.com/org/project2')
self.assertTrue('common-config-job' in
project2_config[0].pipelines['check'].job_list.jobs)
self.assertFalse('project2-job' in
project2_config[0].pipelines['check'].job_list.jobs)
class TestTenantGroups2(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups2.yaml'
def test_tenant_groups2(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2', 'org/project3'],
[x.name for x in tenant.untrusted_projects])
project = tenant.config_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.CONFIG_SET, tpc.load_classes)
project = tenant.untrusted_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project']),
tpc.load_classes)
project = tenant.untrusted_projects[1]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.UNTRUSTED_SET - set(['project', 'job']),
tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertFalse('project2-job' in tenant.layout.jobs)
project1_config = tenant.layout.project_configs.get(
'review.example.com/org/project1')
self.assertTrue('common-config-job' in
project1_config[0].pipelines['check'].job_list.jobs)
self.assertFalse('project1-job' in
project1_config[0].pipelines['check'].job_list.jobs)
project2_config = tenant.layout.project_configs.get(
'review.example.com/org/project2')
self.assertTrue('common-config-job' in
project2_config[0].pipelines['check'].job_list.jobs)
self.assertFalse('project2-job' in
project2_config[0].pipelines['check'].job_list.jobs)
class TestTenantGroups3(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups3.yaml'
def test_tenant_groups3(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(False, tenant.exclude_unprotected_branches)
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
project = tenant.config_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.CONFIG_SET, tpc.load_classes)
project = tenant.untrusted_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(set(['job']), tpc.load_classes)
project = tenant.untrusted_projects[1]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(set(['project', 'job']), tpc.load_classes)
self.assertTrue('common-config-job' in tenant.layout.jobs)
self.assertTrue('project1-job' in tenant.layout.jobs)
self.assertTrue('project2-job' in tenant.layout.jobs)
project1_config = tenant.layout.project_configs.get(
'review.example.com/org/project1')
self.assertTrue('common-config-job' in
project1_config[0].pipelines['check'].job_list.jobs)
self.assertFalse('project1-job' in
project1_config[0].pipelines['check'].job_list.jobs)
project2_config = tenant.layout.project_configs.get(
'review.example.com/org/project2')
self.assertTrue('common-config-job' in
project2_config[0].pipelines['check'].job_list.jobs)
self.assertTrue('project2-job' in
project2_config[1].pipelines['check'].job_list.jobs)
class TestTenantGroups4(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups4.yaml'
def test_tenant_groups(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
project = tenant.config_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.CONFIG_SET, tpc.load_classes)
project = tenant.untrusted_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(set([]),
tpc.load_classes)
project = tenant.untrusted_projects[1]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(set([]),
tpc.load_classes)
        # Check that only one merger:cat job was requested, since
        # org/project1 and org/project2 have an empty load_classes
self.assertEqual(1, len(self.merge_job_history.get(MergeRequest.CAT)))
old_layout = tenant.layout
# Check that creating a change in project1 doesn't cause a
# reconfiguration (due to a mistaken belief that we need to
# load config from it since there is none in memory).
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
new_layout = tenant.layout
self.assertEqual(old_layout, new_layout)
class TestTenantGroups5(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups5.yaml'
    def test_tenant_single_project_exclude(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1'],
[x.name for x in tenant.untrusted_projects])
project = tenant.config_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(self.CONFIG_SET, tpc.load_classes)
project = tenant.untrusted_projects[0]
tpc = tenant.project_configs[project.canonical_name]
self.assertEqual(set([]),
tpc.load_classes)
        # Check that only one merger:cat job was requested, since
        # org/project1 has an empty load_classes
self.assertEqual(1, len(self.merge_job_history.get(MergeRequest.CAT)))
class TestTenantFromScript(TestTenantSimple):
tenant_config_file = None
tenant_config_script_file = 'config/tenant-parser/tenant_config_script.py'
def test_tenant_simple(self):
TestTenantSimple.test_tenant_simple(self)
class TestTenantUnprotectedBranches(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/unprotected-branches.yaml'
def test_tenant_unprotected_branches(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(True, tenant.exclude_unprotected_branches)
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
tpc = tenant.project_configs
project_name = tenant.config_projects[0].canonical_name
self.assertEqual(False, tpc[project_name].exclude_unprotected_branches)
project_name = tenant.untrusted_projects[0].canonical_name
self.assertIsNone(tpc[project_name].exclude_unprotected_branches)
project_name = tenant.untrusted_projects[1].canonical_name
self.assertIsNone(tpc[project_name].exclude_unprotected_branches)
class TestTenantIncludeBranches(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/include-branches.yaml'
def test_tenant_branches(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
[x.name for x in tenant.untrusted_projects])
tpc = tenant.project_configs
project_name = tenant.config_projects[0].canonical_name
self.assertEqual(['master'], tpc[project_name].branches)
# No branches pass the filter at the start
project_name = tenant.untrusted_projects[0].canonical_name
self.assertEqual([], tpc[project_name].branches)
# Create the foo branch
self.create_branch('org/project1', 'foo')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'foo'))
self.waitUntilSettled()
# It should pass the filter
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
tpc = tenant.project_configs
project_name = tenant.untrusted_projects[0].canonical_name
self.assertEqual(['foo'], tpc[project_name].branches)
# Create the baz branch
self.create_branch('org/project1', 'baz')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project1', 'baz'))
self.waitUntilSettled()
# It should not pass the filter
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
tpc = tenant.project_configs
project_name = tenant.untrusted_projects[0].canonical_name
self.assertEqual(['foo'], tpc[project_name].branches)
class TestTenantExcludeBranches(TestTenantIncludeBranches):
tenant_config_file = 'config/tenant-parser/exclude-branches.yaml'
# Same test results as include-branches
class TestTenantExcludeIncludeBranches(TestTenantIncludeBranches):
tenant_config_file = 'config/tenant-parser/exclude-include-branches.yaml'
# Same test results as include-branches
class TestTenantExcludeAll(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/exclude-all.yaml'
def test_tenant_exclude_all(self):
"""
Tests that excluding all configuration of project1 in tenant-one
doesn't remove the configuration of project1 in tenant-two.
"""
# The config in org/project5 depends on config in org/project1 so
# validate that there are no config errors in that tenant.
tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
        self.assertEqual(
len(tenant_two.layout.loading_errors), 0,
"No error should have been accumulated")
class TestTenantConfigBranches(ZuulTestCase):
tenant_config_file = 'config/tenant-parser/simple.yaml'
def _validate_job(self, job, branch):
tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant_one.layout.getJobs(job)
        self.assertEqual(len(jobs), 1)
self.assertIn(jobs[0].source_context.branch, branch)
def test_tenant_config_load_branch(self):
"""
Tests that when specifying branches for a project only those branches
are parsed.
"""
# Job must be defined in master
common_job = 'common-config-job'
self._validate_job(common_job, 'master')
self.log.debug('Creating branches')
self.create_branch('common-config', 'stable')
self.create_branch('common-config', 'feat_x')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'common-config', 'stable'))
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'common-config', 'feat_x'))
self.waitUntilSettled()
# Job must be defined in master
self._validate_job(common_job, 'master')
# Reconfigure with load-branch stable for common-config
self.newTenantConfig('config/tenant-parser/branch.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Now job must be defined on stable branch
self._validate_job(common_job, 'stable')
# Now try to break the config in common-config on stable
in_repo_conf = textwrap.dedent(
"""
- job:
name: base
parent: non-existing
""")
file_dict = {'zuul.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('common-config', 'stable', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# No job should have run due to the change introducing a config error
self.assertHistory([])
self.assertTrue(A.reported)
self.assertTrue('Job non-existing not defined' in A.messages[0])
class TestSplitConfig(ZuulTestCase):
tenant_config_file = 'config/split-config/main.yaml'
def test_split_config(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertIn('project-test1', tenant.layout.jobs)
self.assertIn('project-test2', tenant.layout.jobs)
test1 = tenant.layout.getJob('project-test1')
self.assertEqual(test1.source_context.project_name, 'common-config')
self.assertEqual(test1.source_context.branch, 'master')
self.assertEqual(test1.source_context.path, 'zuul.d/jobs.yaml')
self.assertEqual(test1.source_context.trusted, True)
test2 = tenant.layout.getJob('project-test2')
self.assertEqual(test2.source_context.project_name, 'common-config')
self.assertEqual(test2.source_context.branch, 'master')
self.assertEqual(test2.source_context.path, 'zuul.d/more-jobs.yaml')
self.assertEqual(test2.source_context.trusted, True)
self.assertNotEqual(test1.source_context, test2.source_context)
self.assertTrue(test1.source_context.isSameProject(
test2.source_context))
project_config = tenant.layout.project_configs.get(
'review.example.com/org/project')
self.assertIn('project-test1',
project_config[0].pipelines['check'].job_list.jobs)
project1_config = tenant.layout.project_configs.get(
'review.example.com/org/project1')
self.assertIn('project1-project2-integration',
project1_config[0].pipelines['check'].job_list.jobs)
# This check ensures the .zuul.ignore flag file is working in
# the config directory.
        self.assertEqual(
len(tenant.layout.loading_errors), 0)
def test_dynamic_split_config(self):
in_repo_conf = textwrap.dedent(
"""
- project:
name: org/project1
check:
jobs:
- project-test1
""")
file_dict = {'.zuul.d/gate.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# project1-project2-integration test removed, only want project-test1
self.assertHistory([
dict(name='project-test1', result='SUCCESS', changes='1,1')])
def test_config_path_conflict(self):
def add_file(project, path):
new_file = textwrap.dedent(
"""
- job:
name: test-job
"""
)
file_dict = {path: new_file}
A = self.fake_gerrit.addFakeChange(project, 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
return A
log_fixture = self.useFixture(
fixtures.FakeLogger(level=logging.WARNING))
log_fixture._output.truncate(0)
A = add_file("common-config", "zuul.yaml")
self.assertIn("Configuration in common-config/zuul.d/jobs.yaml@master "
"ignored because project-branch is already configured",
log_fixture.output)
self.assertIn("Configuration in common-config/zuul.d/jobs.yaml@master "
"ignored because project-branch is already configured",
A.messages[0])
log_fixture._output.truncate(0)
add_file("org/project1", ".zuul.yaml")
self.assertIn("Configuration in org/project1/.zuul.d/gate.yaml@master "
"ignored because project-branch is already configured",
log_fixture.output)
class TestConfigConflict(ZuulTestCase):
tenant_config_file = 'config/conflict-config/main.yaml'
def test_conflict_config(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = sorted(tenant.layout.jobs.keys())
self.assertEqual(
['base', 'noop', 'trusted-zuul.yaml-job',
'untrusted-zuul.yaml-job'],
jobs)
class TestUnparsedConfigCache(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_config_caching(self):
sched = self.scheds.first.sched
cache = sched.unparsed_config_cache
tenant = sched.abide.tenants["tenant-one"]
common_cache = cache.getFilesCache("review.example.com/common-config",
"master")
upb_common_cache = sched.abide.getUnparsedBranchCache(
"review.example.com/common-config", "master")
tpc = tenant.project_configs["review.example.com/common-config"]
self.assertTrue(common_cache.isValidFor(tpc, min_ltime=-1))
self.assertEqual(len(common_cache), 1)
self.assertIn("zuul.yaml", common_cache)
self.assertTrue(len(common_cache["zuul.yaml"]) > 0)
self.assertEqual(upb_common_cache.ltime, common_cache.ltime)
project_cache = cache.getFilesCache("review.example.com/org/project",
"master")
upb_project_cache = sched.abide.getUnparsedBranchCache(
"review.example.com/org/project", "master")
# Cache of org/project should be valid but empty (no in-repo config)
tpc = tenant.project_configs["review.example.com/org/project"]
self.assertTrue(project_cache.isValidFor(tpc, min_ltime=-1))
self.assertEqual(len(project_cache), 0)
self.assertEqual(upb_project_cache.ltime, project_cache.ltime)
def test_cache_use(self):
sched = self.scheds.first.sched
        # Stop the cleanup jobs so they don't remove projects from
        # the cache during the test.
sched.apsched.shutdown()
tenant = sched.abide.tenants['tenant-one']
_, project = tenant.getProject('org/project2')
cache = self.scheds.first.sched.unparsed_config_cache
files_cache = cache.getFilesCache(
"review.example.com/org/project2", "master")
zk_initial_ltime = files_cache.ltime
upb_cache = sched.abide.getUnparsedBranchCache(
"review.example.com/org/project2", "master")
self.assertEqual(zk_initial_ltime, upb_cache.ltime)
# Get the current ltime from Zookeeper and run a full reconfiguration,
# so that we know all items in the cache have a larger ltime.
ltime = self.zk_client.getCurrentLtime()
self.scheds.first.fullReconfigure()
self.assertGreater(files_cache.ltime, zk_initial_ltime)
upb_cache = sched.abide.getUnparsedBranchCache(
"review.example.com/org/project2", "master")
self.assertEqual(files_cache.ltime, upb_cache.ltime)
# Clear the unparsed branch cache so all projects (except for
# org/project2) are retrieved from the cache in Zookeeper.
sched.abide.unparsed_project_branch_cache.clear()
del self.merge_job_history
# Create a tenant reconfiguration event with a known ltime that is
# smaller than the ltime of the items in the cache.
event = model.TenantReconfigureEvent(
tenant.name, project.canonical_name, branch_name=None)
event.zuul_event_ltime = ltime
sched.management_events[tenant.name].put(event, needs_result=False)
self.waitUntilSettled()
# As the cache should be valid (cache ltime of org/project2 newer than
# event ltime) we don't expect any cat jobs.
self.assertIsNone(self.merge_job_history.get(MergeRequest.CAT))
# Set canary value so we can detect if the configloader used
# the cache in Zookeeper (it shouldn't).
common_cache = cache.getFilesCache("review.example.com/common-config",
"master")
common_cache.setValidFor({"CANARY"}, set(), common_cache.ltime)
del self.merge_job_history
# Create a tenant reconfiguration event with a known ltime that is
# smaller than the ltime of the items in the cache.
event = model.TenantReconfigureEvent(
tenant.name, project.canonical_name, branch_name=None)
event.zuul_event_ltime = ltime
sched.management_events[tenant.name].put(event, needs_result=False)
self.waitUntilSettled()
upb_cache = sched.abide.getUnparsedBranchCache(
"review.example.com/common-config", "master")
self.assertEqual(common_cache.ltime, upb_cache.ltime)
self.assertNotIn("CANARY", upb_cache.extra_files_searched)
# As the cache should be valid (cache ltime of org/project2 newer than
# event ltime) we don't expect any cat jobs.
self.assertIsNone(self.merge_job_history.get(MergeRequest.CAT))
sched.apsched.start()
class TestAuthorizationRuleParser(ZuulTestCase):
tenant_config_file = 'config/tenant-parser/authorizations.yaml'
def test_rules_are_loaded(self):
rules = self.scheds.first.sched.abide.authz_rules
self.assertTrue('auth-rule-one' in rules,
self.scheds.first.sched.abide)
self.assertTrue('auth-rule-two' in rules,
self.scheds.first.sched.abide)
claims_1 = {'sub': 'venkman'}
claims_2 = {'sub': 'gozer',
'iss': 'another_dimension'}
self.assertTrue(rules['auth-rule-one'](claims_1))
        self.assertFalse(rules['auth-rule-one'](claims_2))
        self.assertFalse(rules['auth-rule-two'](claims_1))
self.assertTrue(rules['auth-rule-two'](claims_2))
def test_parse_simplest_rule_from_yaml(self):
rule_d = {'name': 'my-rule',
'conditions': {'sub': 'user1'}
}
rule = AuthorizationRuleParser().fromYaml(rule_d)
self.assertEqual('my-rule', rule.name)
claims = {'iss': 'my-idp',
'sub': 'user1',
'groups': ['admin', 'ghostbusters']}
self.assertTrue(rule(claims))
claims = {'iss': 'my-2nd-idp',
'sub': 'user2',
'groups': ['admin', 'ghostbusters']}
self.assertFalse(rule(claims))
def test_parse_AND_rule_from_yaml(self):
rule_d = {'name': 'my-rule',
'conditions': {'sub': 'user1',
'iss': 'my-idp'}
}
rule = AuthorizationRuleParser().fromYaml(rule_d)
self.assertEqual('my-rule', rule.name)
claims = {'iss': 'my-idp',
'sub': 'user1',
'groups': ['admin', 'ghostbusters']}
self.assertTrue(rule(claims))
claims = {'iss': 'my-2nd-idp',
'sub': 'user1',
'groups': ['admin', 'ghostbusters']}
self.assertFalse(rule(claims))
def test_parse_OR_rule_from_yaml(self):
rule_d = {'name': 'my-rule',
'conditions': [{'sub': 'user1',
'iss': 'my-idp'},
{'sub': 'user2',
'iss': 'my-2nd-idp'}
]
}
rule = AuthorizationRuleParser().fromYaml(rule_d)
self.assertEqual('my-rule', rule.name)
claims = {'iss': 'my-idp',
'sub': 'user1',
'groups': ['admin', 'ghostbusters']}
self.assertTrue(rule(claims))
claims = {'iss': 'my-2nd-idp',
'sub': 'user1',
'groups': ['admin', 'ghostbusters']}
self.assertFalse(rule(claims))
claims = {'iss': 'my-2nd-idp',
'sub': 'user2',
'groups': ['admin', 'ghostbusters']}
self.assertTrue(rule(claims))
def test_parse_rule_with_list_claim_from_yaml(self):
rule_d = {'name': 'my-rule',
'conditions': [{'groups': 'ghostbusters',
'iss': 'my-idp'},
{'sub': 'user2',
'iss': 'my-2nd-idp'}
],
}
rule = AuthorizationRuleParser().fromYaml(rule_d)
self.assertEqual('my-rule', rule.name)
claims = {'iss': 'my-idp',
'sub': 'user1',
'groups': ['admin', 'ghostbusters']}
self.assertTrue(rule(claims))
claims = {'iss': 'my-idp',
'sub': 'user1',
'groups': ['admin', 'ghostbeaters']}
self.assertFalse(rule(claims))
claims = {'iss': 'my-2nd-idp',
'sub': 'user2',
'groups': ['admin', 'ghostbusters']}
self.assertTrue(rule(claims))
def test_check_complex_rule_from_yaml_jsonpath(self):
rule_d = {'name': 'my-rule',
'conditions': [{'hello.this.is': 'a complex value'},
],
}
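        # The dotted condition key is evaluated as a JSONPath expression
        # against the nested claims dict, equivalent to the explicit
        # nested-dict form exercised in the next test.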
rule = AuthorizationRuleParser().fromYaml(rule_d)
self.assertEqual('my-rule', rule.name)
claims = {'iss': 'my-idp',
'hello': {
'this': {
'is': 'a complex value'
},
'and': {
'this one': 'too'
}
}
}
self.assertTrue(rule(claims))
def test_check_complex_rule_from_yaml_nested_dict(self):
rule_d = {'name': 'my-rule',
'conditions': [{'hello': {'this': {'is': 'a complex value'
}
}
},
],
}
rule = AuthorizationRuleParser().fromYaml(rule_d)
self.assertEqual('my-rule', rule.name)
claims = {'iss': 'my-idp',
'hello': {
'this': {
'is': 'a complex value'
},
'and': {
'this one': 'too'
}
}
}
self.assertTrue(rule(claims))
class TestAuthorizationRuleParserWithTemplating(ZuulTestCase):
tenant_config_file = 'config/tenant-parser/authorizations-templating.yaml'
def test_rules_are_loaded(self):
rules = self.scheds.first.sched.abide.authz_rules
self.assertTrue('tenant-admin' in rules, self.scheds.first.sched.abide)
self.assertTrue('tenant-admin-complex' in rules,
self.scheds.first.sched.abide)
def test_tenant_substitution(self):
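        # The fixture rule presumably contains a tenant-scoped
        # placeholder (something like '{tenant.name}-admin'), so the
        # same rule authorizes a different admin group in each tenant.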
claims_1 = {'group': 'tenant-one-admin'}
claims_2 = {'group': 'tenant-two-admin'}
rules = self.scheds.first.sched.abide.authz_rules
tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
self.assertTrue(rules['tenant-admin'](claims_1, tenant_one))
self.assertTrue(rules['tenant-admin'](claims_2, tenant_two))
        self.assertFalse(rules['tenant-admin'](claims_1, tenant_two))
        self.assertFalse(rules['tenant-admin'](claims_2, tenant_one))
def test_tenant_substitution_in_list(self):
claims_1 = {'group': ['tenant-one-admin', 'some-other-tenant']}
claims_2 = {'group': ['tenant-two-admin', 'some-other-tenant']}
rules = self.scheds.first.sched.abide.authz_rules
tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
self.assertTrue(rules['tenant-admin'](claims_1, tenant_one))
self.assertTrue(rules['tenant-admin'](claims_2, tenant_two))
        self.assertFalse(rules['tenant-admin'](claims_1, tenant_two))
        self.assertFalse(rules['tenant-admin'](claims_2, tenant_one))
def test_tenant_substitution_in_dict(self):
claims_2 = {
'path': {
'to': {
'group': 'tenant-two-admin'
}
}
}
rules = self.scheds.first.sched.abide.authz_rules
tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
        self.assertFalse(rules['tenant-admin-complex'](claims_2, tenant_one))
self.assertTrue(rules['tenant-admin-complex'](claims_2, tenant_two))
class TestTenantExtra(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/extra.yaml'
def test_tenant_extra(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertTrue('project2-extra-file' in tenant.layout.jobs)
self.assertTrue('project2-extra-dir' in tenant.layout.jobs)
self.assertTrue('project6-extra-dir' in tenant.layout.jobs)
def test_dynamic_extra(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project2-extra-file2
parent: common-config-job
- project:
name: org/project2
check:
jobs:
- project2-extra-file2
""")
file_dict = {'extra.yaml': in_repo_conf, '.zuul.yaml': ''}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='common-config-job', result='SUCCESS', changes='1,1'),
dict(name='project2-extra-file2', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_dynamic_extra_dir(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project6-extra-dir2
parent: common-config-job
- project:
check:
jobs:
- project6-extra-dir
- project6-extra-dir2
""")
file_dict = {
'other/extra.d/new/extra.yaml': in_repo_conf,
}
A = self.fake_gerrit.addFakeChange('org/project6', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project6-extra-dir', result='SUCCESS', changes='1,1'),
dict(name='project6-extra-dir2', result='SUCCESS', changes='1,1'),
], ordered=False)
def test_extra_reconfigure(self):
in_repo_conf = textwrap.dedent(
"""
- job:
name: project2-extra-file2
parent: common-config-job
- project:
name: org/project2
check:
jobs:
- project2-extra-file2
""")
file_dict = {'extra.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getRefUpdatedEvent())
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='common-config-job', result='SUCCESS', changes='2,1'),
dict(name='project2-job', result='SUCCESS', changes='2,1'),
dict(name='project2-extra-file2', result='SUCCESS', changes='2,1'),
], ordered=False)
class TestTenantExtraConfigsInvalidType(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/extra_invalid_type.yaml'
# This test raises a config error during the startup of the test
# case which makes the first scheduler fail during its startup.
# The second (or any additional) scheduler won't even run as the
# startup is serialized in tests/base.py.
# Thus it doesn't make sense to execute this test with multiple
# schedulers.
scheduler_count = 1
def setUp(self):
err = "Expected str or list of str for extra-config-paths.*"
with testtools.ExpectedException(vs.MultipleInvalid, err):
super().setUp()
def test_tenant_extra_configs_invalid_type(self):
# The magic is in setUp
pass
class TestTenantExtraConfigsInvalidValue(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/extra_invalid_value.yaml'
# This test raises a config error during the startup of the test
# case which makes the first scheduler fail during its startup.
# The second (or any additional) scheduler won't even run as the
# startup is serialized in tests/base.py.
# Thus it doesn't make sense to execute this test with multiple
# schedulers.
scheduler_count = 1
def setUp(self):
err = "Default zuul configs are not allowed in extra-config-paths.*"
with testtools.ExpectedException(vs.MultipleInvalid, err):
super().setUp()
def test_tenant_extra_configs_invalid_value(self):
# The magic is in setUp
pass
class TestTenantDuplicate(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/duplicate.yaml'
# This test raises a config error during the startup of the test
# case which makes the first scheduler fail during its startup.
# The second (or any additional) scheduler won't even run as the
# startup is serialized in tests/base.py.
# Thus it doesn't make sense to execute this test with multiple
# schedulers.
scheduler_count = 1
def setUp(self):
with testtools.ExpectedException(Exception, 'Duplicate configuration'):
super().setUp()
def test_tenant_dupe(self):
# The magic is in setUp
pass
class TestMergeMode(ZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
def _test_default_merge_mode(self, driver_default, host):
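        # As the project names encode: a merge-mode set on the project
        # itself wins, otherwise the value from a matching regex stanza
        # applies, and the driver default is used when neither sets one.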
layout = self.scheds.first.sched.abide.tenants.get('tenant-one').layout
md = layout.getProjectMetadata(
f'{host}/org/project-empty')
self.assertEqual(driver_default, md.merge_mode)
md = layout.getProjectMetadata(
f'{host}/org/regex-empty-project-empty')
self.assertEqual(driver_default, md.merge_mode)
md = layout.getProjectMetadata(
f'{host}/org/regex-empty-project-squash')
self.assertEqual(model.MERGER_SQUASH_MERGE, md.merge_mode)
md = layout.getProjectMetadata(
f'{host}/org/regex-cherry-project-empty')
self.assertEqual(model.MERGER_CHERRY_PICK, md.merge_mode)
md = layout.getProjectMetadata(
f'{host}/org/regex-cherry-project-squash')
self.assertEqual(model.MERGER_SQUASH_MERGE, md.merge_mode)
@simple_layout('layouts/merge-mode-default.yaml')
def test_default_merge_mode_gerrit(self):
self._test_default_merge_mode(model.MERGER_MERGE_RESOLVE,
'review.example.com')
@simple_layout('layouts/merge-mode-default.yaml', driver='github')
def test_default_merge_mode_github(self):
self._test_default_merge_mode(model.MERGER_MERGE,
'github.com')
class TestDefaultBranch(ZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
@simple_layout('layouts/default-branch.yaml')
def test_default_branch(self):
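        # As the project names encode: a default-branch set on the
        # project itself wins, otherwise the value from a matching regex
        # stanza applies, falling back to 'master'.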
layout = self.scheds.first.sched.abide.tenants.get('tenant-one').layout
md = layout.getProjectMetadata(
'review.example.com/org/project-default')
self.assertEqual('master', md.default_branch)
md = layout.getProjectMetadata(
'review.example.com/org/regex-default-project-empty')
self.assertEqual('master', md.default_branch)
md = layout.getProjectMetadata(
'review.example.com/org/regex-default-project-develop')
self.assertEqual('develop', md.default_branch)
md = layout.getProjectMetadata(
'review.example.com/org/regex-override-project-empty')
self.assertEqual('regex', md.default_branch)
md = layout.getProjectMetadata(
'review.example.com/org/regex-override-project-develop')
self.assertEqual('develop', md.default_branch)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_configloader.py
|
test_configloader.py
|
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from zuul.driver.sql import SQLDriver
from zuul.zk import ZooKeeperClient
from tests.base import (
BaseTestCase, MySQLSchemaFixture, PostgresqlSchemaFixture
)
class DBBaseTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.setupZK()
self.zk_client = ZooKeeperClient(
self.zk_chroot_fixture.zk_hosts,
tls_cert=self.zk_chroot_fixture.zookeeper_cert,
tls_key=self.zk_chroot_fixture.zookeeper_key,
tls_ca=self.zk_chroot_fixture.zookeeper_ca
)
self.addCleanup(self.zk_client.disconnect)
self.zk_client.connect()
class TestMysqlDatabase(DBBaseTestCase):
def setUp(self):
super().setUp()
f = MySQLSchemaFixture()
self.useFixture(f)
config = dict(dburi=f.dburi)
driver = SQLDriver()
self.connection = driver.getConnection('database', config)
self.connection.onLoad(self.zk_client)
self.addCleanup(self._cleanup)
def _cleanup(self):
self.connection.onStop()
def compareMysql(self, alembic_text, sqlalchemy_text):
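        # Compare two "show create table" outputs: column definitions
        # must match exactly and in order, while constraint lines may
        # appear in any order and are therefore collected and compared
        # as sorted lists.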
alembic_lines = alembic_text.split('\n')
sqlalchemy_lines = sqlalchemy_text.split('\n')
self.assertEqual(len(alembic_lines), len(sqlalchemy_lines))
alembic_constraints = []
sqlalchemy_constraints = []
for i in range(len(alembic_lines)):
if alembic_lines[i].startswith(" `"):
# Column
self.assertEqual(alembic_lines[i], sqlalchemy_lines[i])
elif alembic_lines[i].startswith(" "):
# Constraints can be unordered
# strip trailing commas since the last line omits it
alembic_constraints.append(
re.sub(',$', '', alembic_lines[i]))
sqlalchemy_constraints.append(
re.sub(',$', '', sqlalchemy_lines[i]))
else:
self.assertEqual(alembic_lines[i], sqlalchemy_lines[i])
alembic_constraints.sort()
sqlalchemy_constraints.sort()
self.assertEqual(alembic_constraints, sqlalchemy_constraints)
def test_migration(self):
# Test that SQLAlchemy create_all produces the same output as
# a full migration run.
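        # setUp already ran onLoad(), which created the schema via
        # SQLAlchemy create_all. Record that schema, drop all tables,
        # then recreate the schema with forced alembic migrations and
        # compare the results table by table.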
sqlalchemy_tables = {}
with self.connection.engine.begin() as connection:
connection.exec_driver_sql("set foreign_key_checks=0")
for table in connection.exec_driver_sql("show tables"):
table = table[0]
sqlalchemy_tables[table] = connection.exec_driver_sql(
f"show create table {table}").one()[1]
connection.exec_driver_sql(f"drop table {table}")
connection.exec_driver_sql("set foreign_key_checks=1")
self.connection.force_migrations = True
self.connection.onLoad(self.zk_client)
with self.connection.engine.begin() as connection:
for table in connection.exec_driver_sql("show tables"):
table = table[0]
create = connection.exec_driver_sql(
f"show create table {table}").one()[1]
self.compareMysql(create, sqlalchemy_tables[table])
def test_migration_4647def24b32(self):
with self.connection.engine.begin() as connection:
connection.exec_driver_sql("set foreign_key_checks=0")
for table in connection.exec_driver_sql("show tables"):
table = table[0]
connection.exec_driver_sql(f"drop table {table}")
connection.exec_driver_sql("set foreign_key_checks=1")
self.connection.force_migrations = True
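        # Migrate only up to an earlier revision (c57e9e76b812), insert
        # buildsets using the old MERGER_FAILURE result name, then run
        # the remaining migrations and verify the rename to
        # MERGE_CONFLICT.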
self.connection._migrate('c57e9e76b812')
with self.connection.engine.begin() as connection:
connection.exec_driver_sql(
"insert into zuul_buildset (result) values ('SUCCESS')")
connection.exec_driver_sql(
"insert into zuul_buildset (result) values ('MERGER_FAILURE')")
results = [r[0] for r in connection.exec_driver_sql(
"select result from zuul_buildset")]
self.assertEqual(results, ['SUCCESS', 'MERGER_FAILURE'])
self.connection._migrate()
with self.connection.engine.begin() as connection:
results = [r[0] for r in connection.exec_driver_sql(
"select result from zuul_buildset")]
self.assertEqual(results, ['SUCCESS', 'MERGE_CONFLICT'])
def test_migration_c7467b642498(self):
with self.connection.engine.begin() as connection:
connection.exec_driver_sql("set foreign_key_checks=0")
for table in connection.exec_driver_sql("show tables"):
table = table[0]
connection.exec_driver_sql(f"drop table {table}")
connection.exec_driver_sql("set foreign_key_checks=1")
self.connection.force_migrations = True
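        # Migrate up to the previous revision (4647def24b32), insert
        # buildsets with different timestamp combinations, then finish
        # the migrations and verify that the new "updated" column is
        # backfilled with the most recent available timestamp (falling
        # back to the epoch when none is set).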
self.connection._migrate('4647def24b32')
with self.connection.engine.begin() as connection:
connection.exec_driver_sql(
"insert into zuul_buildset (result) values ('SUCCESS')")
connection.exec_driver_sql(
"insert into zuul_buildset (result, first_build_start_time) "
"values ('SUCCESS', '2022-05-01 12:34:56')")
connection.exec_driver_sql(
"insert into zuul_buildset (result, last_build_end_time) "
"values ('SUCCESS', '2022-05-02 12:34:56')")
connection.exec_driver_sql(
"insert into zuul_buildset (result, event_timestamp) "
"values ('SUCCESS', '2022-05-03 12:34:56')")
connection.exec_driver_sql(
"insert into zuul_buildset (result, "
"first_build_start_time, "
"last_build_end_time, "
"event_timestamp)"
"values ('SUCCESS', "
"'2022-05-11 12:34:56', "
"'2022-05-12 12:34:56', "
"'2022-05-13 12:34:56')")
self.connection._migrate()
with self.connection.engine.begin() as connection:
results = [str(r[0]) for r in connection.exec_driver_sql(
"select updated from zuul_buildset")]
self.assertEqual(results,
['1970-01-01 00:00:00',
'2022-05-01 12:34:56',
'2022-05-02 12:34:56',
'2022-05-03 12:34:56',
'2022-05-13 12:34:56'])
def test_buildsets(self):
        tenant = 'tenant1'
buildset_uuid = 'deadbeef'
change = 1234
buildset_args = dict(
uuid=buildset_uuid,
tenant=tenant,
pipeline='check',
project='project',
change=change,
patchset='1',
ref='',
oldrev='',
newrev='',
branch='master',
zuul_ref='Zdeadbeef',
ref_url='http://example.com/1234',
event_id='eventid',
)
# Create the buildset entry (driver-internal interface)
with self.connection.getSession() as db:
db.createBuildSet(**buildset_args)
# Verify that worked using the driver-external interface
self.assertEqual(len(self.connection.getBuildsets()), 1)
self.assertEqual(self.connection.getBuildsets()[0].uuid, buildset_uuid)
# Update the buildset using the internal interface
with self.connection.getSession() as db:
db_buildset = db.getBuildset(tenant=tenant, uuid=buildset_uuid)
self.assertEqual(db_buildset.change, change)
db_buildset.result = 'SUCCESS'
# Verify that worked
db_buildset = self.connection.getBuildset(
tenant=tenant, uuid=buildset_uuid)
self.assertEqual(db_buildset.result, 'SUCCESS')
class TestPostgresqlDatabase(DBBaseTestCase):
def setUp(self):
super().setUp()
f = PostgresqlSchemaFixture()
self.useFixture(f)
self.db = f
config = dict(dburi=f.dburi)
driver = SQLDriver()
self.connection = driver.getConnection('database', config)
self.connection.onLoad(self.zk_client)
self.addCleanup(self._cleanup)
def _cleanup(self):
self.connection.onStop()
def test_migration(self):
# Test that SQLAlchemy create_all produces the same output as
# a full migration run.
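        # pg_dump the schema created in setUp (via create_all), drop all
        # tables, recreate the schema with forced alembic migrations,
        # and compare the two dumps byte for byte.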
sqlalchemy_out = subprocess.check_output(
f"pg_dump -h {self.db.host} -U {self.db.name} -s {self.db.name}",
shell=True,
env={'PGPASSWORD': self.db.passwd}
)
with self.connection.engine.begin() as connection:
tables = [x[0] for x in connection.exec_driver_sql(
"select tablename from pg_catalog.pg_tables "
"where schemaname='public'"
).all()]
self.assertTrue(len(tables) > 0)
for table in tables:
connection.exec_driver_sql(f"drop table {table} cascade")
self.connection.force_migrations = True
self.connection.onLoad(self.zk_client)
alembic_out = subprocess.check_output(
f"pg_dump -h {self.db.host} -U {self.db.name} -s {self.db.name}",
shell=True,
env={'PGPASSWORD': self.db.passwd}
)
self.assertEqual(alembic_out, sqlalchemy_out)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_database.py
|
test_database.py
|
# Copyright 2020 Antoine Musso
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from unittest import mock
from tests.base import BaseTestCase
from zuul.lib.ansible import AnsibleManager
class TestLibAnsibleManager(BaseTestCase):
@mock.patch('zuul.lib.ansible.AnsibleManager.load_ansible_config')
@mock.patch('zuul.lib.ansible.AnsibleManager._validate_packages')
@mock.patch('zuul.lib.ansible.AnsibleManager._validate_ansible')
def test_validate_remembers_failures(self,
mock_validate_ansible,
mock_validate_packages, _):
okish = mock.Mock(
'subprocess.CompletedProcess',
returncode=0, stdout=b'Some valid ansible infos\n')
am = AnsibleManager()
am._supported_versions = collections.OrderedDict([
('1.0', False),
('2.8', True),
])
mock_validate_packages.side_effect = am._supported_versions.values()
mock_validate_ansible.side_effect = am._supported_versions.values()
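        # _validate_ansible returns False for 1.0 and True for 2.8, so
        # _validate_packages is only reached for 2.8; validate() must
        # still return False overall because the earlier 1.0 failure
        # has to be remembered.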
self.assertFalse(
am.validate(),
'A valid ansible should not mask a previous failure')
        self.assertEqual(
            [mock.call('1.0'),
             mock.call('2.8')],
            mock_validate_ansible.mock_calls)
        self.assertEqual(
[mock.call('2.8')],
mock_validate_packages.mock_calls)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_lib_ansible.py
|
test_lib_ansible.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import io
import logging
import json
import os
import os.path
import re
import socket
import ssl
import tempfile
import testtools
import threading
import time
import zuul.web
import zuul.lib.log_streamer
from zuul.lib.fingergw import FingerGateway
from zuul.lib.statsd import normalize_statsd_name
import tests.base
from tests.base import iterate_timeout, ZuulWebFixture, FIXTURE_DIR
from ws4py.client import WebSocketBaseClient
class WSClient(WebSocketBaseClient):
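    # A minimal websocket client that connects to the tenant's
    # console-stream endpoint, requests the log of a single build and
    # accumulates the streamed text frames in self.results.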
def __init__(self, port, build_uuid):
self.port = port
self.build_uuid = build_uuid
self.results = ''
self.event = threading.Event()
uri = 'ws://[::1]:%s/api/tenant/tenant-one/console-stream' % port
super(WSClient, self).__init__(uri)
self.thread = threading.Thread(target=self.run)
self.thread.start()
def received_message(self, message):
if message.is_text:
self.results += message.data.decode('utf-8')
def run(self):
self.connect()
req = {'uuid': self.build_uuid, 'logfile': None}
self.send(json.dumps(req))
self.event.set()
super(WSClient, self).run()
self.close()
class TestLogStreamer(tests.base.BaseTestCase):
def startStreamer(self, host, port, root=None):
self.host = host
if not root:
root = tempfile.gettempdir()
return zuul.lib.log_streamer.LogStreamer(self.host, port, root)
def test_start_stop_ipv6(self):
streamer = self.startStreamer('::1', 0)
self.addCleanup(streamer.stop)
port = streamer.server.socket.getsockname()[1]
s = socket.create_connection((self.host, port))
s.close()
streamer.stop()
with testtools.ExpectedException(ConnectionRefusedError):
s = socket.create_connection((self.host, port))
s.close()
def test_start_stop_ipv4(self):
streamer = self.startStreamer('127.0.0.1', 0)
self.addCleanup(streamer.stop)
port = streamer.server.socket.getsockname()[1]
s = socket.create_connection((self.host, port))
s.close()
streamer.stop()
with testtools.ExpectedException(ConnectionRefusedError):
s = socket.create_connection((self.host, port))
s.close()
class TestStreamingBase(tests.base.AnsibleZuulTestCase):
tenant_config_file = 'config/streamer/main.yaml'
log = logging.getLogger("zuul.test_streaming")
fingergw_use_ssl = False
def setUp(self):
super().setUp()
self.host = '::'
self.streamer = None
self.stop_streamer = False
self.streaming_data = {}
self.test_streaming_event = threading.Event()
def stopStreamer(self):
self.stop_streamer = True
def startStreamer(self, port, build_uuid, root=None):
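        # Start a LogStreamer serving the given root, connect to it,
        # request the build's log by uuid, and accumulate everything
        # received in self.streaming_data[None] until stopped or the
        # stream ends.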
if not root:
root = tempfile.gettempdir()
self.streamer = zuul.lib.log_streamer.LogStreamer(self.host,
port, root)
port = self.streamer.server.socket.getsockname()[1]
s = socket.create_connection((self.host, port))
self.addCleanup(s.close)
req = '%s\r\n' % build_uuid
s.sendall(req.encode('utf-8'))
self.test_streaming_event.set()
self.streaming_data.setdefault(None, '')
while not self.stop_streamer:
data = s.recv(2048)
if not data:
break
self.streaming_data[None] += data.decode('utf-8')
s.shutdown(socket.SHUT_RDWR)
s.close()
self.streamer.stop()
def _readSocket(self, sock, build_uuid, event, name):
msg = "%s\r\n" % build_uuid
sock.sendall(msg.encode('utf-8'))
event.set() # notify we are connected and req sent
while True:
data = sock.recv(1024)
if not data:
break
self.streaming_data[name] += data.decode('utf-8')
sock.shutdown(socket.SHUT_RDWR)
def runFingerClient(self, build_uuid, gateway_address, event, name=None):
# Wait until the gateway is started
for x in iterate_timeout(30, "finger client to start"):
try:
# NOTE(Shrews): This causes the gateway to begin to handle
# a request for which it never receives data, and thus
# causes the getCommand() method to timeout (seen in the
# test results, but is harmless).
with socket.create_connection(gateway_address) as s:
break
except ConnectionRefusedError:
pass
self.streaming_data[name] = ''
with socket.create_connection(gateway_address) as s:
if self.fingergw_use_ssl:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = False
context.load_cert_chain(
os.path.join(FIXTURE_DIR, 'fingergw/fingergw.pem'),
os.path.join(FIXTURE_DIR, 'fingergw/fingergw.key'))
context.load_verify_locations(
os.path.join(FIXTURE_DIR, 'fingergw/root-ca.pem'))
with context.wrap_socket(s) as s:
self._readSocket(s, build_uuid, event, name)
else:
self._readSocket(s, build_uuid, event, name)
def runFingerGateway(self, zone=None):
self.log.info('Starting fingergw with zone %s', zone)
config = configparser.ConfigParser()
config.read_dict(self.config)
config.read_dict({
'fingergw': {
'listen_address': self.host,
'port': '0',
'hostname': 'localhost',
}
})
if zone:
config.set('fingergw', 'zone', zone)
if self.fingergw_use_ssl:
self.log.info('SSL enabled for fingergw')
config.set('fingergw', 'tls_ca',
os.path.join(FIXTURE_DIR, 'fingergw/root-ca.pem'))
config.set('fingergw', 'tls_cert',
os.path.join(FIXTURE_DIR, 'fingergw/fingergw.pem'))
config.set('fingergw', 'tls_key',
os.path.join(FIXTURE_DIR, 'fingergw/fingergw.key'))
config.set('fingergw', 'tls_verify_hostnames', 'False')
gateway = FingerGateway(
config,
command_socket=None,
pid_file=None
)
gateway.history = []
gateway.start()
self.addCleanup(gateway.stop)
if zone:
for _ in iterate_timeout(20, 'fingergw is registered'):
found = False
for gw in self.scheds.first.sched.component_registry.\
all('fingergw'):
if gw.zone == zone:
found = True
break
if found:
break
gateway_port = gateway.server.socket.getsockname()[1]
return gateway, (self.host, gateway_port)
class TestStreaming(TestStreamingBase):
def test_streaming(self):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
# We don't have any real synchronization for the ansible jobs, so
# just wait until we get our running build.
for x in iterate_timeout(30, "builds"):
if len(self.builds):
break
build = self.builds[0]
self.assertEqual(build.name, 'python27')
build_dir = os.path.join(self.executor_server.jobdir_root, build.uuid)
for x in iterate_timeout(30, "build dir"):
if os.path.exists(build_dir):
break
# Need to wait to make sure that jobdir gets set
for x in iterate_timeout(30, "jobdir"):
if build.jobdir is not None:
break
build = self.builds[0]
# Wait for the job to begin running and create the ansible log file.
# The job waits to complete until the flag file exists, so we can
# safely access the log here. We only open it (to force a file handle
# to be kept open for it after the job finishes) but wait to read the
# contents until the job is done.
ansible_log = os.path.join(build.jobdir.log_root, 'job-output.txt')
for x in iterate_timeout(30, "ansible log"):
if os.path.exists(ansible_log):
break
logfile = open(ansible_log, 'r')
self.addCleanup(logfile.close)
# Create a thread to stream the log. We need this to be happening
# before we create the flag file to tell the job to complete.
streamer_thread = threading.Thread(
target=self.startStreamer,
args=(0, build.uuid, self.executor_server.jobdir_root,)
)
streamer_thread.start()
self.addCleanup(self.stopStreamer)
self.test_streaming_event.wait()
# Allow the job to complete, which should close the streaming
# connection (and terminate the thread) as well since the log file
# gets closed/deleted.
flag_file = os.path.join(build_dir, 'test_wait')
open(flag_file, 'w').close()
self.waitUntilSettled()
streamer_thread.join()
# Now that the job is finished, the log file has been closed by the
# job and deleted. However, we still have a file handle to it, so we
# can make sure that we read the entire contents at this point.
# Compact the returned lines into a single string for easy comparison.
file_contents = logfile.read()
logfile.close()
self.log.debug("\n\nFile contents: %s\n\n", file_contents)
self.log.debug("\n\nStreamed: %s\n\n", self.streaming_data[None])
self.assertEqual(file_contents, self.streaming_data[None])
# Check that we logged a multiline debug message
pattern = (r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d\:\d\d\.\d\d\d\d\d\d \| '
r'Debug Test Token String$')
r = re.compile(pattern, re.MULTILINE)
match = r.search(self.streaming_data[None])
        self.assertIsNotNone(match)
# Check that we logged loop_var contents properly
pattern = r'ok: "(one|two|three)"'
m = re.search(pattern, self.streaming_data[None])
        self.assertIsNotNone(m)
def runWSClient(self, port, build_uuid):
client = WSClient(port, build_uuid)
client.event.wait()
return client
def test_decode_boundaries(self):
'''
Test multi-byte characters crossing read buffer boundaries.
The finger client used by ZuulWeb reads in increments of 1024 bytes.
If the last byte is a multi-byte character, we end up with an error
similar to:
'utf-8' codec can't decode byte 0xe2 in position 1023: \
unexpected end of data
By making the 1024th character in the log file a multi-byte character
(here, the Euro character), we can test this.
'''
# Start the web server
web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
# Start the finger streamer daemon
streamer = zuul.lib.log_streamer.LogStreamer(
self.host, 0, self.executor_server.jobdir_root)
self.addCleanup(streamer.stop)
# Need to set the streaming port before submitting the job
finger_port = streamer.server.socket.getsockname()[1]
self.executor_server.log_streaming_port = finger_port
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
# We don't have any real synchronization for the ansible jobs, so
# just wait until we get our running build.
for x in iterate_timeout(30, "builds"):
if len(self.builds):
break
build = self.builds[0]
self.assertEqual(build.name, 'python27')
build_dir = os.path.join(self.executor_server.jobdir_root, build.uuid)
for x in iterate_timeout(30, "build dir"):
if os.path.exists(build_dir):
break
# Need to wait to make sure that jobdir gets set
for x in iterate_timeout(30, "jobdir"):
if build.jobdir is not None:
break
build = self.builds[0]
# Wait for the job to begin running and create the ansible log file.
# The job waits to complete until the flag file exists, so we can
# safely access the log here. We only open it (to force a file handle
# to be kept open for it after the job finishes) but wait to read the
# contents until the job is done.
ansible_log = os.path.join(build.jobdir.log_root, 'job-output.txt')
for x in iterate_timeout(30, "ansible log"):
if os.path.exists(ansible_log):
break
# Replace log file contents with the 1024th character being a
# multi-byte character.
with io.open(ansible_log, 'w', encoding='utf8') as f:
f.write("a" * 1023)
f.write(u"\u20AC")
logfile = open(ansible_log, 'r')
self.addCleanup(logfile.close)
# Start a thread with the websocket client
client1 = self.runWSClient(web.port, build.uuid)
client1.event.wait()
# Allow the job to complete
flag_file = os.path.join(build_dir, 'test_wait')
open(flag_file, 'w').close()
# Wait for the websocket client to complete, which it should when
# it's received the full log.
client1.thread.join()
self.waitUntilSettled()
file_contents = logfile.read()
logfile.close()
self.log.debug("\n\nFile contents: %s\n\n", file_contents)
self.log.debug("\n\nStreamed: %s\n\n", client1.results)
self.assertEqual(file_contents, client1.results)
hostname = normalize_statsd_name(socket.getfqdn())
self.assertReportedStat(
f'zuul.web.server.{hostname}.streamers', kind='g')
def test_websocket_streaming(self):
# Start the web server
web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
# Start the finger streamer daemon
streamer = zuul.lib.log_streamer.LogStreamer(
self.host, 0, self.executor_server.jobdir_root)
self.addCleanup(streamer.stop)
# Need to set the streaming port before submitting the job
finger_port = streamer.server.socket.getsockname()[1]
self.executor_server.log_streaming_port = finger_port
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
# We don't have any real synchronization for the ansible jobs, so
# just wait until we get our running build.
for x in iterate_timeout(30, "build"):
if len(self.builds):
break
build = self.builds[0]
self.assertEqual(build.name, 'python27')
build_dir = os.path.join(self.executor_server.jobdir_root, build.uuid)
for x in iterate_timeout(30, "build dir"):
if os.path.exists(build_dir):
break
# Need to wait to make sure that jobdir gets set
for x in iterate_timeout(30, "jobdir"):
if build.jobdir is not None:
break
build = self.builds[0]
# Wait for the job to begin running and create the ansible log file.
# The job waits to complete until the flag file exists, so we can
# safely access the log here. We only open it (to force a file handle
# to be kept open for it after the job finishes) but wait to read the
# contents until the job is done.
ansible_log = os.path.join(build.jobdir.log_root, 'job-output.txt')
for x in iterate_timeout(30, "ansible log"):
if os.path.exists(ansible_log):
break
logfile = open(ansible_log, 'r')
self.addCleanup(logfile.close)
# Start a thread with the websocket client
client1 = self.runWSClient(web.port, build.uuid)
client1.event.wait()
client2 = self.runWSClient(web.port, build.uuid)
client2.event.wait()
# Allow the job to complete
flag_file = os.path.join(build_dir, 'test_wait')
open(flag_file, 'w').close()
# Wait for the websocket client to complete, which it should when
# it's received the full log.
client1.thread.join()
client2.thread.join()
self.waitUntilSettled()
file_contents = logfile.read()
self.log.debug("\n\nFile contents: %s\n\n", file_contents)
self.log.debug("\n\nStreamed: %s\n\n", client1.results)
self.assertEqual(file_contents, client1.results)
self.log.debug("\n\nStreamed: %s\n\n", client2.results)
self.assertEqual(file_contents, client2.results)
def test_websocket_hangup(self):
# Start the web server
web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
# Start the finger streamer daemon
streamer = zuul.lib.log_streamer.LogStreamer(
self.host, 0, self.executor_server.jobdir_root)
self.addCleanup(streamer.stop)
# Need to set the streaming port before submitting the job
finger_port = streamer.server.socket.getsockname()[1]
self.executor_server.log_streaming_port = finger_port
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
# We don't have any real synchronization for the ansible jobs, so
# just wait until we get our running build.
for x in iterate_timeout(30, "build"):
if len(self.builds):
break
build = self.builds[0]
self.assertEqual(build.name, 'python27')
build_dir = os.path.join(self.executor_server.jobdir_root, build.uuid)
for x in iterate_timeout(30, "build dir"):
if os.path.exists(build_dir):
break
# Need to wait to make sure that jobdir gets set
for x in iterate_timeout(30, "jobdir"):
if build.jobdir is not None:
break
build = self.builds[0]
# Wait for the job to begin running and create the ansible log file.
# The job waits to complete until the flag file exists, so we can
# safely access the log here.
ansible_log = os.path.join(build.jobdir.log_root, 'job-output.txt')
for x in iterate_timeout(30, "ansible log"):
if os.path.exists(ansible_log):
break
# Start a thread with the websocket client
client1 = self.runWSClient(web.port, build.uuid)
client1.event.wait()
# Wait until we've streamed everything so far
for x in iterate_timeout(30, "streamer is caught up"):
with open(ansible_log, 'r') as logfile:
if client1.results == logfile.read():
break
# This is intensive, give it some time
time.sleep(1)
self.assertNotEqual(len(web.web.stream_manager.streamers.keys()), 0)
# Hangup the client side
client1.close(1000, 'test close')
client1.thread.join()
# The client should be de-registered shortly
for x in iterate_timeout(30, "client cleanup"):
if len(web.web.stream_manager.streamers.keys()) == 0:
break
# Allow the job to complete
flag_file = os.path.join(build_dir, 'test_wait')
open(flag_file, 'w').close()
self.waitUntilSettled()
def test_finger_gateway(self):
# Start the finger streamer daemon
streamer = zuul.lib.log_streamer.LogStreamer(
self.host, 0, self.executor_server.jobdir_root)
self.addCleanup(streamer.stop)
finger_port = streamer.server.socket.getsockname()[1]
# Need to set the streaming port before submitting the job
self.executor_server.log_streaming_port = finger_port
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
# We don't have any real synchronization for the ansible jobs, so
# just wait until we get our running build.
for x in iterate_timeout(30, "build"):
if len(self.builds):
break
build = self.builds[0]
self.assertEqual(build.name, 'python27')
build_dir = os.path.join(self.executor_server.jobdir_root, build.uuid)
for x in iterate_timeout(30, "build dir"):
if os.path.exists(build_dir):
break
# Need to wait to make sure that jobdir gets set
for x in iterate_timeout(30, "jobdir"):
if build.jobdir is not None:
break
# Wait for the job to begin running and create the ansible log file.
# The job waits to complete until the flag file exists, so we can
# safely access the log here. We only open it (to force a file handle
# to be kept open for it after the job finishes) but wait to read the
# contents until the job is done.
ansible_log = os.path.join(build.jobdir.log_root, 'job-output.txt')
for x in iterate_timeout(30, "ansible log"):
if os.path.exists(ansible_log):
break
logfile = open(ansible_log, 'r')
self.addCleanup(logfile.close)
# Start the finger gateway daemon
_, gateway_address = self.runFingerGateway()
# Start a thread with the finger client
finger_client_event = threading.Event()
self.finger_client_results = ''
finger_client_thread = threading.Thread(
target=self.runFingerClient,
args=(build.uuid, gateway_address, finger_client_event)
)
finger_client_thread.start()
finger_client_event.wait()
# Allow the job to complete
flag_file = os.path.join(build_dir, 'test_wait')
open(flag_file, 'w').close()
# Wait for the finger client to complete, which it should when
# it's received the full log.
finger_client_thread.join()
self.waitUntilSettled()
file_contents = logfile.read()
logfile.close()
self.log.debug("\n\nFile contents: %s\n\n", file_contents)
self.log.debug("\n\nStreamed: %s\n\n", self.streaming_data[None])
self.assertEqual(file_contents, self.streaming_data[None])
class CountingFingerRequestHandler(zuul.lib.fingergw.RequestHandler):
def _fingerClient(self, server, port, build_uuid, use_ssl):
self.fingergw.history.append(build_uuid)
super()._fingerClient(server, port, build_uuid, use_ssl)
class TestStreamingZones(TestStreamingBase):
def setUp(self):
super().setUp()
self.fake_nodepool.attributes = {'executor-zone': 'eu-central'}
zuul.lib.fingergw.FingerGateway.handler_class = \
CountingFingerRequestHandler
def setup_config(self, config_file: str):
config = super().setup_config(config_file)
config.set('executor', 'zone', 'eu-central')
return config
def _run_finger_client(self, build, address, name):
# Start a thread with the finger client
finger_client_event = threading.Event()
self.finger_client_results = ''
finger_client_thread = threading.Thread(
target=self.runFingerClient,
args=(build.uuid, address, finger_client_event),
kwargs={'name': name}
)
finger_client_thread.start()
finger_client_event.wait()
return finger_client_thread
def test_finger_gateway(self):
# Start the finger streamer daemon
streamer = zuul.lib.log_streamer.LogStreamer(
self.host, 0, self.executor_server.jobdir_root)
self.addCleanup(streamer.stop)
finger_port = streamer.server.socket.getsockname()[1]
# Need to set the streaming port before submitting the job
self.executor_server.log_streaming_port = finger_port
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
# We don't have any real synchronization for the ansible jobs, so
# just wait until we get our running build.
for x in iterate_timeout(30, "build"):
if len(self.builds):
break
build = self.builds[0]
self.assertEqual(build.name, 'python27')
build_dir = os.path.join(self.executor_server.jobdir_root, build.uuid)
for x in iterate_timeout(30, "build dir"):
if os.path.exists(build_dir):
break
# Need to wait to make sure that jobdir gets set
for x in iterate_timeout(30, "jobdir"):
if build.jobdir is not None:
break
# Wait for the job to begin running and create the ansible log file.
# The job waits to complete until the flag file exists, so we can
# safely access the log here. We only open it (to force a file handle
# to be kept open for it after the job finishes) but wait to read the
# contents until the job is done.
ansible_log = os.path.join(build.jobdir.log_root, 'job-output.txt')
for x in iterate_timeout(30, "ansible log"):
if os.path.exists(ansible_log):
break
logfile = open(ansible_log, 'r')
self.addCleanup(logfile.close)
def wait_for_stream(name):
for x in iterate_timeout(30, "incoming streaming data"):
if len(self.streaming_data.get(name, '')) > 0:
break
# Start the finger gateway daemons
try:
gateway_unzoned, gateway_unzoned_address = self.runFingerGateway()
gateway_us_west, gateway_us_west_address = self.runFingerGateway(
zone='us-west')
except Exception:
self.log.exception("Failed to run finger gateway")
raise
# This finger client runs against a finger gateway in a different zone
# while there is no gateway in the worker zone yet. This should work.
finger_client_us_west_alone = self._run_finger_client(
build, gateway_us_west_address, name='us-west-alone')
# The stream must go only via gateway_us_west
wait_for_stream('us-west-alone')
self.assertEqual(0, len(gateway_unzoned.history))
self.assertEqual(1, len(gateway_us_west.history))
gateway_unzoned.history.clear()
gateway_us_west.history.clear()
# This finger client runs against an unzoned finger gateway
finger_client_unzoned = self._run_finger_client(
build, gateway_unzoned_address, name='unzoned')
wait_for_stream('unzoned')
self.assertEqual(1, len(gateway_unzoned.history))
self.assertEqual(0, len(gateway_us_west.history))
gateway_unzoned.history.clear()
gateway_us_west.history.clear()
# Now start a finger gateway in the target zone.
gateway_eu_central, gateway_eu_central_address = self.runFingerGateway(
zone='eu-central')
# This finger client runs against a finger gateway in a different zone
# while there is a gateway in the worker zone. This should route via
# the gateway in the worker zone.
finger_client_us_west = self._run_finger_client(
build, gateway_us_west_address, name='us-west')
        # The stream enters via gateway_us_west but must be routed
        # through the gateway in the worker zone (eu-central).
wait_for_stream('us-west')
self.assertEqual(0, len(gateway_unzoned.history))
self.assertEqual(1, len(gateway_eu_central.history))
self.assertEqual(1, len(gateway_us_west.history))
gateway_unzoned.history.clear()
gateway_eu_central.history.clear()
gateway_us_west.history.clear()
# This finger client runs against an unzoned finger gateway
# while there is a gateway in the worker zone. It should still
# route via the gateway in the worker zone since that may be
# the only way it's accessible.
finger_client_unzoned2 = self._run_finger_client(
build, gateway_unzoned_address, name='unzoned2')
wait_for_stream('unzoned2')
self.assertEqual(1, len(gateway_unzoned.history))
self.assertEqual(1, len(gateway_eu_central.history))
self.assertEqual(0, len(gateway_us_west.history))
gateway_unzoned.history.clear()
gateway_eu_central.history.clear()
gateway_us_west.history.clear()
# This finger client runs against the target finger gateway.
finger_client_eu_central = self._run_finger_client(
build, gateway_eu_central_address, name='eu-central')
wait_for_stream('eu-central')
self.assertEqual(0, len(gateway_unzoned.history))
self.assertEqual(1, len(gateway_eu_central.history))
self.assertEqual(0, len(gateway_us_west.history))
gateway_unzoned.history.clear()
gateway_eu_central.history.clear()
gateway_us_west.history.clear()
# Allow the job to complete
flag_file = os.path.join(build_dir, 'test_wait')
open(flag_file, 'w').close()
# Wait for the finger clients to complete, which they should when
# they have received the full log.
finger_client_us_west_alone.join()
finger_client_us_west.join()
finger_client_eu_central.join()
finger_client_unzoned.join()
finger_client_unzoned2.join()
self.waitUntilSettled()
file_contents = logfile.read()
logfile.close()
self.log.debug("\n\nFile contents: %s\n\n", file_contents)
self.log.debug("\n\nStreamed: %s\n\n",
self.streaming_data['us-west-alone'])
self.assertEqual(file_contents, self.streaming_data['us-west-alone'])
self.assertEqual(file_contents, self.streaming_data['us-west'])
self.assertEqual(file_contents, self.streaming_data['unzoned'])
self.assertEqual(file_contents, self.streaming_data['unzoned2'])
self.assertEqual(file_contents, self.streaming_data['eu-central'])
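# For reference, the finger clients driven above speak a simple
# line-oriented protocol: the client sends the build UUID terminated
# by a newline and then reads the streamed log until EOF. Below is a
# minimal standalone sketch of such a client for the plain TCP case
# (the SSL variant below additionally wraps the connection in TLS).
# It is illustrative only -- the function name and arguments are
# assumptions, not part of the test fixtures -- and is not used by
# these tests.
import socket


def _example_fetch_finger_log(host, port, build_uuid):
    """Fetch a build's log from a finger gateway (illustrative only)."""
    with socket.create_connection((host, port)) as sock:
        # Request the log stream for this build.
        sock.sendall(('%s\n' % build_uuid).encode('utf8'))
        chunks = []
        while True:
            data = sock.recv(4096)
            if not data:
                break
            chunks.append(data)
    return b''.join(chunks).decode('utf8')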
class TestStreamingZonesSSL(TestStreamingZones):
fingergw_use_ssl = True
class TestStreamingUnzonedJob(TestStreamingZones):
def setUp(self):
super().setUp()
self.fake_nodepool.attributes = None
def setup_config(self, config_file: str):
config = super().setup_config(config_file)
config.set('executor', 'allow_unzoned', 'true')
return config
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_streaming.py
|
test_streaming.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
from zuul.lib import yamlutil as yaml
from tests.base import AnsibleZuulTestCase
from tests.base import ZuulTestCase
class TestInventoryBase(ZuulTestCase):
config_file = 'zuul-gerrit-github.conf'
tenant_config_file = 'config/inventory/main.yaml'
use_gerrit = True
def setUp(self, python_path=None, shell_type=None):
super(TestInventoryBase, self).setUp()
if python_path:
self.fake_nodepool.python_path = python_path
if shell_type:
self.fake_nodepool.shell_type = shell_type
self.executor_server.hold_jobs_in_build = True
self.hold_jobs_in_queue = True
if self.use_gerrit:
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
else:
A = self.fake_github.openFakePullRequest(
'org/project3', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.change_A = A
self.waitUntilSettled()
def tearDown(self):
self.cancelExecutorJobs()
self.waitUntilSettled()
super(TestInventoryBase, self).tearDown()
def _get_build_inventory(self, name):
self.runJob(name)
build = self.getBuildByName(name)
inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')
with open(inv_path, 'r') as f:
inventory = yaml.safe_load(f)
return inventory
def _get_setup_inventory(self, name):
self.runJob(name)
build = self.getBuildByName(name)
setup_inv_path = build.jobdir.setup_playbook.inventory
with open(setup_inv_path, 'r') as f:
inventory = yaml.ansible_unsafe_load(f)
return inventory
def runJob(self, name):
self.hold_jobs_in_queue = False
self.executor_api.release(f'^{name}$')
self.waitUntilSettled()
def cancelExecutorJobs(self):
if self.use_gerrit:
self.fake_gerrit.addEvent(
self.change_A.getChangeAbandonedEvent())
else:
self.fake_github.emitEvent(
self.change_A.getPullRequestClosedEvent())
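# For reference, the inventory files parsed by the helpers above use
# Ansible's YAML inventory format. A minimal sketch of the shape the
# assertions below rely on (values are illustrative, not taken from
# an actual fixture):
#
#     all:
#       hosts:
#         ubuntu-xenial:
#           ansible_python_interpreter: auto
#       vars:
#         zuul:
#           job: single-inventory
#           executor:
#             src_root: /path/to/work/src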
class TestInventoryGithub(TestInventoryBase):
use_gerrit = False
def test_single_inventory(self):
inventory = self._get_build_inventory('single-inventory')
all_nodes = ('ubuntu-xenial',)
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
node_vars = inventory['all']['hosts'][node_name]
self.assertEqual(
'auto', node_vars['ansible_python_interpreter'])
self.assertIn('zuul', inventory['all']['vars'])
self.assertIn('attempts', inventory['all']['vars']['zuul'])
self.assertEqual(1, inventory['all']['vars']['zuul']['attempts'])
self.assertIn('max_attempts', inventory['all']['vars']['zuul'])
self.assertEqual(3, inventory['all']['vars']['zuul']['max_attempts'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('ansible_version', z_vars)
self.assertIn('job', z_vars)
self.assertIn('event_id', z_vars)
self.assertEqual(z_vars['job'], 'single-inventory')
self.assertEqual(z_vars['message'], 'QQ==')
self.assertEqual(z_vars['change_url'],
'https://github.com/org/project3/pull/1')
self.executor_server.release()
self.waitUntilSettled()
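# The 'QQ==' literal asserted above (and in several tests below) is
# simply the base64 encoding of the one-character commit message 'A':
#
#     >>> import base64
#     >>> base64.b64encode(b'A')
#     b'QQ=='
#     >>> base64.b64decode('QQ==').decode('utf-8')
#     'A'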
class TestInventoryPythonPath(TestInventoryBase):
def setUp(self):
super(TestInventoryPythonPath, self).setUp(python_path='fake-python')
def test_single_inventory(self):
inventory = self._get_build_inventory('single-inventory')
all_nodes = ('ubuntu-xenial',)
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
node_vars = inventory['all']['hosts'][node_name]
self.assertEqual(
'fake-python', node_vars['ansible_python_interpreter'])
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('ansible_version', z_vars)
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'single-inventory')
self.assertEqual(z_vars['message'], 'QQ==')
self.executor_server.release()
self.waitUntilSettled()
class TestInventoryShellType(TestInventoryBase):
def setUp(self):
super(TestInventoryShellType, self).setUp(shell_type='cmd')
def test_single_inventory(self):
inventory = self._get_build_inventory('single-inventory')
all_nodes = ('ubuntu-xenial',)
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
node_vars = inventory['all']['hosts'][node_name]
self.assertEqual(
'cmd', node_vars['ansible_shell_type'])
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('ansible_version', z_vars)
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'single-inventory')
self.assertEqual(z_vars['message'], 'QQ==')
self.executor_server.release()
self.waitUntilSettled()
class InventoryAutoPythonMixin:
ansible_version = 'X'
def test_auto_python_ansible_inventory(self):
inventory = self._get_build_inventory(
f'ansible-version{self.ansible_version}-inventory')
all_nodes = ('ubuntu-xenial',)
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
node_vars = inventory['all']['hosts'][node_name]
self.assertEqual(
'auto', node_vars['ansible_python_interpreter'])
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'],
f'ansible-version{self.ansible_version}-inventory')
self.assertEqual(z_vars['message'], 'QQ==')
self.executor_server.release()
self.waitUntilSettled()
class TestInventoryAutoPythonAnsible6(TestInventoryBase,
InventoryAutoPythonMixin):
ansible_version = '6'
class TestInventoryAutoPythonAnsible8(TestInventoryBase,
InventoryAutoPythonMixin):
ansible_version = '8'
class TestInventory(TestInventoryBase):
def test_single_inventory(self):
inventory = self._get_build_inventory('single-inventory')
all_nodes = ('ubuntu-xenial',)
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
node_vars = inventory['all']['hosts'][node_name]
self.assertEqual(
'auto', node_vars['ansible_python_interpreter'])
self.assertNotIn(
'ansible_shell_type', node_vars)
self.assertIn('zuul', inventory['all']['vars'])
self.assertIn('attempts', inventory['all']['vars']['zuul'])
self.assertEqual(1, inventory['all']['vars']['zuul']['attempts'])
self.assertIn('max_attempts', inventory['all']['vars']['zuul'])
self.assertEqual(3, inventory['all']['vars']['zuul']['max_attempts'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'single-inventory')
self.assertEqual(z_vars['message'], 'QQ==')
self.executor_server.release()
self.waitUntilSettled()
def test_single_inventory_list(self):
inventory = self._get_build_inventory('single-inventory-list')
all_nodes = ('compute', 'controller')
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'single-inventory-list')
self.executor_server.release()
self.waitUntilSettled()
def test_executor_only_inventory(self):
inventory = self._get_build_inventory('executor-only-inventory')
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
# Should be blank; i.e. rely on the implicit localhost
self.assertEqual(0, len(inventory['all']['hosts']))
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'executor-only-inventory')
self.assertEqual(z_vars['message'], 'QQ==')
self.executor_server.release()
self.waitUntilSettled()
def test_group_inventory(self):
inventory = self._get_build_inventory('group-inventory')
all_nodes = ('controller', 'compute1', 'compute2')
self.assertIn('all', inventory)
self.assertIn('children', inventory['all'])
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for group_name in ('ceph-osd', 'ceph-monitor'):
self.assertIn(group_name, inventory['all']['children'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
self.assertIn(node_name,
inventory['all']['children']
['ceph-monitor']['hosts'])
self.assertEqual(
'auto',
inventory['all']['hosts']['controller']
['ansible_python_interpreter'])
self.assertEqual(
'ceph',
inventory['all']['hosts']['controller']
['ceph_var'])
self.assertEqual(
'auto',
inventory['all']['hosts']['compute1']
['ansible_python_interpreter'])
self.assertNotIn(
'ceph_var',
inventory['all']['hosts']['compute1'])
self.assertIn('zuul', inventory['all']['vars'])
z_vars = inventory['all']['vars']['zuul']
self.assertIn('executor', z_vars)
self.assertIn('src_root', z_vars['executor'])
self.assertIn('job', z_vars)
self.assertEqual(z_vars['job'], 'group-inventory')
self.executor_server.release()
self.waitUntilSettled()
def test_hostvars_inventory(self):
inventory = self._get_build_inventory('hostvars-inventory')
all_nodes = ('default', 'fakeuser')
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('vars', inventory['all'])
for node_name in all_nodes:
self.assertIn(node_name, inventory['all']['hosts'])
# check if the nodes use the correct username
if node_name == 'fakeuser':
username = 'fakeuser'
else:
username = 'zuul'
self.assertEqual(
inventory['all']['hosts'][node_name]['ansible_user'], username)
# check that the nodes use the correct ansible_connection
if node_name == 'windows':
self.assertEqual(
inventory['all']['hosts'][node_name]['ansible_connection'],
'winrm')
else:
self.assertEqual(
'local',
inventory['all']['hosts'][node_name]['ansible_connection'])
self.assertEqual(
'auto',
inventory['all']['hosts'][node_name]
['ansible_python_interpreter'])
self.assertEqual(
'all',
inventory['all']['hosts'][node_name]
['all_var'])
self.assertNotIn(
'ansible_python_interpreter',
inventory['all']['vars'])
self.executor_server.release()
self.waitUntilSettled()
def test_setup_inventory(self):
setup_inventory = self._get_setup_inventory('hostvars-inventory')
inventory = self._get_build_inventory('hostvars-inventory')
self.assertIn('all', inventory)
self.assertIn('hosts', inventory['all'])
self.assertIn('default', setup_inventory['all']['hosts'])
self.assertIn('fakeuser', setup_inventory['all']['hosts'])
self.assertIn('windows', setup_inventory['all']['hosts'])
self.assertNotIn('network', setup_inventory['all']['hosts'])
self.assertIn('default', inventory['all']['hosts'])
self.assertIn('fakeuser', inventory['all']['hosts'])
self.assertIn('windows', inventory['all']['hosts'])
self.assertIn('network', inventory['all']['hosts'])
self.executor_server.release()
self.waitUntilSettled()
class TestAnsibleInventory(AnsibleZuulTestCase):
config_file = 'zuul-gerrit-github.conf'
tenant_config_file = 'config/inventory/main.yaml'
def _get_file(self, build, path):
p = os.path.join(build.jobdir.root, path)
with open(p) as f:
return f.read()
def _jinja2_message(self, expected_message):
# This test runs a bit long and needs extra time.
self.wait_timeout = 120
# Keep the jobdir around to check inventory
self.executor_server.keep_jobdir = True
# Output extra ansible info so we might see errors.
self.executor_server.verbose = True
A = self.fake_gerrit.addFakeChange(
'org/project2', 'master', expected_message,
files={'jinja.txt': 'foo'})
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='jinja2-message', result='SUCCESS', changes='1,1')])
build = self.history[0]
inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')
with open(inv_path, 'r') as f:
inventory = yaml.safe_load(f)
zv_path = os.path.join(build.jobdir.root, 'ansible', 'zuul_vars.yaml')
with open(zv_path, 'r') as f:
zv = yaml.ansible_unsafe_load(f)
# TODO(corvus): zuul vars aren't really stored here anymore;
# rework these tests to examine them separately.
inventory['all']['vars'] = {'zuul': zv['zuul']}
# The deprecated base64 version
decoded_message = base64.b64decode(
inventory['all']['vars']['zuul']['message']).decode('utf-8')
self.assertEqual(decoded_message, expected_message)
obtained_message = self._get_file(self.history[0],
'work/logs/commit-message.txt')
self.assertEqual(obtained_message, expected_message)
# The new !unsafe version
decoded_message = inventory['all']['vars']['zuul']['change_message']
self.assertEqual(decoded_message, expected_message)
obtained_message = self._get_file(self.history[0],
'work/logs/change-message.txt')
self.assertEqual(obtained_message, expected_message)
def test_jinja2_message_brackets(self):
self._jinja2_message("This message has {{ ansible_host }} in it ")
def test_jinja2_message_raw(self):
self._jinja2_message("This message has {% raw %} in {% endraw %} it ")
def test_network_inventory(self):
# Network appliances can't run the freeze or setup playbooks,
# so they won't have any job variables available. But they
# should still have nodepool hostvars. Run a playbook that
# verifies that.
A = self.fake_gerrit.addFakeChange(
'org/project2', 'master', 'A',
files={'network.txt': 'foo'})
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='network', result='SUCCESS', changes='1,1')])
class TestWindowsInventory(TestInventoryBase):
config_file = 'zuul-winrm.conf'
def test_windows_inventory(self):
inventory = self._get_build_inventory('hostvars-inventory')
windows_host = inventory['all']['hosts']['windows']
self.assertEqual(windows_host['ansible_connection'], 'winrm')
self.assertEqual(
windows_host['ansible_winrm_operation_timeout_sec'],
'120')
self.assertEqual(
windows_host['ansible_winrm_read_timeout_sec'],
'180')
self.executor_server.release()
self.waitUntilSettled()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_inventory.py
|
test_inventory.py
|
# Copyright 2023 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import time
import boto3
from moto import mock_kinesis
import tests.base
from tests.base import (
ZuulTestCase,
iterate_timeout,
simple_layout,
)
FIXTURE_DIR = os.path.join(tests.base.FIXTURE_DIR, 'gerrit')
def serialize(event):
return json.dumps(event).encode('utf8')
class TestGerritEventSourceAWSKinesis(ZuulTestCase):
config_file = 'zuul-gerrit-awskinesis.conf'
mock_kinesis = mock_kinesis()
def setUp(self):
self.mock_kinesis.start()
self.kinesis_client = boto3.client('kinesis', region_name='us-west-2')
self.kinesis_client.create_stream(
StreamName='gerrit',
ShardCount=4,
StreamModeDetails={
'StreamMode': 'ON_DEMAND'
}
)
super().setUp()
def tearDown(self):
self.mock_kinesis.stop()
super().tearDown()
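# Illustrative sketch of the consumer side these tests exercise: a
# Kinesis reader walks each shard with a shard iterator and polls
# get_records(). The event listener under test presumably does
# something similar internally; this sketch only shows the boto3
# calls involved and is not part of the tests:
#
#     def read_stream(client, stream_name):
#         events = []
#         for shard in client.list_shards(
#                 StreamName=stream_name)['Shards']:
#             it = client.get_shard_iterator(
#                 StreamName=stream_name,
#                 ShardId=shard['ShardId'],
#                 ShardIteratorType='TRIM_HORIZON')['ShardIterator']
#             for record in client.get_records(
#                     ShardIterator=it)['Records']:
#                 events.append(json.loads(record['Data']))
#         return events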
@simple_layout('layouts/simple.yaml')
def test_kinesis(self):
listener = self.fake_gerrit.event_thread
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.kinesis_client.put_record(
StreamName='gerrit',
Data=serialize(A.getPatchsetCreatedEvent(1)),
PartitionKey='whatever',
)
for _ in iterate_timeout(60, 'wait for event'):
if listener._event_count == 1:
break
time.sleep(0.2)
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1')
])
self.assertEqual(A.reported, 1, "A should be reported")
# Stop the listener
listener.stop()
listener.join()
# Add new gerrit events while we are "offline"
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.kinesis_client.put_record(
StreamName='gerrit',
Data=serialize(B.getPatchsetCreatedEvent(1)),
PartitionKey='whatever',
)
# Restart the listener
listener.init()
listener.start()
for _ in iterate_timeout(60, 'wait for caught up'):
if all(listener._caught_up.values()):
break
time.sleep(0.2)
self.waitUntilSettled()
# Make sure we don't reprocess old events (change A), but do
# see new events (change B)
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
dict(name='check-job', result='SUCCESS', changes='2,1'),
])
self.assertEqual(A.reported, 1, "A should be reported")
self.assertEqual(B.reported, 1, "B should be reported")
@simple_layout('layouts/simple.yaml')
def test_kinesis_bad_checkpoint(self):
listener = self.fake_gerrit.event_thread
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.kinesis_client.put_record(
StreamName='gerrit',
Data=serialize(A.getPatchsetCreatedEvent(1)),
PartitionKey='whatever',
)
for _ in iterate_timeout(60, 'wait for event'):
if listener._event_count == 1:
break
time.sleep(0.2)
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1')
])
self.assertEqual(A.reported, 1, "A should be reported")
# Stop the listener
listener.stop()
listener.join()
# Corrupt the checkpoint
for cp in listener.checkpoints.values():
cp.set("nope")
# Add new gerrit events while we are "offline"
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.kinesis_client.put_record(
StreamName='gerrit',
Data=serialize(B.getPatchsetCreatedEvent(1)),
PartitionKey='whatever',
)
# Restart the listener
listener.init()
listener.start()
for _ in iterate_timeout(60, 'wait for caught up'):
if all(listener._caught_up.values()):
break
time.sleep(0.2)
self.waitUntilSettled()
# Make sure we don't reprocess old events (change A),
# and also that we missed change B because of the corruption
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
])
self.assertEqual(A.reported, 1, "A should be reported")
self.assertEqual(B.reported, 0, "B should not be reported")
# Poke B again to make sure we get new events
self.kinesis_client.put_record(
StreamName='gerrit',
Data=serialize(B.getPatchsetCreatedEvent(1)),
PartitionKey='whatever',
)
for _ in iterate_timeout(60, 'wait for event'):
if listener._event_count == 2:
break
time.sleep(0.2)
self.waitUntilSettled()
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
dict(name='check-job', result='SUCCESS', changes='2,1'),
])
self.assertEqual(A.reported, 1, "A should be reported")
self.assertEqual(B.reported, 1, "B should be reported")
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_gerrit_awskinesis.py
|
test_gerrit_awskinesis.py
|
# Copyright 2021 BMW Group
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import zuul.model
from tests.base import iterate_timeout, ZuulTestCase, simple_layout
from zuul.zk.locks import SessionAwareWriteLock, TENANT_LOCK_ROOT
class TestScaleOutScheduler(ZuulTestCase):
tenant_config_file = "config/single-tenant/main.yaml"
# These tests exercise specific interactions between multiple
# schedulers. They create additional schedulers as necessary and
# start or stop them individually to test specific interactions.
# Using scheduler_count to create even more schedulers doesn't
# make sense for these tests.
scheduler_count = 1
def test_multi_scheduler(self):
# A smoke test that we can enqueue a change with one scheduler
# and have another one finish the run.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
app = self.createScheduler()
app.start()
self.assertEqual(len(self.scheds), 2)
# Hold the lock on the first scheduler so that only the second
# will act.
with self.scheds.first.sched.run_handler_lock:
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled(matcher=[app])
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
], ordered=False)
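# The pattern used above recurs throughout this file: holding
# sched.run_handler_lock on one scheduler keeps its main loop from
# acting, so any progress observed while the lock is held must have
# been made by the other scheduler.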
def test_pipeline_cache_clear(self):
# Test that the pipeline cache on a second scheduler isn't
# holding old change objects.
# Hold jobs in build
sched1 = self.scheds.first
self.executor_server.hold_jobs_in_build = True
# We need a pair of changes in order to populate the pipeline
# change cache (a single change doesn't activate the cache,
# it's for dependencies).
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.addApproval('Code-Review', 2)
B.addApproval('Approved', 1)
B.setDependsOn(A, 1)
# Fail a job
self.executor_server.failJob('project-test1', A)
# Enqueue into gate with scheduler 1
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
# Start scheduler 2
sched2 = self.createScheduler()
sched2.start()
self.assertEqual(len(self.scheds), 2)
# Pause scheduler 1
with sched1.sched.run_handler_lock:
# Release jobs
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
# Wait for scheduler 2 to dequeue
self.waitUntilSettled(matcher=[sched2])
# Unpause scheduler 1
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
# Clear zk change cache
self.fake_gerrit._change_cache.prune([], max_age=0)
# At this point, scheduler 1 should have a bogus change entry
# in the pipeline cache because scheduler 2 performed the
# dequeue, so scheduler 1 never cleaned up its cache.
self.executor_server.fail_tests.clear()
self.executor_server.hold_jobs_in_build = True
# Pause scheduler 1
with sched1.sched.run_handler_lock:
# Enqueue into gate with scheduler 2
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled(matcher=[sched2])
# Pause scheduler 2
with sched2.sched.run_handler_lock:
# Make sure that scheduler 1 does some pipeline runs which
# reconstitute state from ZK. This gives it the
# opportunity to use old cache data if we don't clear it.
# Release job1
self.executor_server.release()
self.waitUntilSettled(matcher=[sched1])
# Release job2
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
# Wait for scheduler 1 to merge change
self.waitUntilSettled(matcher=[sched1])
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
@simple_layout('layouts/multi-scheduler-status.yaml')
def test_multi_scheduler_status(self):
self.hold_merge_jobs_in_queue = True
first = self.scheds.first
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
self.waitUntilSettled()
self.log.debug("Force second scheduler to process check")
with first.sched.run_handler_lock:
event = zuul.model.PipelinePostConfigEvent()
first.sched.pipeline_management_events[
'tenant-one']['check'].put(event, needs_result=False)
self.waitUntilSettled(matcher=[second])
self.log.debug("Add change in first scheduler")
with second.sched.run_handler_lock:
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[first])
self.log.debug("Finish change in second scheduler")
with first.sched.run_handler_lock:
self.hold_merge_jobs_in_queue = False
self.merger_api.release()
self.waitUntilSettled(matcher=[second])
self.assertHistory([])
tenant = first.sched.abide.tenants['tenant-one']
pipeline = tenant.layout.pipelines['check']
summary = zuul.model.PipelineSummary()
summary._set(pipeline=pipeline)
with self.createZKContext() as context:
summary.refresh(context)
self.assertEqual(summary.status['change_queues'], [])
def test_config_priming(self):
# Wait until scheduler is primed
self.waitUntilSettled()
first_app = self.scheds.first
initial_max_hold_exp = first_app.sched.globals.max_hold_expiration
layout_state = first_app.sched.tenant_layout_state.get("tenant-one")
self.assertIsNotNone(layout_state)
# Second scheduler instance
second_app = self.createScheduler()
# Change a system attribute in order to check that the system config
# from Zookeeper was used.
second_app.sched.globals.max_hold_expiration += 1234
second_app.config.set("scheduler", "max_hold_expiration", str(
second_app.sched.globals.max_hold_expiration))
second_app.start()
self.waitUntilSettled()
self.assertEqual(first_app.sched.local_layout_state.get("tenant-one"),
second_app.sched.local_layout_state.get("tenant-one"))
# Make sure only the first schedulers issued cat jobs
self.assertIsNotNone(
first_app.sched.merger.merger_api.history.get("cat"))
self.assertIsNone(
second_app.sched.merger.merger_api.history.get("cat"))
for _ in iterate_timeout(
10, "Wait for all schedulers to have the same system config"):
if (first_app.sched.unparsed_abide.ltime
== second_app.sched.unparsed_abide.ltime):
break
# TODO (swestphahl): change this to assertEqual() when we remove
# the smart reconfiguration during config priming.
# Currently the smart reconfiguration during priming of the second
# scheduler will update the system config in Zookeeper and the first
# scheduler updates its config in return.
self.assertNotEqual(second_app.sched.globals.max_hold_expiration,
initial_max_hold_exp)
def test_reconfigure(self):
# Create a second scheduler instance
app = self.createScheduler()
app.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "Wait until priming is complete"):
old = self.scheds.first.sched.tenant_layout_state.get("tenant-one")
if old is not None:
break
for _ in iterate_timeout(
10, "Wait for all schedulers to have the same layout state"):
layout_states = [a.sched.local_layout_state.get("tenant-one")
for a in self.scheds.instances]
if all(l == old for l in layout_states):
break
self.scheds.first.sched.reconfigure(self.scheds.first.config)
self.waitUntilSettled()
new = self.scheds.first.sched.tenant_layout_state["tenant-one"]
self.assertNotEqual(old, new)
for _ in iterate_timeout(10, "Wait for all schedulers to update"):
layout_states = [a.sched.local_layout_state.get("tenant-one")
for a in self.scheds.instances]
if all(l == new for l in layout_states):
break
layout_uuids = [a.sched.abide.tenants["tenant-one"].layout.uuid
for a in self.scheds.instances]
self.assertTrue(all(l == new.uuid for l in layout_uuids))
self.waitUntilSettled()
def test_live_reconfiguration_del_pipeline(self):
# Test pipeline deletion while changes are enqueued
# Create a second scheduler instance
app = self.createScheduler()
app.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "Wait until priming is complete"):
old = self.scheds.first.sched.tenant_layout_state.get("tenant-one")
if old is not None:
break
for _ in iterate_timeout(
10, "Wait for all schedulers to have the same layout state"):
layout_states = [a.sched.local_layout_state.get("tenant-one")
for a in self.scheds.instances]
if all(l == old for l in layout_states):
break
pipeline_zk_path = app.sched.abide.tenants[
"tenant-one"].layout.pipelines["check"].state.getPath()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
# Let the first scheduler enqueue the change into the pipeline that
# will be removed later on.
with app.sched.run_handler_lock:
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[self.scheds.first])
# Process item only on second scheduler so the first scheduler has
# an outdated pipeline state.
with self.scheds.first.sched.run_handler_lock:
self.executor_server.release('.*-merge')
self.waitUntilSettled(matcher=[app])
self.assertEqual(len(self.builds), 2)
self.commitConfigUpdate(
'common-config',
'layouts/live-reconfiguration-del-pipeline.yaml')
# Trigger a reconfiguration on the first scheduler with the outdated
# pipeline state of the pipeline that will be removed.
self.scheds.execute(lambda a: a.sched.reconfigure(a.config),
matcher=[self.scheds.first])
new = self.scheds.first.sched.tenant_layout_state.get("tenant-one")
for _ in iterate_timeout(
10, "Wait for all schedulers to have the same layout state"):
layout_states = [a.sched.local_layout_state.get("tenant-one")
for a in self.scheds.instances]
if all(l == new for l in layout_states):
break
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 0)
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='ABORTED', changes='1,1'),
dict(name='project-test2', result='ABORTED', changes='1,1'),
], ordered=False)
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines), 0)
stat = self.zk_client.client.exists(pipeline_zk_path)
self.assertIsNone(stat)
def test_change_cache(self):
# Test re-using a change from the change cache.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# This has populated the change cache with our change.
app = self.createScheduler()
app.start()
self.assertEqual(len(self.scheds), 2)
# Hold the lock on the first scheduler so that only the second
# will act.
with self.scheds.first.sched.run_handler_lock:
# Enqueue the change again. The second scheduler will
# load the change object from the cache.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[app])
# Each job should appear twice and contain both changes.
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
def test_change_cache_error(self):
# Test that if a change is deleted from the change cache,
# pipeline processing can continue
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Delete the change cache
for connection in self.scheds.first.connections.connections.values():
if hasattr(connection, '_change_cache'):
connection.maintainCache([], max_age=0)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
# Release
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1 2,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1 2,1'),
], ordered=False)
def test_pipeline_summary(self):
# Test that we can deal with a truncated pipeline summary
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['check']
context = self.createZKContext()
def new_summary():
summary = zuul.model.PipelineSummary()
summary._set(pipeline=pipeline)
with context:
summary.refresh(context)
return summary
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Check we have a good summary
summary1 = new_summary()
self.assertNotEqual(summary1.status, {})
self.assertTrue(context.client.exists(summary1.getPath()))
# Make a syntax error in the status summary json
summary = new_summary()
summary._save(context, b'{"foo')
# With the corrupt data, we should get an empty status but the
# path should still exist.
summary2 = new_summary()
self.assertEqual(summary2.status, {})
self.assertTrue(context.client.exists(summary2.getPath()))
# Our earlier summary object should use its cached data
with context:
summary1.refresh(context)
self.assertNotEqual(summary1.status, {})
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# The scheduler should have written a new summary that our
# second object can read now.
with context:
summary2.refresh(context)
self.assertNotEqual(summary2.status, {})
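# The tolerant behavior verified above boils down to guarding the
# JSON parse and falling back to an empty status. A minimal sketch
# of that pattern (illustrative; not the actual PipelineSummary
# code):
#
#     import json
#
#     def load_status(raw):
#         try:
#             return json.loads(raw)
#         except ValueError:
#             return {}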
@simple_layout('layouts/semaphore.yaml')
def test_semaphore(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'test1')
self.assertHistory([])
tenant = self.scheds.first.sched.abide.tenants['tenant-one']
semaphore = tenant.semaphore_handler.getSemaphores()[0]
holders = tenant.semaphore_handler.semaphoreHolders(semaphore)
self.assertEqual(len(holders), 1)
# Start a second scheduler so that it runs through the initial
# cleanup processes.
app = self.createScheduler()
# Hold the lock on the second scheduler so that if any events
# happen, they are processed by the first scheduler (this lets
# them be as out of sync as possible).
with app.sched.run_handler_lock:
app.start()
self.assertEqual(len(self.scheds), 2)
self.waitUntilSettled(matcher=[self.scheds.first])
# Wait until initial cleanup is run
app.sched.start_cleanup_thread.join()
# We should not have released the semaphore
holders = tenant.semaphore_handler.semaphoreHolders(semaphore)
self.assertEqual(len(holders), 1)
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'test2')
self.assertHistory([
dict(name='test1', result='SUCCESS', changes='1,1'),
], ordered=False)
holders = tenant.semaphore_handler.semaphoreHolders(semaphore)
self.assertEqual(len(holders), 1)
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertHistory([
dict(name='test1', result='SUCCESS', changes='1,1'),
dict(name='test2', result='SUCCESS', changes='1,1'),
], ordered=False)
holders = tenant.semaphore_handler.semaphoreHolders(semaphore)
self.assertEqual(len(holders), 0)
@simple_layout('layouts/two-projects-integrated.yaml')
def test_nodepool_relative_priority_check(self):
"Test that nodes are requested at the relative priority"
self.fake_nodepool.pause()
# Start a second scheduler that uses the existing layout
app = self.createScheduler()
app.start()
# Hold the lock on the first scheduler so that if any events
# happen, they are processed by the second scheduler.
with self.scheds.first.sched.run_handler_lock:
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[app])
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[app])
C = self.fake_gerrit.addFakeChange('org/project1', 'master', 'C')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[app])
D = self.fake_gerrit.addFakeChange('org/project2', 'master', 'D')
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[app])
reqs = self.fake_nodepool.getNodeRequests()
# The requests come back sorted by priority.
# Change A, first change for project, high relative priority.
self.assertEqual(reqs[0]['_oid'], '200-0000000000')
self.assertEqual(reqs[0]['relative_priority'], 0)
# Change C, first change for project1, high relative priority.
self.assertEqual(reqs[1]['_oid'], '200-0000000002')
self.assertEqual(reqs[1]['relative_priority'], 0)
# Change B, second change for project, lower relative priority.
self.assertEqual(reqs[2]['_oid'], '200-0000000001')
self.assertEqual(reqs[2]['relative_priority'], 1)
# Change D, first change for project2 shared with project1,
# lower relative priority than project1.
self.assertEqual(reqs[3]['_oid'], '200-0000000003')
self.assertEqual(reqs[3]['relative_priority'], 1)
# Fulfill only the first request
self.fake_nodepool.fulfillRequest(reqs[0])
for x in iterate_timeout(30, 'fulfill request'):
reqs = list(self.scheds.first.sched.nodepool.getNodeRequests())
if len(reqs) < 4:
break
self.waitUntilSettled(matcher=[app])
reqs = self.fake_nodepool.getNodeRequests()
# Change B, now first change for project, equal priority.
self.assertEqual(reqs[0]['_oid'], '200-0000000001')
self.assertEqual(reqs[0]['relative_priority'], 0)
# Change C, now first change for project1, equal priority.
self.assertEqual(reqs[1]['_oid'], '200-0000000002')
self.assertEqual(reqs[1]['relative_priority'], 0)
# Change D, first change for project2 shared with project1,
# still lower relative priority than project1.
self.assertEqual(reqs[2]['_oid'], '200-0000000003')
self.assertEqual(reqs[2]['relative_priority'], 1)
self.fake_nodepool.unpause()
self.waitUntilSettled()
@simple_layout('layouts/two-projects-integrated.yaml')
def test_nodepool_relative_priority_gate(self):
"Test that nodes are requested at the relative priority"
self.fake_nodepool.pause()
# Start a second scheduler that uses the existing layout
app = self.createScheduler()
app.start()
# Hold the lock on the first scheduler so that if any events
# happen, they are processed by the second scheduler.
with self.scheds.first.sched.run_handler_lock:
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled(matcher=[app])
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled(matcher=[app])
# project does not share a queue with project1 and project2.
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled(matcher=[app])
reqs = self.fake_nodepool.getNodeRequests()
# The requests come back sorted by priority.
# Change A, first change for shared queue, high relative
# priority.
self.assertEqual(reqs[0]['_oid'], '100-0000000000')
self.assertEqual(reqs[0]['relative_priority'], 0)
# Change C, first change for independent project, high
# relative priority.
self.assertEqual(reqs[1]['_oid'], '100-0000000002')
self.assertEqual(reqs[1]['relative_priority'], 0)
# Change B, second change for shared queue, lower relative
# priority.
self.assertEqual(reqs[2]['_oid'], '100-0000000001')
self.assertEqual(reqs[2]['relative_priority'], 1)
self.fake_nodepool.unpause()
self.waitUntilSettled()
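# The ordering assertions above rely on the node request IDs sorting
# lexicographically by precedence: the leading number appears to
# encode pipeline precedence (gate requests start with 100, check
# requests with 200 here), followed by a sequence number. For
# example:
#
#     >>> sorted(['200-0000000001', '100-0000000000', '200-0000000000'])
#     ['100-0000000000', '200-0000000000', '200-0000000001']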
class TestSOSCircularDependencies(ZuulTestCase):
# These tests exercise specific interactions between multiple
# schedulers. They create additional schedulers as necessary and
# start or stop them individually to test specific interactions.
# Using scheduler_count to create even more schedulers doesn't
# make sense for these tests.
scheduler_count = 1
@simple_layout('layouts/sos-circular.yaml')
def test_sos_circular_deps(self):
# This test sets the window to 1 so that we can test a code
# path where we write the queue items to ZK as little as
# possible on the first scheduler while doing most of the work
# on the second.
self.executor_server.hold_jobs_in_build = True
Z = self.fake_gerrit.addFakeChange('org/project', "master", "Z")
A = self.fake_gerrit.addFakeChange('org/project', "master", "A")
B = self.fake_gerrit.addFakeChange('org/project', "master", "B")
# Z, A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
Z.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(Z.addApproval("Approved", 1))
self.waitUntilSettled()
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
# Start a second scheduler
app = self.createScheduler()
app.start()
self.assertEqual(len(self.scheds), 2)
self.waitUntilSettled()
# Hold the lock on the first scheduler so that only the second
# will act.
with self.scheds.first.sched.run_handler_lock:
# Release the first item so the second moves into the
# active window.
self.assertEqual(len(self.builds), 2)
builds = self.builds[:]
builds[0].release()
builds[1].release()
self.waitUntilSettled(matcher=[app])
self.assertEqual(len(self.builds), 4)
builds = self.builds[:]
self.executor_server.failJob('job1', A)
builds[0].release()
app.sched.wake_event.set()
self.waitUntilSettled(matcher=[app])
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
class TestScaleOutSchedulerMultiTenant(ZuulTestCase):
# These tests exercise specific interactions between multiple
# schedulers. They create additional schedulers as necessary and
# start or stop them individually to test specific interactions.
# Using scheduler_count to create even more schedulers doesn't
# make sense for these tests.
scheduler_count = 1
tenant_config_file = "config/two-tenant/main.yaml"
def test_background_layout_update(self):
# This test performs a reconfiguration on one scheduler and
# verifies that a second scheduler begins processing changes
# for each tenant as it is updated.
first = self.scheds.first
# Create a second scheduler instance
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
tenant_one_lock = SessionAwareWriteLock(
self.zk_client.client,
f"{TENANT_LOCK_ROOT}/tenant-one")
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
state_two = first.sched.local_layout_state.get("tenant-two")
if all([state_one, state_two]):
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one and
second.sched.local_layout_state.get(
"tenant-two") == state_two):
break
self.log.debug("Freeze scheduler-1")
with second.sched.layout_update_lock:
state_one = first.sched.local_layout_state.get("tenant-one")
state_two = first.sched.local_layout_state.get("tenant-two")
self.log.debug("Reconfigure scheduler-0")
first.sched.reconfigure(first.config)
for _ in iterate_timeout(
10, "tenants to be updated on scheduler-0"):
if ((first.sched.local_layout_state["tenant-one"] !=
state_one) and
(first.sched.local_layout_state["tenant-two"] !=
state_two)):
break
self.waitUntilSettled(matcher=[first])
self.log.debug("Grab tenant-one write lock")
tenant_one_lock.acquire(blocking=True)
self.log.debug("Thaw scheduler-1")
self.log.debug("Freeze scheduler-0")
with first.sched.run_handler_lock:
try:
self.log.debug("Open change in tenant-one")
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
for _ in iterate_timeout(30, "trigger event appears"):
if second.sched.trigger_events['tenant-one'].hasEvents():
break
for _ in iterate_timeout(
30, "tenant-two to be updated on scheduler-1"):
if (first.sched.local_layout_state["tenant-two"] ==
second.sched.local_layout_state.get("tenant-two")):
break
# Tenant two should be up to date, but tenant one should
# still be out of date on scheduler two.
self.assertEqual(
first.sched.local_layout_state["tenant-two"],
second.sched.local_layout_state["tenant-two"])
self.assertNotEqual(
first.sched.local_layout_state["tenant-one"],
second.sched.local_layout_state["tenant-one"])
self.log.debug("Verify tenant-one change is unprocessed")
# If we have updated tenant-two's configuration without
# processing the tenant-one change, then we know we've
# completed at least one run loop.
self.assertHistory([])
self.log.debug("Open change in tenant-two")
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.log.debug(
"Wait for scheduler-1 to process tenant-two change")
for _ in iterate_timeout(30, "tenant-two build finish"):
if len(self.history):
break
self.assertHistory([
dict(name='test', result='SUCCESS', changes='2,1'),
], ordered=False)
# Tenant two should be up to date, but tenant one should
# still be out of date on scheduler two.
self.assertEqual(
first.sched.local_layout_state["tenant-two"],
second.sched.local_layout_state["tenant-two"])
self.assertNotEqual(
first.sched.local_layout_state["tenant-one"],
second.sched.local_layout_state["tenant-one"])
self.log.debug("Release tenant-one write lock")
finally:
# Release this in a finally clause so that the test
# doesn't hang if we fail an assertion.
tenant_one_lock.release()
self.log.debug("Wait for both changes to be processed")
self.waitUntilSettled(matcher=[second])
self.assertHistory([
dict(name='test', result='SUCCESS', changes='2,1'),
dict(name='test', result='SUCCESS', changes='1,1'),
], ordered=False)
# Both tenants should be up to date
self.assertEqual(first.sched.local_layout_state["tenant-two"],
second.sched.local_layout_state["tenant-two"])
self.assertEqual(first.sched.local_layout_state["tenant-one"],
second.sched.local_layout_state["tenant-one"])
self.waitUntilSettled()
def test_background_layout_update_add_tenant(self):
# This test adds a new tenant and verifies that two schedulers
# end up with layouts for the new tenant (one after an initial
# reconfiguration, the other via the background update
# thread).
first = self.scheds.first
# Create a second scheduler instance
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
state_two = first.sched.local_layout_state.get("tenant-two")
if all([state_one, state_two]):
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one and
second.sched.local_layout_state.get(
"tenant-two") == state_two):
break
self.log.debug("Freeze scheduler-1")
with second.sched.layout_update_lock:
state_one = first.sched.local_layout_state.get("tenant-one")
state_two = first.sched.local_layout_state.get("tenant-two")
self.log.debug("Reconfigure scheduler-0")
self.newTenantConfig('config/two-tenant/three-tenant.yaml')
first.smartReconfigure(command_socket=True)
for _ in iterate_timeout(
10, "tenants to be updated on scheduler-0"):
if 'tenant-three' in first.sched.local_layout_state:
break
self.waitUntilSettled(matcher=[first])
self.log.debug("Thaw scheduler-1")
for _ in iterate_timeout(
10, "tenants to be updated on scheduler-1"):
if 'tenant-three' in second.sched.local_layout_state:
break
self.waitUntilSettled(matcher=[second])
def test_background_layout_update_remove_tenant(self):
# This test removes a tenant and verifies that the two schedulers
# remove the tenant from their layout (one after an initial
# reconfiguration, the other via the background update
# thread).
first = self.scheds.first
# Create a second scheduler instance
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
state_two = first.sched.local_layout_state.get("tenant-two")
if all([state_one, state_two]):
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one and
second.sched.local_layout_state.get(
"tenant-two") == state_two):
break
self.assertIn('tenant-two', first.sched.abide.tenants)
self.assertIn('tenant-two', second.sched.abide.tenants)
self.log.debug("Freeze scheduler-1")
with second.sched.layout_update_lock:
self.log.debug("Reconfigure scheduler-0")
self.newTenantConfig('config/two-tenant/one-tenant.yaml')
first.smartReconfigure(command_socket=True)
for _ in iterate_timeout(
10, "tenants to be removed on scheduler-0"):
if 'tenant-two' not in first.sched.local_layout_state:
break
self.waitUntilSettled(matcher=[first])
self.assertNotIn('tenant-two', first.sched.abide.tenants)
self.log.debug("Thaw scheduler-1")
for _ in iterate_timeout(
10, "tenants to be removed on scheduler-1"):
if 'tenant-two' not in second.sched.local_layout_state:
break
self.waitUntilSettled(matcher=[second])
self.assertNotIn('tenant-two', second.sched.abide.tenants)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_sos.py
|
test_sos.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
from bs4 import BeautifulSoup
from tests.base import ZuulTestCase, WebProxyFixture
from tests.base import ZuulWebFixture
class TestWebURLs(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def setUp(self):
super(TestWebURLs, self).setUp()
self.web = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
def _get(self, port, uri):
url = "http://localhost:{}{}".format(port, uri)
self.log.debug("GET {}".format(url))
req = urllib.request.Request(url)
try:
f = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
raise Exception("Error on URL {}".format(url)) from e
return f.read()
def _crawl(self, url):
page = self._get(self.port, url)
page = BeautifulSoup(page, 'html.parser')
for (tag, attr) in [
('script', 'src'),
('link', 'href'),
('a', 'href'),
('img', 'src'),
]:
for item in page.find_all(tag):
suburl = item.get(attr)
if tag == 'script' and suburl is None:
# There can be an embedded script
continue
if suburl.startswith('/'):
suburl = suburl[1:]
link = urllib.parse.urljoin(url, suburl)
self._get(self.port, link)
class TestDirect(TestWebURLs):
# Test directly accessing the zuul-web server with no proxy
def setUp(self):
super(TestDirect, self).setUp()
self.port = self.web.port
def test_status_page(self):
self._crawl('/')
self._crawl('/t/tenant-one/status')
class TestWhiteLabel(TestWebURLs):
# Test a zuul-web behind a whitelabel proxy (i.e., what
# zuul.openstack.org does).
def setUp(self):
super(TestWhiteLabel, self).setUp()
rules = [
('^/(.*)$', 'http://localhost:{}/\\1'.format(self.web.port)),
]
self.proxy = self.useFixture(WebProxyFixture(rules))
self.port = self.proxy.port
def test_status_page(self):
self._crawl('/')
self._crawl('/status')
class TestWhiteLabelAPI(TestWebURLs):
# Test a zuul-web behind a whitelabel proxy (i.e., what
# zuul.openstack.org does).
def setUp(self):
super(TestWhiteLabelAPI, self).setUp()
rules = [
('^/api/(.*)$',
'http://localhost:{}/api/tenant/tenant-one/\\1'.format(
self.web.port)),
]
self.proxy = self.useFixture(WebProxyFixture(rules))
self.port = self.proxy.port
def test_info(self):
info = json.loads(self._get(self.port, '/api/info').decode('utf-8'))
self.assertEqual('tenant-one', info['info']['tenant'])
class TestSuburl(TestWebURLs):
# Test a zuul-web mounted on a suburl (i.e., what software factory
# does).
def setUp(self):
super(TestSuburl, self).setUp()
rules = [
('^/zuul/(.*)$', 'http://localhost:{}/\\1'.format(
self.web.port)),
]
self.proxy = self.useFixture(WebProxyFixture(rules))
self.port = self.proxy.port
def test_status_page(self):
self._crawl('/zuul/')
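# For reference, each WebProxyFixture rule above is a (regex, target)
# pair: the incoming request path is matched against the regex and
# rewritten into the target URL, with backreferences like \1
# substituting captured groups. With the suburl rule, for example, a
# request for /zuul/t/tenant-one/status would presumably be forwarded
# to http://localhost:<port>/t/tenant-one/status.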
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_web_urls.py
|
test_web_urls.py
|
# Copyright 2019 BMW Group
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
import textwrap
from zuul.model import PromoteEvent
from tests.base import (
iterate_timeout,
simple_layout,
ZuulGithubAppTestCase,
ZuulTestCase,
)
class TestGerritCircularDependencies(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
tenant_config_file = "config/circular-dependencies/main.yaml"
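    # Helper for the basic two-change cycle: A and B reference each
    # other via Depends-On footers, are verified together in check, and
    # must merge together in gate.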
def _test_simple_cycle(self, project1, project2):
A = self.fake_gerrit.addFakeChange(project1, "master", "A")
B = self.fake_gerrit.addFakeChange(project2, "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
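    # The cache-flush loop used above recurs throughout this class; a
    # hypothetical helper (not part of these tests) could centralize it:
    #
    #     def _clearConnectionCaches(self):
    #         connections = self.scheds.first.connections.connections
    #         for connection in connections.values():
    #             connection.maintainCache([], max_age=0)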
def _test_transitive_cycle(self, project1, project2, project3):
A = self.fake_gerrit.addFakeChange(project1, "master", "A")
B = self.fake_gerrit.addFakeChange(project2, "master", "B")
C = self.fake_gerrit.addFakeChange(project3, "master", "C")
# A -> B -> C -> A (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, C.data["url"]
)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
C.addApproval("Approved", 1)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
def test_single_project_cycle(self):
self._test_simple_cycle("org/project", "org/project")
def test_crd_cycle(self):
self._test_simple_cycle("org/project1", "org/project2")
def test_single_project_transitive_cycle(self):
self._test_transitive_cycle(
"org/project1", "org/project1", "org/project1"
)
def test_crd_transitive_cycle(self):
self._test_transitive_cycle(
"org/project", "org/project1", "org/project2"
)
def test_enqueue_order(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project2", "master", "C")
# A <-> B and A -> C (via commit-depends)
A.data[
"commitMessage"
] = "{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
A.subject, B.data["url"], C.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
# enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
C.addApproval("Approved", 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
self.assertHistory([
# Change A (check + gate)
dict(name="project1-job", result="SUCCESS", changes="3,1 1,1 2,1"),
dict(name="project-vars-job", result="SUCCESS",
changes="3,1 1,1 2,1"),
dict(name="project1-job", result="SUCCESS", changes="3,1 1,1 2,1"),
dict(name="project-vars-job", result="SUCCESS",
changes="3,1 1,1 2,1"),
# Change B (check + gate)
dict(name="project-job", result="SUCCESS", changes="3,1 2,1 1,1"),
dict(name="project-job", result="SUCCESS", changes="3,1 1,1 2,1"),
# Change C (check + gate)
dict(name="project2-job", result="SUCCESS", changes="3,1"),
dict(name="project2-job", result="SUCCESS", changes="3,1"),
], ordered=False)
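    # org/project3 evidently disallows circular dependencies: the same
    # cycle that passes for other projects gets Verified -1 in check
    # here and fails to merge in gate.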
def test_forbidden_cycle(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project3", "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "-1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "-1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 1)
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
def test_git_dependency_with_cycle(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project1", "master", "C")
# A -> B (git) -> C -> A
A.setDependsOn(B, 1)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, C.data["url"]
)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, A.data["url"]
)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
self.executor_server.hold_jobs_in_build = True
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
def test_dependency_on_cycle(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project2", "master", "C")
# A -> B -> C -> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, C.data["url"]
)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, B.data["url"]
)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
C.addApproval("Approved", 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
def test_dependent_change_on_cycle(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project2", "master", "C")
A.setDependsOn(B, 1)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, C.data["url"]
)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, B.data["url"]
)
A.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
C.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
# Make sure the out-of-cycle change (A) is enqueued after the cycle.
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
queue_change_numbers = []
for queue in tenant.layout.pipelines["gate"].queues:
for item in queue.queue:
queue_change_numbers.append(item.change.number)
self.assertEqual(queue_change_numbers, ['2', '3', '1'])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
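    # Two interlocking cycles: A <-> B, with B also depending on the
    # separate C <-> D cycle; all four changes must merge together.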
def test_cycle_dependency_on_cycle(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project1", "master", "C")
D = self.fake_gerrit.addFakeChange("org/project2", "master", "D")
# A -> B -> A + C
# C -> D -> C
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data[
"commitMessage"
] = "{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
B.subject, A.data["url"], C.data["url"]
)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, D.data["url"]
)
D.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
D.subject, C.data["url"]
)
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(D.patchsets[-1]["approvals"]), 1)
self.assertEqual(D.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(D.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
D.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
C.addApproval("Approved", 1)
D.addApproval("Approved", 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(D.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
self.assertEqual(D.data["status"], "MERGED")
def test_cycle_failure(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.executor_server.failJob("project-job", A)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "-1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
self.executor_server.failJob("project-job", A)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertIn("bundle", A.messages[-1])
self.assertIn("bundle", B.messages[-1])
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
@simple_layout('layouts/circular-deps-node-failure.yaml')
def test_cycle_failed_node_request(self):
# Test a node request failure as part of a dependency cycle
# Pause nodepool so we can fail the node request later
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange("org/project1", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project2", "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
# Fail the node request and unpause
req = self.fake_nodepool.getNodeRequests()
self.fake_nodepool.addFailRequest(req[0])
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertIn("bundle", A.messages[-1])
self.assertIn("bundle", B.messages[-1])
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
def test_failing_cycle_behind_failing_change(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project", "master", "C")
D = self.fake_gerrit.addFakeChange("org/project", "master", "D")
E = self.fake_gerrit.addFakeChange("org/project", "master", "E")
# C <-> D (via commit-depends)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, D.data["url"]
)
D.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
D.subject, C.data["url"]
)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
D.addApproval("Code-Review", 2)
E.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
# Make sure we enqueue C as part of the circular dependency with D, so
# we end up with the following queue state: A, B, C, ...
C.addApproval("Approved", 1)
self.fake_gerrit.addEvent(D.addApproval("Approved", 1))
self.fake_gerrit.addEvent(E.addApproval("Approved", 1))
self.waitUntilSettled()
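        # Note: release()'s change argument uses the "<number> <patchset>"
        # form, so "4 1" below is change D at patchset 1 and "2 1" is
        # change B.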
# Fail a job of the circular dependency
self.executor_server.failJob("project-job", D)
self.executor_server.release("project-job", change="4 1")
# Fail job for item B ahead of the circular dependency so that this
# causes a gate reset and item C and D are moved behind item A.
self.executor_server.failJob("project-job", B)
self.executor_server.release("project-job", change="2 1")
self.waitUntilSettled()
# Don't fail any other jobs
self.executor_server.fail_tests.clear()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "NEW")
self.assertEqual(C.data["status"], "MERGED")
self.assertEqual(D.data["status"], "MERGED")
self.assertEqual(E.data["status"], "MERGED")
self.assertHistory([
dict(name="project-job", result="SUCCESS", changes="1,1"),
dict(name="project-job", result="FAILURE", changes="1,1 2,1"),
# First attempt of change C and D before gate reset due to change B
dict(name="project-job", result="FAILURE",
changes="1,1 2,1 3,1 4,1"),
dict(name="project-job", result="FAILURE",
changes="1,1 2,1 3,1 4,1"),
dict(name="project-job", result="ABORTED",
changes="1,1 2,1 3,1 4,1 5,1"),
dict(name="project-job", result="SUCCESS", changes="1,1 3,1 4,1"),
dict(name="project-job", result="SUCCESS", changes="1,1 3,1 4,1"),
dict(name="project-job", result="SUCCESS",
changes="1,1 3,1 4,1 5,1"),
], ordered=False)
def test_dependency_on_cycle_failure(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project2", "master", "C")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
C.addApproval("Code-Review", 2)
C.addApproval("Approved", 1)
# A -> B -> C -> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, C.data["url"]
)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, B.data["url"]
)
self.executor_server.failJob("project2-job", C)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertIn("depends on a change that failed to merge",
A.messages[-1])
self.assertTrue(re.search(r'Change http://localhost:\d+/2 is needed',
A.messages[-1]))
self.assertFalse(re.search('Change .*? can not be merged',
A.messages[-1]))
self.assertIn("bundle that failed.", B.messages[-1])
self.assertFalse(re.search('Change http://localhost:.*? is needed',
B.messages[-1]))
self.assertFalse(re.search('Change .*? can not be merged',
B.messages[-1]))
self.assertIn("bundle that failed.", C.messages[-1])
self.assertFalse(re.search('Change http://localhost:.*? is needed',
C.messages[-1]))
self.assertFalse(re.search('Change .*? can not be merged',
C.messages[-1]))
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
self.assertEqual(C.data["status"], "NEW")
def test_cycle_dependency_on_change(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project2", "master", "C")
# A -> B -> A + C (git)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
B.setDependsOn(C, 1)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
def test_failing_cycle_dependency_on_change(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project2", "master", "C")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
C.addApproval("Code-Review", 2)
C.addApproval("Approved", 1)
# A -> B -> A + C (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data[
"commitMessage"
] = "{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
B.subject, A.data["url"], C.data["url"]
)
self.executor_server.failJob("project-job", A)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
self.assertEqual(C.data["status"], "MERGED")
def test_reopen_cycle(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project2", "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items_before = tenant.layout.pipelines['gate'].getAllItems()
# Trigger a re-enqueue of change B
self.fake_gerrit.addEvent(B.getChangeAbandonedEvent())
self.fake_gerrit.addEvent(B.getChangeRestoredEvent())
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items_after = tenant.layout.pipelines['gate'].getAllItems()
# Make sure the complete cycle was re-enqueued
for before, after in zip(items_before, items_after):
self.assertNotEqual(before, after)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
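    # A cycle must be enqueued and processed as a unit even when the
    # pipeline's active window is smaller than the cycle, so the window
    # is shrunk to a single change before the simple-cycle scenario.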
def test_cycle_larger_pipeline_window(self):
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
# Make the gate window smaller than the length of the cycle
for queue in tenant.layout.pipelines["gate"].queues:
if any("org/project" in p.name for p in queue.projects):
queue.window = 1
self._test_simple_cycle("org/project", "org/project")
def test_cycle_reporting_failure(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
B.fail_merge = True
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(A.patchsets[-1]["approvals"][-1]["value"], "-2")
self.assertEqual(B.patchsets[-1]["approvals"][-1]["value"], "-2")
self.assertIn("bundle", A.messages[-1])
self.assertIn("bundle", B.messages[-1])
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
buildsets = {bs.change: bs for bs in
self.scheds.first.connections.connections[
'database'].getBuildsets()}
self.assertEqual(buildsets[2].result, 'MERGE_FAILURE')
self.assertEqual(buildsets[1].result, 'FAILURE')
def test_cycle_reporting_partial_failure(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
A.fail_merge = True
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertIn("bundle", A.messages[-1])
self.assertIn("bundle", B.messages[-1])
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "MERGED")
def test_gate_reset_with_cycle(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project1", "master", "C")
# A <-> B (via depends-on)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
C.addApproval("Approved", 1)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.executor_server.failJob("project1-job", C)
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 3)
for build in self.builds:
self.assertTrue(build.hasChanges(A, B))
self.assertFalse(build.hasChanges(C))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "NEW")
def test_independent_bundle_items(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
for queue in tenant.layout.pipelines["check"].queues:
for item in queue.queue:
self.assertIn(item, item.bundle.items)
self.assertEqual(len(item.bundle.items), 2)
for build in self.builds:
self.assertTrue(build.hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
def test_gate_correct_commits(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project1", "master", "C")
D = self.fake_gerrit.addFakeChange("org/project", "master", "D")
# A <-> B (via depends-on)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
D.setDependsOn(A, 1)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
D.addApproval("Code-Review", 2)
C.addApproval("Approved", 1)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.fake_gerrit.addEvent(D.addApproval("Approved", 1))
self.waitUntilSettled()
for build in self.builds:
if build.change in ("1 1", "2 1"):
self.assertTrue(build.hasChanges(C, B, A))
self.assertFalse(build.hasChanges(D))
elif build.change == "3 1":
self.assertTrue(build.hasChanges(C))
self.assertFalse(build.hasChanges(A))
self.assertFalse(build.hasChanges(B))
self.assertFalse(build.hasChanges(D))
else:
self.assertTrue(build.hasChanges(C, B, A, D))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(D.reported, 2)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
self.assertEqual(D.data["status"], "MERGED")
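    # A Depends-On footer in one direction plus a git parent-child
    # relationship in the other also forms a cycle and is gated as one.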
def test_cycle_git_dependency(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
# A -> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
# B -> A (via parent-child dependency)
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
def test_cycle_git_dependency_failure(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
# A -> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
# B -> A (via parent-child dependency)
B.setDependsOn(A, 1)
self.executor_server.failJob("project-job", A)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
def test_independent_reporting(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.getChangeAbandonedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
def test_cycle_merge_conflict(self):
self.hold_merge_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
        # We only want a merge failure for the first item in the queue
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
with self.createZKContext() as context:
items[0].current_build_set.updateAttributes(context,
unable_to_merge=True)
self.waitUntilSettled()
self.hold_merge_jobs_in_queue = False
self.merger_api.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 0)
self.assertEqual(B.reported, 1)
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
def test_circular_config_change(self):
define_job = textwrap.dedent(
"""
- job:
name: new-job
""")
use_job = textwrap.dedent(
"""
- project:
queue: integrated
check:
jobs:
- new-job
gate:
jobs:
- new-job
""")
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files={"zuul.yaml": define_job})
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B",
files={"zuul.yaml": use_job})
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
def test_circular_config_change_job_vars(self):
org_project_files = {
"zuul.yaml": textwrap.dedent(
"""
- job:
name: project-vars-job
deduplicate: false
vars:
test_var: pass
- project:
queue: integrated
check:
jobs:
- project-vars-job
gate:
jobs:
- project-vars-job
""")
}
A = self.fake_gerrit.addFakeChange("org/project2", "master", "A",
files=org_project_files)
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project1", "master", "C")
# C <-> A <-> B (via commit-depends)
A.data["commitMessage"] = (
"{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
A.subject, B.data["url"], C.data["url"]
)
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
C.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
C.subject, A.data["url"]
)
self.executor_server.hold_jobs_in_build = True
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
vars_builds = [b for b in self.builds if b.name == "project-vars-job"]
self.assertEqual(len(vars_builds), 1)
self.assertEqual(vars_builds[0].job.combined_variables["test_var"],
"pass")
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
vars_builds = [b for b in self.builds if b.name == "project-vars-job"]
self.assertEqual(len(vars_builds), 1)
self.assertEqual(vars_builds[0].job.combined_variables["test_var"],
"pass")
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
vars_builds = [b for b in self.builds if b.name == "project-vars-job"]
self.assertEqual(len(vars_builds), 1)
self.assertEqual(vars_builds[0].job.combined_variables["test_var"],
"pass")
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.waitUntilSettled()
vars_builds = [b for b in self.builds if b.name == "project-vars-job"]
self.assertEqual(len(vars_builds), 3)
for build in vars_builds:
self.assertEqual(build.job.combined_variables["test_var"],
"pass")
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
def test_circular_config_change_single_merge_job(self):
"""Regression tests to make sure that a bundle with non-live
config changes only spawns one merge job (so that we avoid
problems with multiple jobs arriving in the wrong order)."""
define_job = textwrap.dedent(
"""
- job:
name: new-job
""")
use_job = textwrap.dedent(
"""
- project:
check:
jobs:
- new-job
gate:
jobs:
- new-job
""")
A = self.fake_gerrit.addFakeChange("org/project", "master", "A",
files={"zuul.yaml": define_job})
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B",
files={"zuul.yaml": use_job})
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.waitUntilSettled()
self.hold_merge_jobs_in_queue = True
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Assert that there is a single merge job for the bundle.
self.assertEqual(len(self.merger_api.queued()), 1)
self.hold_merge_jobs_in_queue = False
self.merger_api.release()
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
def test_bundle_id_in_zuul_var(self):
A = self.fake_gerrit.addFakeChange("org/project1", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project1", "master", "C")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.executor_server.hold_jobs_in_build = True
# bundle_id should be in check build of A
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
var_zuul_items = self.builds[0].parameters["zuul"]["items"]
self.assertEqual(len(var_zuul_items), 2)
self.assertIn("bundle_id", var_zuul_items[0])
bundle_id_0 = var_zuul_items[0]["bundle_id"]
self.assertIn("bundle_id", var_zuul_items[1])
bundle_id_1 = var_zuul_items[1]["bundle_id"]
self.assertEqual(bundle_id_0, bundle_id_1)
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
# bundle_id should be in check build of B
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
var_zuul_items = self.builds[0].parameters["zuul"]["items"]
self.assertEqual(len(var_zuul_items), 2)
self.assertIn("bundle_id", var_zuul_items[0])
bundle_id_0 = var_zuul_items[0]["bundle_id"]
self.assertIn("bundle_id", var_zuul_items[1])
bundle_id_1 = var_zuul_items[1]["bundle_id"]
self.assertEqual(bundle_id_0, bundle_id_1)
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
# bundle_id should not be in check build of C
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
var_zuul_items = self.builds[0].parameters["zuul"]["items"]
self.assertEqual(len(var_zuul_items), 1)
self.assertNotIn("bundle_id", var_zuul_items[0])
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
# bundle_id should be in gate jobs of A and B, but not in C
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.waitUntilSettled()
var_zuul_items = self.builds[-1].parameters["zuul"]["items"]
self.assertEqual(len(var_zuul_items), 3)
self.assertIn("bundle_id", var_zuul_items[0])
bundle_id_0 = var_zuul_items[0]["bundle_id"]
self.assertIn("bundle_id", var_zuul_items[1])
bundle_id_1 = var_zuul_items[1]["bundle_id"]
self.assertEqual(bundle_id_0, bundle_id_1)
self.assertNotIn("bundle_id", var_zuul_items[2])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
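    # D lives in org/project4, which (per the test name) belongs to a
    # different tenant, so the full cycle cannot be gated as one unit;
    # the rest only merges once D is treated as already merged.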
def test_cross_tenant_cycle(self):
org_project_files = {
"zuul.yaml": textwrap.dedent(
"""
- job:
name: project-vars-job
vars:
test_var: pass
- project:
queue: integrated
check:
jobs:
- project-vars-job
gate:
jobs:
- project-vars-job
""")
}
        # Change the Zuul config so the bundle is treated as updating
        # configuration
A = self.fake_gerrit.addFakeChange("org/project2", "master", "A",
files=org_project_files)
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project1", "master", "C")
        D = self.fake_gerrit.addFakeChange("org/project4", "master", "D")
# C <-> A <-> B (via commit-depends)
A.data["commitMessage"] = (
"{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
A.subject, B.data["url"], C.data["url"]
)
)
# A <-> B (via commit-depends)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
# A <-> C <-> D (via commit-depends)
C.data["commitMessage"] = (
"{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
C.subject, A.data["url"], D.data["url"]
)
)
# D <-> C (via commit-depends)
D.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
D.subject, C.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "-1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "-1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "-1")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
D.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.fake_gerrit.addEvent(D.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
self.assertEqual(C.data["status"], "NEW")
        # Pretend D was merged so we can gate the cycle
        D.setMerged()
        for connection in self.scheds.first.connections.connections.values():
            connection.maintainCache([], max_age=0)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 6)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
def test_cycle_unknown_repo(self):
self.init_repo("org/unknown", tag='init')
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/unknown", "master", "B")
# A <-> B (via commit-depends)
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "-1")
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
B.setMerged()
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 4)
self.assertEqual(A.data["status"], "MERGED")
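    # Promoting change 2 (B) should pull its whole cycle (A <-> B) to
    # the front of the gate queue, ahead of the unrelated change C.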
def test_promote_cycle(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project1", "master", "B")
C = self.fake_gerrit.addFakeChange("org/project2", "master", "C")
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
B.addApproval("Approved", 1)
self.fake_gerrit.addEvent(C.addApproval("Approved", 1))
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
event = PromoteEvent('tenant-one', 'gate', ["2,1"])
self.scheds.first.sched.pipeline_management_events['tenant-one'][
'gate'].put(event)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 4)
self.assertTrue(self.builds[0].hasChanges(A))
self.assertTrue(self.builds[0].hasChanges(B))
self.assertFalse(self.builds[0].hasChanges(C))
self.assertTrue(self.builds[1].hasChanges(A))
self.assertTrue(self.builds[1].hasChanges(B))
        self.assertFalse(self.builds[1].hasChanges(C))
self.assertTrue(self.builds[3].hasChanges(B))
self.assertTrue(self.builds[3].hasChanges(C))
self.assertTrue(self.builds[3].hasChanges(A))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
def test_shared_queue_removed(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# Remove the shared queue.
self.commitConfigUpdate(
'common-config',
'layouts/circular-dependency-shared-queue-removed.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'MERGED')
def _test_job_deduplication(self):
        # We make a second scheduler here so that the first scheduler
        # can freeze the jobs for the first item, and the second
        # scheduler can freeze the jobs for the second.  This forces
        # the scheduler to compare a deserialized FrozenJob with a
        # newly created one and therefore surfaces any
        # difference-in-serialization issues.
self.hold_merge_jobs_in_queue = True
app = self.createScheduler()
app.start()
self.assertEqual(len(self.scheds), 2)
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
with app.sched.run_handler_lock:
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled(matcher=[self.scheds.first])
self.merger_api.release(self.merger_api.queued()[0])
self.waitUntilSettled(matcher=[self.scheds.first])
# Hold the lock on the first scheduler so that only the second
# will act.
with self.scheds.first.sched.run_handler_lock:
self.merger_api.release()
self.waitUntilSettled(matcher=[app])
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
@simple_layout('layouts/job-dedup-auto-shared.yaml')
def test_job_deduplication_auto_shared(self):
self._test_job_deduplication()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This is deduplicated
# dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 3)
@simple_layout('layouts/job-dedup-auto-unshared.yaml')
def test_job_deduplication_auto_unshared(self):
self._test_job_deduplication()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This is not deduplicated
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 4)
@simple_layout('layouts/job-dedup-true.yaml')
def test_job_deduplication_true(self):
self._test_job_deduplication()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This is deduplicated
# dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 3)
@simple_layout('layouts/job-dedup-false.yaml')
def test_job_deduplication_false(self):
self._test_job_deduplication()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This is not deduplicated, though it would be under auto
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 4)
@simple_layout('layouts/job-dedup-empty-nodeset.yaml')
def test_job_deduplication_empty_nodeset(self):
# Make sure that jobs with empty nodesets can still be
# deduplicated
self._test_job_deduplication()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This is deduplicated
# dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 0)
@simple_layout('layouts/job-dedup-auto-shared.yaml')
def test_job_deduplication_failed_node_request(self):
# Pause nodepool so we can fail the node request later
self.fake_nodepool.pause()
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
# Fail the node request and unpause
for req in self.fake_nodepool.getNodeRequests():
if req['requestor_data']['job_name'] == 'common-job':
self.fake_nodepool.addFailRequest(req)
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertHistory([])
self.assertEqual(len(self.fake_nodepool.history), 3)
@simple_layout('layouts/job-dedup-auto-shared.yaml')
def test_job_deduplication_failed_job(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.executor_server.failJob("common-job", A)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
# If we don't make sure these jobs finish first, then one of
# the items may complete before the other and cause Zuul to
# abort the project*-job on the other item (with a "bundle
# failed to merge" error).
self.waitUntilSettled()
self.executor_server.release('project1-job')
self.executor_server.release('project2-job')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="FAILURE", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This is deduplicated
# dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 3)
@simple_layout('layouts/job-dedup-false.yaml')
def test_job_deduplication_false_failed_job(self):
# Test that if we are *not* deduplicating jobs, we don't
# duplicate the result on two different builds.
# The way we check that is to retry the common-job between two
# items, but only once, and only on one item. The other item
# should be unaffected.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
# If we don't make sure these jobs finish first, then one of
# the items may complete before the other and cause Zuul to
# abort the project*-job on the other item (with a "bundle
# failed to merge" error).
self.waitUntilSettled()
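        # Find the common-job build on org/project1 so we can mark
        # it for retry.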
for build in self.builds:
if build.name == 'common-job' and build.project == 'org/project1':
break
else:
raise Exception("Unable to find build")
build.should_retry = True
# Store a reference to the queue items so we can inspect their
# internal attributes later to double check the retry build
# count is correct.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['gate']
items = pipeline.getAllItems()
self.assertEqual(len(items), 2)
self.executor_server.release('project1-job')
self.executor_server.release('project2-job')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertHistory([
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result=None, changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 5)
self.assertEqual(items[0].change.project.name, 'org/project2')
self.assertEqual(len(items[0].current_build_set.retry_builds), 0)
self.assertEqual(items[1].change.project.name, 'org/project1')
self.assertEqual(len(items[1].current_build_set.retry_builds), 1)
@simple_layout('layouts/job-dedup-auto-shared.yaml')
def test_job_deduplication_multi_scheduler(self):
# Test that a second scheduler can correctly refresh
# deduplicated builds
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
app = self.createScheduler()
app.start()
self.assertEqual(len(self.scheds), 2)
# Hold the lock on the first scheduler so that only the second
# will act.
with self.scheds.first.sched.run_handler_lock:
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled(matcher=[app])
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
@simple_layout('layouts/job-dedup-noop.yaml')
def test_job_deduplication_noop(self):
# Test that we don't deduplicate noop (there's no good reason
# to do so)
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
# It's tricky to get info about a noop build, but the jobs in
# the report have the build UUID, so we make sure it's
# different.
        a_noop = [line for line in A.messages[-1].split('\n')
                  if 'noop' in line][0]
        b_noop = [line for line in B.messages[-1].split('\n')
                  if 'noop' in line][0]
self.assertNotEqual(a_noop, b_noop)
@simple_layout('layouts/job-dedup-retry.yaml')
def test_job_deduplication_retry(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.executor_server.retryJob('common-job', A)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# There should be exactly 3 runs of the job (not 6)
dict(name="common-job", result=None, changes="2,1 1,1"),
dict(name="common-job", result=None, changes="2,1 1,1"),
dict(name="common-job", result=None, changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 5)
@simple_layout('layouts/job-dedup-retry-child.yaml')
def test_job_deduplication_retry_child(self):
# This tests retrying a paused build (simulating an executor restart)
# See test_data_return_child_from_retried_paused_job
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
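        # Have parent-job pause itself so that its child jobs run
        # while it is still active.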
self.executor_server.returnData(
'parent-job', A,
{'zuul': {'pause': True}}
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('parent-job')
self.waitUntilSettled("till job is paused")
paused_job = self.builds[0]
self.assertTrue(paused_job.paused)
# Stop the job worker to simulate an executor restart
for job_worker in self.executor_server.job_workers.values():
if job_worker.build_request.uuid == paused_job.uuid:
job_worker.stop()
self.waitUntilSettled("stop job worker")
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled("all jobs are done")
# The "pause" job might be paused during the waitUntilSettled
# call and appear settled; it should automatically resume
# though, so just wait for it.
for x in iterate_timeout(60, 'paused job'):
if not self.builds:
break
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertHistory([
dict(name="parent-job", result="ABORTED", changes="2,1 1,1"),
dict(name="project1-job", result="ABORTED", changes="2,1 1,1"),
dict(name="project2-job", result="ABORTED", changes="2,1 1,1"),
dict(name="parent-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 6)
@simple_layout('layouts/job-dedup-parent-data.yaml')
def test_job_deduplication_parent_data(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
# The parent job returns data
self.executor_server.returnData(
'parent-job', A,
{'zuul':
{'artifacts': [
{'name': 'image',
'url': 'http://example.com/image',
'metadata': {
'type': 'container_image'
}},
]}}
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertHistory([
dict(name="parent-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# Only one run of the common job since it's the same
dict(name="common-child-job", result="SUCCESS", changes="2,1 1,1"),
# The forked job depends on different parents
# so it should run twice
dict(name="forked-child-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="forked-child-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 6)
def _test_job_deduplication_semaphore(self):
"Test semaphores with max=1 (mutex) and get resources first"
self.executor_server.hold_jobs_in_build = True
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
1)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This is deduplicated
# dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 3)
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
0)
@simple_layout('layouts/job-dedup-semaphore.yaml')
def test_job_deduplication_semaphore(self):
self._test_job_deduplication_semaphore()
@simple_layout('layouts/job-dedup-semaphore-first.yaml')
def test_job_deduplication_semaphore_resources_first(self):
self._test_job_deduplication_semaphore()
@simple_layout('layouts/job-dedup-auto-shared-check.yaml')
def test_job_deduplication_check(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
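        # Release the deduplicated common-job and project1-job;
        # project2-job remains held.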
self.executor_server.release('common-job')
self.executor_server.release('project1-job')
self.waitUntilSettled()
        # Although this event results in no changes, it forces an
        # extra pipeline processing run so we can verify that the
        # item is not garbage collected early.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.release('project2-job')
self.waitUntilSettled()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"),
# This is deduplicated
# dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 3)
# Make sure there are no leaked queue items
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
pipeline = tenant.layout.pipelines["check"]
pipeline_path = pipeline.state.getPath()
all_items = set(self.zk_client.client.get_children(
f"{pipeline_path}/item"))
self.assertEqual(len(all_items), 0)
def test_submitted_together(self):
self.fake_gerrit._fake_submit_whole_topic = True
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
topic='test-topic')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
def test_submitted_together_git(self):
self.fake_gerrit._fake_submit_whole_topic = True
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A")
B = self.fake_gerrit.addFakeChange('org/project1', "master", "B")
C = self.fake_gerrit.addFakeChange('org/project1', "master", "C")
D = self.fake_gerrit.addFakeChange('org/project1', "master", "D")
E = self.fake_gerrit.addFakeChange('org/project1', "master", "E")
F = self.fake_gerrit.addFakeChange('org/project1', "master", "F")
G = self.fake_gerrit.addFakeChange('org/project1', "master", "G")
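        # Chain the changes via git parents: A <- B <- C <- D <- E <- F <- G.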
G.setDependsOn(F, 1)
F.setDependsOn(E, 1)
E.setDependsOn(D, 1)
D.setDependsOn(C, 1)
C.setDependsOn(B, 1)
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(A.queried, 1)
self.assertEqual(B.queried, 1)
self.assertEqual(C.queried, 1)
self.assertEqual(D.queried, 1)
self.assertEqual(E.queried, 1)
self.assertEqual(F.queried, 1)
self.assertEqual(G.queried, 1)
self.assertHistory([
dict(name="project1-job", result="SUCCESS",
changes="1,1 2,1 3,1"),
dict(name="project-vars-job", result="SUCCESS",
changes="1,1 2,1 3,1"),
], ordered=False)
def test_submitted_together_git_topic(self):
self.fake_gerrit._fake_submit_whole_topic = True
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project1', "master", "B",
topic='test-topic')
C = self.fake_gerrit.addFakeChange('org/project1', "master", "C",
topic='test-topic')
D = self.fake_gerrit.addFakeChange('org/project1', "master", "D",
topic='test-topic')
E = self.fake_gerrit.addFakeChange('org/project1', "master", "E",
topic='test-topic')
F = self.fake_gerrit.addFakeChange('org/project1', "master", "F",
topic='test-topic')
G = self.fake_gerrit.addFakeChange('org/project1', "master", "G",
topic='test-topic')
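        # The same git parent chain as above, but all changes share a topic.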
G.setDependsOn(F, 1)
F.setDependsOn(E, 1)
E.setDependsOn(D, 1)
D.setDependsOn(C, 1)
C.setDependsOn(B, 1)
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(A.queried, 8)
self.assertEqual(B.queried, 8)
self.assertEqual(C.queried, 8)
self.assertEqual(D.queried, 8)
self.assertEqual(E.queried, 8)
self.assertEqual(F.queried, 8)
self.assertEqual(G.queried, 8)
self.assertHistory([
dict(name="project1-job", result="SUCCESS",
changes="7,1 6,1 5,1 4,1 1,1 2,1 3,1"),
dict(name="project-vars-job", result="SUCCESS",
changes="7,1 6,1 5,1 4,1 1,1 2,1 3,1"),
], ordered=False)
@simple_layout('layouts/submitted-together-per-branch.yaml')
def test_submitted_together_per_branch(self):
self.fake_gerrit._fake_submit_whole_topic = True
self.create_branch('org/project2', 'stable/foo')
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project2', "stable/foo", "B",
topic='test-topic')
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 0)
self.assertEqual(B.reported, 1)
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
self.assertIn("does not share a change queue", B.messages[-1])
@simple_layout('layouts/deps-by-topic.yaml')
def test_deps_by_topic(self):
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
topic='test-topic')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
@simple_layout('layouts/deps-by-topic.yaml')
def test_deps_by_topic_git_needs(self):
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
topic='test-topic')
C = self.fake_gerrit.addFakeChange('org/project2', "master", "C",
topic='other-topic')
D = self.fake_gerrit.addFakeChange('org/project1', "master", "D",
topic='other-topic')
# Git level dependency between B and C
B.setDependsOn(C, 1)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(D.patchsets[-1]["approvals"]), 1)
self.assertEqual(D.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(D.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
        # enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
C.addApproval("Code-Review", 2)
D.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
C.addApproval("Approved", 1)
D.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(C.reported, 3)
self.assertEqual(D.reported, 3)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(C.data["status"], "MERGED")
self.assertEqual(D.data["status"], "MERGED")
@simple_layout('layouts/deps-by-topic.yaml')
def test_deps_by_topic_new_patchset(self):
# Make sure that we correctly update the change cache on new
# patchsets.
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
topic='test-topic')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertHistory([
dict(name="check-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="check-job", result="SUCCESS", changes="1,1 2,1"),
], ordered=False)
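        # Upload a new patchset of A; the cycle should be retested
        # with the updated change.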
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertHistory([
# Original check run
dict(name="check-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="check-job", result="SUCCESS", changes="1,1 2,1"),
# Second check run
dict(name="check-job", result="SUCCESS", changes="2,1 1,2"),
], ordered=False)
def test_deps_by_topic_multi_tenant(self):
A = self.fake_gerrit.addFakeChange('org/project5', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project6', "master", "B",
topic='test-topic')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
# We're about to add approvals to changes without adding the
# triggering events to Zuul, so that we can be sure that it is
# enqueuing the changes based on dependencies, not because of
# triggering events. Since it will have the changes cached
# already (without approvals), we need to clear the cache
# first.
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 4)
self.assertEqual(B.reported, 4)
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
self.assertHistory([
# Check
dict(name="project5-job-t1", result="SUCCESS", changes="1,1"),
dict(name="project6-job-t1", result="SUCCESS", changes="2,1"),
dict(name="project5-job-t2", result="SUCCESS", changes="2,1 1,1"),
dict(name="project6-job-t2", result="SUCCESS", changes="1,1 2,1"),
# Gate
dict(name="project5-job-t2", result="SUCCESS", changes="1,1 2,1"),
dict(name="project6-job-t2", result="SUCCESS", changes="1,1 2,1"),
], ordered=False)
def test_dependency_refresh(self):
# Test that when two changes are put into a cycle, the
# dependencies are refreshed and items already in pipelines
# are updated.
self.executor_server.hold_jobs_in_build = True
# This simulates the typical workflow where a developer only
# knows the change id of changes one at a time.
# The first change:
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Now that it has been uploaded, upload the second change and
# point it at the first.
# B -> A
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Now that the second change is known, update the first change
# B <-> A
A.addPatchset()
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name="project-job", result="ABORTED", changes="1,1"),
dict(name="project-job", result="ABORTED", changes="1,1 2,1"),
dict(name="project-job", result="SUCCESS", changes="1,2 2,1"),
dict(name="project-job", result="SUCCESS", changes="2,1 1,2"),
], ordered=False)
@simple_layout('layouts/deps-by-topic.yaml')
def test_dependency_refresh_by_topic_check(self):
# Test that when two changes are put into a cycle, the
# dependencies are refreshed and items already in pipelines
# are updated.
self.executor_server.hold_jobs_in_build = True
# This simulates the typical workflow where a developer
# uploads changes one at a time.
# The first change:
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Now that it has been uploaded, upload the second change
# in the same topic.
B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
topic='test-topic')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name="check-job", result="ABORTED", changes="1,1"),
dict(name="check-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="check-job", result="SUCCESS", changes="1,1 2,1"),
], ordered=False)
@simple_layout('layouts/deps-by-topic.yaml')
def test_dependency_refresh_by_topic_gate(self):
# Test that when two changes are put into a cycle, the
# dependencies are refreshed and items already in pipelines
# are updated.
self.executor_server.hold_jobs_in_build = True
# This simulates a workflow where a developer adds a change to
# a cycle already in gate.
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
topic='test-topic')
B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
topic='test-topic')
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
A.addApproval("Approved", 1)
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
# Add a new change to the cycle.
C = self.fake_gerrit.addFakeChange('org/project1', "master", "C",
topic='test-topic')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# At the end of this process, the gate jobs should be aborted
        # because the new dependency showed up.
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
self.assertEqual(C.data["status"], "NEW")
self.assertHistory([
dict(name="gate-job", result="ABORTED", changes="1,1 2,1"),
dict(name="gate-job", result="ABORTED", changes="1,1 2,1"),
dict(name="check-job", result="SUCCESS", changes="2,1 1,1 3,1"),
], ordered=False)
class TestGithubCircularDependencies(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
tenant_config_file = "config/circular-dependencies/main.yaml"
scheduler_count = 1
def test_cycle_not_ready(self):
A = self.fake_github.openFakePullRequest("gh/project", "master", "A")
B = self.fake_github.openFakePullRequest("gh/project1", "master", "B")
C = self.fake_github.openFakePullRequest("gh/project1", "master", "C")
A.addReview('derp', 'APPROVED')
B.addReview('derp', 'APPROVED')
B.addLabel("approved")
C.addReview('derp', 'APPROVED')
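        # Note: C never receives the "approved" label, so the cycle
        # is not ready to be enqueued.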
# A -> B + C (via PR depends)
# B -> A
# C -> A
A.body = "{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
A.subject, B.url, C.url
)
B.body = "{}\n\nDepends-On: {}\n".format(
B.subject, A.url
)
C.body = "{}\n\nDepends-On: {}\n".format(
C.subject, A.url
)
self.fake_github.emitEvent(A.addLabel("approved"))
self.waitUntilSettled()
self.assertEqual(len(A.comments), 0)
self.assertEqual(len(B.comments), 0)
self.assertEqual(len(C.comments), 0)
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
self.assertFalse(C.is_merged)
def test_complex_cycle_not_ready(self):
A = self.fake_github.openFakePullRequest("gh/project", "master", "A")
B = self.fake_github.openFakePullRequest("gh/project1", "master", "B")
C = self.fake_github.openFakePullRequest("gh/project1", "master", "C")
        X = self.fake_github.openFakePullRequest("gh/project1", "master", "X")
        Y = self.fake_github.openFakePullRequest("gh/project1", "master", "Y")
A.addReview('derp', 'APPROVED')
A.addLabel("approved")
B.addReview('derp', 'APPROVED')
B.addLabel("approved")
C.addReview('derp', 'APPROVED')
Y.addReview('derp', 'APPROVED')
Y.addLabel("approved")
X.addReview('derp', 'APPROVED')
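        # C never receives the "approved" label; X only gets it via
        # the trigger event below.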
# A -> B + C (via PR depends)
# B -> A
# C -> A
# X -> A + Y
# Y -> X
A.body = "{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
A.subject, B.url, C.url
)
B.body = "{}\n\nDepends-On: {}\n".format(
B.subject, A.url
)
C.body = "{}\n\nDepends-On: {}\n".format(
C.subject, A.url
)
X.body = "{}\n\nDepends-On: {}\nDepends-On: {}\n".format(
X.subject, Y.url, A.url
)
Y.body = "{}\n\nDepends-On: {}\n".format(
Y.subject, X.url
)
self.fake_github.emitEvent(X.addLabel("approved"))
self.waitUntilSettled()
self.assertEqual(len(A.comments), 0)
self.assertEqual(len(B.comments), 0)
self.assertEqual(len(C.comments), 0)
self.assertEqual(len(X.comments), 0)
self.assertEqual(len(Y.comments), 0)
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
self.assertFalse(C.is_merged)
self.assertFalse(X.is_merged)
self.assertFalse(Y.is_merged)
def test_filter_unprotected_branches(self):
"""
Tests that repo state filtering due to excluding unprotected branches
doesn't break builds if the items are targeted against different
branches.
"""
github = self.fake_github.getGithubClient()
self.create_branch('gh/project', 'stable/foo')
github.repo_from_project('gh/project')._set_branch_protection(
'master', True)
github.repo_from_project('gh/project')._set_branch_protection(
'stable/foo', True)
pevent = self.fake_github.getPushEvent(project='gh/project',
ref='refs/heads/stable/foo')
self.fake_github.emitEvent(pevent)
self.create_branch('gh/project1', 'stable/bar')
github.repo_from_project('gh/project1')._set_branch_protection(
'master', True)
github.repo_from_project('gh/project1')._set_branch_protection(
'stable/bar', True)
        pevent = self.fake_github.getPushEvent(project='gh/project1',
                                               ref='refs/heads/stable/bar')
self.fake_github.emitEvent(pevent)
# Wait until push events are processed to pick up branch
# protection settings
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest(
"gh/project", "stable/foo", "A")
B = self.fake_github.openFakePullRequest(
"gh/project1", "stable/bar", "B")
A.addReview('derp', 'APPROVED')
B.addReview('derp', 'APPROVED')
B.addLabel("approved")
# A <-> B
A.body = "{}\n\nDepends-On: {}\n".format(
A.subject, B.url
)
B.body = "{}\n\nDepends-On: {}\n".format(
B.subject, A.url
)
self.fake_github.emitEvent(A.addLabel("approved"))
self.waitUntilSettled()
self.assertEqual(len(A.comments), 2)
self.assertEqual(len(B.comments), 2)
self.assertTrue(A.is_merged)
self.assertTrue(B.is_merged)
def test_cycle_failed_reporting(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest("gh/project", "master", "A")
B = self.fake_github.openFakePullRequest("gh/project1", "master", "B")
A.addReview('derp', 'APPROVED')
B.addReview('derp', 'APPROVED')
B.addLabel("approved")
# A <-> B
A.body = "{}\n\nDepends-On: {}\n".format(
A.subject, B.url
)
B.body = "{}\n\nDepends-On: {}\n".format(
B.subject, A.url
)
self.fake_github.emitEvent(A.addLabel("approved"))
self.waitUntilSettled()
# Change draft status of A so it can no longer merge. Note that we
# don't send an event to test the "github doesn't send an event"
# case.
A.draft = True
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(A.comments), 2)
self.assertEqual(len(B.comments), 2)
self.assertFalse(A.is_merged)
self.assertFalse(B.is_merged)
self.assertIn("part of a bundle that can not merge",
A.comments[-1])
self.assertTrue(
re.search("Change https://github.com/gh/project/pull/1 "
"can not be merged",
A.comments[-1]))
self.assertFalse(re.search('Change .*? is needed',
A.comments[-1]))
self.assertIn("part of a bundle that can not merge",
B.comments[-1])
self.assertTrue(
re.search("Change https://github.com/gh/project/pull/1 "
"can not be merged",
B.comments[-1]))
self.assertFalse(re.search('Change .*? is needed',
B.comments[-1]))
def test_dependency_refresh(self):
# Test that when two changes are put into a cycle, the
# dependencies are refreshed and items already in pipelines
# are updated.
self.executor_server.hold_jobs_in_build = True
# This simulates the typical workflow where a developer only
# knows the PR id of changes one at a time.
# The first change:
A = self.fake_github.openFakePullRequest("gh/project", "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Now that it has been uploaded, upload the second change and
# point it at the first.
# B -> A
B = self.fake_github.openFakePullRequest("gh/project", "master", "B")
B.body = "{}\n\nDepends-On: {}\n".format(
B.subject, A.url
)
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Now that the second change is known, update the first change
# B <-> A
A.body = "{}\n\nDepends-On: {}\n".format(
A.subject, B.url
)
self.fake_github.emitEvent(A.getPullRequestEditedEvent(A.subject))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name="project-job", result="ABORTED",
changes=f"{A.number},{A.head_sha}"),
dict(name="project-job", result="SUCCESS",
changes=f"{A.number},{A.head_sha} {B.number},{B.head_sha}"),
dict(name="project-job", result="SUCCESS",
changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}"),
], ordered=False)
class TestGithubAppCircularDependencies(ZuulGithubAppTestCase):
config_file = "zuul-gerrit-github-app.conf"
tenant_config_file = "config/circular-dependencies/main.yaml"
scheduler_count = 1
def test_dependency_refresh_checks_api(self):
# Test that when two changes are put into a cycle, the
# dependencies are refreshed and items already in pipelines
# are updated and that the Github check-run is still
# in-progress.
self.executor_server.hold_jobs_in_build = True
# This simulates the typical workflow where a developer only
# knows the PR id of changes one at a time.
# The first change:
A = self.fake_github.openFakePullRequest("gh/project", "master", "A")
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Now that it has been uploaded, upload the second change and
# point it at the first.
# B -> A
B = self.fake_github.openFakePullRequest("gh/project", "master", "B")
B.body = "{}\n\nDepends-On: {}\n".format(
B.subject, A.url
)
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Now that the second change is known, update the first change
# B <-> A
A.body = "{}\n\nDepends-On: {}\n".format(
A.subject, B.url
)
self.fake_github.emitEvent(A.getPullRequestEditedEvent(A.subject))
self.waitUntilSettled()
# Validate that the Github check-run is still in progress
# and wasn't cancelled.
check_runs = self.fake_github.getCommitChecks("gh/project", A.head_sha)
self.assertEqual(len(check_runs), 1)
check_run = check_runs[0]
self.assertEqual(check_run["status"], "in_progress")
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name="project-job", result="ABORTED",
changes=f"{A.number},{A.head_sha}"),
dict(name="project-job", result="SUCCESS",
changes=f"{A.number},{A.head_sha} {B.number},{B.head_sha}"),
dict(name="project-job", result="SUCCESS",
changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}"),
], ordered=False)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_circular_dependencies.py
|
test_circular_dependencies.py
|
# Copyright 2018 EasyStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import configparser
from tests.base import BaseTestCase
from tests.base import FIXTURE_DIR
from zuul.lib.config import get_default
class TestDefaultConfigValue(BaseTestCase):
config_file = 'zuul.conf'
def setUp(self):
super(TestDefaultConfigValue, self).setUp()
self.config = configparser.ConfigParser()
self.config.read(os.path.join(FIXTURE_DIR, self.config_file))
def test_default_config_value(self):
default_value = get_default(self.config,
'web',
'static_cache_expiry',
default=3600)
self.assertEqual(1200, default_value)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_default_config.py
|
test_default_config.py
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import collections
import os
import random
import types
from unittest import mock
import fixtures
import testtools
from zuul import model
from zuul import configloader
from zuul.lib import encryption
from zuul.lib import yamlutil as yaml
import zuul.lib.connections
from tests.base import BaseTestCase, FIXTURE_DIR
from zuul.lib.ansible import AnsibleManager
from zuul.lib import tracing
from zuul.model_api import MODEL_API
from zuul.zk.zkobject import LocalZKContext
from zuul.zk.components import COMPONENT_REGISTRY
from zuul import change_matcher
class Dummy(object):
def __init__(self, **kw):
for k, v in kw.items():
setattr(self, k, v)
class TestJob(BaseTestCase):
def setUp(self):
COMPONENT_REGISTRY.registry = Dummy()
COMPONENT_REGISTRY.registry.model_api = MODEL_API
        # Toss a % into an env var to trigger the configparser
        # interpolation issue.
        self._env_fixture = self.useFixture(
            fixtures.EnvironmentVariable('HISTTIMEFORMAT', '%Y-%m-%dT%T%z '))
        super(TestJob, self).setUp()
self.connections = zuul.lib.connections.ConnectionRegistry()
self.addCleanup(self.connections.stop)
self.connection = Dummy(connection_name='dummy_connection')
self.source = Dummy(canonical_hostname='git.example.com',
connection=self.connection)
self.abide = model.Abide()
self.tenant = model.Tenant('tenant')
self.tenant.default_ansible_version = AnsibleManager().default_version
self.tenant.semaphore_handler = Dummy(abide=self.abide)
self.layout = model.Layout(self.tenant)
self.tenant.layout = self.layout
self.project = model.Project('project', self.source)
self.context = model.SourceContext(
self.project.canonical_name, self.project.name,
self.project.connection_name, 'master', 'test', True)
self.untrusted_context = model.SourceContext(
self.project.canonical_name, self.project.name,
self.project.connection_name, 'master', 'test', False)
self.tpc = model.TenantProjectConfig(self.project)
self.tenant.addUntrustedProject(self.tpc)
self.pipeline = model.Pipeline('gate', self.tenant)
self.pipeline.source_context = self.context
self.pipeline.manager = mock.Mock()
self.pipeline.tenant = self.tenant
self.zk_context = LocalZKContext(self.log)
self.pipeline.manager.current_context = self.zk_context
self.pipeline.state = model.PipelineState()
self.pipeline.state._set(pipeline=self.pipeline)
self.layout.addPipeline(self.pipeline)
with self.zk_context as ctx:
self.queue = model.ChangeQueue.new(
ctx, pipeline=self.pipeline)
self.pcontext = configloader.ParseContext(
self.connections, None, self.tenant, AnsibleManager())
private_key_file = os.path.join(FIXTURE_DIR, 'private.pem')
with open(private_key_file, "rb") as f:
priv, pub = encryption.deserialize_rsa_keypair(f.read())
self.project.private_secrets_key = priv
self.project.public_secrets_key = pub
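        # A dummy YAML mark used as the start mark when parsing
        # config snippets in these tests.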
m = yaml.Mark('name', 0, 0, 0, '', 0)
self.start_mark = model.ZuulMark(m, m, '')
config = configparser.ConfigParser()
self.tracing = tracing.Tracing(config)
@property
def job(self):
job = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'job',
'parent': None,
'irrelevant-files': [
'^docs/.*$'
]})
return job
def test_change_matches_returns_false_for_matched_skip_if(self):
change = model.Change('project')
change.files = ['/COMMIT_MSG', 'docs/foo']
self.assertFalse(self.job.changeMatchesFiles(change))
def test_change_matches_returns_false_for_single_matched_skip_if(self):
change = model.Change('project')
change.files = ['docs/foo']
self.assertFalse(self.job.changeMatchesFiles(change))
def test_change_matches_returns_true_for_unmatched_skip_if(self):
change = model.Change('project')
change.files = ['/COMMIT_MSG', 'foo']
self.assertTrue(self.job.changeMatchesFiles(change))
def test_change_matches_returns_true_for_single_unmatched_skip_if(self):
change = model.Change('project')
change.files = ['foo']
self.assertTrue(self.job.changeMatchesFiles(change))
def test_job_sets_defaults_for_boolean_attributes(self):
self.assertIsNotNone(self.job.voting)
def test_job_variants(self):
# This simulates freezing a job.
secrets = ['foo']
py27_pre = model.PlaybookContext(
self.context, 'py27-pre', [], secrets, [])
py27_run = model.PlaybookContext(
self.context, 'py27-run', [], secrets, [])
py27_post = model.PlaybookContext(
self.context, 'py27-post', [], secrets, [])
py27 = model.Job('py27')
py27.timeout = 30
py27.pre_run = (py27_pre,)
py27.run = (py27_run,)
py27.post_run = (py27_post,)
job = py27.copy()
self.assertEqual(30, job.timeout)
# Apply the diablo variant
diablo = model.Job('py27')
diablo.timeout = 40
job.applyVariant(diablo, self.layout, None)
self.assertEqual(40, job.timeout)
self.assertEqual(['py27-pre'],
[x.path for x in job.pre_run])
self.assertEqual(['py27-run'],
[x.path for x in job.run])
self.assertEqual(['py27-post'],
[x.path for x in job.post_run])
self.assertEqual(secrets, job.pre_run[0].secrets)
self.assertEqual(secrets, job.run[0].secrets)
self.assertEqual(secrets, job.post_run[0].secrets)
# Set the job to final for the following checks
job.final = True
self.assertTrue(job.voting)
good_final = model.Job('py27')
good_final.voting = False
job.applyVariant(good_final, self.layout, None)
self.assertFalse(job.voting)
bad_final = model.Job('py27')
bad_final.timeout = 600
with testtools.ExpectedException(
Exception,
"Unable to modify final job"):
job.applyVariant(bad_final, self.layout, None)
@mock.patch("zuul.model.zkobject.ZKObject._save")
def test_job_inheritance_job_tree(self, save_mock):
base = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'base',
'parent': None,
'timeout': 30,
})
self.layout.addJob(base)
python27 = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'python27',
'parent': 'base',
'timeout': 40,
})
self.layout.addJob(python27)
python27diablo = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'python27',
'branches': [
'stable/diablo'
],
'timeout': 50,
})
self.layout.addJob(python27diablo)
project_config = self.pcontext.project_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'project',
'gate': {
'jobs': [
{'python27': {'timeout': 70,
'run': 'playbooks/python27.yaml'}}
]
}
})
self.layout.addProjectConfig(project_config)
change = model.Change(self.project)
change.branch = 'master'
item = self.queue.enqueueChange(change, None)
self.assertTrue(base.changeMatchesBranch(change))
self.assertTrue(python27.changeMatchesBranch(change))
self.assertFalse(python27diablo.changeMatchesBranch(change))
with self.zk_context as ctx:
item.freezeJobGraph(self.layout, ctx,
skip_file_matcher=False,
redact_secrets_and_keys=False)
self.assertEqual(len(item.getJobs()), 1)
job = item.getJobs()[0]
self.assertEqual(job.name, 'python27')
self.assertEqual(job.timeout, 70)
change.branch = 'stable/diablo'
item = self.queue.enqueueChange(change, None)
self.assertTrue(base.changeMatchesBranch(change))
self.assertTrue(python27.changeMatchesBranch(change))
self.assertTrue(python27diablo.changeMatchesBranch(change))
with self.zk_context as ctx:
item.freezeJobGraph(self.layout, ctx,
skip_file_matcher=False,
redact_secrets_and_keys=False)
self.assertEqual(len(item.getJobs()), 1)
job = item.getJobs()[0]
self.assertEqual(job.name, 'python27')
self.assertEqual(job.timeout, 70)
@mock.patch("zuul.model.zkobject.ZKObject._save")
def test_inheritance_keeps_matchers(self, save_mock):
base = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'base',
'parent': None,
'timeout': 30,
})
self.layout.addJob(base)
python27 = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'python27',
'parent': 'base',
'timeout': 40,
'irrelevant-files': ['^ignored-file$'],
})
self.layout.addJob(python27)
project_config = self.pcontext.project_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'project',
'gate': {
'jobs': [
'python27',
]
}
})
self.layout.addProjectConfig(project_config)
change = model.Change(self.project)
change.branch = 'master'
change.files = ['/COMMIT_MSG', 'ignored-file']
item = self.queue.enqueueChange(change, None)
self.assertTrue(base.changeMatchesFiles(change))
self.assertFalse(python27.changeMatchesFiles(change))
self.pipeline.manager.getFallbackLayout = mock.Mock(return_value=None)
with self.zk_context as ctx:
item.freezeJobGraph(self.layout, ctx,
skip_file_matcher=False,
redact_secrets_and_keys=False)
self.assertEqual([], item.getJobs())
def test_job_source_project(self):
base_project = model.Project('base_project', self.source)
base_context = model.SourceContext(
base_project.canonical_name, base_project.name,
base_project.connection_name, 'master', 'test', True)
tpc = model.TenantProjectConfig(base_project)
self.tenant.addUntrustedProject(tpc)
base = self.pcontext.job_parser.fromYaml({
'_source_context': base_context,
'_start_mark': self.start_mark,
'parent': None,
'name': 'base',
})
self.layout.addJob(base)
other_project = model.Project('other_project', self.source)
other_context = model.SourceContext(
other_project.canonical_name, other_project.name,
other_project.connection_name, 'master', 'test', True)
tpc = model.TenantProjectConfig(other_project)
self.tenant.addUntrustedProject(tpc)
base2 = self.pcontext.job_parser.fromYaml({
'_source_context': other_context,
'_start_mark': self.start_mark,
'name': 'base',
})
with testtools.ExpectedException(
Exception,
"Job base in other_project is not permitted "
"to shadow job base in base_project"):
self.layout.addJob(base2)
@mock.patch("zuul.model.zkobject.ZKObject._save")
def test_job_pipeline_allow_untrusted_secrets(self, save_mock):
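        # A post-review job must not be allowed to run in a
        # pre-review pipeline.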
self.pipeline.post_review = False
job = self.pcontext.job_parser.fromYaml({
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'job',
'parent': None,
'post-review': True
})
self.layout.addJob(job)
project_config = self.pcontext.project_parser.fromYaml(
{
'_source_context': self.context,
'_start_mark': self.start_mark,
'name': 'project',
'gate': {
'jobs': [
'job'
]
}
}
)
self.layout.addProjectConfig(project_config)
change = model.Change(self.project)
# Test master
change.branch = 'master'
item = self.queue.enqueueChange(change, None)
with testtools.ExpectedException(
Exception,
"Pre-review pipeline gate does not allow post-review job"):
with self.zk_context as ctx:
item.freezeJobGraph(self.layout, ctx,
skip_file_matcher=False,
redact_secrets_and_keys=False)
class TestGraph(BaseTestCase):
def test_job_graph_disallows_multiple_jobs_with_same_name(self):
graph = model.JobGraph({})
job1 = model.Job('job')
job2 = model.Job('job')
graph.addJob(job1)
with testtools.ExpectedException(Exception,
"Job job already added"):
graph.addJob(job2)
def test_job_graph_disallows_circular_dependencies(self):
graph = model.JobGraph({})
jobs = [model.Job('job%d' % i) for i in range(0, 10)]
prevjob = None
for j in jobs[:3]:
if prevjob:
j.dependencies = frozenset([
model.JobDependency(prevjob.name)])
graph.addJob(j)
prevjob = j
# 0 triggers 1 triggers 2 triggers 3...
# Cannot depend on itself
with testtools.ExpectedException(
Exception,
"Dependency cycle detected in job jobX"):
j = model.Job('jobX')
j.dependencies = frozenset([model.JobDependency(j.name)])
graph.addJob(j)
# Disallow circular dependencies
with testtools.ExpectedException(
Exception,
"Dependency cycle detected in job job3"):
jobs[4].dependencies = frozenset([
model.JobDependency(jobs[3].name)])
graph.addJob(jobs[4])
jobs[3].dependencies = frozenset([
model.JobDependency(jobs[4].name)])
graph.addJob(jobs[3])
jobs[5].dependencies = frozenset([model.JobDependency(jobs[4].name)])
graph.addJob(jobs[5])
with testtools.ExpectedException(
Exception,
"Dependency cycle detected in job job3"):
jobs[3].dependencies = frozenset([
model.JobDependency(jobs[5].name)])
graph.addJob(jobs[3])
jobs[3].dependencies = frozenset([
model.JobDependency(jobs[2].name)])
graph.addJob(jobs[3])
jobs[6].dependencies = frozenset([
model.JobDependency(jobs[2].name)])
graph.addJob(jobs[6])
def test_job_graph_allows_soft_dependencies(self):
parent = model.Job('parent')
child = model.Job('child')
child.dependencies = frozenset([
model.JobDependency(parent.name, True)])
# With the parent
graph = model.JobGraph({})
graph.addJob(parent)
graph.addJob(child)
self.assertEqual(graph.getParentJobsRecursively(child.name),
[parent])
# Skip the parent
graph = model.JobGraph({})
graph.addJob(child)
self.assertEqual(graph.getParentJobsRecursively(child.name), [])
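# Semantics exercised above and in the next test: the second argument
# to JobDependency marks the dependency as "soft". A job with only a
# soft dependency can still run when that parent is absent from the
# graph, and an absent soft parent's own (recursive) dependencies are
# not pulled in either.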
def test_job_graph_allows_soft_dependencies4(self):
# A more complex scenario with multiple parents at each level
parents = [model.Job('parent%i' % i) for i in range(6)]
child = model.Job('child')
child.dependencies = frozenset([
model.JobDependency(parents[0].name, True),
model.JobDependency(parents[1].name)])
parents[0].dependencies = frozenset([
model.JobDependency(parents[2].name),
model.JobDependency(parents[3].name, True)])
parents[1].dependencies = frozenset([
model.JobDependency(parents[4].name),
model.JobDependency(parents[5].name)])
# Run them all
graph = model.JobGraph({})
for j in parents:
graph.addJob(j)
graph.addJob(child)
self.assertEqual(set(graph.getParentJobsRecursively(child.name)),
set(parents))
# Skip first parent, therefore its recursive dependencies don't appear
graph = model.JobGraph({})
for j in parents:
if j is not parents[0]:
graph.addJob(j)
graph.addJob(child)
self.assertEqual(set(graph.getParentJobsRecursively(child.name)),
set(parents) -
set([parents[0], parents[2], parents[3]]))
# Skip a leaf node
graph = model.JobGraph({})
for j in parents:
if j is not parents[3]:
graph.addJob(j)
graph.addJob(child)
self.assertEqual(set(graph.getParentJobsRecursively(child.name)),
set(parents) - set([parents[3]]))
class TestTenant(BaseTestCase):
def test_add_project(self):
tenant = model.Tenant('tenant')
connection1 = Dummy(connection_name='dummy_connection1')
source1 = Dummy(canonical_hostname='git1.example.com',
name='dummy', # TODOv3(jeblair): remove
connection=connection1)
source1_project1 = model.Project('project1', source1)
source1_project1_tpc = model.TenantProjectConfig(source1_project1)
tenant.addConfigProject(source1_project1_tpc)
d = {'project1':
{'git1.example.com': source1_project1}}
self.assertEqual(d, tenant.projects)
self.assertEqual((True, source1_project1),
tenant.getProject('project1'))
self.assertEqual((True, source1_project1),
tenant.getProject('git1.example.com/project1'))
source1_project2 = model.Project('project2', source1)
tpc = model.TenantProjectConfig(source1_project2)
tenant.addUntrustedProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1},
'project2':
{'git1.example.com': source1_project2}}
self.assertEqual(d, tenant.projects)
self.assertEqual((False, source1_project2),
tenant.getProject('project2'))
self.assertEqual((False, source1_project2),
tenant.getProject('git1.example.com/project2'))
connection2 = Dummy(connection_name='dummy_connection2')
source2 = Dummy(canonical_hostname='git2.example.com',
name='dummy', # TODOv3(jeblair): remove
connection=connection2)
source2_project1 = model.Project('project1', source2)
tpc = model.TenantProjectConfig(source2_project1)
tenant.addUntrustedProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
'project2':
{'git1.example.com': source1_project2}}
self.assertEqual(d, tenant.projects)
with testtools.ExpectedException(
Exception,
"Project name 'project1' is ambiguous"):
tenant.getProject('project1')
self.assertEqual((False, source1_project2),
tenant.getProject('project2'))
self.assertEqual((True, source1_project1),
tenant.getProject('git1.example.com/project1'))
self.assertEqual((False, source2_project1),
tenant.getProject('git2.example.com/project1'))
source2_project2 = model.Project('project2', source2)
tpc = model.TenantProjectConfig(source2_project2)
tenant.addConfigProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
'project2':
{'git1.example.com': source1_project2,
'git2.example.com': source2_project2}}
self.assertEqual(d, tenant.projects)
with testtools.ExpectedException(
Exception,
"Project name 'project1' is ambiguous"):
tenant.getProject('project1')
with testtools.ExpectedException(
Exception,
"Project name 'project2' is ambiguous"):
tenant.getProject('project2')
self.assertEqual((True, source1_project1),
tenant.getProject('git1.example.com/project1'))
self.assertEqual((False, source2_project1),
tenant.getProject('git2.example.com/project1'))
self.assertEqual((False, source1_project2),
tenant.getProject('git1.example.com/project2'))
self.assertEqual((True, source2_project2),
tenant.getProject('git2.example.com/project2'))
source1_project2b = model.Project('subpath/project2', source1)
tpc = model.TenantProjectConfig(source1_project2b)
tenant.addConfigProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
'project2':
{'git1.example.com': source1_project2,
'git2.example.com': source2_project2},
'subpath/project2':
{'git1.example.com': source1_project2b}}
self.assertEqual(d, tenant.projects)
self.assertEqual((False, source1_project2),
tenant.getProject('git1.example.com/project2'))
self.assertEqual((True, source2_project2),
tenant.getProject('git2.example.com/project2'))
self.assertEqual((True, source1_project2b),
tenant.getProject('subpath/project2'))
self.assertEqual(
(True, source1_project2b),
tenant.getProject('git1.example.com/subpath/project2'))
source2_project2b = model.Project('subpath/project2', source2)
tpc = model.TenantProjectConfig(source2_project2b)
tenant.addConfigProject(tpc)
d = {'project1':
{'git1.example.com': source1_project1,
'git2.example.com': source2_project1},
'project2':
{'git1.example.com': source1_project2,
'git2.example.com': source2_project2},
'subpath/project2':
{'git1.example.com': source1_project2b,
'git2.example.com': source2_project2b}}
self.assertEqual(d, tenant.projects)
self.assertEqual((False, source1_project2),
tenant.getProject('git1.example.com/project2'))
self.assertEqual((True, source2_project2),
tenant.getProject('git2.example.com/project2'))
with testtools.ExpectedException(
Exception,
"Project name 'subpath/project2' is ambiguous"):
tenant.getProject('subpath/project2')
self.assertEqual(
(True, source1_project2b),
tenant.getProject('git1.example.com/subpath/project2'))
self.assertEqual(
(True, source2_project2b),
tenant.getProject('git2.example.com/subpath/project2'))
with testtools.ExpectedException(
Exception,
"Project project1 is already in project index"):
tenant._addProject(source1_project1_tpc)
class TestFreezable(BaseTestCase):
def test_freezable_object(self):
o = model.Freezable()
o.foo = 1
o.list = []
o.dict = {}
o.odict = collections.OrderedDict()
o.odict2 = collections.OrderedDict()
o1 = model.Freezable()
o1.foo = 1
l1 = [1]
d1 = {'foo': 1}
od1 = {'foo': 1}
o.list.append(o1)
o.list.append(l1)
o.list.append(d1)
o.list.append(od1)
o2 = model.Freezable()
o2.foo = 1
l2 = [1]
d2 = {'foo': 1}
od2 = {'foo': 1}
o.dict['o'] = o2
o.dict['l'] = l2
o.dict['d'] = d2
o.dict['od'] = od2
o3 = model.Freezable()
o3.foo = 1
l3 = [1]
d3 = {'foo': 1}
od3 = {'foo': 1}
o.odict['o'] = o3
o.odict['l'] = l3
o.odict['d'] = d3
o.odict['od'] = od3
seq = list(range(1000))
random.shuffle(seq)
for x in seq:
o.odict2[x] = x
o.freeze()
with testtools.ExpectedException(Exception, "Unable to modify frozen"):
o.bar = 2
with testtools.ExpectedException(AttributeError, "'tuple' object"):
o.list.append(2)
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.dict['bar'] = 2
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.odict['bar'] = 2
with testtools.ExpectedException(Exception, "Unable to modify frozen"):
o1.bar = 2
with testtools.ExpectedException(Exception, "Unable to modify frozen"):
o.list[0].bar = 2
with testtools.ExpectedException(AttributeError, "'tuple' object"):
o.list[1].append(2)
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.list[2]['bar'] = 2
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.list[3]['bar'] = 2
with testtools.ExpectedException(Exception, "Unable to modify frozen"):
o2.bar = 2
with testtools.ExpectedException(Exception, "Unable to modify frozen"):
o.dict['o'].bar = 2
with testtools.ExpectedException(AttributeError, "'tuple' object"):
o.dict['l'].append(2)
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.dict['d']['bar'] = 2
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.dict['od']['bar'] = 2
with testtools.ExpectedException(Exception, "Unable to modify frozen"):
o3.bar = 2
with testtools.ExpectedException(Exception, "Unable to modify frozen"):
o.odict['o'].bar = 2
with testtools.ExpectedException(AttributeError, "'tuple' object"):
o.odict['l'].append(2)
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.odict['d']['bar'] = 2
with testtools.ExpectedException(TypeError, "'mappingproxy' object"):
o.odict['od']['bar'] = 2
# Make sure that mapping proxy applied to an ordered dict
# still shows the ordered behavior.
self.assertTrue(isinstance(o.odict2, types.MappingProxyType))
self.assertEqual(list(o.odict2.keys()), seq)
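# The behavior verified above follows the standard recursive-freeze
# pattern: mutable containers are replaced with immutable views when
# freeze() is called. A minimal sketch of the idea (an illustration,
# not Zuul's actual implementation):
#
#     import types
#
#     def _freeze(value):
#         if isinstance(value, list):
#             return tuple(_freeze(v) for v in value)
#         if isinstance(value, dict):
#             return types.MappingProxyType(
#                 {k: _freeze(v) for k, v in value.items()})
#         return value
#
# types.MappingProxyType is a read-only view that preserves the
# underlying dict's insertion order, which is what the odict2
# assertions check.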
class TestRef(BaseTestCase):
def test_ref_equality(self):
change1 = model.Change('project1')
change1.ref = '/change1'
change1b = model.Change('project1')
change1b.ref = '/change1'
change2 = model.Change('project2')
change2.ref = '/change2'
self.assertFalse(change1.equals(change2))
self.assertTrue(change1.equals(change1b))
tag1 = model.Tag('project1')
tag1.ref = '/tag1'
tag1b = model.Tag('project1')
tag1b.ref = '/tag1'
tag2 = model.Tag('project2')
tag2.ref = '/tag2'
self.assertFalse(tag1.equals(tag2))
self.assertTrue(tag1.equals(tag1b))
self.assertFalse(tag1.equals(change1))
branch1 = model.Branch('project1')
branch1.ref = '/branch1'
branch1b = model.Branch('project1')
branch1b.ref = '/branch1'
branch2 = model.Branch('project2')
branch2.ref = '/branch2'
self.assertFalse(branch1.equals(branch2))
self.assertTrue(branch1.equals(branch1b))
self.assertFalse(branch1.equals(change1))
self.assertFalse(branch1.equals(tag1))
class TestSourceContext(BaseTestCase):
def setUp(self):
super().setUp()
self.connection = Dummy(connection_name='dummy_connection')
self.source = Dummy(canonical_hostname='git.example.com',
connection=self.connection)
self.project = model.Project('project', self.source)
self.context = model.SourceContext(
self.project.canonical_name, self.project.name,
self.project.connection_name, 'master', 'test', True)
self.context.implied_branches = [
change_matcher.BranchMatcher('foo'),
change_matcher.ImpliedBranchMatcher('foo'),
]
def test_serialize(self):
self.context.deserialize(self.context.serialize())
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_model.py
|
test_model.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Wikimedia Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import logging
import os
import shutil
from unittest import mock
import git
import testtools
from zuul.merger.merger import MergerTree, Repo
import zuul.model
from zuul.model import MergeRequest
from tests.base import (
BaseTestCase, ZuulTestCase, FIXTURE_DIR, simple_layout, iterate_timeout
)
class TestMergerRepo(ZuulTestCase):
log = logging.getLogger("zuul.test.merger.repo")
tenant_config_file = 'config/single-tenant/main.yaml'
workspace_root = None
def setUp(self):
super(TestMergerRepo, self).setUp()
self.workspace_root = os.path.join(self.test_root, 'workspace')
def test_create_head_path(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
parent_repo = git.Repo(parent_path)
parent_repo.create_head("refs/heads/foobar")
parent_repo.create_head("refs/heads/refs/heads/foobar")
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
repo = work_repo.createRepoObject(None)
self.assertIn('foobar', repo.branches)
self.assertIn('refs/heads/foobar', repo.branches)
self.assertNotIn('refs/heads/refs/heads/foobar', repo.branches)
def test_create_head_at_char(self):
"""Test that we can create branches containing the '@' char.
This is a regression test to make sure we are not using GitPython
APIs that interpret the '@' as a special char.
"""
parent_path = os.path.join(self.upstream_root, 'org/project1')
parent_repo = git.Repo(parent_path)
parent_repo.create_head("refs/heads/foo@bar")
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
repo = work_repo.createRepoObject(None)
self.assertIn('foo@bar', repo.branches)
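# Background for the regression above: '@' is special in git revision
# syntax (e.g. "branch@{upstream}", "HEAD@{1}"), so APIs that pass a
# branch name through rev-parse can misread names containing '@'; the
# branch has to be handled as a plain ref instead.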
def test_ensure_cloned(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
# Forge a repo having a submodule
parent_repo = git.Repo(parent_path)
parent_repo.git(c='protocol.file.allow=always').submodule(
'add',
os.path.join(self.upstream_root, 'org/project2'),
'subdir')
parent_repo.index.commit('Adding project2 as a submodule in subdir')
# git 1.7.8 changed .git from being a directory to a file pointing
# to the parent repository /.git/modules/*
self.assertTrue(os.path.exists(
os.path.join(parent_path, 'subdir', '.git')),
msg='.git file in submodule should be a file')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
self.assertTrue(
os.path.isdir(os.path.join(self.workspace_root, 'subdir')),
msg='Cloned repository has a submodule placeholder directory')
self.assertFalse(os.path.exists(
os.path.join(self.workspace_root, 'subdir', '.git')),
msg='Submodule is not initialized')
sub_repo = Repo(
os.path.join(self.upstream_root, 'org/project2'),
os.path.join(self.workspace_root, 'subdir'),
'[email protected]', 'User Name', '0', '0')
self.assertTrue(os.path.exists(
os.path.join(self.workspace_root, 'subdir', '.git')),
msg='Cloned over the submodule placeholder')
self.assertEqual(
os.path.join(self.upstream_root, 'org/project1'),
work_repo.createRepoObject(None).remotes[0].url,
message="Parent clone still point to upstream project1")
self.assertEqual(
os.path.join(self.upstream_root, 'org/project2'),
sub_repo.createRepoObject(None).remotes[0].url,
message="Sub repository points to upstream project2")
def test_repo_reset_branch_conflict(self):
"""Test correct reset with conflicting branch names"""
parent_path = os.path.join(self.upstream_root, 'org/project1')
parent_repo = git.Repo(parent_path)
parent_repo.create_head("foobar")
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
# Checkout branch that will be deleted from the remote repo
work_repo.checkout("foobar")
# Delete remote branch and create a branch that conflicts with
# the branch checked out locally.
parent_repo.delete_head("foobar")
parent_repo.create_head("foobar/sub")
work_repo.update()
work_repo.reset()
work_repo.checkout("foobar/sub")
# Try the reverse conflict
parent_path = os.path.join(self.upstream_root, 'org/project2')
parent_repo = git.Repo(parent_path)
parent_repo.create_head("foobar/sub")
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
# Checkout branch that will be deleted from the remote repo
work_repo.checkout("foobar/sub")
# Delete remote branch and create a branch that conflicts with
# the branch checked out locally.
parent_repo.delete_head("foobar/sub")
# Note: Before git 2.13 deleting a ref foo/bar leaves an empty
# directory foo behind that will block creating the reference foo
# in the future. As a workaround we must clean up empty directories
# in .git/refs.
if parent_repo.git.version_info[:2] < (2, 13):
Repo._cleanup_leaked_ref_dirs(parent_path, None, [])
parent_repo.create_head("foobar")
work_repo.update()
work_repo.reset()
work_repo.checkout("foobar")
def test_rebase_merge_conflict_abort(self):
"""Test that a failed rebase is properly aborted and related
directories are cleaned up."""
parent_path = os.path.join(self.upstream_root, 'org/project1')
parent_repo = git.Repo(parent_path)
parent_repo.create_head("feature")
files = {"test.txt": "master"}
self.create_commit("org/project1", files=files, head="master",
message="Add master file")
files = {"test.txt": "feature"}
self.create_commit("org/project1", files=files, head="feature",
message="Add feature file")
work_repo = Repo(parent_path, self.workspace_root,
"[email protected]", "User Name", "0", "0")
item = {"ref": "refs/heads/feature"}
# We expect the rebase to fail because of a conflict, but the
# rebase will be aborted.
with testtools.ExpectedException(git.exc.GitCommandError):
work_repo.rebaseMerge(item, "master")
# Assert that the failed rebase doesn't leave any temporary
# directories behind.
self.assertFalse(
os.path.exists(f"{work_repo.local_path}/.git/rebase-merge"))
self.assertFalse(
os.path.exists(f"{work_repo.local_path}/.git/rebase-apply"))
def test_rebase_merge_conflict_reset_cleanup(self):
"""Test temporary directories of a failed rebase merge are
removed on repo reset."""
parent_path = os.path.join(self.upstream_root, 'org/project1')
parent_repo = git.Repo(parent_path)
parent_repo.create_head("feature")
files = {"master.txt": "master"}
self.create_commit("org/project1", files=files, head="master",
message="Add master file")
files = {"feature.txt": "feature"}
self.create_commit("org/project1", files=files, head="feature",
message="Add feature file")
work_repo = Repo(parent_path, self.workspace_root,
"[email protected]", "User Name", "0", "0")
# Simulate leftovers from a failed rebase
os.mkdir(f"{work_repo.local_path}/.git/rebase-merge")
os.mkdir(f"{work_repo.local_path}/.git/rebase-apply")
# Resetting the repo should clean up any leaked directories
work_repo.reset()
item = {"ref": "refs/heads/feature"}
work_repo.rebaseMerge(item, "master")
def test_set_refs(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
remote_sha = self.create_commit('org/project1')
self.create_branch('org/project1', 'foobar')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
repo = git.Repo(self.workspace_root)
new_sha = repo.heads.foobar.commit.hexsha
work_repo.setRefs({'refs/heads/master': new_sha}, True)
self.assertEqual(work_repo.getBranchHead('master').hexsha, new_sha)
self.assertIn('master', repo.remotes.origin.refs)
work_repo.setRefs({'refs/heads/master': remote_sha})
self.assertEqual(work_repo.getBranchHead('master').hexsha, remote_sha)
self.assertNotIn('master', repo.remotes.origin.refs)
def test_set_remote_ref(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
commit_sha = self.create_commit('org/project1')
self.create_commit('org/project1')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
work_repo.setRemoteRef('master', commit_sha)
# A missing remote ref will be created
work_repo.setRemoteRef('missing', commit_sha)
repo = git.Repo(self.workspace_root)
self.assertEqual(repo.remotes.origin.refs.master.commit.hexsha,
commit_sha)
self.assertEqual(repo.remotes.origin.refs.missing.commit.hexsha,
commit_sha)
def test_clone_timeout(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
self.patch(git.Git, 'GIT_PYTHON_GIT_EXECUTABLE',
os.path.join(FIXTURE_DIR, 'fake_git.sh'))
self.patch(Repo, 'retry_attempts', 1)
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0',
git_timeout=0.001)
# TODO: have the merger and repo classes catch fewer
# exceptions, including this one on initialization. For the
# test, we try cloning again.
with testtools.ExpectedException(git.exc.GitCommandError,
r'.*exit code\(-9\)'):
work_repo._ensure_cloned(None)
def test_fetch_timeout(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
self.patch(Repo, 'retry_attempts', 1)
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
work_repo.git_timeout = 0.001
self.patch(git.Git, 'GIT_PYTHON_GIT_EXECUTABLE',
os.path.join(FIXTURE_DIR, 'fake_git.sh'))
with testtools.ExpectedException(git.exc.GitCommandError,
r'.*exit code\(-9\)'):
work_repo.update()
def test_fetch_retry(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
self.patch(Repo, 'retry_interval', 1)
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
self.patch(git.Git, 'GIT_PYTHON_GIT_EXECUTABLE',
os.path.join(FIXTURE_DIR, 'git_fetch_error.sh'))
work_repo.update()
# This is created on the first fetch
self.assertTrue(os.path.exists(os.path.join(
self.workspace_root, 'stamp1')))
# This is created on the second fetch
self.assertTrue(os.path.exists(os.path.join(
self.workspace_root, 'stamp2')))
def test_deleted_local_ref(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
self.create_branch('org/project1', 'foobar')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
# Delete local ref on the cached repo. This leaves us with a remote
# ref but no local ref anymore.
gitrepo = git.Repo(work_repo.local_path)
gitrepo.delete_head('foobar', force=True)
# Delete the branch upstream.
self.delete_branch('org/project1', 'foobar')
# And now reset the repo again. This should not crash
work_repo.reset()
def test_branch_rename(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
# Clone upstream so that current head is master
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
# Rename master to main in upstream repo
gitrepo = git.Repo(parent_path)
main_branch = gitrepo.create_head('main')
gitrepo.head.reference = main_branch
gitrepo.delete_head(gitrepo.heads['master'], force=True)
# And now reset the repo. This should not crash
work_repo.reset()
def test_broken_cache(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
self.waitUntilSettled()
# Break the work repo
path = work_repo.local_path
os.remove(os.path.join(path, '.git/HEAD'))
# And now reset the repo again. This should not crash
work_repo.reset()
# Now open a cache repo and break it in a way that git.Repo is happy
# at first but git won't be because of a broken HEAD revision.
merger = self.executor_server.merger
cache_repo = merger.getRepo('gerrit', 'org/project')
with open(os.path.join(cache_repo.local_path, '.git/HEAD'), 'w'):
pass
cache_repo.update()
# Now open a cache repo and break it in a way that git.Repo is happy
# at first but git won't be because of a corrupt object file.
#
# To construct this we create a commit so we have a guaranteed free
# object file, then we break it by truncating it.
fn = os.path.join(cache_repo.local_path, 'commit_filename')
with open(fn, 'a') as f:
f.write("test")
repo = cache_repo.createRepoObject(None)
repo.index.add([fn])
repo.index.commit('test commit')
# Pick the first object file we find and break it
objects_path = os.path.join(cache_repo.local_path, '.git', 'objects')
object_dir = os.path.join(
objects_path,
[d for d in os.listdir(objects_path) if len(d) == 2][0])
object_to_break = os.path.join(object_dir, os.listdir(object_dir)[0])
self.log.error(os.stat(object_to_break))
os.chmod(object_to_break, 0o644)
with open(object_to_break, 'w'):
pass
os.chmod(object_to_break, 0o444)
cache_repo.update()
def test_broken_gitmodules(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
self.waitUntilSettled()
# Break the gitmodules with uncommitted changes
path = work_repo.local_path
with open(os.path.join(path, '.gitmodules'), 'w') as f:
f.write('[submodule "libfoo"]\n'
'path = include/foo\n'
'---\n'
'url = git://example.com/git/lib.git')
# And now reset the repo. This should not crash
work_repo.reset()
# Break the gitmodules with a commit
path = work_repo.local_path
with open(os.path.join(path, '.gitmodules'), 'w') as f:
f.write('[submodule "libfoo"]\n'
'path = include/foo\n'
'---\n'
'url = git://example.com/git/lib.git')
git_repo = work_repo._createRepoObject(work_repo.local_path,
work_repo.env)
git_repo.git.add('.gitmodules')
git_repo.index.commit("Broken gitmodule")
# And now reset the repo. This should not crash
work_repo.reset()
def test_files_changes(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
self.create_branch('org/project1', 'feature')
files = {'feature.txt': 'feature'}
self.create_commit('org/project1', files=files, head='feature',
message='Add feature file')
# Let the master diverge from the feature branch. This new file should
# NOT be included in the changed files list.
files = {'must-not-be-in-changelist.txt': 'FAIL'}
self.create_commit('org/project1', files=files, head='master',
message='Add master file')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
changed_files = work_repo.getFilesChanges('feature', 'master')
self.assertEqual(sorted(['README', 'feature.txt']),
sorted(changed_files))
def test_files_changes_add_and_remove_files(self):
"""
If files changed in earlier commits are reverted in later commits,
they should not be considered changed in the PR.
"""
parent_path = os.path.join(self.upstream_root, 'org/project1')
self.create_branch('org/project1', 'feature1')
base_sha = git.Repo(parent_path).commit('master').hexsha
# Let the file that is also changed in the feature branch diverge
# in master. This change should NOT be considered in the changed
# files list.
files = {'to-be-deleted.txt': 'FAIL'}
self.create_commit('org/project1', files=files, head='master',
message='Add master file')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
# Add a file in first commit
files = {'to-be-deleted.txt': 'test'}
self.create_commit('org/project1', files=files, head='feature1',
message='Add file')
changed_files = work_repo.getFilesChanges('feature1', base_sha)
self.assertEqual(sorted(['README', 'to-be-deleted.txt']),
sorted(changed_files))
# Delete the file in second commit
delete_files = ['to-be-deleted.txt']
self.create_commit('org/project1', files={},
delete_files=delete_files, head='feature1',
message='Delete file')
changed_files = work_repo.getFilesChanges('feature1', base_sha)
self.assertEqual(['README'], changed_files)
def test_files_changes_master_fork_merges(self):
"""Regression test for getFilesChanges()
Check if correct list of changed files is listed for a messy
branch that has a merge of a fork, with the fork including a
merge of a new master revision.
The previously used "git merge-base" approach did not handle this
case correctly.
"""
parent_path = os.path.join(self.upstream_root, 'org/project1')
repo = git.Repo(parent_path)
self.create_branch('org/project1', 'messy',
commit_filename='messy1.txt')
# Let time pass to reproduce the order for this error case
commit_date = datetime.datetime.now() + datetime.timedelta(seconds=5)
commit_date = commit_date.replace(microsecond=0).isoformat()
# Create a commit on 'master' so we can merge it into the fork
files = {"master.txt": "master"}
master_ref = self.create_commit('org/project1', files=files,
message="Add master.txt",
commit_date=commit_date)
repo.refs.master.commit = master_ref
# Create a fork of the 'messy' branch and merge
# 'master' into the fork (no fast-forward)
repo.create_head("messy-fork")
repo.heads["messy-fork"].commit = "messy"
repo.head.reference = 'messy'
repo.head.reset(index=True, working_tree=True)
repo.git.checkout('messy-fork')
repo.git.merge('master', no_ff=True)
# Merge fork back into 'messy' branch (no fast-forward)
repo.head.reference = 'messy'
repo.head.reset(index=True, working_tree=True)
repo.git.checkout('messy')
repo.git.merge('messy-fork', no_ff=True)
# Create another commit on top of 'messy'
files = {"messy2.txt": "messy2"}
messy_ref = self.create_commit('org/project1', files=files,
head='messy', message="Add messy2.txt")
repo.refs.messy.commit = messy_ref
# Check that we get all changes for the 'messy' but not 'master' branch
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
changed_files = work_repo.getFilesChanges('messy', 'master')
self.assertEqual(sorted(['messy1.txt', 'messy2.txt']),
sorted(changed_files))
def test_update_needed(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
repo = git.Repo(parent_path)
self.create_branch('org/project1', 'stable')
proj_repo_state_no_update_master = {
'refs/heads/master': repo.commit('refs/heads/master').hexsha,
}
proj_repo_state_no_update = {
'refs/heads/master': repo.commit('refs/heads/master').hexsha,
'refs/heads/stable': repo.commit('refs/heads/stable').hexsha,
}
repo_state_no_update = {
'gerrit': {'org/project1': proj_repo_state_no_update}
}
proj_repo_state_update_ref = {
'refs/heads/master': repo.commit('refs/heads/master').hexsha,
'refs/heads/stable': repo.commit('refs/heads/stable').hexsha,
# New branch based on master
'refs/heads/test': repo.commit('refs/heads/master').hexsha,
}
repo_state_update_ref = {
'gerrit': {'org/project1': proj_repo_state_update_ref}
}
proj_repo_state_update_rev = {
'refs/heads/master': repo.commit('refs/heads/master').hexsha,
# Commit changed on existing branch
'refs/heads/stable': '1234567',
}
repo_state_update_rev = {
'gerrit': {'org/project1': proj_repo_state_update_rev}
}
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
self.assertFalse(work_repo.isUpdateNeeded(
proj_repo_state_no_update_master))
self.assertFalse(work_repo.isUpdateNeeded(proj_repo_state_no_update))
self.assertTrue(work_repo.isUpdateNeeded(proj_repo_state_update_ref))
self.assertTrue(work_repo.isUpdateNeeded(proj_repo_state_update_rev))
# Get repo and update for the first time.
merger = self.executor_server.merger
merger.updateRepo('gerrit', 'org/project1')
repo = merger.getRepo('gerrit', 'org/project1')
repo.reset()
# Branches master and stable must exist
self.assertEqual(['master', 'stable'], repo.getBranches())
# Test new ref causes update
# Now create an additional branch in the parent repo
self.create_branch('org/project1', 'stable2')
# Update with repo state and expect no update done
self.log.info('Calling updateRepo with repo_state_no_update')
merger.updateRepo('gerrit', 'org/project1',
repo_state=repo_state_no_update)
repo = merger.getRepo('gerrit', 'org/project1')
repo.reset()
self.assertEqual(['master', 'stable'], repo.getBranches())
# Update with repo state and expect update
self.log.info('Calling updateRepo with repo_state_update_ref')
merger.updateRepo('gerrit', 'org/project1',
repo_state=repo_state_update_ref)
repo = merger.getRepo('gerrit', 'org/project1')
repo.reset()
self.assertEqual(['master', 'stable', 'stable2'], repo.getBranches())
# Test new rev causes update
# Now create an additional branch in the parent repo
self.create_branch('org/project1', 'stable3')
# Update with repo state and expect no update done
self.log.info('Calling updateRepo with repo_state_no_update')
merger.updateRepo('gerrit', 'org/project1',
repo_state=repo_state_no_update)
repo = merger.getRepo('gerrit', 'org/project1')
repo.reset()
self.assertEqual(['master', 'stable', 'stable2'], repo.getBranches())
# Update with repo state and expect update
self.log.info('Calling updateRepo with repo_state_update_rev')
merger.updateRepo('gerrit', 'org/project1',
repo_state=repo_state_update_rev)
repo = merger.getRepo('gerrit', 'org/project1')
repo.reset()
self.assertEqual(['master', 'stable', 'stable2', 'stable3'],
repo.getBranches())
# Make sure that we always update repos that aren't in the
# repo_state. Prime a second project.
self.log.info('Calling updateRepo for project2')
merger.updateRepo('gerrit', 'org/project2',
repo_state=repo_state_no_update)
repo = merger.getRepo('gerrit', 'org/project2')
repo.reset()
self.assertEqual(['master'],
repo.getBranches())
# Then update it, passing in a repo_state where project2 is
# not present and ensure that we perform the update.
self.log.info('Creating stable branch for project2')
self.create_branch('org/project2', 'stable')
merger.updateRepo('gerrit', 'org/project2',
repo_state=repo_state_no_update)
repo = merger.getRepo('gerrit', 'org/project2')
repo.reset()
self.assertEqual(['master', 'stable'],
repo.getBranches())
def test_garbage_collect(self):
"""Tests that git gc doesn't prune FETCH_HEAD"""
parent_path = os.path.join(self.upstream_root, 'org/project1')
repo = git.Repo(parent_path)
change_ref = 'refs/changes/01/1'
self.log.info('Creating a commit on %s', change_ref)
repo.head.reference = repo.head.commit
files = {"README": "creating fake commit\n"}
for name, content in files.items():
file_name = os.path.join(parent_path, name)
with open(file_name, 'a') as f:
f.write(content)
repo.index.add([file_name])
commit = repo.index.commit('Test commit')
ref = git.refs.Reference(repo, change_ref)
ref.set_commit(commit)
self.log.info('Cloning parent repo')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
self.log.info('Fetch %s', change_ref)
work_repo.fetch(change_ref)
self.log.info('Checkout master and run garbage collection')
work_repo_object = work_repo.createRepoObject(None)
work_repo.checkout('master')
result = work_repo_object.git.gc('--prune=now')
self.log.info(result)
self.log.info('Dereferencing FETCH_HEAD')
commit = work_repo_object.commit('FETCH_HEAD')
self.assertIsNotNone(commit)
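# Background: "git gc --prune=now" immediately removes objects that
# are not reachable from the repository's usual reachability roots.
# FETCH_HEAD is a plain file outside refs/, so the commit fetched
# above would be at risk after a gc unless something keeps it
# reachable; this test asserts that dereferencing FETCH_HEAD still
# succeeds after an aggressive gc.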
def test_delete_upstream_tag(self):
# Test that we can delete a tag from upstream and that our
# working dir will prune it.
parent_path = os.path.join(self.upstream_root, 'org/project1')
parent_repo = git.Repo(parent_path)
# Tag upstream
self.addTagToRepo('org/project1', 'testtag', 'HEAD')
commit = parent_repo.commit('testtag')
# Update downstream and verify tag matches
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
work_repo_underlying = git.Repo(work_repo.local_path)
work_repo.update()
result = work_repo_underlying.commit('testtag')
self.assertEqual(commit, result)
# Delete tag upstream
self.delTagFromRepo('org/project1', 'testtag')
# Update downstream and verify tag is gone
work_repo.update()
with testtools.ExpectedException(git.exc.BadName):
result = work_repo_underlying.commit('testtag')
# Make a new empty commit
new_commit = parent_repo.index.commit('test commit')
self.assertNotEqual(commit, new_commit)
# Tag the new commit
self.addTagToRepo('org/project1', 'testtag', new_commit)
new_tag_commit = parent_repo.commit('testtag')
self.assertEqual(new_commit, new_tag_commit)
# Verify that the downstream tag matches
work_repo.update()
new_result = work_repo_underlying.commit('testtag')
self.assertEqual(new_commit, new_result)
def test_move_upstream_tag(self):
# Test that if an upstream tag moves, our local copy moves
# too.
parent_path = os.path.join(self.upstream_root, 'org/project1')
parent_repo = git.Repo(parent_path)
# Tag upstream
self.addTagToRepo('org/project1', 'testtag', 'HEAD')
commit = parent_repo.commit('testtag')
# Update downstream and verify tag matches
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
work_repo_underlying = git.Repo(work_repo.local_path)
work_repo.update()
result = work_repo_underlying.commit('testtag')
self.assertEqual(commit, result)
# Make an empty commit
new_commit = parent_repo.index.commit('test commit')
self.assertNotEqual(commit, new_commit)
# Re-tag upstream
self.delTagFromRepo('org/project1', 'testtag')
self.addTagToRepo('org/project1', 'testtag', new_commit)
new_tag_commit = parent_repo.commit('testtag')
self.assertEqual(new_commit, new_tag_commit)
# Verify our downstream tag has moved
work_repo.update()
new_result = work_repo_underlying.commit('testtag')
self.assertEqual(new_commit, new_result)
def test_set_remote_url_clone(self):
"""Test that we always use the new Git URL for cloning.
This is a regression test to make sure we always use the new
Git URL when a clone of the repo is necessary before updating
the config.
"""
parent_path = os.path.join(self.upstream_root, 'org/project1')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
# Simulate an invalid/outdated remote URL with the repo no
# longer existing on the file system.
work_repo.remote_url = "file:///dev/null"
shutil.rmtree(work_repo.local_path)
# Setting a valid remote URL should update the attribute and
# clone the repository.
work_repo.setRemoteUrl(parent_path)
self.assertEqual(work_repo.remote_url, parent_path)
self.assertTrue(os.path.exists(work_repo.local_path))
def test_set_remote_url_invalid(self):
"""Test that we don't store the Git URL when failing to set it.
This is a regression test to make sure we will always update
the Git URL after a previously failed attempt.
"""
parent_path = os.path.join(self.upstream_root, 'org/project1')
work_repo = Repo(parent_path, self.workspace_root,
'[email protected]', 'User Name', '0', '0')
# Set the Git remote URL to an invalid value.
invalid_url = "file:///dev/null"
repo = work_repo.createRepoObject(None)
work_repo._git_set_remote_url(repo, invalid_url)
work_repo.remote_url = invalid_url
# Simulate a failed attempt to update the remote URL
with mock.patch.object(work_repo, "_git_set_remote_url",
side_effect=RuntimeError):
with testtools.ExpectedException(RuntimeError):
work_repo.setRemoteUrl(parent_path)
# Make sure we cleared out the remote URL.
self.assertIsNone(work_repo.remote_url)
# Setting a valid remote URL should update the attribute and
# clone the repository.
work_repo.setRemoteUrl(parent_path)
self.assertEqual(work_repo.remote_url, parent_path)
self.assertTrue(os.path.exists(work_repo.local_path))
class TestMergerWithAuthUrl(ZuulTestCase):
config_file = 'zuul-github-driver.conf'
git_url_with_auth = True
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_changing_url(self):
"""
This test checks that if getGitUrl returns different URLs for the same
repo (which happens if an access token is part of the URL) then the
remote URLs are changed in the merger accordingly. This tests the
merger directly.
"""
merger = self.executor_server.merger
repo = merger.getRepo('github', 'org/project')
first_url = repo.remote_url
repo = merger.getRepo('github', 'org/project')
second_url = repo.remote_url
# The URLs should differ
self.assertNotEqual(first_url, second_url)
@simple_layout('layouts/merging-github.yaml', driver='github')
def test_changing_url_end_to_end(self):
"""
This test checks that if getGitUrl returns different URLs for the same
repo (which happens if an access token is part of the URL) then the
remote URLs are changed in the merger accordingly. This is an
end-to-end test.
"""
A = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title')
self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
self.waitUntilSettled()
self.assertTrue(A.is_merged)
# Get the remote URL of org/project in the merger
repo = self.executor_server.merger.repos.get('github.com/org/project')
self.assertIsNotNone(repo)
git_repo = git.Repo(repo.local_path)
first_url = list(git_repo.remotes[0].urls)[0]
B = self.fake_github.openFakePullRequest('org/project', 'master',
'PR title')
self.fake_github.emitEvent(B.getCommentAddedEvent('merge me again'))
self.waitUntilSettled()
self.assertTrue(B.is_merged)
repo = self.executor_server.merger.repos.get('github.com/org/project')
self.assertIsNotNone(repo)
git_repo = git.Repo(repo.local_path)
second_url = list(git_repo.remotes[0].urls)[0]
# The URLs should differ
self.assertNotEqual(first_url, second_url)
class TestMerger(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
@staticmethod
def _item_from_fake_change(fake_change):
return dict(
number=fake_change.number,
patchset=1,
ref=fake_change.patchsets[0]['ref'],
connection='gerrit',
branch=fake_change.branch,
project=fake_change.project,
buildset_uuid='fake-uuid',
merge_mode=zuul.model.MERGER_MERGE_RESOLVE,
)
def test_merge_multiple_items(self):
"""
Tests that the merger merges and returns the requested file changes per
change and in the correct order.
"""
merger = self.executor_server.merger
files = ['zuul.yaml', '.zuul.yaml']
dirs = ['zuul.d', '.zuul.d']
# Simple change A
file_dict_a = {'zuul.d/a.yaml': 'a'}
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
files=file_dict_a)
item_a = self._item_from_fake_change(A)
# Simple change B
file_dict_b = {'zuul.d/b.yaml': 'b'}
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B',
files=file_dict_b)
item_b = self._item_from_fake_change(B)
# Simple change C on top of A
file_dict_c = {'zuul.d/a.yaml': 'a-with-c'}
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C',
files=file_dict_c,
parent=A.patchsets[0]['ref'])
item_c = self._item_from_fake_change(C)
# Change in different project
file_dict_d = {'zuul.d/a.yaml': 'a-in-project1'}
D = self.fake_gerrit.addFakeChange('org/project1', 'master', 'D',
files=file_dict_d)
item_d = self._item_from_fake_change(D)
# Merge A
result = merger.mergeChanges([item_a], files=files, dirs=dirs)
self.assertIsNotNone(result)
hexsha, read_files, repo_state, ret_recent, orig_commit = result
self.assertEqual(len(read_files), 1)
self.assertEqual(read_files[0]['project'], 'org/project')
self.assertEqual(read_files[0]['branch'], 'master')
self.assertEqual(read_files[0]['files']['zuul.d/a.yaml'], 'a')
# Merge A -> B
result = merger.mergeChanges([item_a, item_b], files=files, dirs=dirs)
self.assertIsNotNone(result)
hexsha, read_files, repo_state, ret_recent, orig_commit = result
self.assertEqual(len(read_files), 2)
self.assertEqual(read_files[0]['project'], 'org/project')
self.assertEqual(read_files[0]['branch'], 'master')
self.assertEqual(read_files[0]['files']['zuul.d/a.yaml'], 'a')
self.assertEqual(read_files[1]['project'], 'org/project')
self.assertEqual(read_files[1]['branch'], 'master')
self.assertEqual(read_files[1]['files']['zuul.d/b.yaml'], 'b')
# Merge A -> B -> C
result = merger.mergeChanges([item_a, item_b, item_c], files=files,
dirs=dirs)
self.assertIsNotNone(result)
hexsha, read_files, repo_state, ret_recent, orig_commit = result
self.assertEqual(len(read_files), 3)
self.assertEqual(read_files[0]['project'], 'org/project')
self.assertEqual(read_files[0]['branch'], 'master')
self.assertEqual(read_files[0]['files']['zuul.d/a.yaml'], 'a')
self.assertEqual(read_files[1]['project'], 'org/project')
self.assertEqual(read_files[1]['branch'], 'master')
self.assertEqual(read_files[1]['files']['zuul.d/b.yaml'], 'b')
self.assertEqual(read_files[2]['project'], 'org/project')
self.assertEqual(read_files[2]['branch'], 'master')
self.assertEqual(read_files[2]['files']['zuul.d/a.yaml'],
'a-with-c')
# Merge A -> B -> C -> D
result = merger.mergeChanges([item_a, item_b, item_c, item_d],
files=files, dirs=dirs)
self.assertIsNotNone(result)
hexsha, read_files, repo_state, ret_recent, orig_commit = result
self.assertEqual(len(read_files), 4)
self.assertEqual(read_files[0]['project'], 'org/project')
self.assertEqual(read_files[0]['branch'], 'master')
self.assertEqual(read_files[0]['files']['zuul.d/a.yaml'], 'a')
self.assertEqual(read_files[1]['project'], 'org/project')
self.assertEqual(read_files[1]['branch'], 'master')
self.assertEqual(read_files[1]['files']['zuul.d/b.yaml'], 'b')
self.assertEqual(read_files[2]['project'], 'org/project')
self.assertEqual(read_files[2]['branch'], 'master')
self.assertEqual(read_files[2]['files']['zuul.d/a.yaml'],
'a-with-c')
self.assertEqual(read_files[3]['project'], 'org/project1')
self.assertEqual(read_files[3]['branch'], 'master')
self.assertEqual(read_files[3]['files']['zuul.d/a.yaml'],
'a-in-project1')
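# Note the shape of the merger result unpacked above: the merged
# commit sha, the per-item file contents, the captured repo state,
# the map of branch tips produced by the merge (see merge_state in
# test_merge_temp_refs below), and the original commit. read_files
# comes back in submission order, one entry per item, which is what
# the order-sensitive assertions verify.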
def test_merge_temp_refs(self):
"""
Test that the merge updates local zuul refs in order to avoid
garbage collection of needed objects.
"""
merger = self.executor_server.merger
parent_path = os.path.join(self.upstream_root, 'org/project')
parent_repo = git.Repo(parent_path)
parent_repo.create_head("foo/bar")
# Simple change A
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
item_a = self._item_from_fake_change(A)
# Simple change B on branch foo/bar
B = self.fake_gerrit.addFakeChange('org/project', 'foo/bar', 'B')
item_b = self._item_from_fake_change(B)
# Simple change C
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
item_c = self._item_from_fake_change(C)
# Merge A -> B -> C
# TODO(corvus): remove this if we update in mergeChanges
for item in [item_a, item_b, item_c]:
merger.updateRepo(item['connection'], item['project'])
result = merger.mergeChanges([item_a, item_b, item_c])
self.assertIsNotNone(result)
merge_state = result[3]
cache_repo = merger.getRepo('gerrit', 'org/project')
repo = cache_repo.createRepoObject(zuul_event_id="dummy")
# Make sure zuul refs are updated
foobar_zuul_ref = Repo.refNameToZuulRef("foo/bar")
master_zuul_ref = Repo.refNameToZuulRef("master")
ref_map = {r.path: r for r in repo.refs}
self.assertIn(foobar_zuul_ref, ref_map)
self.assertIn(master_zuul_ref, ref_map)
self.assertEqual(
ref_map[master_zuul_ref].commit.hexsha,
merge_state[("gerrit", "org/project", "master")]
)
self.assertEqual(
ref_map[foobar_zuul_ref].commit.hexsha,
merge_state[("gerrit", "org/project", "foo/bar")]
)
# Delete the remote branch so a reset cleans up the local branch
parent_repo.delete_head('foo/bar', force=True)
# Note: Before git 2.13 deleting a ref foo/bar leaves an empty
# directory foo behind that will block creating the reference foo
# in the future. As a workaround we must clean up empty directories
# in .git/refs.
if parent_repo.git.version_info[:2] < (2, 13):
Repo._cleanup_leaked_ref_dirs(parent_path, None, [])
cache_repo.update()
cache_repo.reset()
self.assertNotIn(foobar_zuul_ref, [r.path for r in repo.refs])
# Create another head 'foo' that can't be created if the 'foo/bar'
# branch wasn't cleaned up properly
parent_repo.create_head("foo")
# Change B now on branch 'foo'
B = self.fake_gerrit.addFakeChange('org/project', 'foo', 'B')
item_b = self._item_from_fake_change(B)
# Merge A -> B -> C
# TODO(corvus): remove this if we update in mergeChanges
for item in [item_a, item_b, item_c]:
merger.updateRepo(item['connection'], item['project'])
result = merger.mergeChanges([item_a, item_b, item_c])
self.assertIsNotNone(result)
merge_state = result[3]
foo_zuul_ref = Repo.refNameToZuulRef("foo")
ref_map = {r.path: r for r in repo.refs}
self.assertIn(foo_zuul_ref, ref_map)
self.assertIn(master_zuul_ref, ref_map)
self.assertEqual(
ref_map[master_zuul_ref].commit.hexsha,
merge_state[("gerrit", "org/project", "master")]
)
self.assertEqual(
ref_map[foo_zuul_ref].commit.hexsha,
merge_state[("gerrit", "org/project", "foo")]
)
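# The refs checked above live in a Zuul-managed ref namespace
# (Repo.refNameToZuulRef maps a branch name to such a ref). Their
# purpose, per the docstring, is to keep merge results reachable so
# that git garbage collection cannot remove objects a later run still
# needs; they must also be pruned alongside deleted branches to avoid
# the file/directory ref conflicts exercised here.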
def test_stale_index_lock_cleanup(self):
# Stop the running executor's merger. We needed it running to merge
# things during test bootstrapping but now it is just in the way.
self.executor_server._merger_running = False
self.executor_server.merger_loop_wake_event.set()
self.executor_server.merger_thread.join()
# Start a dedicated merger and do a merge to populate the repo on disk
self._startMerger()
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
# Add an index.lock file
fpath = os.path.join(self.merger_src_root, 'review.example.com',
'org', 'org%2Fproject1', '.git', 'index.lock')
with open(fpath, 'w'):
pass
self.assertTrue(os.path.exists(fpath))
# This will fail if git can't modify the repo due to a stale lock file.
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'MERGED')
self.assertFalse(os.path.exists(fpath))
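# Background: git creates .git/index.lock while it updates the index
# and removes it when done; if a process dies mid-operation the lock
# file stays behind and every subsequent git command that needs the
# index fails until it is removed. The test simulates exactly that
# stale-lock state and expects the merger to clean it up.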
def test_update_after_ff_merge(self):
# Test that updating a branch from a pre-existing fast-forwardable
# commit causes the branch to update
parent_path = os.path.join(self.upstream_root, 'org/project1')
upstream_repo = git.Repo(parent_path)
# Get repo and update for the first time.
merger = self.executor_server.merger
merger.updateRepo('gerrit', 'org/project1')
repo = merger.getRepo('gerrit', 'org/project1')
# Branch master must exist
self.assertEqual(['master'], repo.getBranches())
self.log.debug("Upstream master %s",
upstream_repo.commit('master').hexsha)
# Create a new change in the upstream repo
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
item_a = self._item_from_fake_change(A)
change_sha = A.data['currentPatchSet']['revision']
change_ref = 'refs/changes/01/1/1'
# This will pull the upstream change into the zuul repo
self.log.info('Merge the new change so it is present in the zuul repo')
merger.mergeChanges([item_a], zuul_event_id='testeventid')
repo = merger.getRepo('gerrit', 'org/project1')
zuul_repo = git.Repo(repo.local_path)
zuul_ref = repo.refNameToZuulRef('master')
self.log.debug("Upstream commit %s",
upstream_repo.commit(change_ref).hexsha)
self.log.debug("Zuul commit %s",
zuul_repo.commit(zuul_ref).hexsha)
self.assertEqual(upstream_repo.commit(change_ref).hexsha,
zuul_repo.commit(zuul_ref).hexsha)
self.assertNotEqual(upstream_repo.commit(change_ref).hexsha,
zuul_repo.commit('refs/heads/master').hexsha)
# Update upstream master to point at the change commit simulating a
# fast forward merge of a change
upstream_repo.refs.master.commit = change_sha
self.assertEqual(upstream_repo.commit('refs/heads/master').hexsha,
change_sha)
# Construct a repo state to simulate it being created by
# another merger.
repo_state_update_branch_ff_rev = {
'gerrit': {
'org/project1': {
'refs/heads/master': change_sha,
}
}
}
self.log.debug("Upstream master %s",
upstream_repo.commit('master').hexsha)
# This should update master
self.log.info('Update the repo and ensure it has updated properly')
merger.updateRepo('gerrit', 'org/project1',
repo_state=repo_state_update_branch_ff_rev)
merger.checkoutBranch('gerrit', 'org/project1', 'master',
repo_state=repo_state_update_branch_ff_rev)
repo = merger.getRepo('gerrit', 'org/project1')
zuul_repo = git.Repo(repo.local_path)
self.log.debug("Zuul master %s",
zuul_repo.commit('master').hexsha)
# It's not important for the zuul ref to match; it's only used
# to avoid garbage collection, so we don't check that here.
self.assertEqual(upstream_repo.commit('refs/heads/master').hexsha,
zuul_repo.commit('refs/heads/master').hexsha)
self.assertEqual(upstream_repo.commit(change_ref).hexsha,
zuul_repo.commit('refs/heads/master').hexsha)
self.assertEqual(upstream_repo.commit(change_ref).hexsha,
zuul_repo.commit('HEAD').hexsha)
def test_lost_merge_requests(self):
# Test the cleanupLostMergeRequests method of the merger
# client. This is normally called from apsched from the
# scheduler. To exercise it, we need to produce a fake lost
# merge request and then invoke it ourselves.
# Stop the actual merger which will see this as garbage:
self.executor_server._merger_running = False
self.executor_server.merger_loop_wake_event.set()
self.executor_server.merger_thread.join()
merger_client = self.scheds.first.sched.merger
merger_api = merger_client.merger_api
# Create a fake lost merge request. This is based on
# test_lost_merge_requests in test_zk.
payload = {'merge': 'test'}
merger_api.submit(MergeRequest(
uuid='B',
job_type=MergeRequest.MERGE,
build_set_uuid='BB',
tenant_name='tenant',
pipeline_name='check',
event_id='1',
), payload)
b = merger_api.get(f"{merger_api.REQUEST_ROOT}/B")
b.state = MergeRequest.RUNNING
merger_api.update(b)
# Wait until the latest state transition is reflected in the merger
# API's cache. Using a DataWatch for this purpose could lead to race
# conditions depending on which DataWatch is executed first. The
# DataWatch might be triggered for the correct event, but the cache
# might still be outdated as the DataWatch that updates the cache
# itself wasn't triggered yet.
cache = merger_api._cached_requests
for _ in iterate_timeout(30, "cache to be up-to-date"):
if (cache and cache[b.path].state == MergeRequest.RUNNING):
break
# The lost_merges method should only return merges which are running
# but not locked by any merger, in this case merge b
lost_merge_requests = list(merger_api.lostRequests())
self.assertEqual(1, len(lost_merge_requests))
self.assertEqual(b.path, lost_merge_requests[0].path)
# Exercise the cleanup code
self.log.debug("Removing lost merge requests")
merger_client.cleanupLostMergeRequests()
cache = merger_api._cached_requests
for _ in iterate_timeout(30, "cache to be empty"):
if not cache:
break
class TestMergerTree(BaseTestCase):
def test_tree(self):
t = MergerTree()
t.add('/root/component')
t.add('/root/component2')
with testtools.ExpectedException(Exception):
t.add('/root/component/subcomponent')
t.add('/root/foo/bar/baz')
with testtools.ExpectedException(Exception):
t.add('/root/foo')
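# MergerTree guards against nested checkouts: once a repository is
# registered at a path, no other repository may be added underneath
# it, and none may claim one of its ancestors. This matters for the
# golang and flat layout schemes below, where differently named
# projects can otherwise map onto overlapping directories.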
class TestMergerSchemes(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def setUp(self):
super().setUp()
self.work_root = os.path.join(self.test_root, 'workspace')
self.cache_root = os.path.join(self.test_root, 'cache')
def _getMerger(self, work_root=None, cache_root=None, scheme=None):
work_root = work_root or self.work_root
return self.executor_server._getMerger(
work_root, cache_root=cache_root, scheme=scheme)
def _assertScheme(self, root, scheme):
if scheme == 'unique':
self.assertTrue(os.path.exists(
os.path.join(root, 'review.example.com',
'org/org%2Fproject1')))
else:
self.assertFalse(os.path.exists(
os.path.join(root, 'review.example.com',
'org/org%2Fproject1')))
if scheme == 'golang':
self.assertTrue(os.path.exists(
os.path.join(root, 'review.example.com',
'org/project1')))
else:
self.assertFalse(os.path.exists(
os.path.join(root, 'review.example.com',
'org/project1')))
if scheme == 'flat':
self.assertTrue(os.path.exists(
os.path.join(root, 'project1')))
else:
self.assertFalse(os.path.exists(
os.path.join(root, 'project1')))
def test_unique_scheme(self):
cache_merger = self._getMerger(work_root=self.cache_root)
cache_merger.updateRepo('gerrit', 'org/project1')
self._assertScheme(self.cache_root, 'unique')
merger = self._getMerger(
cache_root=self.cache_root,
scheme=zuul.model.SCHEME_UNIQUE)
merger.getRepo('gerrit', 'org/project1')
self._assertScheme(self.work_root, 'unique')
def test_golang_scheme(self):
cache_merger = self._getMerger(work_root=self.cache_root)
cache_merger.updateRepo('gerrit', 'org/project1')
self._assertScheme(self.cache_root, 'unique')
merger = self._getMerger(
cache_root=self.cache_root,
scheme=zuul.model.SCHEME_GOLANG)
merger.getRepo('gerrit', 'org/project1')
self._assertScheme(self.work_root, 'golang')
def test_flat_scheme(self):
cache_merger = self._getMerger(work_root=self.cache_root)
cache_merger.updateRepo('gerrit', 'org/project1')
self._assertScheme(self.cache_root, 'unique')
merger = self._getMerger(
cache_root=self.cache_root,
scheme=zuul.model.SCHEME_FLAT)
merger.getRepo('gerrit', 'org/project1')
self._assertScheme(self.work_root, 'flat')
@simple_layout('layouts/overlapping-repos.yaml')
def test_golang_collision(self):
merger = self._getMerger(scheme=zuul.model.SCHEME_GOLANG)
repo = merger.getRepo('gerrit', 'component')
self.assertIsNotNone(repo)
repo = merger.getRepo('gerrit', 'component/subcomponent')
self.assertIsNone(repo)
@simple_layout('layouts/overlapping-repos.yaml')
def test_flat_collision(self):
merger = self._getMerger(scheme=zuul.model.SCHEME_FLAT)
repo = merger.getRepo('gerrit', 'component')
self.assertIsNotNone(repo)
repo = merger.getRepo('gerrit', 'component/component')
self.assertIsNone(repo)
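# Hedged illustration of the three repo-layout schemes asserted by
# _assertScheme above, relative to the merger work root: 'unique'
# keeps one percent-quoted directory per project, 'golang' mirrors
# the hostname/org/project hierarchy, and 'flat' uses only the last
# path component.
def scheme_path_sketch(scheme, hostname, project):
    if scheme == 'unique':
        org = project.split('/')[0]
        return f"{hostname}/{org}/{project.replace('/', '%2F')}"
    if scheme == 'golang':
        return f"{hostname}/{project}"
    return project.split('/')[-1]   # 'flat'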
class TestOverlappingRepos(ZuulTestCase):
@simple_layout('layouts/overlapping-repos.yaml')
def test_overlapping_repos(self):
self.executor_server.keep_jobdir = True
A = self.fake_gerrit.addFakeChange('component', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='test-job', result='SUCCESS', changes='1,1')],
ordered=False)
build = self.getJobFromHistory('test-job')
jobdir_git_dir = os.path.join(build.jobdir.src_root,
'component', '.git')
self.assertTrue(os.path.exists(jobdir_git_dir))
jobdir_git_dir = os.path.join(build.jobdir.src_root,
'subcomponent', '.git')
self.assertTrue(os.path.exists(jobdir_git_dir))
class TestMergerUpgrade(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def test_merger_upgrade(self):
work_root = os.path.join(self.test_root, 'workspace')
# Simulate existing repos
org_project = os.path.join(work_root, 'review.example.com', 'org',
'project', '.git')
os.makedirs(org_project)
scheme_file = os.path.join(work_root, '.zuul_merger_scheme')
# Verify that an executor merger doesn't "upgrade" or write a
# scheme file.
self.executor_server._getMerger(
work_root, cache_root=None, scheme=zuul.model.SCHEME_FLAT)
self.assertTrue(os.path.exists(org_project))
self.assertFalse(os.path.exists(scheme_file))
# Verify that a "real" merger does upgrade.
self.executor_server._getMerger(
work_root, cache_root=None,
execution_context=False)
self.assertFalse(os.path.exists(org_project))
self.assertTrue(os.path.exists(scheme_file))
with open(scheme_file) as f:
self.assertEqual(f.read().strip(), 'unique')
# Verify that the next time it starts, we don't upgrade again.
flag_dir = os.path.join(work_root, 'flag')
os.makedirs(flag_dir)
self.executor_server._getMerger(
work_root, cache_root=None,
execution_context=False)
self.assertFalse(os.path.exists(org_project))
self.assertTrue(os.path.exists(scheme_file))
self.assertTrue(os.path.exists(flag_dir))
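# Hedged sketch of the scheme-file handshake test_merger_upgrade
# verifies: a "real" (non-execution-context) merger records its repo
# layout scheme in the work root so later startups can detect that
# the one-time upgrade already ran.
def read_merger_scheme(work_root):
    path = os.path.join(work_root, '.zuul_merger_scheme')
    try:
        with open(path) as f:
            return f.read().strip()
    except FileNotFoundError:
        return None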
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_merger_repo.py
|
test_merger_repo.py
|
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from zuul.zk.components import ComponentRegistry
from tests.base import ZuulTestCase, simple_layout, iterate_timeout
from tests.base import ZuulWebFixture
def model_version(version):
"""Specify a model version for a model upgrade test
This creates a dummy scheduler component with the specified model
API version. The component is created before any other, so it
will appear to Zuul that it is joining an existing cluster with
data at the old version.
"""
def decorator(test):
test.__model_version__ = version
return test
return decorator
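# A hedged sketch of how the test harness could consume the attribute
# set by @model_version (the real wiring lives in tests/base.py and
# may differ): before the schedulers start, pin a dummy component to
# the old model API so the cluster appears to be mid-upgrade.
def apply_model_version_sketch(test_method, component_info):
    version = getattr(test_method, '__model_version__', None)
    if version is not None:
        component_info.model_api = version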
class TestModelUpgrade(ZuulTestCase):
tenant_config_file = "config/single-tenant/main-model-upgrade.yaml"
scheduler_count = 1
def getJobData(self, tenant, pipeline):
item_path = f'/zuul/tenant/{tenant}/pipeline/{pipeline}/item'
count = 0
for item in self.zk_client.client.get_children(item_path):
bs_path = f'{item_path}/{item}/buildset'
for buildset in self.zk_client.client.get_children(bs_path):
data = json.loads(self.getZKObject(
f'{bs_path}/{buildset}/job/check-job'))
count += 1
yield data
if not count:
raise Exception("No job data found")
@model_version(0)
@simple_layout('layouts/simple.yaml')
def test_model_upgrade_0_1(self):
component_registry = ComponentRegistry(self.zk_client)
self.assertEqual(component_registry.model_api, 0)
# Upgrade our component
self.model_test_component_info.model_api = 1
for _ in iterate_timeout(30, "model api to update"):
if component_registry.model_api == 1:
break
@model_version(2)
@simple_layout('layouts/pipeline-supercedes.yaml')
def test_supercedes(self):
"""
Test that pipeline supercedes still work with model API 2,
which uses dequeue events.
"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'test-job')
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
self.assertEqual(self.builds[0].name, 'test-job')
self.assertEqual(self.builds[0].pipeline, 'gate')
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(len(self.builds), 0)
self.assertEqual(A.reported, 2)
self.assertEqual(A.data['status'], 'MERGED')
self.assertHistory([
dict(name='test-job', result='ABORTED', changes='1,1'),
dict(name='test-job', result='SUCCESS', changes='1,1'),
], ordered=False)
@model_version(4)
def test_model_4(self):
# Test that Zuul return values are correctly passed to child
# jobs in version 4 compatibility mode.
A = self.fake_gerrit.addFakeChange('org/project3', 'master', 'A')
fake_data = [
{'name': 'image',
'url': 'http://example.com/image',
'metadata': {
'type': 'container_image'
}},
]
self.executor_server.returnData(
'project-merge', A,
{'zuul': {'artifacts': fake_data}}
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project1-project2-integration',
result='SUCCESS', changes='1,1'),
], ordered=False)
# Verify that the child jobs got the data from the parent
test1 = self.getJobFromHistory('project-test1')
self.assertEqual(fake_data[0]['url'],
test1.parameters['zuul']['artifacts'][0]['url'])
integration = self.getJobFromHistory('project1-project2-integration')
self.assertEqual(fake_data[0]['url'],
integration.parameters['zuul']['artifacts'][0]['url'])
@model_version(4)
def test_model_4_5(self):
# Changes share a queue, but with only one job, the first
# merges before the second starts.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
fake_data = [
{'name': 'image',
'url': 'http://example.com/image',
'metadata': {
'type': 'container_image'
}},
]
self.executor_server.returnData(
'project-merge', A,
{'zuul': {'artifacts': fake_data}}
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
# Upgrade our component
self.model_test_component_info.model_api = 5
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project1-project2-integration',
result='SUCCESS', changes='1,1'),
], ordered=False)
# Verify that the child job got the data from the parent
test1 = self.getJobFromHistory('project-test1')
self.assertEqual(fake_data[0]['url'],
test1.parameters['zuul']['artifacts'][0]['url'])
@model_version(5)
def test_model_5_6(self):
# This exercises the min_ltimes=None case in configloader on
# layout updates.
first = self.scheds.first
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
if state_one:
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one):
break
with second.sched.layout_update_lock, second.sched.run_handler_lock:
file_dict = {'zuul.d/test.yaml': ''}
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
files=file_dict)
A.setMerged()
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled(matcher=[first])
# Delete the layout data to simulate the first scheduler
# being on model api 5 (we write the data regardless of
# the cluster version since it's a new znode).
self.scheds.first.sched.zk_client.client.delete(
'/zuul/layout-data', recursive=True)
self.waitUntilSettled()
self.assertEqual(first.sched.local_layout_state.get("tenant-one"),
second.sched.local_layout_state.get("tenant-one"))
# No test for model version 7 (secrets in blob store): old and new
# code paths are exercised in existing tests since small secrets
# don't use the blob store.
@model_version(8)
def test_model_8_9(self):
# This exercises the upgrade to nodeset_alternates
first = self.scheds.first
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
if state_one:
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one):
break
self.fake_nodepool.pause()
with second.sched.layout_update_lock, second.sched.run_handler_lock:
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[first])
self.model_test_component_info.model_api = 9
with first.sched.layout_update_lock, first.sched.run_handler_lock:
self.fake_nodepool.unpause()
self.waitUntilSettled(matcher=[second])
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project1-project2-integration',
result='SUCCESS', changes='1,1'),
], ordered=False)
@model_version(11)
def test_model_11_12(self):
# This exercises the upgrade to store build/job versions
first = self.scheds.first
second = self.createScheduler()
second.start()
self.assertEqual(len(self.scheds), 2)
for _ in iterate_timeout(10, "until priming is complete"):
state_one = first.sched.local_layout_state.get("tenant-one")
if state_one:
break
for _ in iterate_timeout(
10, "all schedulers to have the same layout state"):
if (second.sched.local_layout_state.get(
"tenant-one") == state_one):
break
self.executor_server.hold_jobs_in_build = True
with second.sched.layout_update_lock, second.sched.run_handler_lock:
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled(matcher=[first])
self.model_test_component_info.model_api = 12
with first.sched.layout_update_lock, first.sched.run_handler_lock:
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled(matcher=[second])
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project1-project2-integration',
result='SUCCESS', changes='1,1'),
], ordered=False)
@model_version(12)
def test_model_12_13(self):
# Initially queue items will still have the full trigger event
# stored in Zookeeper. The trigger event will be converted to
# an event info object after the model API update.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
# Upgrade our component
self.model_test_component_info.model_api = 13
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-test1', result='SUCCESS', changes='1,1'),
dict(name='project-test2', result='SUCCESS', changes='1,1'),
dict(name='project1-project2-integration',
result='SUCCESS', changes='1,1'),
], ordered=False)
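# Hedged sketch of the compatibility pattern the tests above exercise:
# production code branches on the cluster-wide model API and only
# emits a new serialization once every component has upgraded. The
# serialize_old/serialize_new methods are hypothetical stand-ins.
def serialize_for_cluster(obj, component_registry):
    if component_registry.model_api >= 13:
        return obj.serialize_new()   # hypothetical newer format
    return obj.serialize_old()       # hypothetical older format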
class TestGithubModelUpgrade(ZuulTestCase):
config_file = 'zuul-github-driver.conf'
scheduler_count = 1
@model_version(3)
@simple_layout('layouts/gate-github.yaml', driver='github')
def test_status_checks_removal(self):
# This tests the old behavior -- that changes are not dequeued
# once their required status checks are removed -- since the
# new behavior requires a flag in ZK.
# Contrast with test_status_checks_removal.
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._set_branch_protection(
'master', contexts=['something/check', 'tenant-one/gate'])
A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
# Since the required status 'something/check' is not fulfilled,
# no job is expected
self.assertEqual(0, len(self.history))
# Set the required status 'something/check'
repo.create_status(A.head_sha, 'success', 'example.com', 'description',
'something/check')
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
# Remove it and verify the change is not dequeued (old behavior).
repo.create_status(A.head_sha, 'failed', 'example.com', 'description',
'something/check')
self.fake_github.emitEvent(A.getCommitStatusEvent('something/check',
state='failed',
user='foo'))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# the change should have entered the gate
self.assertHistory([
dict(name='project-test1', result='SUCCESS'),
dict(name='project-test2', result='SUCCESS'),
], ordered=False)
self.assertTrue(A.is_merged)
@model_version(10)
@simple_layout('layouts/github-merge-mode.yaml', driver='github')
def test_merge_method_syntax_check(self):
"""
Tests that the merge-method syntax check reports a config error
for an unsupported merge mode once the branch cache is updated.
"""
webfixture = self.useFixture(
ZuulWebFixture(self.changes, self.config,
self.additional_event_queues, self.upstream_root,
self.poller_events,
self.git_url_with_auth, self.addCleanup,
self.test_root))
sched = self.scheds.first.sched
web = webfixture.web
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project')
repo._repodata['allow_rebase_merge'] = False
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Verify that there are no errors with model version 10 (we
# should be using the defaultdict that indicates all merge
# modes are supported).
tenant = sched.abide.tenants.get('tenant-one')
self.assertEquals(len(tenant.layout.loading_errors), 0)
# Upgrade our component
self.model_test_component_info.model_api = 11
# Perform a smart reconfiguration which should not clear the
# cache; we should continue to see no errors because we should
# still be using the defaultdict.
self.scheds.first.smartReconfigure()
tenant = sched.abide.tenants.get('tenant-one')
self.assertEquals(len(tenant.layout.loading_errors), 0)
# Wait for web to have the same config
for _ in iterate_timeout(10, "config is synced"):
if (web.tenant_layout_state.get('tenant-one') ==
web.local_layout_state.get('tenant-one')):
break
# Repeat the check
tenant = web.abide.tenants.get('tenant-one')
self.assertEquals(len(tenant.layout.loading_errors), 0)
# Perform a full reconfiguration which should cause us to
# actually query, update the branch cache, and report an
# error.
self.scheds.first.fullReconfigure()
self.waitUntilSettled()
tenant = sched.abide.tenants.get('tenant-one')
loading_errors = tenant.layout.loading_errors
self.assertEquals(
len(tenant.layout.loading_errors), 1,
"An error should have been stored in sched")
self.assertIn(
"rebase not supported",
str(loading_errors[0].error))
# Wait for web to have the same config
for _ in iterate_timeout(10, "config is synced"):
if (web.tenant_layout_state.get('tenant-one') ==
web.local_layout_state.get('tenant-one')):
break
# Repeat the check for web
tenant = web.abide.tenants.get('tenant-one')
loading_errors = tenant.layout.loading_errors
self.assertEquals(
len(tenant.layout.loading_errors), 1,
"An error should have been stored in web")
self.assertIn(
"rebase not supported",
str(loading_errors[0].error))
class TestDeduplication(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
tenant_config_file = "config/circular-dependencies/main.yaml"
scheduler_count = 1
def _test_job_deduplication(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A <-> B
A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
A.subject, B.data["url"]
)
B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
B.subject, A.data["url"]
)
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
@simple_layout('layouts/job-dedup-auto-shared.yaml')
@model_version(7)
def test_job_deduplication_auto_shared(self):
self._test_job_deduplication()
self.assertHistory([
dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
# This would be deduplicated
dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
], ordered=False)
self.assertEqual(len(self.fake_nodepool.history), 4)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_model_upgrade.py
|
test_model_upgrade.py
|
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import zuul.configloader
from tests.base import ZuulTestCase
class TestGlobalSemaphoresConfig(ZuulTestCase):
tenant_config_file = 'config/global-semaphores-config/main.yaml'
def assertSemaphores(self, tenant, semaphores):
for k, v in semaphores.items():
self.assertEqual(
len(tenant.semaphore_handler.semaphoreHolders(k)),
v, k)
def assertSemaphoresMax(self, tenant, semaphores):
for k, v in semaphores.items():
abide = tenant.semaphore_handler.abide
semaphore = tenant.layout.getSemaphore(abide, k)
self.assertEqual(semaphore.max, v, k)
def test_semaphore_scope(self):
# This tests global and tenant semaphore scope
self.executor_server.hold_jobs_in_build = True
tenant1 = self.scheds.first.sched.abide.tenants.get('tenant-one')
tenant2 = self.scheds.first.sched.abide.tenants.get('tenant-two')
tenant3 = self.scheds.first.sched.abide.tenants.get('tenant-three')
# The different max values will tell us that we have the right
# semaphore objects. Each tenant has one tenant-scope
# semaphore in a tenant-specific project, and one tenant-scope
# semaphore with a common definition. Tenants 1 and 2 share a
# global-scope semaphore, and tenant 3 has a tenant-scope
# semaphore with the same name.
# Here is what is defined in each tenant:
# Tenant-one:
# * global-semaphore: scope:global max:100 definition:main.yaml
# * common-semaphore: scope:tenant max:10 definition:common-config
# * project1-semaphore: scope:tenant max:11 definition:project1
# * (global-semaphore): scope:tenant max:2 definition:project1
# [unused since it shadows the actual global-semaphore]
# Tenant-two:
# * global-semaphore: scope:global max:100 definition:main.yaml
# * common-semaphore: scope:tenant max:10 definition:common-config
# * project2-semaphore: scope:tenant max:12 definition:project2
# Tenant-three:
# * global-semaphore: scope:global max:999 definition:project3
# * common-semaphore: scope:tenant max:10 definition:common-config
# * project3-semaphore: scope:tenant max:13 definition:project3
self.assertSemaphoresMax(tenant1, {'global-semaphore': 100,
'common-semaphore': 10,
'project1-semaphore': 11,
'project2-semaphore': 1,
'project3-semaphore': 1})
self.assertSemaphoresMax(tenant2, {'global-semaphore': 100,
'common-semaphore': 10,
'project1-semaphore': 1,
'project2-semaphore': 12,
'project3-semaphore': 1})
# This "global" semaphore is really tenant-scoped, it just has
# the same name.
self.assertSemaphoresMax(tenant3, {'global-semaphore': 999,
'common-semaphore': 10,
'project1-semaphore': 1,
'project2-semaphore': 1,
'project3-semaphore': 13})
# We should have a config error in tenant1 due to the
# redefinition.
self.assertEquals(len(tenant1.layout.loading_errors), 1)
self.assertEquals(len(tenant2.layout.loading_errors), 0)
self.assertEquals(len(tenant3.layout.loading_errors), 0)
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Checking the number of holders tells us whether we are
# using global or tenant-scoped semaphores. Each in-use
# semaphore in a tenant should have only one holder except the
# global-scope semaphore shared between tenants 1 and 2.
self.assertSemaphores(tenant1, {'global-semaphore': 2,
'common-semaphore': 1,
'project1-semaphore': 1,
'project2-semaphore': 0,
'project3-semaphore': 0})
self.assertSemaphores(tenant2, {'global-semaphore': 2,
'common-semaphore': 1,
'project1-semaphore': 0,
'project2-semaphore': 1,
'project3-semaphore': 0})
self.assertSemaphores(tenant3, {'global-semaphore': 1,
'common-semaphore': 1,
'project1-semaphore': 0,
'project2-semaphore': 0,
'project3-semaphore': 1})
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
class TestGlobalSemaphoresBroken(ZuulTestCase):
validate_tenants = []
tenant_config_file = 'config/global-semaphores-config/broken.yaml'
# This test raises a config error during the startup of the test
# case which makes the first scheduler fail during its startup.
# The second (or any additional) scheduler won't even run as the
# startup is serialized in tests/base.py.
# Thus it doesn't make sense to execute this test with multiple
# schedulers.
scheduler_count = 1
def setUp(self):
self.assertRaises(zuul.configloader.GlobalSemaphoreNotFoundError,
super().setUp)
def test_broken_global_semaphore_config(self):
pass
class TestGlobalSemaphores(ZuulTestCase):
tenant_config_file = 'config/global-semaphores/main.yaml'
def test_global_semaphores(self):
# This tests that a job finishing in one tenant will correctly
# start a job in another tenant waiting on the semaphore.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertHistory([])
self.assertBuilds([
dict(name='test-global-semaphore', changes='1,1'),
])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertHistory([
dict(name='test-global-semaphore',
result='SUCCESS', changes='1,1'),
dict(name='test-global-semaphore',
result='SUCCESS', changes='2,1'),
], ordered=False)
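# Hedged sketch of the scoping rule these tests exercise: a
# tenant-scoped semaphore tracks holders per tenant, while a
# global-scope semaphore shares one holder list across tenants. The
# ZooKeeper paths below are illustrative assumptions, not necessarily
# zuul's actual layout.
def semaphore_path_sketch(name, tenant=None):
    if tenant is None:
        return f"/zuul/global-semaphores/{name}"
    return f"/zuul/semaphores/{tenant}/{name}"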
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_global_semaphores.py
|
test_global_semaphores.py
|
# Copyright 2015 GoodData
# Copyright (c) 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import ZuulTestCase
class TestGerritAndGithub(ZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
tenant_config_file = 'config/multi-driver/main.yaml'
# These tests also use the fake github implementation, which
# means that every scheduler gets a different fake github instance.
# Thus, assertions might fail depending on which scheduler did the
# interaction with Github.
scheduler_count = 1
def test_multiple_project_gerrit_and_github(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B = self.fake_github.openFakePullRequest('org/project1', 'master', 'B')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.builds))
self.assertEqual('project-gerrit', self.builds[0].name)
self.assertEqual('project1-github', self.builds[1].name)
self.assertTrue(self.builds[0].hasChanges(A))
self.assertTrue(self.builds[1].hasChanges(B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# Check on reporting results
# github should have a success status (only).
statuses = self.fake_github.getCommitStatuses(
'org/project1', B.head_sha)
self.assertEqual(1, len(statuses))
self.assertEqual('success', statuses[0]['state'])
# gerrit should have only reported twice, on start and success
self.assertEqual(A.reported, 2)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_multi_driver.py
|
test_multi_driver.py
|
# Copyright 2021 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zuul.lib import strings
from tests.base import BaseTestCase
class TestStrings(BaseTestCase):
def test_unique_project_name(self):
self.assertEqual(('project', 'project'),
strings.unique_project_name('project'))
self.assertEqual(('project', 'project%2Fsubproject'),
strings.unique_project_name('project/subproject'))
self.assertEqual(('project', 'project%2Fsub%2Fproject'),
strings.unique_project_name('project/sub/project'))
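# A hedged reimplementation consistent with the assertions above: the
# first element is the leading path component, the second is the full
# name with '/' percent-encoded so it can serve as a single directory
# name.
from urllib.parse import quote_plus

def unique_project_name_sketch(name):
    return name.split('/')[0], quote_plus(name)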
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_strings.py
|
test_strings.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zuul import model
import zuul.nodepool
from tests.base import BaseTestCase, FakeNodepool, iterate_timeout
from zuul.zk import ZooKeeperClient
from zuul.zk.nodepool import ZooKeeperNodepool
class NodepoolWithCallback(zuul.nodepool.Nodepool):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.provisioned_requests = []
def _handleNodeRequestEvent(self, request, event):
super()._handleNodeRequestEvent(request, event)
self.provisioned_requests.append(request)
class TestNodepoolBase(BaseTestCase):
# Tests the Nodepool interface class using a fake nodepool and
# scheduler.
def setUp(self):
super().setUp()
self.statsd = None
self.setupZK()
self.zk_client = ZooKeeperClient(
self.zk_chroot_fixture.zk_hosts,
tls_cert=self.zk_chroot_fixture.zookeeper_cert,
tls_key=self.zk_chroot_fixture.zookeeper_key,
tls_ca=self.zk_chroot_fixture.zookeeper_ca)
self.zk_nodepool = ZooKeeperNodepool(self.zk_client)
self.addCleanup(self.zk_client.disconnect)
self.zk_client.connect()
self.hostname = 'nodepool-test-hostname'
self.nodepool = NodepoolWithCallback(
self.zk_client, self.hostname, self.statsd, scheduler=True)
self.fake_nodepool = FakeNodepool(self.zk_chroot_fixture)
self.addCleanup(self.fake_nodepool.stop)
class TestNodepool(TestNodepoolBase):
def test_node_request(self):
# Test a simple node request
nodeset = model.NodeSet()
nodeset.addNode(model.Node(['controller', 'foo'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
request = self.nodepool.requestNodes(
"test-uuid", job, "tenant", "pipeline", "provider", 0, 0)
for x in iterate_timeout(30, 'requests are complete'):
if len(self.nodepool.provisioned_requests) == 1:
break
request = self.nodepool.zk_nodepool.getNodeRequest(request.id)
# We have to look up the request from ZK directly to check the
# state.
zk_request = self.zk_nodepool.getNodeRequest(request.id)
self.assertEqual(zk_request.state, 'fulfilled')
# Accept the nodes
new_nodeset = self.nodepool.getNodeSet(request, nodeset)
self.assertIsNotNone(new_nodeset)
# acceptNodes will be called on the executor, but only if the
# noderequest was accepted before.
executor_nodeset = nodeset.copy()
self.nodepool.acceptNodes(request, executor_nodeset)
for node in executor_nodeset.getNodes():
self.assertIsNotNone(node.lock)
self.assertEqual(node.state, 'ready')
# Mark the nodes in use
self.nodepool.useNodeSet(
executor_nodeset, tenant_name=None, project_name=None)
for node in executor_nodeset.getNodes():
self.assertEqual(node.state, 'in-use')
# Return the nodes
self.nodepool.returnNodeSet(
executor_nodeset, build=None, tenant_name=None, project_name=None,
duration=0)
for node in executor_nodeset.getNodes():
self.assertIsNone(node.lock)
self.assertEqual(node.state, 'used')
def test_node_request_canceled(self):
# Test that node requests can be canceled
nodeset = model.NodeSet()
nodeset.addNode(model.Node(['controller'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
self.fake_nodepool.pause()
request = self.nodepool.requestNodes(
"test-uuid", job, "tenant", "pipeline", "provider", 0, 0)
for x in iterate_timeout(30, 'request created'):
if len(self.nodepool.zk_nodepool._node_request_cache):
break
self.assertEqual(len(self.nodepool.provisioned_requests), 0)
self.nodepool.cancelRequest(request)
for x in iterate_timeout(30, 'request deleted'):
if len(self.nodepool.provisioned_requests):
break
self.assertEqual(len(self.nodepool.provisioned_requests), 1)
self.assertEqual(self.nodepool.provisioned_requests[0].state,
'requested')
def test_node_request_priority(self):
# Test that requests are satisfied in priority order
nodeset = model.NodeSet()
nodeset.addNode(model.Node(['controller', 'foo'], 'ubuntu-xenial'))
nodeset.addNode(model.Node(['compute'], 'ubuntu-xenial'))
job = model.Job('testjob')
job.nodeset = nodeset
self.fake_nodepool.pause()
request1 = self.nodepool.requestNodes(
"test-uuid", job, "tenant", "pipeline", "provider", 0, 1)
request2 = self.nodepool.requestNodes(
"test-uuid", job, "tenant", "pipeline", "provider", 0, 0)
self.fake_nodepool.unpause()
for x in iterate_timeout(30, 'requests are complete'):
if len(self.nodepool.provisioned_requests) == 2:
break
request1 = self.nodepool.zk_nodepool.getNodeRequest(request1.id)
request2 = self.nodepool.zk_nodepool.getNodeRequest(request2.id)
self.assertEqual(request1.state, 'fulfilled')
self.assertEqual(request2.state, 'fulfilled')
self.assertTrue(request2.state_time < request1.state_time)
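# Hedged condensation of the accept/use/return flow exercised in
# test_node_request above, using only calls and signatures shown
# there: acceptNodes locks the nodes ('ready'), useNodeSet marks them
# 'in-use', and returnNodeSet releases the locks ('used').
def node_lifecycle_sketch(nodepool, request, nodeset):
    accepted = nodeset.copy()
    nodepool.acceptNodes(request, accepted)
    nodepool.useNodeSet(accepted, tenant_name=None, project_name=None)
    nodepool.returnNodeSet(accepted, build=None, tenant_name=None,
                           project_name=None, duration=0)
    return accepted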
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_nodepool.py
|
test_nodepool.py
|
# Copyright 2020 BMW Group
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from zuul.model import DequeueEvent
from tests.base import ZuulTestCase, simple_layout
class TestReporting(ZuulTestCase):
tenant_config_file = "config/single-tenant/main.yaml"
@simple_layout("layouts/dequeue-reporting.yaml")
def test_dequeue_reporting(self):
"""Check that explicitly dequeued items are reported as dequeued"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
event = DequeueEvent('tenant-one', 'check',
'review.example.com', 'org/project',
change='1,1',
ref=None, oldrev=None, newrev=None)
self.scheds.first.sched.pipeline_management_events['tenant-one'][
'check'].put(event)
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# A should have been reported two times: start, cancel
self.assertEqual(2, A.reported)
self.assertEqual(2, len(A.messages))
self.assertIn("Build started (check)", A.messages[0])
self.assertIn("Build canceled (check)", A.messages[1])
# There shouldn't be any successful items
self.assertEqual(len(check_pipeline.getAllItems()), 0)
# But one canceled
self.assertEqual(self.countJobResults(self.history, "ABORTED"), 1)
@simple_layout("layouts/dequeue-reporting.yaml")
def test_dequeue_reporting_gate_reset(self):
"""Check that a gate reset is not reported as dequeued"""
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
B = self.fake_gerrit.addFakeChange("org/project", "master", "B")
A.addApproval("Code-Review", 2)
B.addApproval("Code-Review", 2)
self.executor_server.failJob("project-test1", A)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
self.waitUntilSettled()
# None of the items should be reported as dequeued, only success or
# failure
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "MERGED")
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertIn("Build started (gate)", A.messages[0])
self.assertIn("Build failed (gate)", A.messages[1])
self.assertIn("Build started (gate)", B.messages[0])
self.assertIn("Build succeeded (gate)", B.messages[1])
@simple_layout("layouts/dequeue-reporting.yaml")
def test_dequeue_reporting_supercedes(self):
"""Test that a superceeded change is reported as dequeued"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(4, A.reported)
self.assertIn("Build started (check)", A.messages[0])
self.assertIn("Build canceled (check)", A.messages[1])
self.assertIn("Build started (gate)", A.messages[2])
self.assertIn("Build succeeded (gate)", A.messages[3])
@simple_layout("layouts/dequeue-reporting.yaml")
def test_dequeue_reporting_new_patchset(self):
"Test that change superceeded by a new patchset is reported as deqeued"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(1, len(self.builds))
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(4, A.reported)
self.assertIn("Build started (check)", A.messages[0])
self.assertIn("Build canceled (check)", A.messages[1])
self.assertIn("Build started (check)", A.messages[2])
self.assertIn("Build succeeded (check)", A.messages[3])
@simple_layout("layouts/no-jobs-reporting.yaml")
def test_no_jobs_reporting_check(self):
# Test that we don't report NO_JOBS results
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(0, A.reported)
self.assertHistory([])
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['check']
reporter = self.scheds.first.connections.getSqlReporter(
pipeline)
with self.scheds.first.connections.getSqlConnection().\
engine.connect() as conn:
result = conn.execute(
sa.sql.select(reporter.connection.zuul_buildset_table))
buildsets = result.fetchall()
for x in buildsets:
self.log.debug("Buildset %s", x)
self.assertEqual(0, len(buildsets))
@simple_layout("layouts/no-jobs-reporting.yaml")
def test_no_jobs_reporting_check_and_gate(self):
# Test that we don't report NO_JOBS results
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
A.addApproval("Code-Review", 2)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
self.assertEqual(0, A.reported)
self.assertHistory([])
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['check']
reporter = self.scheds.first.connections.getSqlReporter(
pipeline)
with self.scheds.first.connections.getSqlConnection().\
engine.connect() as conn:
result = conn.execute(
sa.sql.select(reporter.connection.zuul_buildset_table))
buildsets = result.fetchall()
for x in buildsets:
self.log.debug("Buildset %s", x)
self.assertEqual(0, len(buildsets))
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_reporting.py
|
test_reporting.py
|
# Copyright (c) 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import ZuulTestCase
class TestPushRequirements(ZuulTestCase):
config_file = 'zuul-push-reqs.conf'
tenant_config_file = 'config/push-reqs/main.yaml'
def test_push_requirements(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
new_sha = A.head_sha
A.setMerged("merging A")
pevent = self.fake_github.getPushEvent(project='org/project1',
ref='refs/heads/master',
new_rev=new_sha)
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
# All but one pipeline should be skipped
self.assertEqual(1, len(self.builds))
self.assertEqual('pushhub', self.builds[0].pipeline)
self.assertEqual('org/project1', self.builds[0].project)
# Make a gerrit change, and emit a ref-updated event
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.fake_gerrit.addEvent(B.getRefUpdatedEvent())
B.setMerged()
self.waitUntilSettled()
# All but one pipeline should be skipped, increasing builds by 1
self.assertEqual(2, len(self.builds))
self.assertEqual('pushgerrit', self.builds[1].pipeline)
self.assertEqual('org/project2', self.builds[1].project)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_push_reqs.py
|
test_push_reqs.py
|
# Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import os
import re
import textwrap
import time
import types
import sqlalchemy as sa
import zuul
from zuul.lib import yamlutil
from tests.base import ZuulTestCase, FIXTURE_DIR, \
PostgresqlSchemaFixture, MySQLSchemaFixture, \
BaseTestCase, AnsibleZuulTestCase
class TestConnections(ZuulTestCase):
config_file = 'zuul-connections-same-gerrit.conf'
tenant_config_file = 'config/zuul-connections-same-gerrit/main.yaml'
def test_multiple_gerrit_connections(self):
"Test multiple connections to the one gerrit"
A = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'A')
self.addEvent('review_gerrit', A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]['approvals']), 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['type'], 'Verified')
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
self.assertEqual(A.patchsets[-1]['approvals'][0]['by']['username'],
'jenkins')
B = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test2', B)
self.addEvent('review_gerrit', B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(B.patchsets[-1]['approvals']), 1)
self.assertEqual(B.patchsets[-1]['approvals'][0]['type'], 'Verified')
self.assertEqual(B.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertEqual(B.patchsets[-1]['approvals'][0]['by']['username'],
'civoter')
class TestSQLConnectionMysql(ZuulTestCase):
config_file = 'zuul-sql-driver-mysql.conf'
tenant_config_file = 'config/sql-driver/main.yaml'
expected_table_prefix = ''
def _sql_tables_created(self, connection_name):
connection = self.scheds.first.connections.connections[connection_name]
insp = sa.inspect(connection.engine)
table_prefix = connection.table_prefix
self.assertEqual(self.expected_table_prefix, table_prefix)
buildset_table = table_prefix + 'zuul_buildset'
build_table = table_prefix + 'zuul_build'
self.assertEqual(20, len(insp.get_columns(buildset_table)))
self.assertEqual(13, len(insp.get_columns(build_table)))
def test_sql_tables_created(self):
"Test the tables for storing results are created properly"
self._sql_tables_created('database')
def _sql_indexes_created(self, connection_name):
connection = self.scheds.first.connections.connections[connection_name]
insp = sa.inspect(connection.engine)
table_prefix = connection.table_prefix
self.assertEqual(self.expected_table_prefix, table_prefix)
buildset_table = table_prefix + 'zuul_buildset'
build_table = table_prefix + 'zuul_build'
indexes_buildset = insp.get_indexes(buildset_table)
indexes_build = insp.get_indexes(build_table)
self.assertEqual(4, len(indexes_buildset))
self.assertEqual(3, len(indexes_build))
# check if all indexes are prefixed
if table_prefix:
indexes = indexes_buildset + indexes_build
for index in indexes:
self.assertTrue(index['name'].startswith(table_prefix))
def test_sql_indexes_created(self):
"Test the indexes are created properly"
self._sql_indexes_created('database')
def test_sql_results(self):
"Test results are entered into an sql table"
def check_results():
# Grab the sa tables
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['check']
reporter = self.scheds.first.connections.getSqlReporter(
pipeline)
with self.scheds.first.connections.getSqlConnection().\
engine.connect() as conn:
result = conn.execute(
sa.sql.select(reporter.connection.zuul_buildset_table))
buildsets = result.fetchall()
self.assertEqual(5, len(buildsets))
buildset0 = buildsets[0]
buildset1 = buildsets[1]
buildset2 = buildsets[2]
buildset3 = buildsets[3]
buildset4 = buildsets[4]
self.assertEqual('check', buildset0.pipeline)
self.assertEqual('org/project', buildset0.project)
self.assertEqual(1, buildset0.change)
self.assertEqual('1', buildset0.patchset)
self.assertEqual('SUCCESS', buildset0.result)
self.assertEqual('Build succeeded.', buildset0.message)
self.assertEqual('tenant-one', buildset0.tenant)
self.assertEqual(
'https://review.example.com/%d' % buildset0.change,
buildset0.ref_url)
self.assertNotEqual(None, buildset0.event_id)
self.assertNotEqual(None, buildset0.event_timestamp)
buildset0_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset0.id
)
).fetchall()
# Check the first result, which should be the project-merge job
self.assertEqual(
'project-merge', buildset0_builds[0].job_name)
self.assertEqual("SUCCESS", buildset0_builds[0].result)
self.assertEqual(None, buildset0_builds[0].log_url)
self.assertEqual('check', buildset1.pipeline)
self.assertEqual('master', buildset1.branch)
self.assertEqual('org/project', buildset1.project)
self.assertEqual(2, buildset1.change)
self.assertEqual('1', buildset1.patchset)
self.assertEqual('FAILURE', buildset1.result)
self.assertEqual('Build failed.', buildset1.message)
buildset1_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset1.id
)
).fetchall()
# Check the second result, which should be the project-test1
# job which failed
self.assertEqual(
'project-test1', buildset1_builds[1].job_name)
self.assertEqual("FAILURE", buildset1_builds[1].result)
self.assertEqual(None, buildset1_builds[1].log_url)
buildset2_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset2.id
)
).fetchall()
# Check the first result, which should be the project-publish
# job
self.assertEqual('project-publish',
buildset2_builds[0].job_name)
self.assertEqual("SUCCESS", buildset2_builds[0].result)
buildset3_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset3.id
)
).fetchall()
self.assertEqual(
'project-test1', buildset3_builds[1].job_name)
self.assertEqual('NODE_FAILURE', buildset3_builds[1].result)
self.assertEqual(None, buildset3_builds[1].log_url)
self.assertIsNotNone(buildset3_builds[1].start_time)
self.assertIsNotNone(buildset3_builds[1].end_time)
self.assertGreaterEqual(
buildset3_builds[1].end_time,
buildset3_builds[1].start_time)
# Check the paused build result
buildset4_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset4.id
).order_by(reporter.connection.zuul_build_table.c.id)
).fetchall()
paused_build_events = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_event_table
).where(
reporter.connection.zuul_build_event_table.c.build_id
== buildset4_builds[0].id
)
).fetchall()
self.assertEqual(len(paused_build_events), 2)
pause_event = paused_build_events[0]
resume_event = paused_build_events[1]
self.assertEqual(
pause_event.event_type, "paused")
self.assertIsNotNone(pause_event.event_time)
self.assertIsNone(pause_event.description)
self.assertEqual(
resume_event.event_type, "resumed")
self.assertIsNotNone(resume_event.event_time)
self.assertIsNone(resume_event.description)
self.assertGreater(
resume_event.event_time, pause_event.event_time)
self.executor_server.hold_jobs_in_build = True
# Add a success result
self.log.debug("Adding success FakeChange")
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
# Add a failed result
self.log.debug("Adding failed FakeChange")
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test1', B)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
# Add a tag result
self.log.debug("Adding FakeTag event")
C = self.fake_gerrit.addFakeTag('org/project', 'master', 'foo')
self.fake_gerrit.addEvent(C)
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
# Add a node_failure result
self.fake_nodepool.pause()
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
req = self.fake_nodepool.getNodeRequests()[0]
self.fake_nodepool.addFailRequest(req)
self.fake_nodepool.unpause()
self.waitUntilSettled()
self.orderedRelease()
self.waitUntilSettled()
# We are pausing a job within this test, so holding the jobs in
# build and releasing them in order becomes difficult as the
# paused job will either be paused or waiting on the child jobs
# to start.
# As we are not interested in the order the jobs are running but
# only on the results in the database, simply deactivate
# hold_jobs_in_build.
self.executor_server.hold_jobs_in_build = False
# Add a paused build result
self.log.debug("Adding paused build result")
D = self.fake_gerrit.addFakeChange("org/project", "master", "D")
self.executor_server.returnData(
"project-merge", D, {"zuul": {"pause": True}})
self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
check_results()
def test_sql_results_retry_builds(self):
"Test that retry results are entered into an sql table correctly"
# Check the results
def check_results():
# Grab the sa tables
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
pipeline = tenant.layout.pipelines['check']
reporter = self.scheds.first.connections.getSqlReporter(
pipeline)
with self.scheds.first.connections.getSqlConnection().\
engine.connect() as conn:
result = conn.execute(
sa.sql.select(reporter.connection.zuul_buildset_table)
)
buildsets = result.fetchall()
self.assertEqual(1, len(buildsets))
buildset0 = buildsets[0]
self.assertEqual('check', buildset0.pipeline)
self.assertEqual('org/project', buildset0.project)
self.assertEqual(1, buildset0.change)
self.assertEqual('1', buildset0.patchset)
self.assertEqual('SUCCESS', buildset0.result)
self.assertEqual('Build succeeded.', buildset0.message)
self.assertEqual('tenant-one', buildset0.tenant)
self.assertEqual(
'https://review.example.com/%d' % buildset0.change,
buildset0.ref_url)
buildset0_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset0.id
)
).fetchall()
# Check the retry results
self.assertEqual('project-merge', buildset0_builds[0].job_name)
self.assertEqual('SUCCESS', buildset0_builds[0].result)
self.assertTrue(buildset0_builds[0].final)
self.assertEqual('project-test1', buildset0_builds[1].job_name)
self.assertEqual('RETRY', buildset0_builds[1].result)
self.assertFalse(buildset0_builds[1].final)
self.assertEqual('project-test2', buildset0_builds[2].job_name)
self.assertEqual('RETRY', buildset0_builds[2].result)
self.assertFalse(buildset0_builds[2].final)
self.assertEqual('project-test1', buildset0_builds[3].job_name)
self.assertEqual('SUCCESS', buildset0_builds[3].result)
self.assertTrue(buildset0_builds[3].final)
self.assertEqual('project-test2', buildset0_builds[4].job_name)
self.assertEqual('SUCCESS', buildset0_builds[4].result)
self.assertTrue(buildset0_builds[4].final)
self.executor_server.hold_jobs_in_build = True
# Add a retry result
self.log.debug("Adding retry FakeChange")
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Release the merge job (which is the dependency for the other jobs)
self.executor_server.release('.*-merge')
self.waitUntilSettled()
# Let both test jobs fail on the first run, so they are both run again.
self.builds[0].requeue = True
self.builds[1].requeue = True
self.orderedRelease()
self.waitUntilSettled()
check_results()
def test_sql_intermittent_failure(self):
# Test that if we fail to create the buildset at the start of
# a build, we still create it at the end.
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Delete the buildset
with self.scheds.first.connections.getSqlConnection().\
engine.connect() as conn:
result = conn.execute(sa.text(
f"delete from {self.expected_table_prefix}zuul_build;"))
result = conn.execute(sa.text(
f"delete from {self.expected_table_prefix}zuul_buildset;"))
result = conn.execute(sa.text("commit;"))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# Check the results
tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
pipeline = tenant.layout.pipelines['check']
reporter = self.scheds.first.connections.getSqlReporter(
pipeline)
with self.scheds.first.connections.getSqlConnection().\
engine.connect() as conn:
result = conn.execute(
sa.sql.select(reporter.connection.zuul_buildset_table)
)
buildsets = result.fetchall()
self.assertEqual(1, len(buildsets))
buildset0 = buildsets[0]
buildset0_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset0.id
)
).fetchall()
self.assertEqual(len(buildset0_builds), 5)
def test_sql_retry(self):
# Exercise the SQL retry code
reporter = self.scheds.first.sched.sql
reporter.test_buildset_retries = 0
reporter.test_build_retries = 0
reporter.retry_delay = 0
orig_createBuildset = reporter._createBuildset
orig_createBuild = reporter._createBuild
def _createBuildset(*args, **kw):
ret = orig_createBuildset(*args, **kw)
if reporter.test_buildset_retries == 0:
reporter.test_buildset_retries += 1
raise sa.exc.DBAPIError(None, None, None)
return ret
def _createBuild(*args, **kw):
ret = orig_createBuild(*args, **kw)
if reporter.test_build_retries == 0:
reporter.test_build_retries += 1
raise sa.exc.DBAPIError(None, None, None)
return ret
reporter._createBuildset = _createBuildset
reporter._createBuild = _createBuild
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Check the results
self.assertEqual(reporter.test_buildset_retries, 1)
self.assertEqual(reporter.test_build_retries, 1)
with self.scheds.first.connections.getSqlConnection().\
engine.connect() as conn:
result = conn.execute(
sa.sql.select(reporter.connection.zuul_buildset_table)
)
buildsets = result.fetchall()
self.assertEqual(1, len(buildsets))
buildset0 = buildsets[0]
buildset0_builds = conn.execute(
sa.sql.select(
reporter.connection.zuul_build_table
).where(
reporter.connection.zuul_build_table.c.buildset_id ==
buildset0.id
)
).fetchall()
self.assertEqual(len(buildset0_builds), 5)
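# The wrappers above follow a "fail exactly once, after the real call"
# shape: the insert succeeds before the injected DBAPIError is raised, so
# the retry path must tolerate an already-written row. A stand-alone
# sketch of the same idea (`flaky_once` is illustrative, not part of
# Zuul):
#
#     def flaky_once(func, state={'failed': False}):
#         def wrapper(*args, **kw):
#             ret = func(*args, **kw)
#             if not state['failed']:
#                 state['failed'] = True
#                 raise sa.exc.DBAPIError(None, None, None)
#             return ret
#         return wrapper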
class TestSQLConnectionPostgres(TestSQLConnectionMysql):
config_file = 'zuul-sql-driver-postgres.conf'
class TestSQLConnectionPrefixMysql(TestSQLConnectionMysql):
config_file = 'zuul-sql-driver-prefix-mysql.conf'
expected_table_prefix = 'prefix_'
class TestSQLConnectionPrefixPostgres(TestSQLConnectionMysql):
config_file = 'zuul-sql-driver-prefix-postgres.conf'
expected_table_prefix = 'prefix_'
class TestRequiredSQLConnection(BaseTestCase):
config = None
connections = None
def setUp(self):
super().setUp()
self.addCleanup(self.stop_connection)
def setup_connection(self, config_file):
self.config = configparser.ConfigParser()
self.config.read(os.path.join(FIXTURE_DIR, config_file))
# Setup databases
for section_name in self.config.sections():
con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
section_name, re.I)
if not con_match:
continue
if self.config.get(section_name, 'driver') == 'sql':
if (self.config.get(section_name, 'dburi') ==
'$MYSQL_FIXTURE_DBURI$'):
f = MySQLSchemaFixture()
self.useFixture(f)
self.config.set(section_name, 'dburi', f.dburi)
elif (self.config.get(section_name, 'dburi') ==
'$POSTGRESQL_FIXTURE_DBURI$'):
f = PostgresqlSchemaFixture()
self.useFixture(f)
self.config.set(section_name, 'dburi', f.dburi)
self.connections = zuul.lib.connections.ConnectionRegistry()
def stop_connection(self):
self.connections.stop()
class TestMultipleGerrits(ZuulTestCase):
config_file = 'zuul-connections-multiple-gerrits.conf'
tenant_config_file = 'config/zuul-connections-multiple-gerrits/main.yaml'
def test_multiple_project_separate_gerrits(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_another_gerrit.addFakeChange(
'org/project1', 'master', 'A')
self.fake_another_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertBuilds([dict(name='project-test2',
changes='1,1',
project='org/project1',
pipeline='another_check')])
# NOTE(jamielennox): the tests back the git repo for both connections
# onto the same git repo on the file system. If we just create another
# fake change the fake_review_gerrit will try to create another 1,1
# change and git will fail to create the ref. Arbitrarily set it to get
# around the problem.
self.fake_review_gerrit.change_number = 50
B = self.fake_review_gerrit.addFakeChange(
'org/project1', 'master', 'B')
self.fake_review_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertBuilds([
dict(name='project-test2',
changes='1,1',
project='org/project1',
pipeline='another_check'),
dict(name='project-test1',
changes='51,1',
project='org/project1',
pipeline='review_check'),
])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
def test_multiple_project_separate_gerrits_common_pipeline(self):
self.executor_server.hold_jobs_in_build = True
self.create_branch('org/project2', 'develop')
self.fake_another_gerrit.addEvent(
self.fake_another_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'develop'))
self.fake_another_gerrit.addEvent(
self.fake_review_gerrit.getFakeBranchCreatedEvent(
'org/project2', 'develop'))
self.waitUntilSettled()
A = self.fake_another_gerrit.addFakeChange(
'org/project2', 'master', 'A')
self.fake_another_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertBuilds([dict(name='project-test2',
changes='1,1',
project='org/project2',
pipeline='common_check')])
# NOTE(jamielennox): the tests back the git repo for both connections
# onto the same git repo on the file system. If we just create another
# fake change the fake_review_gerrit will try to create another 1,1
# change and git will fail to create the ref. Arbitrarily set it to get
# around the problem.
self.fake_review_gerrit.change_number = 50
B = self.fake_review_gerrit.addFakeChange(
'org/project2', 'develop', 'B')
self.fake_review_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertBuilds([
dict(name='project-test2',
changes='1,1',
project='org/project2',
pipeline='common_check'),
dict(name='project-test1',
changes='51,1',
project='org/project2',
pipeline='common_check'),
])
# NOTE(avass): This last change should not trigger any pipelines since
# common_check is configured to only run on master for another_gerrit
C = self.fake_another_gerrit.addFakeChange(
'org/project2', 'develop', 'C')
self.fake_another_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertBuilds([
dict(name='project-test2',
changes='1,1',
project='org/project2',
pipeline='common_check'),
dict(name='project-test1',
changes='51,1',
project='org/project2',
pipeline='common_check'),
])
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
class TestConnectionsMerger(ZuulTestCase):
config_file = 'zuul-connections-merger.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
def test_connections_merger(self):
"Test merger only configures source connections"
self.assertIn("gerrit", self.executor_server.connections.connections)
self.assertIn("github", self.executor_server.connections.connections)
self.assertNotIn("smtp", self.executor_server.connections.connections)
self.assertNotIn("sql", self.executor_server.connections.connections)
self.assertNotIn("timer", self.executor_server.connections.connections)
self.assertNotIn("zuul", self.executor_server.connections.connections)
class TestConnectionsCgit(ZuulTestCase):
config_file = 'zuul-connections-cgit.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
def test_cgit_web_url(self):
self.assertIn("gerrit", self.scheds.first.connections.connections)
conn = self.scheds.first.connections.connections['gerrit']
source = conn.source
proj = source.getProject('foo/bar')
url = conn._getWebUrl(proj, '1')
self.assertEqual(url,
'https://cgit.example.com/cgit/foo/bar/commit/?id=1')
class TestConnectionsGitweb(ZuulTestCase):
config_file = 'zuul-connections-gitweb.conf'
tenant_config_file = 'config/single-tenant/main.yaml'
def test_gitweb_url(self):
self.assertIn("gerrit", self.scheds.first.connections.connections)
conn = self.scheds.first.connections.connections['gerrit']
source = conn.source
proj = source.getProject('foo/bar')
url = conn._getWebUrl(proj, '1')
url_should_be = 'https://review.example.com/' \
'gitweb?p=foo/bar.git;a=commitdiff;h=1'
self.assertEqual(url, url_should_be)
class TestMQTTConnection(ZuulTestCase):
config_file = 'zuul-mqtt-driver.conf'
tenant_config_file = 'config/mqtt-driver/main.yaml'
def test_mqtt_reporter(self):
"Test the MQTT reporter"
# Add a success result
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
artifact = {'name': 'image',
'url': 'http://example.com/image',
'metadata': {
'type': 'container_image'
}}
self.executor_server.returnData(
"test", A, {
"zuul": {
"log_url": "some-log-url",
'artifacts': [artifact],
},
'foo': 'bar',
}
)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
success_event = self.mqtt_messages.pop()
start_event = self.mqtt_messages.pop()
self.assertEquals(start_event.get('topic'),
'tenant-one/zuul_start/check/org/project/master')
mqtt_payload = start_event['msg']
self.assertEquals(mqtt_payload['project'], 'org/project')
self.assertEqual(len(mqtt_payload['commit_id']), 40)
self.assertEquals(mqtt_payload['owner'], 'username')
self.assertEquals(mqtt_payload['branch'], 'master')
self.assertEquals(mqtt_payload['buildset']['result'], None)
self.assertEquals(mqtt_payload['buildset']['builds'][0]['job_name'],
'test')
self.assertNotIn('result', mqtt_payload['buildset']['builds'][0])
self.assertNotIn('artifacts', mqtt_payload['buildset']['builds'][0])
builds = mqtt_payload['buildset']['builds']
test_job = [b for b in builds if b['job_name'] == 'test'][0]
self.assertNotIn('returned_data', test_job)
self.assertEquals(success_event.get('topic'),
'tenant-one/zuul_buildset/check/org/project/master')
mqtt_payload = success_event['msg']
self.assertEquals(mqtt_payload['project'], 'org/project')
self.assertEquals(mqtt_payload['branch'], 'master')
self.assertEquals(mqtt_payload['buildset']['result'], 'SUCCESS')
builds = mqtt_payload['buildset']['builds']
test_job = [b for b in builds if b['job_name'] == 'test'][0]
dependent_test_job = [
b for b in builds if b['job_name'] == 'dependent-test'
][0]
self.assertEquals(test_job['job_name'], 'test')
self.assertEquals(test_job['result'], 'SUCCESS')
self.assertEquals(test_job['dependencies'], [])
self.assertEquals(test_job['artifacts'], [artifact])
self.assertEquals(test_job['log_url'], 'some-log-url/')
self.assertEquals(test_job['returned_data'], {'foo': 'bar'})
build_id = test_job["uuid"]
self.assertEquals(
test_job["web_url"],
"https://tenant.example.com/t/tenant-one/build/{}".format(
build_id
),
)
self.assertIn('execute_time', test_job)
self.assertIn('timestamp', mqtt_payload)
self.assertIn('enqueue_time', mqtt_payload)
self.assertIn('trigger_time', mqtt_payload)
self.assertIn('zuul_event_id', mqtt_payload)
self.assertIn('uuid', mqtt_payload)
self.assertEquals(dependent_test_job['dependencies'], ['test'])
def test_mqtt_paused_job(self):
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
        # Let the job be paused via the executor
self.executor_server.returnData("test", A, {"zuul": {"pause": True}})
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
success_event = self.mqtt_messages.pop()
mqtt_payload = success_event["msg"]
self.assertEquals(mqtt_payload["project"], "org/project")
builds = mqtt_payload["buildset"]["builds"]
paused_job = [b for b in builds if b["job_name"] == "test"][0]
self.assertEquals(len(paused_job["events"]), 2)
pause_event = paused_job["events"][0]
self.assertEquals(pause_event["event_type"], "paused")
self.assertGreater(
pause_event["event_time"], paused_job["start_time"])
self.assertLess(pause_event["event_time"], paused_job["end_time"])
resume_event = paused_job["events"][1]
self.assertEquals(resume_event["event_type"], "resumed")
self.assertGreater(
resume_event["event_time"], paused_job["start_time"])
self.assertLess(resume_event["event_time"], paused_job["end_time"])
self.assertGreater(
resume_event["event_time"], pause_event["event_time"])
def test_mqtt_invalid_topic(self):
in_repo_conf = textwrap.dedent(
"""
- pipeline:
name: test-pipeline
manager: independent
trigger:
gerrit:
- event: comment-added
start:
mqtt:
topic: "{bad}/{topic}"
""")
file_dict = {'zuul.d/test.yaml': in_repo_conf}
A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A',
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertIn("topic component 'bad' is invalid", A.messages[0],
"A should report a syntax error")
class TestElasticsearchConnection(AnsibleZuulTestCase):
config_file = 'zuul-elastic-driver.conf'
tenant_config_file = 'config/elasticsearch-driver/main.yaml'
# These tests are storing the reported index on the fake
# elasticsearch backend which is a different instance for each
# scheduler. Thus, depending on which scheduler reports the
    # item, the assertions in these tests might pass or fail.
scheduler_count = 1
def _getSecrets(self, job, pbtype):
secrets = []
build = self.getJobFromHistory(job)
for pb in getattr(build.jobdir, pbtype):
if pb.secrets_content:
secrets.append(
yamlutil.ansible_unsafe_load(pb.secrets_content))
else:
secrets.append({})
return secrets
def test_elastic_reporter(self):
"Test the Elasticsearch reporter"
# Add a success result
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
indexed_docs = self.scheds.first.connections.connections[
'elasticsearch'].source_it
index = self.scheds.first.connections.connections[
'elasticsearch'].index
self.assertEqual(len(indexed_docs), 2)
self.assertEqual(index, ('zuul-index.tenant-one-%s' %
time.strftime("%Y.%m.%d")))
buildset_doc = [doc for doc in indexed_docs if
doc['build_type'] == 'buildset'][0]
self.assertEqual(buildset_doc['tenant'], 'tenant-one')
self.assertEqual(buildset_doc['pipeline'], 'check')
self.assertEqual(buildset_doc['result'], 'SUCCESS')
build_doc = [doc for doc in indexed_docs if
doc['build_type'] == 'build'][0]
self.assertEqual(build_doc['buildset_uuid'], buildset_doc['uuid'])
self.assertEqual(build_doc['result'], 'SUCCESS')
self.assertEqual(build_doc['job_name'], 'test')
self.assertEqual(build_doc['tenant'], 'tenant-one')
self.assertEqual(build_doc['pipeline'], 'check')
self.assertIn('job_vars', build_doc)
self.assertDictEqual(
build_doc['job_vars'], {'bar': 'foo', 'bar2': 'foo2'})
self.assertIn('job_returned_vars', build_doc)
self.assertDictEqual(
build_doc['job_returned_vars'], {'foo': 'bar'})
self.assertEqual(self.history[0].uuid, build_doc['uuid'])
self.assertIn('duration', build_doc)
self.assertTrue(type(build_doc['duration']) is int)
doc_gen = self.scheds.first.connections.connections[
'elasticsearch'].gen(indexed_docs, index)
self.assertIsInstance(doc_gen, types.GeneratorType)
self.assertTrue('@timestamp' in list(doc_gen)[0]['_source'])
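    # For illustration, the assertions above only pin down the shape of
    # the generated bulk actions; hypothetically something like:
    #
    #     def gen(docs, index):
    #         for doc in docs:
    #             doc['@timestamp'] = doc['timestamp']
    #             yield {'_index': index, '_source': doc}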
def test_elasticsearch_secret_leak(self):
expected_secret = [{
'test_secret': {
'username': 'test-username',
'password': 'test-password'
}
}]
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
indexed_docs = self.scheds.first.connections.connections[
'elasticsearch'].source_it
build_doc = [doc for doc in indexed_docs if
doc['build_type'] == 'build'][0]
        # Ensure that the job includes the secret
self.assertEqual(
self._getSecrets('test', 'playbooks'),
expected_secret)
# Check if there is a secret leak
self.assertFalse('test_secret' in build_doc['job_vars'])
class TestConnectionsBranchCache(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
tenant_config_file = 'config/multi-driver/main.yaml'
def test_branch_cache_fetch_error(self):
# Test that a fetch error stores the right value in the branch cache
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
connection = self.scheds.first.connections.connections['github']
source = connection.source
project = source.getProject('org/project1')
# Patch the fetch method so that it fails
orig = connection._fetchProjectBranches
def fail(*args, **kw):
raise Exception("Unable to fetch branches")
self.patch(connection, '_fetchProjectBranches', fail)
# Clear the branch cache so we start with nothing
connection.clearBranchCache()
# Verify that we raise an error when we try to get branches
# for a missing project
self.assertRaises(
Exception,
lambda: connection.getProjectBranches(project, tenant))
        # This should happen again (i.e., we should retry since we don't
        # have an entry)
self.assertRaises(
Exception,
lambda: connection.getProjectBranches(project, tenant))
# Restore the normal fetch method and verify that the cache
# works as expected
self.patch(connection, '_fetchProjectBranches', orig)
branches = connection.getProjectBranches(project, tenant)
self.assertEqual(['master'], branches)
# Ensure that the empty list of branches is valid and is not
# seen as an error
newproject = source.getProject('org/newproject')
connection.addProject(newproject)
tpc = zuul.model.TenantProjectConfig(newproject)
tpc.exclude_unprotected_branches = True
tenant.addUntrustedProject(tpc)
branches = connection.getProjectBranches(newproject, tenant)
self.assertEqual([], branches)
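# The distinction exercised above -- a fetch failure must not be cached,
# while an empty branch list is a perfectly valid cached value -- can be
# sketched with a sentinel (hypothetical, not Zuul's branch cache):
#
#     _FETCH_FAILED = object()
#
#     def cached_branches(cache, project, fetch):
#         value = cache.get(project, _FETCH_FAILED)
#         if value is _FETCH_FAILED:
#             value = fetch(project)   # may raise; nothing is cached then
#             cache[project] = value   # [] is cached and trusted
#         return value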
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_connection.py
|
test_connection.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import os
import tempfile
import time
from tests.base import BaseTestCase
from zuul.executor.server import DiskAccountant
class FakeExecutor(object):
def __init__(self):
self.stopped_jobs = set()
self.used = {}
def stopJobByJobDir(self, jobdir):
self.stopped_jobs.add(jobdir)
def usage(self, dirname, used):
self.used[dirname] = used
class TestDiskAccountant(BaseTestCase):
def setUp(self):
super(TestDiskAccountant, self).setUp()
self.useFixture(fixtures.NestedTempfile())
def test_disk_accountant(self):
jobs_dir = tempfile.mkdtemp(
dir=os.environ.get("ZUUL_TEST_ROOT", None))
cache_dir = tempfile.mkdtemp()
executor_server = FakeExecutor()
da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
cache_dir)
da.start()
try:
jobdir = os.path.join(jobs_dir, '012345')
os.mkdir(jobdir)
testfile = os.path.join(jobdir, 'tfile')
with open(testfile, 'w') as tf:
tf.write(2 * 1024 * 1024 * '.')
tf.flush()
os.fsync(tf.fileno())
            # da should catch the over-limit dir within 5 seconds
for i in range(0, 50):
if jobdir in executor_server.stopped_jobs:
break
time.sleep(0.1)
self.assertEqual(set([jobdir]), executor_server.stopped_jobs)
finally:
da.stop()
self.assertFalse(da.thread.is_alive())
def test_disk_accountant_no_limit(self):
jobs_dir = tempfile.mkdtemp(
dir=os.environ.get("ZUUL_TEST_ROOT", None))
cache_dir = tempfile.mkdtemp()
executor_server = FakeExecutor()
da = DiskAccountant(jobs_dir, -1, executor_server.stopJobByJobDir,
cache_dir)
da.start()
self.assertFalse(da.running)
da.stop()
self.assertFalse(da.running)
def test_cache_hard_links(self):
root_dir = tempfile.mkdtemp(
dir=os.environ.get("ZUUL_TEST_ROOT", None))
jobs_dir = os.path.join(root_dir, 'jobs')
os.mkdir(jobs_dir)
cache_dir = os.path.join(root_dir, 'cache')
os.mkdir(cache_dir)
executor_server = FakeExecutor()
da = DiskAccountant(jobs_dir, 1, executor_server.stopJobByJobDir,
cache_dir, executor_server.usage)
da.start()
self.addCleanup(da.stop)
jobdir = os.path.join(jobs_dir, '012345')
os.mkdir(jobdir)
repo_dir = os.path.join(cache_dir, 'a.repo')
os.mkdir(repo_dir)
source_file = os.path.join(repo_dir, 'big_file')
with open(source_file, 'w') as tf:
tf.write(2 * 1024 * 1024 * '.')
dest_link = os.path.join(jobdir, 'big_file')
os.link(source_file, dest_link)
        # da should _not_ count this file. Wait up to 5s for it to be noticed
for i in range(0, 50):
if jobdir in executor_server.used:
break
time.sleep(0.1)
self.assertEqual(set(), executor_server.stopped_jobs)
self.assertIn(jobdir, executor_server.used)
self.assertTrue(executor_server.used[jobdir] <= 1)
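# The hard-link exemption verified above amounts to skipping any file with
# more than one link during the usage walk. A du-like sketch of the idea
# (illustrative; not necessarily how DiskAccountant measures usage):
#
#     import os
#
#     def usage_bytes(root):
#         total = 0
#         for dirpath, _, files in os.walk(root):
#             for name in files:
#                 st = os.stat(os.path.join(dirpath, name))
#                 if st.st_nlink <= 1:
#                     total += st.st_size
#         return total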
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_disk_accountant.py
|
test_disk_accountant.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tests.base import (
ZuulTestCase,
simple_layout,
skipIfMultiScheduler,
)
URL_FORMATS = [
'{baseurl}/{change_no}',
'{baseurl}/#/c/{change_no}',
'{baseurl}/c/{project}/+/{change_no}/',
'{change_id}',
]
class TestGerritCRD(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def _test_crd_gate(self, url_fmt):
"Test cross-repo dependencies"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
AM2 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'AM2')
AM1 = self.fake_gerrit.addFakeChange('org/project1', 'master', 'AM1')
AM2.setMerged()
AM1.setMerged()
BM2 = self.fake_gerrit.addFakeChange('org/project2', 'master', 'BM2')
BM1 = self.fake_gerrit.addFakeChange('org/project2', 'master', 'BM1')
BM2.setMerged()
BM1.setMerged()
# A -> AM1 -> AM2
# B -> BM1 -> BM2
# A Depends-On: B
# M2 is here to make sure it is never queried. If it is, it
# means zuul is walking down the entire history of merged
# changes.
B.setDependsOn(BM1, 1)
BM1.setDependsOn(BM2, 1)
A.setDependsOn(AM1, 1)
AM1.setDependsOn(AM2, 1)
url = url_fmt.format(baseurl=B.gerrit.baseurl.rstrip('/'),
project=B.project,
change_no=B.number,
change_id=B.data['id'])
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, url)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
for connection in self.scheds.first.connections.connections.values():
connection.maintainCache([], max_age=0)
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(AM2.queried, 0)
self.assertEqual(BM2.queried, 0)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 1,1')
    # Different versions of Gerrit have used 3 different URL schemata for
    # changes. Repeat the simple test on each of the 3 to ensure they can
    # be parsed; the other tests just use the default URL schema provided
    # in FakeGerritChange.data['url'].
    # This list also includes the legacy change id.
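    # For a hypothetical change 12 in org/project on
    # https://review.example.com, the templates render roughly as:
    #
    #     https://review.example.com/12
    #     https://review.example.com/#/c/12
    #     https://review.example.com/c/org/project/+/12/
    #
    # plus the bare legacy Change-Id (e.g. 'I0123456789...').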
def test_crd_gate_url_schema0(self):
self._test_crd_gate(URL_FORMATS[0])
def test_crd_gate_url_schema1(self):
self._test_crd_gate(URL_FORMATS[1])
def test_crd_gate_url_schema2(self):
self._test_crd_gate(URL_FORMATS[2])
def test_crd_gate_legacy_id(self):
self._test_crd_gate(URL_FORMATS[3])
def test_crd_gate_triangle(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
B.addApproval('Approved', 1)
# C-->B
# \ /
# v
# A
# C Depends-On: A
C.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
C.subject, A.data['url'])
# B Depends-On: A
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['url'])
# C git-depends on B
C.setDependsOn(B, 1)
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(self.history[-1].changes, '1,1 2,1 3,1')
def test_crd_branch(self):
"Test cross-repo dependencies in multiple branches"
self.create_branch('org/project2', 'mp')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C1 = self.fake_gerrit.addFakeChange('org/project2', 'mp', 'C1')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C1.addApproval('Code-Review', 2)
# A Depends-On: B+C1
A.data['commitMessage'] = '%s\n\nDepends-On: %s\nDepends-On: %s\n' % (
A.subject, B.data['url'], C1.data['url'])
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
C1.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C1.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C1.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 3,1 1,1')
def test_crd_multiline(self):
"Test multiple depends-on lines in commit"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
# A Depends-On: B+C
A.data['commitMessage'] = '%s\n\nDepends-On: %s\nDepends-On: %s\n' % (
A.subject, B.data['url'], C.data['url'])
self.executor_server.hold_jobs_in_build = True
B.addApproval('Approved', 1)
C.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
self.assertEqual(C.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 3,1 1,1')
def test_crd_unshared_gate(self):
"Test cross-repo dependencies in unshared gate queues"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
        # A and B do not share a queue; make sure that A is unable to
        # enqueue B (and therefore, A is unable to be enqueued).
B.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(len(self.history), 0)
# Enqueue and merge B alone.
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
# Now that B is merged, A should be able to be enqueued and
# merged.
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 3)
def _test_crd_gate_reverse(self, url_fmt):
"Test reverse cross-repo dependencies"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
# A Depends-On: B
url = url_fmt.format(baseurl=B.gerrit.baseurl.rstrip('/'),
project=B.project,
change_no=B.number)
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, url)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.executor_server.hold_jobs_in_build = True
A.addApproval('Approved', 1)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.reported, 2)
changes = self.getJobFromHistory(
'project-merge', 'org/project1').changes
self.assertEqual(changes, '2,1 1,1')
def test_crd_gate_reverse_schema0(self):
self._test_crd_gate_reverse(URL_FORMATS[0])
def test_crd_gate_reverse_schema1(self):
self._test_crd_gate_reverse(URL_FORMATS[1])
def test_crd_gate_reverse_schema2(self):
self._test_crd_gate_reverse(URL_FORMATS[2])
def test_crd_cycle(self):
"Test cross-repo dependency cycles"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
B.addApproval('Approved', 1)
# A -> B -> A (via commit-depends)
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['url'])
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
def test_crd_gate_unknown(self):
"Test unknown projects in dependent pipeline"
self.init_repo("org/unknown", tag='init')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/unknown', 'master', 'B')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
B.addApproval('Approved', 1)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
        # Unknown projects cannot share a queue with any other project
        # since they have no jobs in common (indeed, no jobs at all).
        # Changes which depend on unknown-project changes should not be
        # processed in a dependent pipeline.
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(len(self.history), 0)
        # Simulate change B being gated outside this layout. Set the
        # change merged before submitting the event so that when the
        # event triggers a gerrit query to update the change, we get
        # the information that it was merged.
B.setMerged()
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(len(self.history), 0)
# Now that B is merged, A should be able to be enqueued and
# merged.
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 3)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 0)
def test_crd_check(self):
"Test cross-repo dependencies in independent pipelines"
self.executor_server.hold_jobs_in_build = True
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
def test_crd_check_git_depends(self):
"Test single-repo dependencies in independent pipelines"
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
# Add two git-dependent changes and make sure they both report
# success.
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(self.history[0].changes, '1,1')
self.assertEqual(self.history[-1].changes, '1,1 2,1')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
self.assertIn('Build succeeded', B.messages[0])
def test_crd_check_duplicate(self):
"Test duplicate check in independent pipelines"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes...
B.setDependsOn(A, 1)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...make sure the live one is not duplicated...
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 2)
# ...but the non-live one is able to be.
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(check_pipeline.getAllItems()), 3)
# Release jobs in order to avoid races with change A jobs
# finishing before change B jobs.
self.orderedRelease()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(self.history[0].changes, '1,1 2,1')
self.assertEqual(self.history[1].changes, '1,1')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
self.assertIn('Build succeeded', B.messages[0])
def _test_crd_check_reconfiguration(self, project1, project2):
"Test cross-repo dependencies re-enqueued in independent pipelines"
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange(project1, 'master', 'A')
B = self.fake_gerrit.addFakeChange(project2, 'master', 'B')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Make sure the items still share a change queue, and the
# first one is not live.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
for item in queue.queue:
self.assertEqual(item.queue, first_item.queue)
self.assertFalse(first_item.live)
self.assertTrue(queue.queue[1].live)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@skipIfMultiScheduler()
def test_crd_check_reconfiguration(self):
self._test_crd_check_reconfiguration('org/project1', 'org/project2')
@skipIfMultiScheduler()
def test_crd_undefined_project(self):
"""Test that undefined projects in dependencies are handled for
independent pipelines"""
        # This is a hack for the fake gerrit, as it implies repo
        # creation upon the creation of any change.
self.init_repo("org/unknown", tag='init')
self._test_crd_check_reconfiguration('org/project1', 'org/unknown')
@simple_layout('layouts/ignore-dependencies.yaml')
def test_crd_check_ignore_dependencies(self):
"Test cross-repo dependencies can be ignored"
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
# C git-depends on B
C.setDependsOn(B, 1)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Make sure none of the items share a change queue, and all
# are live.
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(len(check_pipeline.queues), 3)
self.assertEqual(len(check_pipeline.getAllItems()), 3)
for item in check_pipeline.getAllItems():
self.assertTrue(item.live)
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 1)
self.assertEqual(C.reported, 1)
# Each job should have tested exactly one change
for job in self.history:
self.assertEqual(len(job.changes.split()), 1)
def test_crd_check_triangle(self):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project2', 'master', 'C')
# C-->B
# \ /
# v
# A
# C Depends-On: A
C.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
C.subject, A.data['url'])
# B Depends-On: A
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['url'])
# C git-depends on B
C.setDependsOn(B, 1)
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(C.reported, 1)
self.assertEqual(self.history[0].changes, '1,1 2,1 3,1')
@simple_layout('layouts/three-projects.yaml')
def test_crd_check_transitive(self):
"Test transitive cross-repo dependencies"
# Specifically, if A -> B -> C, and C gets a new patchset and
# A gets a new patchset, ensure the test of A,2 includes B,1
# and C,2 (not C,1 which would indicate stale data in the
# cache for B).
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
# B Depends-On: C
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, C.data['url'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,1 2,1 1,1')
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,1 2,1')
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,1')
C.addPatchset()
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,2')
A.addPatchset()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(self.history[-1].changes, '3,2 2,1 1,2')
def test_crd_check_unknown(self):
"Test unknown projects in independent pipeline"
self.init_repo("org/unknown", tag='init')
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/unknown', 'master', 'D')
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
# Make sure zuul has seen an event on B.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 0)
def test_crd_cycle_join(self):
"Test an updated change creates a cycle"
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(A.reported, 1)
# Create B->A
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
B.subject, A.data['url'])
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
# Dep is there so zuul should have reported on B
self.assertEqual(B.reported, 1)
# Update A to add A->B (a cycle).
A.addPatchset()
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
# Dependency cycle injected so zuul should have reported again on A
self.assertEqual(A.reported, 2)
# Now if we update B to remove the depends-on, everything
# should be okay. B; A->B
B.addPatchset()
B.data['commitMessage'] = '%s\n' % (B.subject,)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
# Cycle was removed so now zuul should have reported again on A
self.assertEqual(A.reported, 3)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
self.waitUntilSettled()
self.assertEqual(B.reported, 2)
class TestGerritCRDAltBaseUrl(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
def setup_config(self, config_file: str):
config = super(TestGerritCRDAltBaseUrl, self).setup_config(config_file)
self.baseurl = 'https://review.example.com/prefixed_gerrit_ui/'
config.set('connection gerrit', 'baseurl', self.baseurl)
return config
def test_basic_crd_check(self):
"Test basic cross-repo dependencies with an alternate gerrit baseurl"
self.executor_server.hold_jobs_in_build = True
self.hold_jobs_in_queue = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
self.assertEqual(B.data['url'], '%s/2' % self.baseurl.rstrip('/'))
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.data['url'])
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertTrue(self.builds[0].hasChanges(A, B))
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'NEW')
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(A.reported, 1)
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
class TestGerritCRDWeb(TestGerritCRD):
config_file = 'zuul-gerrit-web.conf'
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_gerrit_crd.py
|
test_gerrit_crd.py
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daemon
import logging
import os
import sys
import extras
import fixtures
import testtools
from tests.base import iterate_timeout
# As of python-daemon 1.6 it doesn't bundle pidlockfile anymore;
# instead it depends on lockfile-0.9.1, which uses pidfile.
pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile'])
def daemon_test(pidfile, flagfile):
pid = pid_file_module.TimeoutPIDLockFile(pidfile, 10)
with daemon.DaemonContext(pidfile=pid):
for x in iterate_timeout(30, "flagfile to be removed"):
if not os.path.exists(flagfile):
break
sys.exit(0)
class TestDaemon(testtools.TestCase):
log = logging.getLogger("zuul.test.daemon")
def setUp(self):
super(TestDaemon, self).setUp()
self.test_root = self.useFixture(fixtures.TempDir(
rootdir=os.environ.get("ZUUL_TEST_ROOT"))).path
def test_daemon(self):
pidfile = os.path.join(self.test_root, "daemon.pid")
flagfile = os.path.join(self.test_root, "daemon.flag")
open(flagfile, 'w').close()
if not os.fork():
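            # In the child process: clear the cleanups inherited from the
            # parent test so they do not run twice, then daemonize and
            # wait for the flag file to be removed.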
self._cleanups = []
daemon_test(pidfile, flagfile)
for x in iterate_timeout(30, "daemon to start"):
if os.path.exists(pidfile):
break
os.unlink(flagfile)
for x in iterate_timeout(30, "daemon to stop"):
if not os.path.exists(pidfile):
break
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/unit/test_daemon.py
|
test_daemon.py
|
# Copyright 2020 Red Hat, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import jwt
import os
import subprocess
import tempfile
import textwrap
from zuul.lib import yamlutil
from tests.base import iterate_timeout
from tests.base import AnsibleZuulTestCase
from tests.unit.test_web import BaseTestWeb
class TestSmokeZuulClient(BaseTestWeb):
def test_is_installed(self):
"""Test that the CLI is installed"""
test_version = subprocess.check_output(
['zuul-client', '--version'],
stderr=subprocess.STDOUT)
self.assertTrue(b'Zuul-client version:' in test_version)
class TestZuulClientEncrypt(BaseTestWeb):
"""Test using zuul-client to encrypt secrets"""
tenant_config_file = 'config/secrets/main.yaml'
config_file = 'zuul-admin-web.conf'
secret = {'password': 'zuul-client'}
large_secret = {'key': (('a' * 79 + '\n') * 50)[:-1]}
def setUp(self):
super(TestZuulClientEncrypt, self).setUp()
self.executor_server.hold_jobs_in_build = False
def _getSecrets(self, job, pbtype):
secrets = []
build = self.getJobFromHistory(job)
for pb in getattr(build.jobdir, pbtype):
if pb.secrets_content:
secrets.append(
yamlutil.ansible_unsafe_load(pb.secrets_content))
else:
secrets.append({})
return secrets
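    # Background for the "large secret" case below: project secrets are
    # encrypted with the project's RSA public key, which can only encrypt
    # a limited payload per operation, so (as these tests assume) long
    # values are split into multiple encrypted chunks that Zuul
    # reassembles. The ~4KB value above forces that chunked path.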
def test_encrypt_large_secret(self):
"""Test that we can use zuul-client to encrypt a large secret"""
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'encrypt', '--tenant', 'tenant-one', '--project', 'org/project2',
'--secret-name', 'my_secret', '--field-name', 'key'],
stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write(
str.encode(self.large_secret['key'])
)
output, error = p.communicate()
p.stdin.close()
self._test_encrypt(self.large_secret, output, error)
def test_encrypt(self):
"""Test that we can use zuul-client to generate a project secret"""
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'encrypt', '--tenant', 'tenant-one', '--project', 'org/project2',
'--secret-name', 'my_secret', '--field-name', 'password'],
stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write(
str.encode(self.secret['password'])
)
output, error = p.communicate()
p.stdin.close()
self._test_encrypt(self.secret, output, error)
def test_encrypt_outfile(self):
"""Test that we can use zuul-client to generate a project secret to a
file"""
outfile = tempfile.NamedTemporaryFile(delete=False)
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'encrypt', '--tenant', 'tenant-one', '--project', 'org/project2',
'--secret-name', 'my_secret', '--field-name', 'password',
'--outfile', outfile.name],
stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write(
str.encode(self.secret['password'])
)
_, error = p.communicate()
p.stdin.close()
output = outfile.read()
self._test_encrypt(self.secret, output, error)
def test_encrypt_infile(self):
"""Test that we can use zuul-client to generate a project secret from
a file"""
infile = tempfile.NamedTemporaryFile(delete=False)
infile.write(
str.encode(self.secret['password'])
)
infile.close()
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'encrypt', '--tenant', 'tenant-one', '--project', 'org/project2',
'--secret-name', 'my_secret', '--field-name', 'password',
'--infile', infile.name],
stdout=subprocess.PIPE)
output, error = p.communicate()
os.unlink(infile.name)
self._test_encrypt(self.secret, output, error)
def _test_encrypt(self, _secret, output, error):
self.assertEqual(None, error, error)
self.assertTrue(b'- secret:' in output, output.decode())
new_repo_conf = output.decode()
new_repo_conf += textwrap.dedent(
"""
- job:
parent: base
name: project2-secret
run: playbooks/secret.yaml
secrets:
- my_secret
- project:
check:
jobs:
- project2-secret
gate:
jobs:
- noop
"""
)
file_dict = {'zuul.yaml': new_repo_conf}
A = self.fake_gerrit.addFakeChange('org/project2', 'master',
'Add secret',
files=file_dict)
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.fake_gerrit.addEvent(A.getChangeMergedEvent())
self.waitUntilSettled()
# check that the secret is used from there on
B = self.fake_gerrit.addFakeChange('org/project2', 'master',
'test secret',
files={'newfile': 'xxx'})
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(B.reported, 1, "B should report success")
self.assertHistory([
dict(name='project2-secret', result='SUCCESS', changes='2,1'),
])
secrets = self._getSecrets('project2-secret', 'playbooks')
self.assertEqual(
secrets,
[{'my_secret': _secret}],
secrets)
class TestZuulClientAdmin(BaseTestWeb):
"""Test the admin commands of zuul-client"""
config_file = 'zuul-admin-web.conf'
def test_autohold(self):
"""Test that autohold can be set with the Web client"""
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url, '--auth-token', token, '-v',
'autohold', '--reason', 'some reason',
'--tenant', 'tenant-one', '--project', 'org/project',
'--job', 'project-test2', '--count', '1'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output)
# Check result
resp = self.get_url(
"api/tenant/tenant-one/autohold")
self.assertEqual(200, resp.status_code, resp.text)
autohold_requests = resp.json()
self.assertNotEqual([], autohold_requests)
self.assertEqual(1, len(autohold_requests))
request = autohold_requests[0]
self.assertEqual('tenant-one', request['tenant'])
self.assertIn('org/project', request['project'])
self.assertEqual('project-test2', request['job'])
self.assertEqual(".*", request['ref_filter'])
self.assertEqual("some reason", request['reason'])
self.assertEqual(1, request['max_count'])
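    # For illustration, the token used above is a standard HS256 JWT and
    # could be verified with (hypothetical usage, matching the claims set
    # built in the test):
    #
    #     jwt.decode(token, key='NoDanaOnlyZuul', algorithms=['HS256'],
    #                audience='zuul.example.com')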
def test_enqueue(self):
"""Test that the Web client can enqueue a change"""
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.addApproval('Code-Review', 2)
A.addApproval('Approved', 1)
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url, '--auth-token', token, '-v',
'enqueue', '--tenant', 'tenant-one',
'--project', 'org/project',
'--pipeline', 'gate', '--change', '1,1'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output)
self.waitUntilSettled()
# Check the build history for our enqueued build
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
# project-merge, project-test1, project-test2 in SUCCESS
self.assertEqual(self.countJobResults(self.history, 'SUCCESS'), 3)
def test_enqueue_ref(self):
"""Test that the Web client can enqueue a ref"""
self.executor_server.hold_jobs_in_build = True
p = "review.example.com/org/project"
upstream = self.getUpstreamRepos([p])
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
A.setMerged()
A_commit = str(upstream[p].commit('master'))
self.log.debug("A commit: %s" % A_commit)
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url, '--auth-token', token, '-v',
'enqueue-ref', '--tenant', 'tenant-one',
'--project', 'org/project',
'--pipeline', 'post', '--ref', 'master',
'--oldrev', '90f173846e3af9154517b88543ffbd1691f31366',
'--newrev', A_commit],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output)
self.waitUntilSettled()
# Check the build history for our enqueued build
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(self.countJobResults(self.history, 'SUCCESS'), 1)
def test_dequeue(self):
"""Test that the Web client can dequeue a change"""
self.executor_server.hold_jobs_in_build = True
start_builds = len(self.builds)
self.create_branch('org/project', 'stable')
self.fake_gerrit.addEvent(
self.fake_gerrit.getFakeBranchCreatedEvent(
'org/project', 'stable'))
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
for _ in iterate_timeout(30, 'Wait for a build on hold'):
if len(self.builds) > start_builds:
break
self.waitUntilSettled()
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url, '--auth-token', token, '-v',
'dequeue', '--tenant', 'tenant-one', '--project', 'org/project',
'--pipeline', 'periodic', '--ref', 'refs/heads/stable'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output)
self.waitUntilSettled()
self.commitConfigUpdate('common-config',
'layouts/no-timer.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
def test_promote(self):
"Test that the Web client can promote a change"
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
A.addApproval('Code-Review', 2)
B.addApproval('Code-Review', 2)
C.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
self.waitUntilSettled()
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
enqueue_times = {}
for item in items:
enqueue_times[str(item.change)] = item.enqueue_time
# Promote B and C using the cli
authz = {'iss': 'zuul_operator',
'aud': 'zuul.example.com',
'sub': 'testuser',
'zuul': {
'admin': ['tenant-one', ]
},
'exp': int(time.time()) + 3600}
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url, '--auth-token', token, '-v',
'promote', '--tenant', 'tenant-one',
'--pipeline', 'gate', '--changes', '2,1', '3,1'],
stdout=subprocess.PIPE)
output = p.communicate()
self.assertEqual(p.returncode, 0, output)
self.waitUntilSettled()
# ensure that enqueue times are durable
items = tenant.layout.pipelines['gate'].getAllItems()
for item in items:
self.assertEqual(
enqueue_times[str(item.change)], item.enqueue_time)
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.executor_server.release('.*-merge')
self.waitUntilSettled()
self.assertEqual(len(self.builds), 6)
self.assertEqual(self.builds[0].name, 'project-test1')
self.assertEqual(self.builds[1].name, 'project-test2')
self.assertEqual(self.builds[2].name, 'project-test1')
self.assertEqual(self.builds[3].name, 'project-test2')
self.assertEqual(self.builds[4].name, 'project-test1')
self.assertEqual(self.builds[5].name, 'project-test2')
self.assertTrue(self.builds[0].hasChanges(B))
self.assertFalse(self.builds[0].hasChanges(A))
self.assertFalse(self.builds[0].hasChanges(C))
self.assertTrue(self.builds[2].hasChanges(B))
self.assertTrue(self.builds[2].hasChanges(C))
self.assertFalse(self.builds[2].hasChanges(A))
self.assertTrue(self.builds[4].hasChanges(B))
self.assertTrue(self.builds[4].hasChanges(C))
self.assertTrue(self.builds[4].hasChanges(A))
self.executor_server.release()
self.waitUntilSettled()
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
self.assertEqual(B.data['status'], 'MERGED')
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
class TestZuulClientQueryData(BaseTestWeb):
"""Test that zuul-client can fetch builds"""
config_file = 'zuul-sql-driver-mysql.conf'
tenant_config_file = 'config/sql-driver/main.yaml'
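    # zuul-client renders query results as an ASCII "pretty table",
    # roughly (column names here are illustrative, not a fixed schema):
    #   +------+-----+   <- line 0: top border
    #   | ID   | Job |   <- line 1: headers
    #   +------+-----+   <- line 2: separator
    #   | ...  | ... |   <- body rows
    #   +------+-----+   <- bottom border, followed by a trailing newline
    # The helpers below depend on that layout.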
def _split_pretty_table(self, output):
lines = output.decode().split('\n')
headers = [x.strip() for x in lines[1].split('|') if x != '']
        # Skip the borders and the header separator; rows 3:-2 are the body
return [dict(zip(headers,
[x.strip() for x in l.split('|') if x != '']))
for l in lines[3:-2]]
def _split_line_output(self, output):
lines = output.decode().split('\n')
info = {}
for l in lines:
if l.startswith('==='):
continue
try:
key, value = l.split(':', 1)
info[key] = value.strip()
except ValueError:
continue
return info
def setUp(self):
super(TestZuulClientQueryData, self).setUp()
self.add_base_changes()
def add_base_changes(self):
# change on org/project will run 5 jobs in check
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
# fail project-merge on PS1; its 2 dependent jobs will be skipped
self.executor_server.failJob('project-merge', B)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
B.addPatchset()
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(2))
# change on org/project1 will run 3 jobs in check
self.waitUntilSettled()
# changes on both projects will run 3 jobs in gate each
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
B.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
self.waitUntilSettled()
class TestZuulClientBuilds(TestZuulClientQueryData,
AnsibleZuulTestCase):
"""Test that zuul-client can fetch builds"""
def test_get_builds(self):
"""Test querying builds"""
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', ],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
self.assertEqual(17, len(results), results)
# 5 jobs in check, 3 jobs in gate
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', '--project', 'org/project', ],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
self.assertEqual(8, len(results), results)
self.assertTrue(all(x['Project'] == 'org/project' for x in results),
results)
# project-test1 is run 3 times in check, 2 times in gate
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', '--job', 'project-test1', ],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
self.assertEqual(5, len(results), results)
self.assertTrue(all(x['Job'] == 'project-test1' for x in results),
results)
# 3 builds in check for 2,1; 3 in check + 3 in gate for 2,2
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', '--change', '2', ],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
self.assertEqual(9, len(results), results)
self.assertTrue(all(x['Change or Ref'].startswith('2,')
for x in results),
results)
# 1,3 does not exist
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', '--change', '1',
'--ref', '3', ],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
self.assertEqual(0, len(results), results)
for result in ['SUCCESS', 'FAILURE']:
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', '--result', result, ],
stdout=subprocess.PIPE)
job_count = self.countJobResults(self.history, result)
            # the noop job does not appear in self.history, so add it manually
if result == 'SUCCESS':
job_count += 1
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
self.assertEqual(job_count, len(results), results)
if len(results) > 0:
self.assertTrue(all(x['Result'] == result for x in results),
results)
# 6 jobs in gate
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', '--pipeline', 'gate', ],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
self.assertEqual(6, len(results), results)
self.assertTrue(all(x['Pipeline'] == 'gate' for x in results),
results)
class TestZuulClientBuildInfo(TestZuulClientQueryData,
AnsibleZuulTestCase):
"""Test that zuul-client can fetch a build's details"""
def test_get_build_info(self):
"""Test querying a specific build"""
test_build = self.history[-1]
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'build-info', '--tenant', 'tenant-one',
'--uuid', test_build.uuid],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, (output, err))
info = self._split_line_output(output)
self.assertEqual(test_build.uuid, info.get('UUID'), test_build)
self.assertEqual(test_build.result, info.get('Result'), test_build)
self.assertEqual(test_build.name, info.get('Job'), test_build)
def test_get_build_artifacts(self):
"""Test querying a specific build's artifacts"""
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'builds', '--tenant', 'tenant-one', '--job', 'project-test1',
'--limit', '1'],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, output)
results = self._split_pretty_table(output)
uuid = results[0]['ID']
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'build-info', '--tenant', 'tenant-one',
'--uuid', uuid,
'--show-artifacts'],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, (output, err))
artifacts = self._split_pretty_table(output)
self.assertTrue(
any(x['name'] == 'tarball' and
x['url'] == 'http://example.com/tarball'
for x in artifacts),
output)
self.assertTrue(
any(x['name'] == 'docs' and
x['url'] == 'http://example.com/docs'
for x in artifacts),
output)
class TestZuulClientJobGraph(BaseTestWeb):
def _split_pretty_table(self, output):
lines = output.decode().split('\n')
headers = [x.strip() for x in lines[1].split('|') if x != '']
        # Skip the borders and the header separator; rows 3:-2 are the body
return [dict(zip(headers,
[x.strip() for x in l.split('|') if x != '']))
for l in lines[3:-2]]
def test_job_graph(self):
"""Test the job-graph command"""
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'job-graph',
'--tenant', 'tenant-one',
'--pipeline', 'check',
'--project', 'org/project1',
'--branch', 'master',
],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, (output, err))
results = self._split_pretty_table(output)
expected = [
{'Job': 'project-merge', 'Dependencies': ''},
{'Job': 'project-test1', 'Dependencies': 'project-merge'},
{'Job': 'project-test2', 'Dependencies': 'project-merge'},
{'Job': 'project1-project2-integration',
'Dependencies': 'project-merge'}
]
self.assertEqual(results, expected)
def test_job_graph_dot(self):
"""Test the job-graph command dot output"""
p = subprocess.Popen(
['zuul-client',
'--format', 'dot',
'--zuul-url', self.base_url,
'job-graph',
'--tenant', 'tenant-one',
'--pipeline', 'check',
'--project', 'org/project1',
'--branch', 'master',
],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, (output, err))
expected = textwrap.dedent('''\
digraph job_graph {
rankdir=LR;
node [shape=box];
"project-merge";
"project-merge" -> "project-test1" [dir=back];
"project-merge" -> "project-test2" [dir=back];
"project-merge" -> "project1-project2-integration" [dir=back];
}
''').encode('utf8')
self.assertEqual(output.strip(), expected.strip())
class TestZuulClientFreezeJob(BaseTestWeb):
def test_freeze_job(self):
"""Test the freeze-job command"""
p = subprocess.Popen(
['zuul-client',
'--zuul-url', self.base_url,
'freeze-job',
'--tenant', 'tenant-one',
'--pipeline', 'check',
'--project', 'org/project1',
'--branch', 'master',
'--job', 'project-test1',
],
stdout=subprocess.PIPE)
output, err = p.communicate()
self.assertEqual(p.returncode, 0, (output, err))
output = output.decode('utf8')
for s in [
'Job: project-test1',
'Branch: master',
'Ansible Version:',
'Workspace Scheme: golang',
('gerrit:common-config:playbooks/project-test1.yaml'
'@master [trusted]'),
]:
self.assertIn(s, output)
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tests/zuul_client/test_zuulclient.py
|
test_zuulclient.py
|
#!/usr/bin/python
# Copyright: (c) 2022, Acme Gating LLC
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
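# Test fixture: an Ansible module that always raises, so the zuul-stream
# fixture playbooks can exercise Zuul's handling of module failures. The
# required 'key' parameter is only echoed back in the exception message.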
from ansible.module_utils.basic import AnsibleModule
def run_module():
module = AnsibleModule(
argument_spec=dict(key=dict(type='str', required=True)),
supports_check_mode=True
)
raise Exception("Test module failure exception " + module.params['key'])
def main():
run_module()
if __name__ == '__main__':
main()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/playbooks/zuul-stream/fixtures/library/zuul_fail.py
|
zuul_fail.py
|
#!/bin/bash -e
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Rebuild old versions of documentation
ZUUL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
function init () {
rm -fr /tmp/rebuild
mkdir /tmp/rebuild
cd /tmp/rebuild
git clone https://opendev.org/zuul/zuul
cd /tmp/rebuild/zuul
tox -e docs --notest
}
function build {
mkdir -p /tmp/rebuild/output
cd /tmp/rebuild/zuul
git reset --hard origin/master
git checkout $1
mkdir -p doc/source/_static
mkdir -p doc/source/_templates
cp $ZUUL_DIR/doc/source/_static/* doc/source/_static
cp $ZUUL_DIR/doc/source/_templates/* doc/source/_templates
cp $ZUUL_DIR/doc/source/conf.py doc/source
cp $ZUUL_DIR/doc/requirements.txt doc
cp $ZUUL_DIR/tox.ini .
. .tox/docs/bin/activate
sphinx-build -E -d doc/build/doctrees -b html doc/source/ doc/build/html
mv doc/build/html /tmp/rebuild/output/$1
rm -fr doc/build/doctrees
}
# TODO: iterate over tags
init
build 3.3.0
build 3.3.1
build 3.4.0
build 3.5.0
build 3.6.0
build 3.6.1
build 3.7.0
build 3.7.1
build 3.8.0
build 3.8.1
build 3.9.0
build 4.0.0
build 4.1.0
build 4.2.0
build 4.3.0
build 4.4.0
build 4.5.0
build 4.5.1
build 4.6.0
build 4.7.0
build 4.8.0
build 4.8.1
build 4.9.0
build 4.10.0
build 4.10.1
build 4.10.2
build 4.10.3
build 4.10.4
build 4.11.0
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/rebuild-docs.sh
|
rebuild-docs.sh
|
#!/bin/bash
# Copyright 2018 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# This script checks if yarn is installed in the current path. If it is not,
# it will use nodeenv to install node, npm and yarn.
# Finally, it will install pip things.
if [[ ! $(command -v yarn) ]]
then
pip install nodeenv
# Initialize nodeenv and tell it to re-use the currently active virtualenv
attempts=0
set +e
until nodeenv --python-virtualenv -n 16.14.0 ; do
((attempts++))
        if [[ $attempts -gt 2 ]]
then
echo "Failed creating nodeenv"
exit 1
fi
done
set -e
    # Use -g because inside of the virtualenv '-g' means 'install into the
    # virtualenv' - as opposed to installing into the local node_modules.
# Avoid writing a package-lock.json file since we don't use it.
# Avoid writing yarn into package.json.
npm install -g --no-package-lock --no-save yarn
fi
if [[ ! -f zuul/web/static/index.html ]]
then
mkdir -p zuul/web/static
ln -sfn ../zuul/web/static web/build
pushd web/
if [[ -n "${YARN_REGISTRY}" ]]
then
echo "Using yarn registry: ${YARN_REGISTRY}"
sed -i "s#https://registry.yarnpkg.com#${YARN_REGISTRY}#" yarn.lock
fi
# Be forgiving of package retrieval errors
attempts=0
set +e
until yarn install; do
((attempts++))
        if [[ $attempts -gt 2 ]]
then
echo "Failed installing npm packages"
exit 1
fi
done
set -e
yarn build
if [[ -n "${YARN_REGISTRY}" ]]
then
echo "Resetting yarn registry"
sed -i "s#${YARN_REGISTRY}#https://registry.yarnpkg.com#" yarn.lock
fi
popd
fi
pip install $*
# Fail-fast if pip detects conflicts
pip check
# Check if we're installing zuul. If so install the managed ansible as well.
if echo "$*" | grep -vq requirements.txt; then
zuul-manage-ansible -v
fi
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/pip.sh
|
pip.sh
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from zuul.lib import encryption
import zuul.configloader
import zuul.model
DESCRIPTION = """Decrypt a Zuul secret.
"""
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('private_key',
help="The path to the private key")
parser.add_argument('file',
help="The YAML file with secrets")
args = parser.parse_args()
(private_secrets_key, public_secrets_key) = \
encryption.deserialize_rsa_keypair(open(args.private_key, 'rb').read())
parser = zuul.configloader.SecretParser(None)
sc = zuul.model.SourceContext(None, 'project', None, 'master',
'path', False)
data = zuul.configloader.safe_load_yaml(open(args.file).read(), sc)
for element in data:
if 'secret' not in element:
continue
s = element['secret']
secret = parser.fromYaml(s)
print(secret.name)
print(secret.decrypt(private_secrets_key).secret_data)
if __name__ == '__main__':
main()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/decrypt_secret.py
|
decrypt_secret.py
|
#!/usr/bin/env python3
# Copyright 2020 BMW Group
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
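# A test stand-in for bubblewrap: it accepts a subset of bwrap's command
# line, writes any --file payloads to disk (silently skipping /etc paths),
# honors --chdir, and then runs the wrapped command with no sandboxing.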
import argparse
import os
import subprocess
def main():
pos_args = {
'--dir': 1,
'--tmpfs': 1,
'--ro-bind': 2,
'--bind': 2,
'--chdir': 1,
'--uid': 1,
'--gid': 1,
'--file': 2,
'--proc': 1,
'--dev': 1,
}
bool_args = [
'--unshare-all',
'--unshare-user',
'--unshare-user-try',
'--unshare-ipc',
'--unshare-pid',
'--unshare-net',
'--unshare-uts',
'--unshare-cgroup',
'--unshare-cgroup-try',
'--share-net',
'--die-with-parent',
]
parser = argparse.ArgumentParser()
for arg, nargs in pos_args.items():
parser.add_argument(arg, nargs=nargs, action='append')
for arg in bool_args:
parser.add_argument(arg, action='store_true')
parser.add_argument('args', metavar='args', nargs=argparse.REMAINDER,
help='Command')
args = parser.parse_args()
    for fd, path in (args.file or []):
fd = int(fd)
if path.startswith('/etc'):
# Ignore write requests to /etc
continue
print('Writing file from %s to %s' % (fd, path))
count = 0
with open(path, 'wb') as output:
data = os.read(fd, 32000)
while data:
count += len(data)
output.write(data)
data = os.read(fd, 32000)
print('Wrote file (%s bytes)' % count)
if args.chdir:
os.chdir(args.chdir[0][0])
result = subprocess.run(args.args, shell=False, check=False)
exit(result.returncode)
if __name__ == '__main__':
main()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/fake_bwrap.py
|
fake_bwrap.py
|
#!/usr/bin/env python
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script updates the Zuul v3 Storyboard. It uses a .boartty.yaml
# file to get credential information.
import requests
import boartty.config
import boartty.sync
import logging # noqa
from pprint import pprint as p # noqa
class App(object):
pass
def get_tasks(sync):
task_list = []
for story in sync.get('/v1/stories?tags=zuulv3'):
print("Story %s: %s" % (story['id'], story['title']))
for task in sync.get('/v1/stories/%s/tasks' % (story['id'])):
print(" %s" % (task['title'],))
task_list.append(task)
return task_list
def task_in_lane(task, lane):
for item in lane['worklist']['items']:
if 'task' in item and item['task']['id'] == task['id']:
return True
return False
def add_task(sync, task, lane):
print("Add task %s to %s" % (task['id'], lane['worklist']['id']))
r = sync.post('v1/worklists/%s/items/' % lane['worklist']['id'],
dict(item_id=task['id'],
item_type='task',
list_position=0))
print(r)
def remove_task(sync, task, lane):
print("Remove task %s from %s" % (task['id'], lane['worklist']['id']))
for item in lane['worklist']['items']:
if 'task' in item and item['task']['id'] == task['id']:
r = sync.delete('v1/worklists/%s/items/' % lane['worklist']['id'],
dict(item_id=item['id']))
print(r)
MAP = {
'todo': ['New', 'Backlog', 'Todo'],
'inprogress': ['In Progress', 'Blocked'],
'review': ['In Progress', 'Blocked'],
'merged': None,
'invalid': None,
}
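# Lanes a task may occupy on the board, keyed by task status. A None value
# means the task should not appear in any lane; otherwise the task is added
# to the first listed lane if it is not already in one of them.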
def main():
requests.packages.urllib3.disable_warnings()
# logging.basicConfig(level=logging.DEBUG)
app = App()
app.config = boartty.config.Config('openstack')
sync = boartty.sync.Sync(app, False)
board = sync.get('v1/boards/41')
tasks = get_tasks(sync)
lanes = dict()
for lane in board['lanes']:
lanes[lane['worklist']['title']] = lane
for task in tasks:
ok_lanes = MAP[task['status']]
task_found = False
for lane_name, lane in lanes.items():
if task_in_lane(task, lane):
if ok_lanes and lane_name in ok_lanes:
task_found = True
else:
remove_task(sync, task, lane)
if ok_lanes and not task_found:
add_task(sync, task, lanes[ok_lanes[0]])
if __name__ == '__main__':
main()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/update-storyboard.py
|
update-storyboard.py
|
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Analyze the contents of the ZK tree (whether in ZK or a dump on the
# local filesystem) to identify large objects.
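# Example invocations (hostnames and paths are hypothetical):
#   python3 zk-analyze.py --path /tmp/zk-dump --human --limit 10K
#   python3 zk-analyze.py --host zk01.example.com:2281 --cert client.pem --key client.key --ca ca.pem --human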
import argparse
import json
import os
import sys
import zlib
import kazoo.client
KB = 1024
MB = 1024**2
GB = 1024**3
def convert_human(size):
if size >= GB:
return f'{int(size/GB)}G'
if size >= MB:
return f'{int(size/MB)}M'
if size >= KB:
return f'{int(size/KB)}K'
if size > 0:
return f'{size}B'
return '0'
def convert_null(size):
return size
def unconvert_human(size):
suffix = size[-1]
val = size[:-1]
if suffix in ['G', 'g']:
return int(val) * GB
if suffix in ['M', 'm']:
return int(val) * MB
if suffix in ['K', 'k']:
return int(val) * KB
return int(size)
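# For example, given the definitions above (integer division truncates):
#   convert_human(1536) -> '1K'
#   convert_human(0) -> '0'
#   unconvert_human('2M') -> 2097152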
class SummaryLine:
def __init__(self, kind, path, size=0, zk_size=0):
self.kind = kind
self.path = path
self.size = size
self.zk_size = zk_size
self.attrs = {}
self.children = []
@property
def tree_size(self):
return sum([x.tree_size for x in self.children] + [self.size])
@property
def zk_tree_size(self):
return sum([x.zk_tree_size for x in self.children] + [self.zk_size])
def add(self, child):
self.children.append(child)
def __str__(self):
indent = 0
return self.toStr(indent)
def matchesLimit(self, limit, zk):
if not limit:
return True
if zk:
size = self.zk_size
else:
size = self.size
if size >= limit:
return True
for child in self.children:
if child.matchesLimit(limit, zk):
return True
return False
def toStr(self, indent, depth=None, conv=convert_null, limit=0, zk=False):
"""Convert this item and its children to a str representation
        :param int indent: How many levels to indent
        :param int depth: How many levels deep to display
        :param func conv: A function to convert sizes to text
        :param int limit: Don't display items smaller than this
        :param bool zk: Whether to use the data size (False)
            or ZK storage size (True)
"""
if depth and indent >= depth:
return ''
if self.matchesLimit(limit, zk):
attrs = ' '.join([f'{k}={conv(v)}' for k, v in self.attrs.items()])
if attrs:
attrs = ' ' + attrs
if zk:
size = conv(self.zk_size)
tree_size = conv(self.zk_tree_size)
else:
size = conv(self.size)
tree_size = conv(self.tree_size)
ret = (' ' * indent + f"{self.kind} {self.path} "
f"size={size} tree={tree_size}{attrs}\n")
for child in self.children:
ret += child.toStr(indent + 1, depth, conv, limit, zk)
else:
ret = ''
return ret
class Data:
def __init__(self, path, raw, zk_size=None, failed=False):
self.path = path
self.raw = raw
self.failed = failed
self.zk_size = zk_size or len(raw)
if not failed:
self.data = json.loads(raw)
else:
print(f"!!! {path} failed to load data")
self.data = {}
@property
def size(self):
return len(self.raw)
class Tree:
def getNode(self, path):
pass
def listChildren(self, path):
pass
def listConnections(self):
return self.listChildren('/zuul/cache/connection')
def getBranchCache(self, connection):
return self.getShardedNode(f'/zuul/cache/connection/{connection}'
'/branches/data')
def listCacheKeys(self, connection):
return self.listChildren(f'/zuul/cache/connection/{connection}/cache')
def getCacheKey(self, connection, key):
return self.getNode(f'/zuul/cache/connection/{connection}/cache/{key}')
def listCacheData(self, connection):
return self.listChildren(f'/zuul/cache/connection/{connection}/data')
def getCacheData(self, connection, key):
return self.getShardedNode(f'/zuul/cache/connection/{connection}'
f'/data/{key}')
def listTenants(self):
return self.listChildren('/zuul/tenant')
def listPipelines(self, tenant):
return self.listChildren(f'/zuul/tenant/{tenant}/pipeline')
def getPipeline(self, tenant, pipeline):
return self.getNode(f'/zuul/tenant/{tenant}/pipeline/{pipeline}')
def getItems(self, tenant, pipeline):
pdata = self.getPipeline(tenant, pipeline)
for queue in pdata.data.get('queues', []):
qdata = self.getNode(queue)
for item in qdata.data.get('queue', []):
idata = self.getNode(item)
yield idata
def listBuildsets(self, item):
return self.listChildren(f'{item}/buildset')
def getBuildset(self, item, buildset):
return self.getNode(f'{item}/buildset/{buildset}')
def listJobs(self, buildset):
return self.listChildren(f'{buildset}/job')
def getJob(self, buildset, job_name):
return self.getNode(f'{buildset}/job/{job_name}')
def listBuilds(self, buildset, job_name):
return self.listChildren(f'{buildset}/job/{job_name}/build')
def getBuild(self, buildset, job_name, build):
return self.getNode(f'{buildset}/job/{job_name}/build/{build}')
class FilesystemTree(Tree):
def __init__(self, root):
self.root = root
def getNode(self, path):
path = path.lstrip('/')
fullpath = os.path.join(self.root, path)
if not os.path.exists(fullpath):
return Data(path, '', failed=True)
try:
with open(os.path.join(fullpath, 'ZKDATA'), 'rb') as f:
zk_data = f.read()
data = zk_data
try:
data = zlib.decompress(zk_data)
except Exception:
pass
return Data(path, data, zk_size=len(zk_data))
except Exception:
return Data(path, '', failed=True)
def getShardedNode(self, path):
path = path.lstrip('/')
fullpath = os.path.join(self.root, path)
if not os.path.exists(fullpath):
return Data(path, '', failed=True)
shards = sorted([x for x in os.listdir(fullpath)
if x != 'ZKDATA'])
data = b''
compressed_data_len = 0
try:
for shard in shards:
with open(os.path.join(fullpath, shard, 'ZKDATA'), 'rb') as f:
compressed_data = f.read()
compressed_data_len += len(compressed_data)
data += zlib.decompress(compressed_data)
return Data(path, data, zk_size=compressed_data_len)
except Exception:
return Data(path, data, failed=True)
def listChildren(self, path):
path = path.lstrip('/')
fullpath = os.path.join(self.root, path)
if not os.path.exists(fullpath):
return []
return [x for x in os.listdir(fullpath)
if x != 'ZKDATA']
class ZKTree(Tree):
def __init__(self, host, cert, key, ca):
kwargs = {}
if cert:
kwargs['use_ssl'] = True
kwargs['keyfile'] = key
kwargs['certfile'] = cert
kwargs['ca'] = ca
self.client = kazoo.client.KazooClient(host, **kwargs)
self.client.start()
def getNode(self, path):
path = path.lstrip('/')
if not self.client.exists(path):
return Data(path, '', failed=True)
try:
zk_data, _ = self.client.get(path)
data = zk_data
try:
data = zlib.decompress(zk_data)
except Exception:
pass
return Data(path, data, zk_size=len(zk_data))
except Exception:
return Data(path, '', failed=True)
def getShardedNode(self, path):
path = path.lstrip('/')
if not self.client.exists(path):
return Data(path, '', failed=True)
shards = sorted(self.listChildren(path))
data = b''
compressed_data_len = 0
try:
for shard in shards:
compressed_data, _ = self.client.get(os.path.join(path, shard))
compressed_data_len += len(compressed_data)
data += zlib.decompress(compressed_data)
return Data(path, data, zk_size=compressed_data_len)
except Exception:
return Data(path, data, failed=True)
def listChildren(self, path):
path = path.lstrip('/')
try:
return self.client.get_children(path)
except kazoo.client.NoNodeError:
return []
class Analyzer:
def __init__(self, args):
if args.path:
self.tree = FilesystemTree(args.path)
else:
self.tree = ZKTree(args.host, args.cert, args.key, args.ca)
if args.depth is not None:
self.depth = int(args.depth)
else:
self.depth = None
if args.human:
self.conv = convert_human
else:
self.conv = convert_null
if args.limit:
self.limit = unconvert_human(args.limit)
else:
self.limit = 0
self.use_zk_size = args.zk_size
def summarizeItem(self, item):
# Start with an item
item_summary = SummaryLine('Item', item.path, item.size, item.zk_size)
buildsets = self.tree.listBuildsets(item.path)
for bs_i, bs_id in enumerate(buildsets):
# Add each buildset
buildset = self.tree.getBuildset(item.path, bs_id)
buildset_summary = SummaryLine(
'Buildset', buildset.path,
buildset.size, buildset.zk_size)
item_summary.add(buildset_summary)
# Some attributes are offloaded, gather them and include
# the size.
for x in ['merge_repo_state', 'extra_repo_state', 'files',
'config_errors']:
if buildset.data.get(x):
node = self.tree.getShardedNode(buildset.data.get(x))
                    buildset_summary.attrs[x] = (
                        node.zk_size if self.use_zk_size else node.size)
buildset_summary.size += node.size
buildset_summary.zk_size += node.zk_size
jobs = self.tree.listJobs(buildset.path)
for job_i, job_name in enumerate(jobs):
# Add each job
job = self.tree.getJob(buildset.path, job_name)
job_summary = SummaryLine('Job', job.path,
job.size, job.zk_size)
buildset_summary.add(job_summary)
# Handle offloaded job data
for job_attr in ('artifact_data',
'extra_variables',
'group_variables',
'host_variables',
'secret_parent_data',
'variables',
'parent_data',
'secrets'):
job_data = job.data.get(job_attr, None)
if job_data and job_data['storage'] == 'offload':
node = self.tree.getShardedNode(job_data['path'])
                        job_summary.attrs[job_attr] = (
                            node.zk_size if self.use_zk_size else node.size)
job_summary.size += node.size
job_summary.zk_size += node.zk_size
builds = self.tree.listBuilds(buildset.path, job_name)
for build_i, build_id in enumerate(builds):
# Add each build
build = self.tree.getBuild(
buildset.path, job_name, build_id)
build_summary = SummaryLine(
'Build', build.path, build.size, build.zk_size)
job_summary.add(build_summary)
# Add the offloaded build attributes
result_len = 0
result_zk_len = 0
if build.data.get('_result_data'):
result_data = self.tree.getShardedNode(
build.data['_result_data'])
result_len += result_data.size
result_zk_len += result_data.zk_size
if build.data.get('_secret_result_data'):
secret_result_data = self.tree.getShardedNode(
build.data['_secret_result_data'])
result_len += secret_result_data.size
result_zk_len += secret_result_data.zk_size
                    build_summary.attrs['results'] = (
                        result_zk_len if self.use_zk_size else result_len)
build_summary.size += result_len
build_summary.zk_size += result_zk_len
sys.stdout.write(item_summary.toStr(0, self.depth, self.conv,
self.limit, self.use_zk_size))
def summarizePipelines(self):
for tenant_name in self.tree.listTenants():
for pipeline_name in self.tree.listPipelines(tenant_name):
for item in self.tree.getItems(tenant_name, pipeline_name):
self.summarizeItem(item)
def summarizeConnectionCache(self, connection_name):
connection_summary = SummaryLine('Connection', connection_name, 0, 0)
branch_cache = self.tree.getBranchCache(connection_name)
branch_summary = SummaryLine(
'Branch Cache', connection_name,
branch_cache.size, branch_cache.zk_size)
connection_summary.add(branch_summary)
cache_key_summary = SummaryLine(
'Change Cache Keys', connection_name, 0, 0)
cache_key_summary.attrs['count'] = 0
connection_summary.add(cache_key_summary)
for key in self.tree.listCacheKeys(connection_name):
cache_key = self.tree.getCacheKey(connection_name, key)
cache_key_summary.size += cache_key.size
cache_key_summary.zk_size += cache_key.zk_size
cache_key_summary.attrs['count'] += 1
cache_data_summary = SummaryLine(
'Change Cache Data', connection_name, 0, 0)
cache_data_summary.attrs['count'] = 0
connection_summary.add(cache_data_summary)
for key in self.tree.listCacheData(connection_name):
cache_data = self.tree.getCacheData(connection_name, key)
cache_data_summary.size += cache_data.size
cache_data_summary.zk_size += cache_data.zk_size
cache_data_summary.attrs['count'] += 1
sys.stdout.write(connection_summary.toStr(
0, self.depth, self.conv, self.limit, self.use_zk_size))
def summarizeConnections(self):
for connection_name in self.tree.listConnections():
self.summarizeConnectionCache(connection_name)
def summarize(self):
self.summarizeConnections()
self.summarizePipelines()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path',
help='Filesystem path for previously dumped data')
parser.add_argument('--host',
help='ZK host string (exclusive with --path)')
parser.add_argument('--cert', help='Path to TLS certificate')
parser.add_argument('--key', help='Path to TLS key')
parser.add_argument('--ca', help='Path to TLS CA cert')
parser.add_argument('-d', '--depth', help='Limit depth when printing')
parser.add_argument('-H', '--human', dest='human', action='store_true',
help='Use human-readable sizes')
parser.add_argument('-l', '--limit', dest='limit',
help='Only print nodes greater than limit')
parser.add_argument('-Z', '--zksize', dest='zk_size', action='store_true',
help='Use the possibly compressed ZK storage size '
'instead of plain data size')
args = parser.parse_args()
az = Analyzer(args)
az.summarize()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/zk-analyze.py
|
zk-analyze.py
|
#!/bin/bash
# Copyright 2018 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# This script checks if yarn is installed in the current path. If it is not,
# it will use nodeenv to install node, npm and yarn.
# Finally, it will install pip things.
if [[ ! $(command -v yarn) ]]
then
pip install nodeenv
# Initialize nodeenv and tell it to re-use the currently active virtualenv
attempts=0
set +e
until nodeenv --python-virtualenv -n 16.14.0 ; do
((attempts++))
        if [[ $attempts -gt 2 ]]
then
echo "Failed creating nodeenv"
exit 1
fi
done
set -e
    # Use -g because inside of the virtualenv '-g' means 'install into the
    # virtualenv' - as opposed to installing into the local node_modules.
# Avoid writing a package-lock.json file since we don't use it.
# Avoid writing yarn into package.json.
npm install -g --no-package-lock --no-save yarn
fi
if [[ ! -f zuul/web/static/index.html ]]
then
mkdir -p zuul/web/static
ln -sfn ../zuul/web/static web/build
pushd web/
if [[ -n "${YARN_REGISTRY}" ]]
then
echo "Using yarn registry: ${YARN_REGISTRY}"
sed -i "s#https://registry.yarnpkg.com#${YARN_REGISTRY}#" yarn.lock
fi
# Be forgiving of package retrieval errors
attempts=0
set +e
until yarn install; do
((attempts++))
        if [[ $attempts -gt 2 ]]
then
echo "Failed installing npm packages"
exit 1
fi
done
set -e
yarn build
if [[ -n "${YARN_REGISTRY}" ]]
then
echo "Resetting yarn registry"
sed -i "s#${YARN_REGISTRY}#https://registry.yarnpkg.com#" yarn.lock
fi
popd
fi
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/yarn-build.sh
|
yarn-build.sh
|
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Dump the data in ZK to the local filesystem.
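# Example invocation (host and output path are hypothetical):
#   python3 zk-dump.py zk01.example.com:2181 /tmp/zk-dump --decompress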
import argparse
import os
import zlib
import kazoo.client
import kazoo.exceptions
def getTree(client, root, path, decompress=False):
try:
data, zstat = client.get(path)
except kazoo.exceptions.NoNodeError:
print(f"No node at {path}")
return
if decompress:
try:
data = zlib.decompress(data)
except Exception:
pass
os.makedirs(root + path)
with open(root + path + '/ZKDATA', 'wb') as f:
f.write(data)
for child in client.get_children(path):
getTree(client, root, path + '/' + child, decompress)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('host', help='ZK host string')
parser.add_argument('path', help='Filesystem output path for data dump')
parser.add_argument('--cert', help='Path to TLS certificate')
parser.add_argument('--key', help='Path to TLS key')
parser.add_argument('--ca', help='Path to TLS CA cert')
parser.add_argument('--decompress', action='store_true',
help='Decompress data')
args = parser.parse_args()
kwargs = {}
if args.cert:
kwargs['use_ssl'] = True
kwargs['keyfile'] = args.key
kwargs['certfile'] = args.cert
kwargs['ca'] = args.ca
client = kazoo.client.KazooClient(args.host, **kwargs)
client.start()
getTree(client, args.path, '/zuul', args.decompress)
if __name__ == '__main__':
main()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/zk-dump.py
|
zk-dump.py
|
#!/bin/bash
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
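# Install Node.js and yarn with the native package manager of whichever
# platform is detected below (apt-get, yum, zypper, brew, or pamac).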
if [ $EUID -ne 0 ] ; then
SUDO='sudo -E'
fi
if type apt-get; then
# Install https transport - otherwise apt-get HANGS on https urls
# Install curl so the curl commands work
# Install gnupg2 so that the apt-key add works
$SUDO apt-get update
$SUDO apt-get install -y apt-transport-https curl gnupg2
# Install recent NodeJS repo
curl -sS https://deb.nodesource.com/gpgkey/nodesource.gpg.key | $SUDO apt-key add -
echo "deb https://deb.nodesource.com/node_16.x focal main" | $SUDO tee /etc/apt/sources.list.d/nodesource.list
# Install yarn repo
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | $SUDO apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | $SUDO tee /etc/apt/sources.list.d/yarn.list
$SUDO apt-get update
DEBIAN_FRONTEND=noninteractive \
$SUDO apt-get -q --option "Dpkg::Options::=--force-confold" --assume-yes \
install nodejs yarn
elif type yum; then
$SUDO curl https://dl.yarnpkg.com/rpm/yarn.repo -o /etc/yum.repos.d/yarn.repo
$SUDO $(dirname $0)/install-js-repos-rpm.sh
$SUDO yum -y install nodejs yarn
elif type zypper; then
$SUDO zypper install -y nodejs10 npm10
$SUDO npm install yarn
elif type brew; then
brew install nodejs yarn
elif type pamac; then
$SUDO pamac install nodejs yarn --no-confirm
else
echo "Unsupported platform"
fi
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/install-js-tools.sh
|
install-js-tools.sh
|
#!/bin/bash
# This runs ZooKeeper and databases in docker containers, which are
# required for tests.
# This setup needs to be run as a user that can run docker or podman, or by
# setting $ROOTCMD to a user substitution tool like "sudo" in the calling
# environment.
set -xeu
# Default ROOTCMD to the 'env' command, otherwise variable assignments will be
# interpreted as a command when no ROOTCMD is given. The reason for that is
# Bash's simple command expansion.
ROOTCMD=${ROOTCMD:-env}
cd $(dirname $0)
SCRIPT_DIR="$(pwd)"
# Select docker or podman
if command -v docker > /dev/null; then
DOCKER=docker
elif command -v podman > /dev/null; then
DOCKER=podman
else
echo "Please install docker or podman."
exit 1
fi
# Select docker-compose or podman-compose
if command -v docker-compose > /dev/null; then
COMPOSE=docker-compose
elif command -v podman-compose > /dev/null; then
COMPOSE=podman-compose
else
echo "Please install docker-compose or podman-compose."
exit 1
fi
MYSQL="${DOCKER} exec zuul-test-mysql mysql -u root -pinsecure_worker"
if [ "${COMPOSE}" == "docker-compose" ]; then
${ROOTCMD} docker-compose rm -sf
else
${ROOTCMD} podman-compose down
fi
CA_DIR=$SCRIPT_DIR/ca
mkdir -p $CA_DIR
$SCRIPT_DIR/zk-ca.sh $CA_DIR zuul-test-zookeeper
${ROOTCMD} USER_ID=$(id -u) ${COMPOSE} up -d
echo "Waiting for mysql"
timeout 30 bash -c "until ${ROOTCMD} ${MYSQL} -e 'show databases'; do sleep 0.5; done"
echo
echo "Setting up permissions for zuul tests"
${ROOTCMD} ${MYSQL} -e "CREATE USER 'openstack_citest'@'%' identified by 'openstack_citest';"
${ROOTCMD} ${MYSQL} -e "GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'%' WITH GRANT OPTION;"
${ROOTCMD} ${MYSQL} -u openstack_citest -popenstack_citest -e "SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;"
echo "Finished"
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/test-setup-docker.sh
|
test-setup-docker.sh
|
#!/bin/bash
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
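# Run the zuul-stream fixture playbooks against the tox venv's Ansible,
# wiring in Zuul's callback/lookup/action plugins, and leave the resulting
# logs in a temporary work directory. An inventory file may be passed as
# $1; otherwise a default all-local inventory is generated.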
ZUUL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
# Initialize tox environment if it's not set up
if [[ ! -d "${ZUUL_DIR}/.tox/venv" ]]; then
pushd $ZUUL_DIR
echo "Virtualenv doesn't exist... creating."
tox -e venv --notest
popd
fi
ANSIBLE_VERSION="2.7"
ANSIBLE_ROOT="${ZUUL_DIR}/.tox/venv/lib/zuul/ansible/${ANSIBLE_VERSION}"
ARA_DIR=$(dirname $("${ANSIBLE_ROOT}/bin/python3" -c 'import ara; print(ara.__file__)'))
# Source tox environment
source ${ZUUL_DIR}/.tox/venv/bin/activate
WORK_DIR=$(mktemp -d /tmp/zuul_logs_XXXX)
# Copy zuul ansible modules into workdir
ZUUL_ANSIBLE="${WORK_DIR}/zuul-ansible"
mkdir -p "${ZUUL_ANSIBLE}/zuul"
cp -Lr "${ZUUL_DIR}/zuul/ansible/${ANSIBLE_VERSION}" "${ZUUL_ANSIBLE}/zuul/ansible"
touch "${ZUUL_ANSIBLE}/zuul/ansible/__init__.py"
touch "${ZUUL_ANSIBLE}/zuul/__init__.py"
if [ -z $1 ] ; then
INVENTORY=$WORK_DIR/hosts.yaml
cat >$INVENTORY <<EOF
all:
hosts:
controller:
ansible_connection: local
node1:
ansible_connection: local
node2:
ansible_connection: local
node:
hosts:
node1: null
node2: null
EOF
else
INVENTORY=$(realpath $1)
fi
cat >$WORK_DIR/ansible.cfg <<EOF
[defaults]
inventory = $INVENTORY
gathering = smart
gather_subset = !all
fact_caching = jsonfile
fact_caching_connection = ~/.cache/facts
lookup_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/base/lookup
callback_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/base/callback:$ARA_DIR/plugins/callbacks
action_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/base/action
module_utils = ${ZUUL_ANSIBLE}/zuul/ansible/base/module_utils
stdout_callback = zuul_stream
library = ${ZUUL_ANSIBLE}/zuul/ansible/base/library
retry_files_enabled = False
EOF
cd $WORK_DIR
python3 $ZUUL_DIR/zuul/ansible/logconfig.py
export ZUUL_JOB_LOG_CONFIG=$WORK_DIR/logging.json
export ARA_DIR=$WORK_DIR/.ara
export ARA_LOG_CONFIG=$ZUUL_JOB_LOG_CONFIG
export PYTHONPATH="${ZUUL_ANSIBLE}:${PYTHONPATH}"
export ZUUL_JOBDIR=$WORK_DIR
rm -rf $ARA_DIR
"${ANSIBLE_ROOT}/bin/ansible" all -m zuul_console
ANSIBLE="${ANSIBLE_ROOT}/bin/ansible-playbook"
"${ANSIBLE}" "${ZUUL_DIR}/playbooks/zuul-stream/fixtures/test-stream.yaml"
"${ANSIBLE}" "${ZUUL_DIR}/playbooks/zuul-stream/fixtures/test-stream-failure.yaml"
# ansible-playbook $ZUUL_DIR/playbooks/zuul-stream/functional.yaml
echo "Logs are in $WORK_DIR"
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/test-logs.sh
|
test-logs.sh
|
import gzip
import os
import re
import yaml
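# Scrape Zuul scheduler logs (/var/log/zuul/zuul.log*) for nodepool
# "Nodeset ... was in use" lines and report the node time consumed,
# grouped by repo, job and, if a projects.yaml mapping is present,
# logical project.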
def get_log_age(path):
filename = os.path.basename(path)
parts = filename.split('.')
if len(parts) < 4:
return 0
else:
return int(parts[2])
class LogScraper(object):
# Example log line
# 2018-10-26 16:14:47,527 INFO zuul.nodepool: Nodeset <NodeSet two-centos-7-nodes [<Node 0000058431 ('primary',):centos-7>, <Node 0000058468 ('secondary',):centos-7>]> with 2 nodes was in use for 6241.08082151413 seconds for build <Build 530c4ca7af9e44dcb535e7074258e803 of tripleo-ci-centos-7-scenario008-multinode-oooq-container voting:False on <Worker ze05.openstack.org>> for project openstack/tripleo-quickstart-extras # noqa
r = re.compile(r'(?P<timestamp>\d+-\d+-\d+ \d\d:\d\d:\d\d,\d\d\d) INFO zuul.nodepool: Nodeset <.*> with (?P<nodes>\d+) nodes was in use for (?P<secs>\d+(.[\d\-e]+)?) seconds for build <Build \w+ of (?P<job>[^\s]+) voting:\w+ on .* for project (?P<repos>[^\s]+)') # noqa
def __init__(self):
self.repos = {}
self.sorted_repos = []
self.jobs = {}
self.sorted_jobs = []
self.total_usage = 0.0
self.projects = {}
self.sorted_projects = []
self.start_time = None
self.end_time = None
def scrape_file(self, fn):
if fn.endswith('.gz'):
open_f = gzip.open
else:
open_f = open
with open_f(fn, 'rt') as f:
for line in f:
if 'nodes was in use for' in line:
m = self.r.match(line)
if not m:
continue
g = m.groupdict()
repo = g['repos']
secs = float(g['secs'])
nodes = int(g['nodes'])
job = g['job']
if not self.start_time:
self.start_time = g['timestamp']
self.end_time = g['timestamp']
if repo not in self.repos:
self.repos[repo] = {}
self.repos[repo]['total'] = 0.0
node_time = nodes * secs
self.total_usage += node_time
self.repos[repo]['total'] += node_time
if job not in self.jobs:
self.jobs[job] = 0.0
if job not in self.repos[repo]:
self.repos[repo][job] = 0.0
self.jobs[job] += node_time
self.repos[repo][job] += node_time
def list_log_files(self, path='/var/log/zuul'):
ret = []
entries = os.listdir(path)
prefix = os.path.join(path, 'zuul.log')
for entry in entries:
entry = os.path.join(path, entry)
if os.path.isfile(entry) and entry.startswith(prefix):
ret.append(entry)
ret.sort(key=get_log_age, reverse=True)
return ret
def sort_repos(self):
for repo in self.repos:
self.sorted_repos.append((repo, self.repos[repo]['total']))
self.sorted_repos.sort(key=lambda x: x[1], reverse=True)
def sort_jobs(self):
for job, usage in self.jobs.items():
self.sorted_jobs.append((job, usage))
self.sorted_jobs.sort(key=lambda x: x[1], reverse=True)
def calculate_project_usage(self):
'''Group usage by logical project/effort
It is often the case that a single repo doesn't capture the work
of a logical project or effort. If this is the case in your situation
you can create a projects.yaml file that groups together repos
under logical project names to report usage by that logical grouping.
The projects.yaml should be in your current directory and have this
format:
project_name:
deliverables:
logical_deliverable_name:
repos:
- repo1
- repo2
project_name2:
deliverables:
logical_deliverable_name2:
repos:
- repo3
- repo4
'''
if not os.path.exists('projects.yaml'):
return self.sorted_projects
with open('projects.yaml') as f:
            y = yaml.safe_load(f)
for name, v in y.items():
self.projects[name] = 0.0
for deliverable in v['deliverables'].values():
for repo in deliverable['repos']:
if repo in self.repos:
self.projects[name] += self.repos[repo]['total']
for project, usage in self.projects.items():
self.sorted_projects.append((project, usage))
self.sorted_projects.sort(key=lambda x: x[1], reverse=True)
scraper = LogScraper()
for fn in scraper.list_log_files():
scraper.scrape_file(fn)
print('For period from %s to %s' % (scraper.start_time, scraper.end_time))
print('Total node time used: %.2fs' % scraper.total_usage)
print()
scraper.calculate_project_usage()
if scraper.sorted_projects:
print('Top 20 logical projects by resource usage:')
for project, total in scraper.sorted_projects[:20]:
percentage = (total / scraper.total_usage) * 100
print('%s: %.2fs, %.2f%%' % (project, total, percentage))
print()
scraper.sort_repos()
print('Top 20 repos by resource usage:')
for repo, total in scraper.sorted_repos[:20]:
percentage = (total / scraper.total_usage) * 100
print('%s: %.2fs, %.2f%%' % (repo, total, percentage))
print()
scraper.sort_jobs()
print('Top 20 jobs by resource usage:')
for job, total in scraper.sorted_jobs[:20]:
percentage = (total / scraper.total_usage) * 100
print('%s: %.2fs, %.2f%%' % (job, total, percentage))
print()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/node_usage.py
|
node_usage.py
|
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import base64
import json
import math
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import ssl
# we need to import Request and urlopen differently for python 2 and 3
try:
from urllib.request import Request
from urllib.request import urlopen
from urllib.parse import urlparse
except ImportError:
from urllib2 import Request
from urllib2 import urlopen
from urlparse import urlparse
DESCRIPTION = """Encrypt a secret for Zuul.
This program fetches a project-specific public key from a Zuul server and
uses that to encrypt a secret. The only pre-requisite is an installed
OpenSSL binary.
"""
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('url',
help="The base URL of the zuul server. "
"E.g., https://zuul.example.com/ or path"
" to project public key file. E.g.,"
" file:///path/to/key.pub")
parser.add_argument('project', default=None, nargs="?",
help="The name of the project. Required when using"
" the Zuul API to fetch the public key.")
parser.add_argument('--tenant',
default=None,
help="The name of the Zuul tenant. This may be "
"required in a multi-tenant environment.")
parser.add_argument('--strip', default=None,
help='Unused, kept for backward compatibility.')
parser.add_argument('--no-strip', action='store_true', default=False,
help="Do not strip whitespace from beginning or "
"end of input.")
parser.add_argument('--infile',
default=None,
help="A filename whose contents will be encrypted. "
"If not supplied, the value will be read from "
"standard input.")
parser.add_argument('--outfile',
default=None,
help="A filename to which the encrypted value will be "
"written. If not supplied, the value will be written "
"to standard output.")
parser.add_argument('--insecure', action='store_true', default=False,
help="Do not verify remote certificate")
args = parser.parse_args()
# We should not use unencrypted connections for retrieving the public key.
# Otherwise our secret can be compromised. The schemes file and https are
# considered safe.
url = urlparse(args.url)
if url.scheme not in ('file', 'https'):
sys.stderr.write("WARNING: Retrieving encryption key via an "
"unencrypted connection. Your secret may get "
"compromised.\n")
ssl_ctx = None
if url.scheme == 'file':
req = Request(args.url)
else:
if args.insecure:
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
        # Check if the deployment is whitelabeled (api/info reports a tenant)
req = Request("%s/api/info" % (args.url.rstrip('/'),))
info = json.loads(urlopen(req, context=ssl_ctx).read().decode('utf8'))
api_tenant = info.get('info', {}).get('tenant')
if not api_tenant and not args.tenant:
print("Error: the --tenant argument is required")
exit(1)
if api_tenant:
req = Request("%s/api/key/%s.pub" % (
args.url.rstrip('/'), args.project))
else:
req = Request("%s/api/tenant/%s/key/%s.pub" % (
args.url.rstrip('/'), args.tenant, args.project))
try:
pubkey = urlopen(req, context=ssl_ctx)
except Exception:
sys.stderr.write(
"ERROR: Couldn't retrieve project key via %s\n" % req.full_url)
raise
if args.infile:
with open(args.infile) as f:
plaintext = f.read()
else:
plaintext = sys.stdin.read()
plaintext = plaintext.encode("utf-8")
if not args.no_strip:
plaintext = plaintext.strip()
pubkey_file = tempfile.NamedTemporaryFile(delete=False)
try:
pubkey_file.write(pubkey.read())
pubkey_file.close()
p = subprocess.Popen(['openssl', 'rsa', '-text',
'-pubin', '-in',
pubkey_file.name],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise Exception("Return code %s from openssl" % p.returncode)
output = stdout.decode('utf-8')
openssl_version = subprocess.check_output(
['openssl', 'version']).split()[1]
if openssl_version.startswith(b'0.'):
key_length_re = r'^Modulus \((?P<key_length>\d+) bit\):$'
else:
key_length_re = r'^(|RSA )Public-Key: \((?P<key_length>\d+) bit\)$'
m = re.match(key_length_re, output, re.MULTILINE)
nbits = int(m.group('key_length'))
nbytes = int(nbits / 8)
max_bytes = nbytes - 42 # PKCS1-OAEP overhead
chunks = int(math.ceil(float(len(plaintext)) / max_bytes))
ciphertext_chunks = []
print("Public key length: {} bits ({} bytes)".format(nbits, nbytes))
print("Max plaintext length per chunk: {} bytes".format(max_bytes))
print("Input plaintext length: {} bytes".format(len(plaintext)))
print("Number of chunks: {}".format(chunks))
for count in range(chunks):
chunk = plaintext[int(count * max_bytes):
int((count + 1) * max_bytes)]
p = subprocess.Popen(['openssl', 'rsautl', '-encrypt',
'-oaep', '-pubin', '-inkey',
pubkey_file.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate(chunk)
if p.returncode != 0:
raise Exception("Return code %s from openssl" % p.returncode)
ciphertext_chunks.append(base64.b64encode(stdout).decode('utf-8'))
finally:
os.unlink(pubkey_file.name)
output = textwrap.dedent(
'''
- secret:
name: <name>
data:
<fieldname>: !encrypted/pkcs1-oaep
''')
twrap = textwrap.TextWrapper(width=79,
initial_indent=' ' * 8,
subsequent_indent=' ' * 10)
for chunk in ciphertext_chunks:
chunk = twrap.fill('- ' + chunk)
output += chunk + '\n'
if args.outfile:
with open(args.outfile, "w") as f:
f.write(output)
else:
print(output)
if __name__ == '__main__':
print(
"This script is deprecated. Use `zuul-client encrypt` instead. "
"Please refer to https://zuul-ci.org/docs/zuul-client/ "
"for more details on how to use zuul-client."
)
main()
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/encrypt_secret.py
|
encrypt_secret.py
|
#!/bin/sh -e
# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage a CA for Zookeeper
CAROOT=$1
SERVER=$2
SUBJECT='/C=US/ST=California/L=Oakland/O=Company Name/OU=Org'
TOOLSDIR=$(dirname $0)
ABSTOOLSDIR=$(cd $TOOLSDIR ;pwd)
CONFIG="-config $ABSTOOLSDIR/openssl.cnf"
make_ca() {
mkdir $CAROOT/demoCA
mkdir $CAROOT/demoCA/reqs
mkdir $CAROOT/demoCA/newcerts
mkdir $CAROOT/demoCA/crl
mkdir $CAROOT/demoCA/private
chmod 700 $CAROOT/demoCA/private
touch $CAROOT/demoCA/index.txt
touch $CAROOT/demoCA/index.txt.attr
mkdir $CAROOT/certs
mkdir $CAROOT/keys
mkdir $CAROOT/keystores
chmod 700 $CAROOT/keys
chmod 700 $CAROOT/keystores
openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=caroot" \
-keyout $CAROOT/demoCA/private/cakey.pem \
-out $CAROOT/demoCA/reqs/careq.pem
openssl ca $CONFIG -create_serial -days 3560 -batch -selfsign -extensions v3_ca \
-out $CAROOT/demoCA/cacert.pem \
-keyfile $CAROOT/demoCA/private/cakey.pem \
-infiles $CAROOT/demoCA/reqs/careq.pem
cp $CAROOT/demoCA/cacert.pem $CAROOT/certs
}
make_client() {
openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=client" \
-keyout $CAROOT/keys/clientkey.pem \
-out $CAROOT/demoCA/reqs/clientreq.pem
openssl ca $CONFIG -batch -policy policy_anything -days 3560 \
-out $CAROOT/certs/client.pem \
-infiles $CAROOT/demoCA/reqs/clientreq.pem
}
make_server() {
openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=$SERVER" \
-keyout $CAROOT/keys/${SERVER}key.pem \
-out $CAROOT/demoCA/reqs/${SERVER}req.pem
openssl ca $CONFIG -batch -policy policy_anything -days 3560 \
-out $CAROOT/certs/$SERVER.pem \
-infiles $CAROOT/demoCA/reqs/${SERVER}req.pem
cat $CAROOT/certs/$SERVER.pem $CAROOT/keys/${SERVER}key.pem \
> $CAROOT/keystores/$SERVER.pem
}
help() {
echo "$0 CAROOT [SERVER]"
echo
echo " CAROOT is the path to a directory in which to store the CA"
echo " and certificates."
echo " SERVER is the FQDN of a server for which a certificate should"
echo " be generated"
}
if [ ! -d "$CAROOT" ]; then
echo "CAROOT must be a directory"
help
exit 1
fi
cd $CAROOT
CAROOT=`pwd`
if [ ! -d "$CAROOT/demoCA" ]; then
echo 'Generate CA'
make_ca
echo 'Generate client certificate'
make_client
fi
if [ -f "$CAROOT/certs/$SERVER.pem" ]; then
echo "Certificate for $SERVER already exists"
exit 0
fi
if [ "$SERVER" != "" ]; then
make_server
fi
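
# Example (hypothetical paths and hostname): the first run creates the CA and
# a client certificate, then a server certificate is issued for the host:
#   mkdir -p /etc/zookeeper/ca
#   ./tools/zk-ca.sh /etc/zookeeper/ca zk01.example.com
# The combined cert+key for the server ends up in
# /etc/zookeeper/ca/keystores/zk01.example.com.pem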
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/zk-ca.sh
|
zk-ca.sh
|
#!/usr/bin/env python
# Copyright 2014-2015 Antoine "hashar" Musso
# Copyright 2014-2015 Wikimedia Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: disable=locally-disabled, invalid-name
"""
Zuul references cleaner.
Clear up references under /refs/zuul/ by inspecting the age of the commit the
reference points to. If the commit date is older than the number of days
specified by --until, the reference is deleted from the git repository.
Use --dry-run --verbose to finely inspect the script behavior.
"""
import argparse
import git
import logging
import time
import sys
NOW = int(time.time())
DEFAULT_DAYS = 360
ZUUL_REF_PREFIX = 'refs/zuul/'
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--until', dest='days_ago', default=DEFAULT_DAYS, type=int,
                    help='references older than this number of days will '
'be deleted. Default: %s' % DEFAULT_DAYS)
parser.add_argument('-n', '--dry-run', dest='dryrun', action='store_true',
help='do not delete references')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='set log level from info to debug')
parser.add_argument('gitrepo', help='path to a Zuul git repository')
args = parser.parse_args()
logging.basicConfig()
log = logging.getLogger('zuul-clear-refs')
if args.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
try:
repo = git.Repo(args.gitrepo)
except git.exc.InvalidGitRepositoryError:
log.error("Invalid git repo: %s" % args.gitrepo)
sys.exit(1)
for ref in repo.references:
if not ref.path.startswith(ZUUL_REF_PREFIX):
continue
if type(ref) is not git.refs.reference.Reference:
# Paranoia: ignore heads/tags/remotes ..
continue
try:
commit_ts = ref.commit.committed_date
except LookupError:
# GitPython does not properly handle PGP signed tags
log.exception("Error in commit: %s, ref: %s. Type: %s",
ref.commit, ref.path, type(ref))
continue
commit_age = int((NOW - commit_ts) / 86400) # days
log.debug(
"%s at %s is %3s days old",
ref.commit,
ref.path,
commit_age,
)
if commit_age > args.days_ago:
if args.dryrun:
log.info("Would delete old ref: %s (%s)", ref.path, ref.commit)
else:
log.info("Deleting old ref: %s (%s)", ref.path, ref.commit)
ref.delete(repo, ref.path)
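
# Example (hypothetical repository path): preview what would be deleted
# before removing anything:
#   python3 tools/zuul-clear-refs.py --until 90 --dry-run --verbose \
#       /var/lib/zuul/git/org/project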
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/zuul-clear-refs.py
|
zuul-clear-refs.py
|
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Inspect ZK contents like zk-shell; handles compressed and sharded
# data.
import argparse
import pathlib
import cmd
import sys
import textwrap
import zlib
import kazoo.client
from kazoo.exceptions import NoNodeError
def resolve_path(path, rest):
newpath = path / rest
newparts = []
for part in newpath.parts:
if part == '.':
continue
elif part == '..':
newparts.pop()
else:
newparts.append(part)
return pathlib.PurePosixPath(*newparts)
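
# For example, resolve_path(pathlib.PurePosixPath('/zuul/tenant'), '../system')
# returns PurePosixPath('/zuul/system'): each '..' pops the previous component.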
class REPL(cmd.Cmd):
def __init__(self, args):
self.path = pathlib.PurePosixPath('/')
super().__init__()
kwargs = {}
if args.cert:
kwargs['use_ssl'] = True
kwargs['keyfile'] = args.key
kwargs['certfile'] = args.cert
kwargs['ca'] = args.ca
self.client = kazoo.client.KazooClient(args.host, **kwargs)
self.client.start()
@property
def prompt(self):
return f'{self.path}> '
def do_EOF(self, path):
sys.exit(0)
def do_ls(self, path):
'List znodes: ls [PATH]'
if path:
mypath = self.path / path
else:
mypath = self.path
try:
for child in self.client.get_children(str(mypath)):
print(child)
except NoNodeError:
print(f'No such node: {mypath}')
def do_cd(self, path):
'Change the working path: cd PATH'
if path:
newpath = resolve_path(self.path, path)
if self.client.exists(str(newpath)):
self.path = newpath
else:
print(f'No such node: {newpath}')
    def do_pwd(self, args):
        'Print the working path'
        # cmd.Cmd passes the rest of the command line to every do_* handler,
        # so the extra parameter is required even though it is unused here.
        print(self.path)
def help_get(self):
print(textwrap.dedent(self.do_get.__doc__))
def do_get(self, args):
"""\
Get znode value: get PATH [-v]
-v: output metadata about the path
"""
args = args.split(' ')
path = args[0]
args = args[1:]
path = resolve_path(self.path, path)
try:
compressed_data, zstat = self.client.get(str(path))
except NoNodeError:
print(f'No such node: {path}')
return
was_compressed = False
try:
data = zlib.decompress(compressed_data)
was_compressed = True
except zlib.error:
data = compressed_data
if '-v' in args:
print(f'Compressed: {was_compressed}')
print(f'Size: {len(data)}')
print(f'Compressed size: {len(compressed_data)}')
print(f'Zstat: {zstat}')
print(data)
def help_unshard(self):
print(textwrap.dedent(self.do_unshard.__doc__))
def do_unshard(self, args):
"""\
        Get the unsharded value: unshard PATH [-v]
-v: output metadata about the path
"""
args = args.split(' ')
path = args[0]
args = args[1:]
path = resolve_path(self.path, path)
try:
shards = sorted(self.client.get_children(str(path)))
except NoNodeError:
print(f'No such node: {path}')
return
compressed_data = b''
data = b''
for shard in shards:
d, _ = self.client.get(str(path / shard))
compressed_data += d
if compressed_data:
data = zlib.decompress(compressed_data)
if '-v' in args:
print(f'Size: {len(data)}')
print(f'Compressed size: {len(compressed_data)}')
print(data)
def do_rm(self, args):
'Delete znode: rm PATH [-r]'
args = args.split(' ')
path = args[0]
args = args[1:]
path = resolve_path(self.path, path)
if '-r' in args:
recursive = True
else:
recursive = False
try:
self.client.delete(str(path), recursive=recursive)
except NoNodeError:
print(f'No such node: {path}')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('host', help='ZK host string')
parser.add_argument('--cert', help='Path to TLS certificate')
parser.add_argument('--key', help='Path to TLS key')
parser.add_argument('--ca', help='Path to TLS CA cert')
args = parser.parse_args()
repl = REPL(args)
repl.cmdloop()
if __name__ == '__main__':
main()
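
# Example session (hypothetical host and znode paths):
#   python3 tools/zk-shell.py localhost:2281 --cert client.pem \
#       --key client.key --ca ca.pem
#   /> cd zuul
#   /zuul> ls
#   /zuul> get some/node -v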
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/zk-shell.py
|
zk-shell.py
|
#!/bin/bash
# Copyright (c) 2016 NodeSource LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# The above license is inferred from the
# https://github.com/nodesource/distributions source repository.
# Discussion, issues and change requests at:
# https://github.com/nodesource/distributions
#
# Script to install the NodeSource Node.js 10.x repo onto an
# Enterprise Linux or Fedora Core based system.
#
# This was downloaded from https://rpm.nodesource.com/setup_10.x
# A few modifications have been made.
SCRSUFFIX="_10.x"
NODENAME="Node.js 10.x"
NODEREPO="pub_10.x"
NODEPKG="nodejs"
print_status() {
local outp=$(echo "$1") # | sed -r 's/\\n/\\n## /mg')
echo
echo -e "## ${outp}"
echo
}
if test -t 1; then # if terminal
ncolors=$(which tput > /dev/null && tput colors) # supports color
if test -n "$ncolors" && test $ncolors -ge 8; then
termcols=$(tput cols)
bold="$(tput bold)"
underline="$(tput smul)"
standout="$(tput smso)"
normal="$(tput sgr0)"
black="$(tput setaf 0)"
red="$(tput setaf 1)"
green="$(tput setaf 2)"
yellow="$(tput setaf 3)"
blue="$(tput setaf 4)"
magenta="$(tput setaf 5)"
cyan="$(tput setaf 6)"
white="$(tput setaf 7)"
fi
fi
print_bold() {
title="$1"
text="$2"
echo
echo "${red}================================================================================${normal}"
echo "${red}================================================================================${normal}"
echo
echo -e " ${bold}${yellow}${title}${normal}"
echo
echo -en " ${text}"
echo
echo "${red}================================================================================${normal}"
echo "${red}================================================================================${normal}"
}
bail() {
echo 'Error executing command, exiting'
exit 1
}
exec_cmd_nobail() {
echo "+ $1"
bash -c "$1"
}
exec_cmd() {
exec_cmd_nobail "$1" || bail
}
node_deprecation_warning() {
if [[ "X${NODENAME}" == "Xio.js 1.x" ||
"X${NODENAME}" == "Xio.js 2.x" ||
"X${NODENAME}" == "Xio.js 3.x" ||
"X${NODENAME}" == "XNode.js 0.10" ||
"X${NODENAME}" == "XNode.js 0.12" ||
"X${NODENAME}" == "XNode.js 4.x LTS Argon" ||
"X${NODENAME}" == "XNode.js 5.x" ||
"X${NODENAME}" == "XNode.js 7.x" ]]; then
print_bold \
" DEPRECATION WARNING " "\
${bold}${NODENAME} is no longer actively supported!${normal}
${bold}You will not receive security or critical stability updates${normal} for this version.
You should migrate to a supported version of Node.js as soon as possible.
Use the installation script that corresponds to the version of Node.js you
wish to install. e.g.
* ${green}https://deb.nodesource.com/setup_8.x — Node.js v8 LTS \"Carbon\"${normal} (recommended)
* ${green}https://deb.nodesource.com/setup_10.x — Node.js v10 Current${normal}
Please see ${bold}https://github.com/nodejs/Release${normal} for details about which
version may be appropriate for you.
The ${bold}NodeSource${normal} Node.js distributions repository contains
information both about supported versions of Node.js and supported Linux
distributions. To learn more about usage, see the repository:
${bold}https://github.com/nodesource/distributions${normal}
"
echo
echo "Continuing in 20 seconds ..."
echo
sleep 20
fi
}
script_deprecation_warning() {
if [ "X${SCRSUFFIX}" == "X" ]; then
print_bold \
" SCRIPT DEPRECATION WARNING " "\
This script, located at ${bold}https://rpm.nodesource.com/setup${normal}, used to
install Node.js v0.10, is deprecated and will eventually be made inactive.
You should use the script that corresponds to the version of Node.js you
wish to install. e.g.
* ${green}https://deb.nodesource.com/setup_8.x — Node.js v8 LTS \"Carbon\"${normal} (recommended)
* ${green}https://deb.nodesource.com/setup_10.x — Node.js v10 Current${normal}
Please see ${bold}https://github.com/nodejs/Release${normal} for details about which
version may be appropriate for you.
The ${bold}NodeSource${normal} Node.js Linux distributions GitHub repository contains
information about which versions of Node.js and which Linux distributions
are supported and how to use the install scripts.
${bold}https://github.com/nodesource/distributions${normal}
"
echo
echo "Continuing in 20 seconds (press Ctrl-C to abort) ..."
echo
sleep 20
fi
}
setup() {
script_deprecation_warning
node_deprecation_warning
print_status "Installing the NodeSource ${NODENAME} repo..."
print_status "Inspecting system..."
if [ ! -x /bin/rpm ]; then
print_status """You don't appear to be running an Enterprise Linux based system,
please contact NodeSource at https://github.com/nodesource/distributions/issues
if you think this is incorrect or would like your distribution to be considered
for support.
"""
exit 1
fi
## Annotated section for auto extraction in test.sh
#-check-distro-#
## Check distro and arch
echo "+ rpm -q --whatprovides redhat-release || rpm -q --whatprovides centos-release || rpm -q --whatprovides cloudlinux-release || rpm -q --whatprovides sl-release"
DISTRO_PKG=$(rpm -q --whatprovides redhat-release || rpm -q --whatprovides centos-release || rpm -q --whatprovides cloudlinux-release || rpm -q --whatprovides sl-release)
echo "+ uname -m"
UNAME_ARCH=$(uname -m)
if [ "X${UNAME_ARCH}" == "Xi686" ]; then
DIST_ARCH=i386
elif [ "X${UNAME_ARCH}" == "Xx86_64" ]; then
DIST_ARCH=x86_64
else
print_status "\
You don't appear to be running a supported machine architecture: ${UNAME_ARCH}. \
Please contact NodeSource at \
https://github.com/nodesource/distributions/issues if you think this is \
incorrect or would like your architecture to be considered for support. \
"
exit 1
fi
if [[ $DISTRO_PKG =~ ^(redhat|centos|cloudlinux|sl)- ]]; then
DIST_TYPE=el
elif [[ $DISTRO_PKG =~ ^(enterprise|system)-release- ]]; then # Oracle Linux & Amazon Linux
DIST_TYPE=el
elif [[ $DISTRO_PKG =~ ^(fedora|korora)- ]]; then
DIST_TYPE=fc
else
print_status "\
You don't appear to be running a supported version of Enterprise Linux. \
Please contact NodeSource at \
https://github.com/nodesource/distributions/issues if you think this is \
incorrect or would like your architecture to be considered for support. \
Include your 'distribution package' name: ${DISTRO_PKG}. \
"
exit 1
fi
if [[ $DISTRO_PKG =~ ^system-release ]]; then
# Amazon Linux, for 2014.* use el7, older versions are unknown, perhaps el6
DIST_VERSION=7
else
## Using the redhat-release-server-X, centos-release-X, etc. pattern
## extract the major version number of the distro
DIST_VERSION=$(echo $DISTRO_PKG | sed -r 's/^[[:alpha:]]+-release(-server|-workstation|-client)?-([0-9]+).*$/\2/')
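        ## e.g. DISTRO_PKG="centos-release-7-9.2009.1.el7.centos.x86_64"
        ## yields DIST_VERSION="7"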
if ! [[ $DIST_VERSION =~ ^[0-9][0-9]?$ ]]; then
print_status "\
Could not determine your distribution version, you may not be running a \
supported version of Enterprise Linux. \
Please contact NodeSource at \
https://github.com/nodesource/distributions/issues if you think this is \
incorrect. Include your 'distribution package' name: ${DISTRO_PKG}. \
"
exit 1
fi
fi
## Given the distro, version and arch, construct the url for
## the appropriate nodesource-release package (it's noarch but
## we include the arch in the directory tree anyway)
RELEASE_URL_VERSION_STRING="${DIST_TYPE}${DIST_VERSION}"
RELEASE_URL="\
https://rpm.nodesource.com/${NODEREPO}/\
${DIST_TYPE}/\
${DIST_VERSION}/\
${DIST_ARCH}/\
nodesource-release-${RELEASE_URL_VERSION_STRING}-1.noarch.rpm"
#-check-distro-#
print_status "Confirming \"${DIST_TYPE}${DIST_VERSION}-${DIST_ARCH}\" is supported..."
## Simple fetch & fast-fail to see if the nodesource-release
## file exists for this distro/version/arch
exec_cmd_nobail "curl -sLf -o /dev/null '${RELEASE_URL}'"
RC=$?
if [[ $RC != 0 ]]; then
print_status "\
Your distribution, identified as \"${DISTRO_PKG}\", \
is not currently supported, please contact NodeSource at \
https://github.com/nodesource/distributions/issues \
if you think this is incorrect or would like your distribution to be considered for support"
exit 1
fi
## EPEL is needed for EL5, we don't install it if it's missing but
## we can give guidance
if [ "$DIST_TYPE" == "el" ] && [ "$DIST_VERSION" == "5" ]; then
print_status "Checking if EPEL is enabled..."
echo "+ yum repolist enabled 2> /dev/null | grep epel"
repolist=$(yum repolist enabled 2> /dev/null | grep epel)
if [ "X${repolist}" == "X" ]; then
print_status "Finding current EPEL release RPM..."
## We can scrape the html to find the latest epel-release (likely 5.4)
epel_url="http://dl.fedoraproject.org/pub/epel/5/${DIST_ARCH}/"
epel_release_view="${epel_url}repoview/epel-release.html"
echo "+ curl -s $epel_release_view | grep -oE 'epel-release-[0-9\-]+\.noarch\.rpm'"
epel=$(curl -s $epel_release_view | grep -oE 'epel-release-[0-9\-]+\.noarch\.rpm')
if [ "X${epel}" = "X" ]; then
print_status "Error: Could not find current EPEL release RPM!"
exit 1
fi
print_status """The EPEL (Extra Packages for Enterprise Linux) repository is a
prerequisite for installing Node.js on your operating system. Please
add it and re-run this setup script.
The EPEL repository RPM is available at:
${epel_url}${epel}
You can try installing with: \`rpm -ivh <url>\`
"""
exit 1
fi
fi
print_status "Downloading release setup RPM..."
## Two-step process to install the nodesource-release RPM,
## Download to a tmp file then install it directly with `rpm`.
## We don't rely on RPM's ability to fetch from HTTPS directly
echo "+ mktemp"
RPM_TMP=$(mktemp || bail)
exec_cmd "curl -sL -o '${RPM_TMP}' '${RELEASE_URL}'"
print_status "Installing release setup RPM..."
## --nosignature because nodesource-release contains the signature!
exec_cmd "rpm -i --nosignature --force '${RPM_TMP}'"
print_status "Cleaning up..."
exec_cmd "rm -f '${RPM_TMP}'"
print_status "Checking for existing installations..."
## Nasty consequences if you have an existing Node or npm package
## installed, need to inform if they are there
echo "+ rpm -qa 'node|npm' | grep -v nodesource"
EXISTING_NODE=$(rpm -qa 'node|npm|iojs' | grep -v nodesource)
if [ "X${EXISTING_NODE}" != "X" ]; then
print_status """Your system appears to already have Node.js installed from an alternative source.
Run \`${bold}sudo yum remove -y ${NODEPKG} npm${normal}\` to remove these first.
"""
fi
print_status """Run \`${bold}sudo yum install -y ${NODEPKG}${normal}\` to install ${NODENAME} and npm.
## You may also need development tools to build native addons:
sudo yum install gcc-c++ make
## To install the Yarn package manager, run:
curl -sL https://dl.yarnpkg.com/rpm/yarn.repo | sudo tee /etc/yum.repos.d/yarn.repo
sudo yum install yarn
"""
exit 0
}
## Defer setup until we have the complete script
setup
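
# Example (this script's upstream source; run as root or via sudo):
#   curl -sL https://rpm.nodesource.com/setup_10.x | sudo -E bash -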
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/install-js-repos-rpm.sh
|
install-js-repos-rpm.sh
|
#!/bin/bash -xe
# This script will be run by OpenStack CI before unit tests are run,
# it sets up the test system as needed.
# Developers should setup their test systems in a similar way.
# This setup needs to be run as a user that can run sudo.
TOOLSDIR=$(dirname $0)
# Prepare a tmpfs for Zuul test root
if [[ -n "${ZUUL_TEST_ROOT:-}" ]]; then
sudo mkdir -p "$ZUUL_TEST_ROOT"
sudo mount -t tmpfs -o noatime,nodev,nosuid,size=64M none "$ZUUL_TEST_ROOT"
fi
# Be sure mysql is started.
sudo service mysql start
sudo service postgresql start
# The root password for the MySQL database; pass it in via
# MYSQL_ROOT_PW.
DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_worker}
# This user and its password are used by the tests, if you change it,
# your tests might fail.
DB_USER=openstack_citest
DB_PW=openstack_citest
sudo -H mysqladmin -u root password $DB_ROOT_PW
# It's best practice to remove anonymous users from the database. If
# an anonymous user exists, it matches first for connections and
# other connections from that host will not work.
sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
DELETE FROM mysql.user WHERE User='';
FLUSH PRIVILEGES;
CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW';
GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;"
# Now create our database.
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
SET default_storage_engine=MYISAM;
DROP DATABASE IF EXISTS openstack_citest;
CREATE DATABASE openstack_citest CHARACTER SET utf8;"
# setup postgres user and database
sudo -Hi -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN SUPERUSER PASSWORD '$DB_PW';"
sudo -Hi -u postgres psql -c "CREATE DATABASE openstack_citest OWNER $DB_USER TEMPLATE template0 ENCODING 'UTF8';"
LSBDISTCODENAME=$(lsb_release -cs)
if [ $LSBDISTCODENAME == 'xenial' ]; then
# TODO(pabelanger): Move this into bindep after we figure out how to enable our
# PPA.
# NOTE(pabelanger): Avoid hitting http://keyserver.ubuntu.com
sudo apt-key add $TOOLSDIR/018D05F5.gpg
echo "deb http://ppa.launchpad.net/openstack-ci-core/bubblewrap/ubuntu $LSBDISTCODENAME main" | \
sudo tee /etc/apt/sources.list.d/openstack-ci-core-ubuntu-bubblewrap-xenial.list
sudo apt-get update
sudo apt-get --assume-yes install bubblewrap
fi
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/test-setup.sh
|
test-setup.sh
|
# Copyright 2020 Red Hat Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import sys
import datetime
import requests
from pathlib import Path
def usage(argv):
two_weeks_ago = datetime.datetime.utcnow() - datetime.timedelta(days=14)
parser = argparse.ArgumentParser(
description="Look for unstrusted command in builds log")
parser.add_argument(
"--since", default=two_weeks_ago, help="Date in YYYY-MM-DD format")
parser.add_argument("zuul_url", help="The url of a zuul-web service")
args = parser.parse_args(argv)
args.zuul_url = args.zuul_url.rstrip("/")
if not args.zuul_url.endswith("/api"):
args.zuul_url += "/api"
if not isinstance(args.since, datetime.datetime):
args.since = datetime.datetime.strptime(args.since, "%Y-%m-%d")
return args
def get_tenants(zuul_url):
""" Fetch list of tenant names """
    is_whitelabel = requests.get(
        "%s/info" % zuul_url).json().get('tenant', None) is not None
    if is_whitelabel:
raise RuntimeError("Need multitenant api")
return [
tenant["name"]
for tenant in requests.get("%s/tenants" % zuul_url).json()
]
def is_build_in_range(build, since):
""" Check if a build is in range """
try:
build_date = datetime.datetime.strptime(
build["start_time"], "%Y-%m-%dT%H:%M:%S")
return build_date > since
except TypeError:
return False
def get_builds(zuul_builds_url, since):
""" Fecth list of builds that are in range """
builds = []
pos = 0
step = 50
while not builds or is_build_in_range(builds[-1], since):
url = "%s?skip=%d&limit=%d" % (zuul_builds_url, pos, step)
print("Querying %s" % url)
builds += requests.get(url).json()
pos += step
return builds
def filter_unique_builds(builds):
""" Filter the list of build to keep only one per job name """
jobs = dict()
for build in builds:
if build["job_name"] not in jobs:
jobs[build["job_name"]] = build
unique_builds = list(jobs.values())
print("Found %d unique job builds" % len(unique_builds))
return unique_builds
def download(source_url, local_filename):
    """ Download a file using a streaming request """
    # Pass only the URL and the stream flag to requests.get(); the local
    # filename is used below when writing out the response body.
    with requests.get(source_url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
def download_build_job_output(zuul_build_url, local_path):
""" Download the job-output.json of a build """
build = requests.get(zuul_build_url).json()
if not build.get("log_url"):
return "No log url"
try:
download(build["log_url"] + "job-output.json", local_path)
except Exception as e:
return str(e)
def examine(path):
""" Look for forbidden tasks in a job-output.json file path """
data = json.load(open(path))
to_fix = False
for playbook in data:
if playbook['trusted']:
continue
for play in playbook['plays']:
for task in play['tasks']:
for hostname, host in task['hosts'].items():
if hostname != 'localhost':
continue
if host['action'] in ['command', 'shell']:
print("Found disallowed task:")
print(" Playbook: %s" % playbook['playbook'])
print(" Role: %s" % task.get('role', {}).get('name'))
print(" Task: %s" % task.get('task', {}).get('name'))
to_fix = True
return to_fix
def main(argv):
args = usage(argv)
cache_dir = Path("/tmp/zuul-logs")
if not cache_dir.exists():
cache_dir.mkdir()
to_fix = set()
failed_to_examine = set()
for tenant in get_tenants(args.zuul_url):
zuul_tenant_url = args.zuul_url + "/tenant/" + tenant
print("Looking for unique build in %s" % zuul_tenant_url)
for build in filter_unique_builds(
get_builds(zuul_tenant_url + "/builds", args.since)):
if not build.get("uuid"):
# Probably a SKIPPED build, no need to examine
continue
local_path = cache_dir / (build["uuid"] + ".json")
build_url = zuul_tenant_url + "/build/" + build["uuid"]
if not local_path.exists():
err = download_build_job_output(build_url, str(local_path))
if err:
failed_to_examine.add((build_url, err))
continue
try:
if not examine(str(local_path)):
print("%s: ok" % build_url)
else:
to_fix.add(build_url)
except Exception as e:
failed_to_examine.add((build_url, str(e)))
if failed_to_examine:
print("The following builds could not be examined:")
for build_url, err in failed_to_examine:
print("%s: %s" % (build_url, err))
if not to_fix:
exit(1)
if to_fix:
print("The following builds are using localhost command:")
for build in to_fix:
print(build.replace("/api/", "/t/"))
exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
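
# Example (hypothetical zuul-web URL): scan builds since a given date; the
# job-output.json files are cached under /tmp/zuul-logs between runs:
#   python3 tools/find-untrusted-exec.py --since 2023-01-01 https://zuul.example.com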
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/find-untrusted-exec.py
|
find-untrusted-exec.py
|
#!/usr/bin/env python3
# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
def main():
parser = argparse.ArgumentParser(
description="Find where a project declares a queue")
parser.add_argument("url", help="Zuul URL")
parser.add_argument("tenant", help="Zuul tenant name")
parser.add_argument("--verbose", help="Display progress",
action='store_true')
args = parser.parse_args()
projects = requests.get(
f'{args.url}/api/tenant/{args.tenant}/projects',
).json()
pipeline_contexts = set()
for tenant_project in projects:
if args.verbose:
print(f"Checking {tenant_project['name']}")
project = requests.get(
f"{args.url}/api/tenant/{args.tenant}/project/"
f"{tenant_project['name']}",
).json()
for config in project['configs']:
for pipeline in config['pipelines']:
if pipeline['queue_name']:
pipeline_contexts.add(repr(config['source_context']))
if pipeline_contexts:
print("The following project-pipeline stanzas define a queue.")
print("This syntax is deprecated and queue definitions should")
print("be moved to the project level.")
print("See https://zuul-ci.org/docs/zuul/latest/"
"releasenotes.html#relnotes-4-1-0-deprecation-notes")
for c in pipeline_contexts:
print(c)
else:
print("Good, no project-pipeline queue definitions found.")
if __name__ == '__main__':
main()
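
# Example (hypothetical URL and tenant):
#   python3 tools/deprecated-queues.py https://zuul.example.com mytenant --verbose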
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/deprecated-queues.py
|
deprecated-queues.py
|
#!/usr/bin/env python3
# Copyright 2013 OpenStack Foundation
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('url', help='The URL of the running Zuul instance')
parser.add_argument('tenant', help='The Zuul tenant', nargs='?')
parser.add_argument('pipeline', help='The name of the Zuul pipeline',
nargs='?')
parser.add_argument('--use-config',
metavar='CONFIG',
help='The name of the zuul-client config to use')
options = parser.parse_args()
command = 'zuul-client'
if options.use_config:
command += f' --use-config {options.use_config}'
# Check if tenant is white label
info = json.loads(urlopen('%s/api/info' % options.url).read())
api_tenant = info.get('info', {}).get('tenant')
tenants = []
if api_tenant:
if api_tenant == options.tenant:
tenants.append(None)
else:
print("Error: %s doesn't match tenant %s (!= %s)" % (
options.url, options.tenant, api_tenant))
exit(1)
else:
tenants_url = '%s/api/tenants' % options.url
data = json.loads(urlopen(tenants_url).read())
for tenant in data:
tenants.append(tenant['name'])
for tenant in tenants:
if tenant is None:
status_url = '%s/api/status' % options.url
else:
status_url = '%s/api/tenant/%s/status' % (options.url, tenant)
data = json.loads(urlopen(status_url).read())
for pipeline in data['pipelines']:
if options.pipeline and pipeline['name'] != options.pipeline:
continue
for queue in pipeline.get('change_queues', []):
for head in queue['heads']:
for change in head:
if not change['live']:
continue
if change['id'] and ',' in change['id']:
# change triggered
cid, cps = change['id'].split(',')
print("%s enqueue"
" --tenant %s"
" --pipeline %s"
" --project %s"
" --change %s,%s" % (command, tenant,
pipeline['name'],
change['project_canonical'],
cid, cps))
else:
# ref triggered
cmd = '%s enqueue-ref' \
' --tenant %s' \
' --pipeline %s' \
' --project %s' \
' --ref %s' % (command, tenant,
pipeline['name'],
change['project_canonical'],
change['ref'])
if change['id']:
cmd += ' --newrev %s' % change['id']
print(cmd)
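
# Example (hypothetical deployment): capture the current gate queue as a
# script of zuul-client enqueue commands, then replay it after a restart:
#   python3 tools/zuul-changes.py https://zuul.example.com mytenant gate > enqueue.sh
#   sh enqueue.sh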
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/zuul-changes.py
|
zuul-changes.py
|
#!/usr/bin/env python3
import logging
from collections import UserDict
from zuul.driver.github.githubconnection import GithubConnection
from zuul.driver.github import GithubDriver
from zuul.model import Change
from zuul.zk.change_cache import ChangeKey
# This is a template with boilerplate code for debugging github issues
# TODO: for real use override the following variables
server = 'github.com'
api_token = 'xxxx'
appid = 2
appkey = '/opt/project/appkey'
org = 'example'
repo = 'sandbox'
pull_nr = 8
class DummyChangeCache(UserDict):
def updateChangeWithRetry(self, key, change, update_func, retry_count=5):
update_func(change)
self[key] = change
return change
def configure_logging(context):
stream_handler = logging.StreamHandler()
logger = logging.getLogger(context)
logger.addHandler(stream_handler)
logger.setLevel(logging.DEBUG)
# uncomment for more logging
# configure_logging('urllib3')
# configure_logging('github3')
# configure_logging('cachecontrol')
# This is all that's needed for getting a usable github connection
def create_connection(server, api_token):
driver = GithubDriver()
connection_config = {
'server': server,
'api_token': api_token,
}
conn = GithubConnection(driver, 'github', connection_config)
conn._github_client_manager.initialize()
conn._change_cache = DummyChangeCache()
return conn
def create_connection_app(server, appid, appkey):
driver = GithubDriver()
connection_config = {
'server': server,
'app_id': appid,
'app_key': appkey,
}
conn = GithubConnection(driver, 'github', connection_config)
conn._github_client_manager.initialize()
conn._change_cache = DummyChangeCache()
return conn
def get_change(connection: GithubConnection,
org: str,
repo: str,
pull: int) -> Change:
project_name = f"{org}/{repo}"
github = connection.getGithubClient(project_name)
pr = github.pull_request(org, repo, pull)
sha = pr.head.sha
change_key = ChangeKey('github', project_name, 'PullRequest', pull, sha)
    return connection._getChange(change_key, refresh=True)
# create github connection with api token
conn = create_connection(server, api_token)
# create github connection with app key
# conn = create_connection_app(server, appid, appkey)
# Now we can do anything we want with the connection, e.g. check canMerge for
# a pull request.
change = get_change(conn, org, repo, pull_nr)
print(conn.canMerge(change, {'cc/gate2'}))
# Or just use the github object.
# github = conn.getGithubClient()
#
# repository = github.repository(org, repo)
# print(repository.as_dict())
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/tools/github-debugging.py
|
github-debugging.py
|
const rewiredEsbuild = require("react-app-rewired-esbuild");
module.exports = function override(config, env) {
  // No additional config, we just want esbuild instead of babel.
return rewiredEsbuild()(config, env);
};
// Alternative: use `customize-cra` instead. Keep this commented out; a second
// module.exports assignment would silently override the function above.
// const { override } = require("customize-cra");
// module.exports = override(rewiredEsbuild());
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/config-overrides.js
|
config-overrides.js
|
// Copyright 2018 Red Hat, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
import Axios from 'axios'
let authToken = undefined
export function setAuthToken(token) {
authToken = token
}
function getHomepageUrl() {
//
// Discover serving location from href.
//
  // This is only needed for sub-directory serving. When the application is
  // served from 'scheme://domain/', this simply resolves to 'scheme://domain/'.
  //
  // Note that this is not enough for sub-directory serving: the static files
  // location also needs to be adapted via the 'homepage' setting of the
  // package.json file.
//
// This homepage url is used for the Router and Link resolution logic
//
let url = new URL(window.location.href)
if ('PUBLIC_URL' in process.env) {
url.pathname = process.env.PUBLIC_URL
} else {
url.pathname = ''
}
if (!url.pathname.endsWith('/')) {
url.pathname = url.pathname + '/'
}
return url.origin + url.pathname
}
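
// Example (hypothetical values): with PUBLIC_URL='/zuul' and a current href of
// 'https://ci.example.com/zuul/status', getHomepageUrl() returns
// 'https://ci.example.com/zuul/'.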
function getZuulUrl() {
// Return the zuul root api absolute url
const ZUUL_API = process.env.REACT_APP_ZUUL_API
let apiUrl
if (ZUUL_API) {
// Api url set at build time, use it
apiUrl = ZUUL_API
} else {
// Api url is relative to homepage path
apiUrl = getHomepageUrl() + 'api/'
}
if (!apiUrl.endsWith('/')) {
apiUrl = apiUrl + '/'
}
if (!apiUrl.endsWith('/api/')) {
apiUrl = apiUrl + 'api/'
}
// console.log('Api url is ', apiUrl)
return apiUrl
}
const apiUrl = getZuulUrl()
function getStreamUrl(apiPrefix) {
const streamUrl = (apiUrl + apiPrefix)
.replace(/(http)(s)?:\/\//, 'ws$2://') + 'console-stream'
// console.log('Stream url is ', streamUrl)
return streamUrl
}
function makeRequest(url, method, data) {
if (method === undefined) {
method = 'get'
}
// This performs a simple GET and tries to detect if CORS errors are
// due to proxy authentication errors.
const instance = Axios.create({
baseURL: apiUrl
})
if (authToken) {
instance.defaults.headers.common['Authorization'] = 'Bearer ' + authToken
}
const config = {method, url, data}
// First try the request as normal
let res = instance.request(config).catch(err => {
if (err.response === undefined) {
// This is either a Network, DNS, or CORS error, but we can't tell which.
// If we're behind an authz proxy, it's possible our creds have timed out
// and the CORS error is because we're getting a redirect.
// Apache mod_auth_mellon (and possibly other authz proxies) will avoid
// issuing a redirect if X-Requested-With is set to 'XMLHttpRequest' and
// will instead issue a 403. We can use this to detect that case.
instance.defaults.headers.common['X-Requested-With'] = 'XMLHttpRequest'
let res2 = instance.request(config).catch(err2 => {
if (err2.response && err2.response.status === 403) {
// We might be getting a redirect or something else,
// so reload the page.
console.log('Received 403 after unknown error; reloading')
window.location.reload()
}
// If we're still getting an error, we don't know the cause,
// it could be a transient network error, so we won't reload, we'll just
// wait for it to clear.
throw (err2)
})
return res2
}
throw (err)
})
return res
}
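
// Minimal usage sketch (hypothetical caller): makeRequest resolves with an
// Axios response object, so callers typically read its `.data` property:
//   makeRequest('tenants').then(res => console.log(res.data))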
// Direct APIs
function fetchInfo() {
return makeRequest('info')
}
function fetchComponents() {
return makeRequest('components')
}
function fetchTenantInfo(apiPrefix) {
return makeRequest(apiPrefix + 'info')
}
function fetchOpenApi() {
return Axios.get(getHomepageUrl() + 'openapi.yaml')
}
function fetchTenants() {
return makeRequest(apiUrl + 'tenants')
}
function fetchConfigErrors(apiPrefix) {
return makeRequest(apiPrefix + 'config-errors')
}
function fetchStatus(apiPrefix) {
return makeRequest(apiPrefix + 'status')
}
function fetchChangeStatus(apiPrefix, changeId) {
return makeRequest(apiPrefix + 'status/change/' + changeId)
}
function fetchFreezeJob(apiPrefix, pipelineName, projectName, branchName, jobName) {
return makeRequest(apiPrefix +
'pipeline/' + pipelineName +
'/project/' + projectName +
'/branch/' + branchName +
'/freeze-job/' + jobName)
}
function fetchBuild(apiPrefix, buildId) {
return makeRequest(apiPrefix + 'build/' + buildId)
}
function fetchBuilds(apiPrefix, queryString) {
let path = 'builds'
if (queryString) {
path += '?' + queryString.slice(1)
}
return makeRequest(apiPrefix + path)
}
function fetchBuildset(apiPrefix, buildsetId) {
return makeRequest(apiPrefix + 'buildset/' + buildsetId)
}
function fetchBuildsets(apiPrefix, queryString) {
let path = 'buildsets'
if (queryString) {
path += '?' + queryString.slice(1)
}
return makeRequest(apiPrefix + path)
}
function fetchPipelines(apiPrefix) {
return makeRequest(apiPrefix + 'pipelines')
}
function fetchProject(apiPrefix, projectName) {
return makeRequest(apiPrefix + 'project/' + projectName)
}
function fetchProjects(apiPrefix) {
return makeRequest(apiPrefix + 'projects')
}
function fetchJob(apiPrefix, jobName) {
return makeRequest(apiPrefix + 'job/' + jobName)
}
function fetchJobGraph(apiPrefix, projectName, pipelineName, branchName) {
return makeRequest(apiPrefix +
'pipeline/' + pipelineName +
'/project/' + projectName +
'/branch/' + branchName +
'/freeze-jobs')
}
function fetchJobs(apiPrefix) {
return makeRequest(apiPrefix + 'jobs')
}
function fetchLabels(apiPrefix) {
return makeRequest(apiPrefix + 'labels')
}
function fetchNodes(apiPrefix) {
return makeRequest(apiPrefix + 'nodes')
}
function fetchSemaphores(apiPrefix) {
return makeRequest(apiPrefix + 'semaphores')
}
function fetchAutoholds(apiPrefix) {
return makeRequest(apiPrefix + 'autohold')
}
function fetchAutohold(apiPrefix, requestId) {
return makeRequest(apiPrefix + 'autohold/' + requestId)
}
function fetchUserAuthorizations(apiPrefix) {
return makeRequest(apiPrefix + 'authorizations')
}
function dequeue(apiPrefix, projectName, pipeline, change) {
return makeRequest(
apiPrefix + 'project/' + projectName + '/dequeue',
'post',
{
pipeline: pipeline,
change: change,
}
)
}
function dequeue_ref(apiPrefix, projectName, pipeline, ref) {
return makeRequest(
apiPrefix + 'project/' + projectName + '/dequeue',
'post',
{
pipeline: pipeline,
ref: ref,
}
)
}
function enqueue(apiPrefix, projectName, pipeline, change) {
return makeRequest(
apiPrefix + 'project/' + projectName + '/enqueue',
'post',
{
pipeline: pipeline,
change: change,
}
)
}
function enqueue_ref(apiPrefix, projectName, pipeline, ref, oldrev, newrev) {
return makeRequest(
apiPrefix + 'project/' + projectName + '/enqueue',
'post',
{
pipeline: pipeline,
ref: ref,
oldrev: oldrev,
newrev: newrev,
}
)
}
function autohold(apiPrefix, projectName, job, change, ref,
reason, count, node_hold_expiration) {
return makeRequest(
apiPrefix + 'project/' + projectName + '/autohold',
'post',
{
change: change,
job: job,
ref: ref,
reason: reason,
count: count,
node_hold_expiration: node_hold_expiration,
}
)
}
function autohold_delete(apiPrefix, requestId) {
return makeRequest(
apiPrefix + '/autohold/' + requestId,
'delete'
)
}
function promote(apiPrefix, pipeline, changes) {
return makeRequest(
apiPrefix + '/promote',
'post',
{
pipeline: pipeline,
changes: changes,
}
)
}
export {
apiUrl,
getHomepageUrl,
getStreamUrl,
fetchChangeStatus,
fetchConfigErrors,
fetchStatus,
fetchBuild,
fetchBuilds,
fetchBuildset,
fetchBuildsets,
fetchFreezeJob,
fetchPipelines,
fetchProject,
fetchProjects,
fetchJob,
fetchJobGraph,
fetchJobs,
fetchLabels,
fetchNodes,
fetchOpenApi,
fetchSemaphores,
fetchTenants,
fetchInfo,
fetchComponents,
fetchTenantInfo,
fetchUserAuthorizations,
fetchAutoholds,
fetchAutohold,
autohold,
autohold_delete,
dequeue,
dequeue_ref,
enqueue,
enqueue_ref,
promote,
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/api.js
|
api.js
|
import { createStore } from 'redux'
import rootReducer from './reducers'
import initialState from './reducers/initialState'
import * as buildActions from './actions/build'
it('should fetch a build', () => {
const store = createStore(rootReducer, initialState)
const build = {
uuid: '1234',
job_name: 'run-tox',
}
const action = buildActions.receiveBuild(build.uuid, build)
store.dispatch(action)
const fetchedBuild = store.getState().build.builds[build.uuid]
expect(fetchedBuild).toEqual(build)
})
it('should fetch an output', () => {
const store = createStore(rootReducer, initialState)
const build = {
uuid: '1234',
job_name: 'run-tox',
}
const output = [
{
branch: 'master',
index: '0',
phase: 'pre',
playbook: 'opendev.org/opendev/base-jobs/playbooks/base/pre.yaml',
plays: [
{
play: {
duration: {
end: '2020-09-24T23:24:02.272988Z',
start: '2020-09-24T23:23:52.900231Z',
},
id: 'bc764e04-8d26-889a-2270-000000000006',
name: 'localhost',
},
tasks: [
{
hosts: {
localhost: {
action: 'include_role',
changed: false,
include_args: {
name: 'set-zuul-log-path-fact',
},
},
},
role: {
id: 'bc764e04-8d26-889a-2270-000000000009',
name: 'emit-job-header',
path:
'/var/lib/zuul/builds/79dea00ae4dd4943a09a8bb701488bb5/trusted/project_1/opendev.org/zuul/zuul-jobs/roles/emit-job-header',
},
task: {
duration: {
end: '2020-09-24T23:23:55.818592Z',
start: '2020-09-24T23:23:55.724571Z',
},
id: 'bc764e04-8d26-889a-2270-00000000000c',
name: 'Setup log path fact',
},
},
],
},
],
stats: {
localhost: {
changed: 2,
failures: 0,
ignored: 0,
ok: 6,
rescued: 0,
skipped: 5,
unreachable: 0,
},
'ubuntu-bionic': {
changed: 22,
failures: 0,
ignored: 0,
ok: 47,
rescued: 0,
skipped: 7,
unreachable: 0,
},
},
trusted: true,
},
]
// Fetch the output
store.dispatch(buildActions.receiveBuildOutput(build.uuid, output))
const newState = store.getState()
expect(Object.keys(newState.build.outputs).length).toEqual(1)
expect(Object.keys(newState.build.hosts).length).toEqual(1)
expect(Object.keys(newState.build.errorIds).length).toEqual(1)
const expectedHosts = {
localhost: {
changed: 2,
failures: 0,
ignored: 0,
ok: 6,
rescued: 0,
skipped: 5,
unreachable: 0,
failed: [],
},
'ubuntu-bionic': {
changed: 22,
failures: 0,
ignored: 0,
ok: 47,
rescued: 0,
skipped: 7,
unreachable: 0,
failed: [],
},
}
const fetchedOutput = newState.build.outputs[build.uuid]
const fetchedHosts = newState.build.hosts[build.uuid]
const fetchedErrorIds = newState.build.errorIds[build.uuid]
expect(fetchedOutput).toEqual(output)
expect(fetchedHosts).toEqual(expectedHosts)
expect(fetchedErrorIds).toEqual(new Set())
})
it('should fetch a manifest file', () => {
const store = createStore(rootReducer, initialState)
const build = {
uuid: '1234',
job_name: 'run-tox',
}
const manifest = {
tree: [
{
name: 'zuul-info',
mimetype: 'application/directory',
encoding: null,
children: [
{
name: 'host-info.ubuntu-bionic.yaml',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600989879,
size: 12895,
},
{
name: 'inventory.yaml',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600989840,
size: 3734,
},
{
name: 'zuul-info.ubuntu-bionic.txt',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600989881,
size: 2584,
},
],
},
{
name: 'job-output.json',
mimetype: 'application/json',
encoding: null,
last_modified: 1600990084,
size: 612933,
},
{
name: 'job-output.txt',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600990088,
size: 84764,
},
],
}
// Fetch the manifest
store.dispatch(buildActions.receiveBuildManifest(build.uuid, manifest))
const newState = store.getState()
expect(Object.keys(newState.build.manifests).length).toEqual(1)
const expectedManifestIndex = {
'/zuul-info/host-info.ubuntu-bionic.yaml': {
name: 'host-info.ubuntu-bionic.yaml',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600989879,
size: 12895,
},
'/zuul-info/inventory.yaml': {
name: 'inventory.yaml',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600989840,
size: 3734,
},
'/zuul-info/zuul-info.ubuntu-bionic.txt': {
name: 'zuul-info.ubuntu-bionic.txt',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600989881,
size: 2584,
},
'/job-output.json': {
name: 'job-output.json',
mimetype: 'application/json',
encoding: null,
last_modified: 1600990084,
size: 612933,
},
'/job-output.txt': {
name: 'job-output.txt',
mimetype: 'text/plain',
encoding: null,
last_modified: 1600990088,
size: 84764,
},
}
const fetchedManifest = newState.build.manifests[build.uuid]
expect(fetchedManifest).toEqual({
index: expectedManifestIndex,
tree: manifest.tree,
})
})
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/store.test.js
|
store.test.js
|
// Copyright 2018 Red Hat, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Use CommonJS require so we can choose the store implementation dynamically
// at build time.
if (process.env.NODE_ENV === 'production') {
module.exports = require('./store.prod')
} else {
module.exports = require('./store.dev')
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/store.js
|
store.js
|
// In production, we register a service worker to serve assets from local cache.
// This lets the app load faster on subsequent visits in production, and gives
// it offline capabilities. However, it also means that developers (and users)
// will only see deployed updates on the "N+1" visit to a page, since previously
// cached resources are updated in the background.
// To learn more about the benefits of this model, read
// https://github.com/facebook/create-react-app/blob/master/packages/react-scripts/template/README.md#making-a-progressive-web-app
// This link also includes instructions on opting out of this behavior.
const isLocalhost = Boolean(
window.location.hostname === 'localhost' ||
// [::1] is the IPv6 localhost address.
window.location.hostname === '[::1]' ||
// 127.0.0.1/8 is considered localhost for IPv4.
window.location.hostname.match(
/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
)
)
export default function register () {
if (process.env.REACT_APP_ENABLE_SERVICE_WORKER !== 'true') {
console.log('Disabled service worker')
unregister()
return
}
if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
// The URL constructor is available in all browsers that support SW.
const publicUrl = new URL(process.env.PUBLIC_URL, window.location)
if (publicUrl.origin !== window.location.origin) {
// Our service worker won't work if PUBLIC_URL is on a different origin
// from what our page is served on. This might happen if a CDN is used to
// serve assets; see https://github.com/facebookincubator/create-react-app/issues/2374
return
}
window.addEventListener('load', () => {
const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`
if (isLocalhost) {
        // This is running on localhost. Let's check if a service worker still exists.
checkValidServiceWorker(swUrl)
// Add some additional logging to localhost, pointing developers to the
// service worker/PWA documentation.
navigator.serviceWorker.ready.then(() => {
console.log(
'This web app is being served cache-first by a service ' +
'worker. To learn more, visit https://goo.gl/SC7cgQ'
)
})
} else {
        // Not localhost. Just register the service worker.
registerValidSW(swUrl)
}
})
}
}
function registerValidSW (swUrl) {
navigator.serviceWorker
.register(swUrl)
.then(registration => {
registration.onupdatefound = () => {
const installingWorker = registration.installing
installingWorker.onstatechange = () => {
if (installingWorker.state === 'installed') {
if (navigator.serviceWorker.controller) {
// At this point, the old content will have been purged and
// the fresh content will have been added to the cache.
// It's the perfect time to display a "New content is
// available; please refresh." message in your web app.
console.log('New content is available; please refresh.')
} else {
// At this point, everything has been precached.
// It's the perfect time to display a
// "Content is cached for offline use." message.
console.log('Content is cached for offline use.')
}
}
}
}
})
.catch(error => {
console.error('Error during service worker registration:', error)
})
}
function checkValidServiceWorker (swUrl) {
  // Check if the service worker can be found. If it can't, reload the page.
fetch(swUrl)
.then(response => {
// Ensure service worker exists, and that we really are getting a JS file.
if (
response.status === 404 ||
response.headers.get('content-type').indexOf('javascript') === -1
) {
// No service worker found. Probably a different app. Reload the page.
navigator.serviceWorker.ready.then(registration => {
registration.unregister().then(() => {
window.location.reload()
})
})
} else {
// Service worker found. Proceed as normal.
registerValidSW(swUrl)
}
})
.catch(() => {
console.log(
'No internet connection found. App is running in offline mode.'
)
})
}
export function unregister () {
if ('serviceWorker' in navigator) {
navigator.serviceWorker.ready.then(registration => {
registration.unregister()
})
}
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/registerServiceWorker.js
|
registerServiceWorker.js
|
// Copyright 2018 Red Hat, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
import ComponentsPage from './pages/Components'
import FreezeJobPage from './pages/FreezeJob'
import StatusPage from './pages/Status'
import ChangeStatusPage from './pages/ChangeStatus'
import ProjectPage from './pages/Project'
import ProjectsPage from './pages/Projects'
import JobPage from './pages/Job'
import JobsPage from './pages/Jobs'
import LabelsPage from './pages/Labels'
import NodesPage from './pages/Nodes'
import SemaphorePage from './pages/Semaphore'
import SemaphoresPage from './pages/Semaphores'
import AutoholdsPage from './pages/Autoholds'
import AutoholdPage from './pages/Autohold'
import BuildPage from './pages/Build'
import BuildsPage from './pages/Builds'
import BuildsetPage from './pages/Buildset'
import BuildsetsPage from './pages/Buildsets'
import ConfigErrorsPage from './pages/ConfigErrors'
import TenantsPage from './pages/Tenants'
import StreamPage from './pages/Stream'
import OpenApiPage from './pages/OpenApi'
// The Route objects are created in the App component.
// Objects with a title are added to the menu.
// Objects with globalRoute set are not tenant scoped.
// Remember to update the api getHomepageUrl subDir list for routes with params.
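// For example, a tenant-scoped entry like '/status' is rendered under the
// tenant prefix (e.g. /t/<tenant>/status), while routes marked globalRoute or
// noTenantPrefix (such as '/tenants' and '/openapi') are served from the root.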
const routes = () => [
{
title: 'Status',
to: '/status',
component: StatusPage
},
{
title: 'Projects',
to: '/projects',
component: ProjectsPage
},
{
title: 'Jobs',
to: '/jobs',
component: JobsPage
},
{
title: 'Labels',
to: '/labels',
component: LabelsPage
},
{
title: 'Nodes',
to: '/nodes',
component: NodesPage
},
{
title: 'Autoholds',
to: '/autoholds',
component: AutoholdsPage
},
{
title: 'Semaphores',
to: '/semaphores',
component: SemaphoresPage
},
{
title: 'Builds',
to: '/builds',
component: BuildsPage
},
{
title: 'Buildsets',
to: '/buildsets',
component: BuildsetsPage
},
{
to: '/freeze-job',
component: FreezeJobPage
},
{
to: '/status/change/:changeId',
component: ChangeStatusPage
},
{
to: '/stream/:buildId',
component: StreamPage
},
{
to: '/project/:projectName*',
component: ProjectPage
},
{
to: '/job/:jobName',
component: JobPage
},
{
to: '/build/:buildId',
component: BuildPage,
props: { 'activeTab': 'results' },
},
{
to: '/build/:buildId/artifacts',
component: BuildPage,
props: { 'activeTab': 'artifacts' },
},
{
to: '/build/:buildId/logs',
component: BuildPage,
props: { 'activeTab': 'logs' },
},
{
to: '/build/:buildId/console',
component: BuildPage,
props: { 'activeTab': 'console' },
},
{
to: '/build/:buildId/log/:file*',
component: BuildPage,
props: { 'activeTab': 'logs', 'logfile': true },
},
{
to: '/buildset/:buildsetId',
component: BuildsetPage
},
{
to: '/autohold/:requestId',
component: AutoholdPage
},
{
to: '/semaphore/:semaphoreName',
component: SemaphorePage
},
{
to: '/config-errors',
component: ConfigErrorsPage,
},
{
to: '/tenants',
component: TenantsPage,
globalRoute: true
},
{
to: '/openapi',
component: OpenApiPage,
noTenantPrefix: true,
},
{
to: '/components',
component: ComponentsPage,
noTenantPrefix: true,
},
// auth_callback is handled in App.jsx
]
export { routes }
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/routes.js
|
routes.js
|
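The routes() table above is plain data; the React Router wiring that consumes it lives in App.jsx, which is not part of this dump. Below is a minimal sketch of how such a table could be consumed, assuming react-router-dom v5 — the renderRoutes helper and the tenant handling are illustrative, not zuul's actual App code:

// Hypothetical consumer of the routes() table (sketch only, assuming
// react-router-dom v5; this is not zuul's actual App.jsx logic).
import React from 'react'
import { Switch, Route } from 'react-router-dom'
import { routes } from './routes'

function renderRoutes(tenantName) {
  return (
    <Switch>
      {routes().map(({ to, component: Component, props = {}, globalRoute, noTenantPrefix }) => (
        <Route
          key={to}
          // Tenant-scoped paths get the /t/<tenant> prefix; global
          // routes (tenants list, openapi, components) do not.
          path={globalRoute || noTenantPrefix ? to : '/t/' + tenantName + to}
          render={routerProps => <Component {...routerProps} {...props} />}
        />
      ))}
    </Switch>
  )
}

export default renderRoutes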
// Copyright 2018 Red Hat, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// The index is the main entry point of the project. The App is wrapped with
// a Provider to share the redux store and a Router to manage the location.
import React from 'react'
import ReactDOM from 'react-dom'
import { BrowserRouter as Router } from 'react-router-dom'
import { Provider } from 'react-redux'
import { BroadcastChannel, createLeaderElection } from 'broadcast-channel'
import 'patternfly/dist/css/patternfly.min.css'
import 'patternfly/dist/css/patternfly-additions.min.css'
// NOTE (felix): The Patternfly 4 CSS file must be imported before the App
// component. Otherwise, the CSS rules are imported in the wrong order and some
// wildcard expressions could break the layout:
// https://forum.patternfly.org/t/wildcard-selector-more-specific-after-upgrade-to-patternfly-4-react-version-3-75-2/261
// Usually it should be imported at the uppermost position, but as we don't want
// PF3 to overrule PF4, we import PF4 styles after PF3.
import '@patternfly/react-core/dist/styles/base.css'
import '@patternfly/react-styles/css/utilities/Sizing/sizing.css'
import '@patternfly/react-styles/css/utilities/Spacing/spacing.css'
// To prevent PF4 from breaking existing PF3 components via some wildcard CSS
// rules, we include our own migration CSS file that restores the relevant
// parts of those rules.
// TODO (felix): Remove this import after the PF4 migration
import './pf4-migration.css'
import { getHomepageUrl } from './api'
import registerServiceWorker from './registerServiceWorker'
import { fetchInfoIfNeeded } from './actions/info'
import configureStore from './store'
import App from './App'
// Importing our custom css file after the App allows us to also overwrite the
// style attributes of PF4 components (as their CSS is loaded when the
// components are imported within the App).
import './index.css'
import ZuulAuthProvider from './ZuulAuthProvider'
import SilentCallback from './pages/SilentCallback'
// Uncomment the next 3 lines to enable debug-level logging from
// oidc-client.
// import { Log } from 'oidc-client'
// Log.logger = console
// Log.level = Log.DEBUG
// Don't render the entire application to handle a silent
// authentication callback.
if ((window.location.origin + window.location.pathname) ===
(getHomepageUrl() + 'silent_callback')) {
ReactDOM.render(
<SilentCallback/>,
document.getElementById('root'))
} else {
const store = configureStore()
// Load info endpoint
store.dispatch(fetchInfoIfNeeded())
// Create a broadcast channel for sending auth (or other)
// information between tabs.
const channel = new BroadcastChannel('zuul')
// Create an election so that only one tab will renew auth tokens. We run the
// election perpetually and just check whether we are the leader when it's time
// to renew tokens.
const auth_election = createLeaderElection(channel)
// A promise that never resolves; once leadership is acquired it is
// held for the lifetime of the tab.
const waitForever = new Promise(function () {})
auth_election.awaitLeadership().then(() => {
  waitForever.then(function() {})
})
ReactDOM.render(
<Provider store={store}>
<ZuulAuthProvider channel={channel} election={auth_election}>
<Router basename={new URL(getHomepageUrl()).pathname}><App /></Router>
</ZuulAuthProvider>
</Provider>, document.getElementById('root'))
registerServiceWorker()
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/index.js
|
index.js
|
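index.js holds leadership with a never-resolving promise so that exactly one browser tab takes care of renewing auth tokens. Below is a small standalone sketch of the same broadcast-channel pattern; the channel name and the renewToken callback are made up for illustration:

// Sketch of the cross-tab leader-election pattern used in index.js
// (broadcast-channel package; renewToken is a hypothetical callback).
import { BroadcastChannel, createLeaderElection } from 'broadcast-channel'

const channel = new BroadcastChannel('example-app')
const election = createLeaderElection(channel)

// Resolves once this tab becomes the leader; leadership is then held
// until the tab closes or the channel is closed.
election.awaitLeadership().then(() => {
  console.log('this tab is now the leader')
})

function maybeRenewToken(renewToken) {
  // Only the current leader performs the renewal; other tabs no-op.
  if (election.isLeader) {
    renewToken()
  }
}

export { channel, election, maybeRenewToken }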
// Copyright 2020 BMW Group
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
import { applyMiddleware, compose, createStore } from 'redux'
import appReducer from './reducers'
import reduxImmutableStateInvariant from 'redux-immutable-state-invariant'
import thunk from 'redux-thunk'
export default function configureStore(initialState) {
// Add support for Redux devtools
const composeEnhancers =
window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose
return createStore(
appReducer,
initialState,
// Warn us if we accidentally mutate state directly in the Redux store
// (only during development).
composeEnhancers(
applyMiddleware(
thunk,
// TODO (felix): Re-enable the status.status path once we know how to
// solve the weird state mutations that are done somewhere deep within
// the logic of the status page (or its child components).
reduxImmutableStateInvariant({
ignore: [
'status.status',
]
})
)
)
)
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/store.dev.js
|
store.dev.js
|
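The redux-immutable-state-invariant middleware wired up above throws in development whenever state is mutated in place instead of being copied. Below is a contrived sketch of the kind of bug it catches; the reducer and action types are made up, not part of zuul:

// Contrived example of the in-place mutation that
// redux-immutable-state-invariant detects (hypothetical reducer).
function itemsReducer(state = { items: [] }, action) {
  switch (action.type) {
    case 'ADD_ITEM_BROKEN':
      // BUG: push mutates the existing array; the invariant middleware
      // would throw here during development.
      state.items.push(action.item)
      return state
    case 'ADD_ITEM':
      // Correct: return a fresh object with a fresh array.
      return { ...state, items: [...state.items, action.item] }
    default:
      return state
  }
}

export default itemsReducer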
import { getHomepageUrl } from './api'
it('should return the homepage url', () => {
const homepage = 'https://my-zuul.com/'
Object.defineProperty(window, 'location', {
value: new URL(homepage)
} )
// Test some of the known possible URLs to verify
// that the origin is returned.
const urls = [
// auth_callback test as some providers build
// different callback urls
'https://my-zuul.com/auth_callback',
'https://my-zuul.com/auth_callback#state=12345',
// Regular browser navigation urls
'https://my-zuul.com/status',
'https://my-zuul.com/t/zuul-tenant/status',
'https://my-zuul.com/t/zuul-tenant/jobs',
// API urls
'https://my-zuul.com/api/tenant/zuul-tenant/status',
'https://my-zuul.com/api/tenant/zuul-tenant/authorization',
]
for (let url of urls) {
window.location.href = url
expect(getHomepageUrl()).toEqual(homepage)
}
})
it('should return the subdir homepage url', () => {
const homepage = 'https://example.com/zuul/'
Object.defineProperty(window, 'location', {
value: new URL(homepage)
} )
// The build process strips trailing slashes from PUBLIC_URL,
// so make sure we don't include any in our tests
Object.defineProperty(process.env, 'PUBLIC_URL', {
value: '/zuul'
} )
// Test some of the known possible URLs to verify
// that the origin is returned.
const urls = [
// auth_callback test as some providers build
// different callback urls
'https://example.com/zuul/auth_callback',
'https://example.com/zuul/auth_callback#state=12345',
// Regular browser navigation urls
'https://example.com/zuul/status',
'https://example.com/zuul/t/zuul-tenant/status',
'https://example.com/zuul/t/zuul-tenant/jobs',
]
for (let url of urls) {
window.location.href = url
expect(getHomepageUrl()).toEqual(homepage)
}
})
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/api.test.js
|
api.test.js
|
// Copyright 2020 BMW Group
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
import { applyMiddleware, createStore } from 'redux'
import appReducer from './reducers'
import thunk from 'redux-thunk'
export default function configureStore(initialState) {
return createStore(appReducer, initialState, applyMiddleware(thunk))
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/store.prod.js
|
store.prod.js
|
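store.dev.js and store.prod.js differ only in the development-time middleware and devtools support. How the build picks between them is not shown in this dump; a common pattern for this split (hypothetical here, keyed off NODE_ENV) looks like:

// Hypothetical store.js selecting the store module at build time;
// zuul's actual selection mechanism is not shown in this dump.
let configureStore
if (process.env.NODE_ENV === 'production') {
  configureStore = require('./store.prod').default
} else {
  configureStore = require('./store.dev').default
}

export default configureStore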
// Copyright 2020 Red Hat, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
import {
USER_LOGGED_IN,
USER_LOGGED_OUT,
} from '../actions/user'
import {
USER_ACL_REQUEST,
USER_ACL_SUCCESS,
USER_ACL_FAIL,
} from '../actions/auth'
export default (state = {
isFetching: false,
data: null,
scope: [],
isAdmin: false,
// undefined tenant means we haven't loaded anything yet; null means
// outside of tenant context.
tenant: undefined,
redirect: null,
}, action) => {
switch (action.type) {
case USER_LOGGED_IN: {
return {
isFetching: false,
data: action.user,
token: action.token,
redirect: action.redirect,
scope: [],
isAdmin: false,
tenant: undefined,
}
}
case USER_LOGGED_OUT:
return {
isFetching: false,
data: null,
token: null,
redirect: null,
scope: [],
isAdmin: false,
tenant: undefined,
}
case USER_ACL_REQUEST:
return {
...state,
tenant: action.tenant,
isFetching: true
}
case USER_ACL_FAIL:
return {
...state,
isFetching: false,
scope: [],
isAdmin: false
}
case USER_ACL_SUCCESS:
return {
...state,
isFetching: false,
scope: action.scope,
isAdmin: action.isAdmin
}
default:
return state
}
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/reducers/user.js
|
user.js
|
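The user reducer's tenant field distinguishes "nothing loaded yet" (undefined) from "outside any tenant context" (null). Below is a hypothetical selector sketch showing how calling code could act on that distinction; getUserACLState and the state.user mount point are assumptions, not zuul code:

// Hypothetical selector over the user reducer state (assumes the
// reducer is mounted at state.user; not part of zuul).
export function getUserACLState(state, tenantName) {
  const user = state.user
  if (user.isFetching) {
    return 'loading'
  }
  // undefined means no ACL request has happened yet; a mismatched
  // tenant means the cached scope belongs to another tenant.
  if (user.tenant === undefined || user.tenant !== tenantName) {
    return 'unloaded'
  }
  return user.isAdmin ? 'admin' : 'ready'
}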
// Copyright 2018 Red Hat, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
import {
PROJECTS_FETCH_FAIL,
PROJECTS_FETCH_REQUEST,
PROJECTS_FETCH_SUCCESS
} from '../actions/projects'
export default (state = {
isFetching: false,
projects: {},
}, action) => {
switch (action.type) {
case PROJECTS_FETCH_REQUEST:
return {
isFetching: true,
projects: state.projects,
}
case PROJECTS_FETCH_SUCCESS:
return {
isFetching: false,
projects: { ...state.projects, [action.tenant]: action.projects },
}
case PROJECTS_FETCH_FAIL:
return {
isFetching: false,
projects: state.projects,
}
default:
return state
}
}
|
zuul
|
/zuul-9.1.0.tar.gz/zuul-9.1.0/web/src/reducers/projects.js
|
projects.js
|